author    Paul Mundt <lethal@linux-sh.org>  2011-01-13 01:06:28 -0500
committer Paul Mundt <lethal@linux-sh.org>  2011-01-13 01:06:28 -0500
commit    f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree      b29722f6e965316e90ac97abf79923ced250dc21 /arch/sh/kernel
parent    f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent    4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
    arch/sh/kernel/cpu/sh2/setup-sh7619.c
    arch/sh/kernel/cpu/sh2a/setup-mxg.c
    arch/sh/kernel/cpu/sh2a/setup-sh7201.c
    arch/sh/kernel/cpu/sh2a/setup-sh7203.c
    arch/sh/kernel/cpu/sh2a/setup-sh7206.c
    arch/sh/kernel/cpu/sh3/setup-sh7705.c
    arch/sh/kernel/cpu/sh3/setup-sh770x.c
    arch/sh/kernel/cpu/sh3/setup-sh7710.c
    arch/sh/kernel/cpu/sh3/setup-sh7720.c
    arch/sh/kernel/cpu/sh4/setup-sh4-202.c
    arch/sh/kernel/cpu/sh4/setup-sh7750.c
    arch/sh/kernel/cpu/sh4/setup-sh7760.c
    arch/sh/kernel/cpu/sh4a/setup-sh7343.c
    arch/sh/kernel/cpu/sh4a/setup-sh7366.c
    arch/sh/kernel/cpu/sh4a/setup-sh7722.c
    arch/sh/kernel/cpu/sh4a/setup-sh7723.c
    arch/sh/kernel/cpu/sh4a/setup-sh7724.c
    arch/sh/kernel/cpu/sh4a/setup-sh7763.c
    arch/sh/kernel/cpu/sh4a/setup-sh7770.c
    arch/sh/kernel/cpu/sh4a/setup-sh7780.c
    arch/sh/kernel/cpu/sh4a/setup-sh7785.c
    arch/sh/kernel/cpu/sh4a/setup-sh7786.c
    arch/sh/kernel/cpu/sh4a/setup-shx3.c
    arch/sh/kernel/cpu/sh5/setup-sh5.c
    drivers/serial/sh-sci.c
    drivers/serial/sh-sci.h
    include/linux/serial_sci.h
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile | 54
-rw-r--r--  arch/sh/kernel/Makefile_32 | 37
-rw-r--r--  arch/sh/kernel/Makefile_64 | 19
-rw-r--r--  arch/sh/kernel/asm-offsets.c | 24
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 7
-rw-r--r--  arch/sh/kernel/cpu/adc.c | 12
-rw-r--r--  arch/sh/kernel/cpu/clock-cpg.c | 218
-rw-r--r--  arch/sh/kernel/cpu/clock.c | 630
-rw-r--r--  arch/sh/kernel/cpu/fpu.c | 85
-rw-r--r--  arch/sh/kernel/cpu/hwblk.c | 159
-rw-r--r--  arch/sh/kernel/cpu/init.c | 179
-rw-r--r--  arch/sh/kernel/cpu/irq/imask.c | 16
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 63
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c | 37
-rw-r--r--  arch/sh/kernel/cpu/proc.c | 148
-rw-r--r--  arch/sh/kernel/cpu/sh2/clock-sh7619.c | 26
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 3
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 91
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 26
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 25
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7206.c | 26
-rw-r--r--  arch/sh/kernel/cpu/sh2a/entry.S | 3
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 110
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-mxg.c | 38
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 224
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 119
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 122
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh3.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7705.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7706.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7709.c | 19
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7710.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 134
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 6
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh3.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 70
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 103
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 73
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c | 83
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 161
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c | 253
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 39
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 40
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 36
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 118
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c | 25
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 22
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 237
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7366.c | 216
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 207
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 304
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 362
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 162
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 22
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7785.c | 182
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 314
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-shx3.c | 228
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 106
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c | 117
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c | 121
-rw-r--r--  arch/sh/kernel/cpu/sh4a/intc-shx3.c | 34
-rw-r--r--  arch/sh/kernel/cpu/sh4a/perf_event.c | 287
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c | 21
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c | 2287
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-shx3.c | 587
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 138
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 58
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 357
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 280
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 813
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 623
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 133
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 314
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 231
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 329
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 466
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 196
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c | 116
-rw-r--r--  arch/sh/kernel/cpu/sh4a/ubc.c | 133
-rw-r--r--  arch/sh/kernel/cpu/sh5/clock-sh5.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 8
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c | 67
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh5/setup-sh5.c | 37
-rw-r--r--  arch/sh/kernel/cpu/shmobile/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c | 119
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c | 134
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c | 308
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S | 406
-rw-r--r--  arch/sh/kernel/cpu/ubc.S | 59
-rw-r--r--  arch/sh/kernel/cpufreq.c | 14
-rw-r--r--  arch/sh/kernel/crash_dump.c | 20
-rw-r--r--  arch/sh/kernel/debugtraps.S | 1
-rw-r--r--  arch/sh/kernel/dma-nommu.c | 82
-rw-r--r--  arch/sh/kernel/dumpstack.c | 123
-rw-r--r--  arch/sh/kernel/dwarf.c | 1220
-rw-r--r--  arch/sh/kernel/early_printk.c | 241
-rw-r--r--  arch/sh/kernel/entry-common.S | 111
-rw-r--r--  arch/sh/kernel/ftrace.c | 270
-rw-r--r--  arch/sh/kernel/gpio.c | 584
-rw-r--r--  arch/sh/kernel/head_32.S | 226
-rw-r--r--  arch/sh/kernel/head_64.S | 2
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c | 420
-rw-r--r--  arch/sh/kernel/idle.c | 96
-rw-r--r--  arch/sh/kernel/init_task.c | 5
-rw-r--r--  arch/sh/kernel/io.c | 111
-rw-r--r--  arch/sh/kernel/io_generic.c | 189
-rw-r--r--  arch/sh/kernel/io_trapped.c | 32
-rw-r--r--  arch/sh/kernel/iomap.c | 165
-rw-r--r--  arch/sh/kernel/ioport.c | 43
-rw-r--r--  arch/sh/kernel/irq.c | 138
-rw-r--r--  arch/sh/kernel/irq_32.c | 57
-rw-r--r--  arch/sh/kernel/irq_64.c | 51
-rw-r--r--  arch/sh/kernel/kdebugfs.c | 16
-rw-r--r--  arch/sh/kernel/kgdb.c | 64
-rw-r--r--  arch/sh/kernel/kprobes.c | 101
-rw-r--r--  arch/sh/kernel/localtimer.c | 15
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 84
-rw-r--r--  arch/sh/kernel/machvec.c | 13
-rw-r--r--  arch/sh/kernel/module.c | 9
-rw-r--r--  arch/sh/kernel/nmi_debug.c | 77
-rw-r--r--  arch/sh/kernel/perf_callchain.c | 53
-rw-r--r--  arch/sh/kernel/perf_event.c | 395
-rw-r--r--  arch/sh/kernel/process.c | 101
-rw-r--r--  arch/sh/kernel/process_32.c | 193
-rw-r--r--  arch/sh/kernel/process_64.c | 73
-rw-r--r--  arch/sh/kernel/ptrace.c | 33
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 183
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 153
-rw-r--r--  arch/sh/kernel/reboot.c | 102
-rw-r--r--  arch/sh/kernel/return_address.c | 59
-rw-r--r--  arch/sh/kernel/setup.c | 428
-rw-r--r--  arch/sh/kernel/sh_bios.c | 129
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 92
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 19
-rw-r--r--  arch/sh/kernel/signal_32.c | 57
-rw-r--r--  arch/sh/kernel/signal_64.c | 61
-rw-r--r--  arch/sh/kernel/smp.c | 176
-rw-r--r--  arch/sh/kernel/stacktrace.c | 98
-rw-r--r--  arch/sh/kernel/sys_sh.c | 168
-rw-r--r--  arch/sh/kernel/sys_sh32.c | 5
-rw-r--r--  arch/sh/kernel/sys_sh64.c | 5
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 27
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 9
-rw-r--r--  arch/sh/kernel/time.c | 35
-rw-r--r--  arch/sh/kernel/topology.c | 32
-rw-r--r--  arch/sh/kernel/traps.c | 53
-rw-r--r--  arch/sh/kernel/traps_32.c | 164
-rw-r--r--  arch/sh/kernel/traps_64.c | 61
-rw-r--r--  arch/sh/kernel/unwinder.c | 164
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 140
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile | 2
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S | 2
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c | 1
163 files changed, 16536 insertions(+), 6293 deletions(-)
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 349d833deab5..77f7ae1d4647 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -1,5 +1,51 @@
1ifeq ($(CONFIG_SUPERH32),y) 1#
2include ${srctree}/arch/sh/kernel/Makefile_32 2# Makefile for the Linux/SuperH kernel.
3else 3#
4include ${srctree}/arch/sh/kernel/Makefile_64 4
5extra-y := head_$(BITS).o init_task.o vmlinux.lds
6
7ifdef CONFIG_FUNCTION_TRACER
8# Do not profile debug and lowlevel utilities
9CFLAGS_REMOVE_ftrace.o = -pg
5endif 10endif
11
12CFLAGS_REMOVE_return_address.o = -pg
13
14obj-y := debugtraps.o dma-nommu.o dumpstack.o \
15 idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
16 machvec.o nmi_debug.o process.o \
17 process_$(BITS).o ptrace.o ptrace_$(BITS).o \
18 reboot.o return_address.o \
19 setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
20 syscalls_$(BITS).o time.o topology.o traps.o \
21 traps_$(BITS).o unwinder.o
22
23ifndef CONFIG_GENERIC_IOMAP
24obj-y += iomap.o
25obj-$(CONFIG_HAS_IOPORT) += ioport.o
26endif
27
28obj-y += cpu/
29obj-$(CONFIG_VSYSCALL) += vsyscall/
30obj-$(CONFIG_SMP) += smp.o
31obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
32obj-$(CONFIG_KGDB) += kgdb.o
33obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
34obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
35obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
36obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
37obj-$(CONFIG_STACKTRACE) += stacktrace.o
38obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
39obj-$(CONFIG_KPROBES) += kprobes.o
40obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
41obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
42obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
43obj-$(CONFIG_DUMP_CODE) += disassemble.o
44obj-$(CONFIG_HIBERNATION) += swsusp.o
45obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
46obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
47
48obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
49obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
50
51ccflags-y := -Werror
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
deleted file mode 100644
index 9411e3e31e68..000000000000
--- a/arch/sh/kernel/Makefile_32
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the Linux/SuperH kernel.
3#
4
5extra-y := head_32.o init_task.o vmlinux.lds
6
7ifdef CONFIG_FUNCTION_TRACER
8# Do not profile debug and lowlevel utilities
9CFLAGS_REMOVE_ftrace.o = -pg
10endif
11
12obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \
13 machvec.o process_32.o ptrace_32.o setup.o signal_32.o \
14 sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \
15 traps.o traps_32.o
16
17obj-y += cpu/
18obj-$(CONFIG_VSYSCALL) += vsyscall/
19obj-$(CONFIG_SMP) += smp.o
20obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
21obj-$(CONFIG_KGDB) += kgdb.o
22obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
23obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
24obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
25obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
26obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
27obj-$(CONFIG_STACKTRACE) += stacktrace.o
28obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
29obj-$(CONFIG_KPROBES) += kprobes.o
30obj-$(CONFIG_GENERIC_GPIO) += gpio.o
31obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
32obj-$(CONFIG_DUMP_CODE) += disassemble.o
33obj-$(CONFIG_HIBERNATION) += swsusp.o
34
35obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
36
37EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
deleted file mode 100644
index 67b9f6c6326b..000000000000
--- a/arch/sh/kernel/Makefile_64
+++ /dev/null
@@ -1,19 +0,0 @@
1extra-y := head_64.o init_task.o vmlinux.lds
2
3obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \
4 ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
5 syscalls_64.o time.o topology.o traps.o traps_64.o
6
7obj-y += cpu/
8obj-$(CONFIG_SMP) += smp.o
9obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
10obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o
11obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
12obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
13obj-$(CONFIG_STACKTRACE) += stacktrace.o
14obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
15obj-$(CONFIG_GENERIC_GPIO) += gpio.o
16
17obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
18
19EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 99aceb28ee24..08a2be775b6c 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -26,6 +26,7 @@ int main(void)
26 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 26 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
27 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); 27 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
28 DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); 28 DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
29 DEFINE(TI_SIZE, sizeof(struct thread_info));
29 30
30#ifdef CONFIG_HIBERNATION 31#ifdef CONFIG_HIBERNATION
31 DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); 32 DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
@@ -33,5 +34,28 @@ int main(void)
33 DEFINE(PBE_NEXT, offsetof(struct pbe, next)); 34 DEFINE(PBE_NEXT, offsetof(struct pbe, next));
34 DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs)); 35 DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
35#endif 36#endif
37
38 DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
39 DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
40 DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
41 DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
42 DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
43 DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
44 DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
45 DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
46 DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
47 DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
48 DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
49 DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
50 DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
51 DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
52 DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
53 DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
54 DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
55 DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
56 DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
57 DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
58 DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
59 DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
36 return 0; 60 return 0;
37} 61}
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index eecad7cbd61e..d49c2135fd48 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -15,8 +15,9 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
15 15
16# Common interfaces. 16# Common interfaces.
17 17
18obj-$(CONFIG_UBC_WAKEUP) += ubc.o
19obj-$(CONFIG_SH_ADC) += adc.o 18obj-$(CONFIG_SH_ADC) += adc.o
20obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o 19obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
20obj-$(CONFIG_SH_FPU) += fpu.o
21obj-$(CONFIG_SH_FPU_EMU) += fpu.o
21 22
22obj-y += irq/ init.o clock.o 23obj-y += irq/ init.o clock.o hwblk.o proc.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
index da3d6877f93d..d307571d54b6 100644
--- a/arch/sh/kernel/cpu/adc.c
+++ b/arch/sh/kernel/cpu/adc.c
@@ -18,19 +18,19 @@ int adc_single(unsigned int channel)
18 18
19 off = (channel & 0x03) << 2; 19 off = (channel & 0x03) << 2;
20 20
21 csr = ctrl_inb(ADCSR); 21 csr = __raw_readb(ADCSR);
22 csr = channel | ADCSR_ADST | ADCSR_CKS; 22 csr = channel | ADCSR_ADST | ADCSR_CKS;
23 ctrl_outb(csr, ADCSR); 23 __raw_writeb(csr, ADCSR);
24 24
25 do { 25 do {
26 csr = ctrl_inb(ADCSR); 26 csr = __raw_readb(ADCSR);
27 } while ((csr & ADCSR_ADF) == 0); 27 } while ((csr & ADCSR_ADF) == 0);
28 28
29 csr &= ~(ADCSR_ADF | ADCSR_ADST); 29 csr &= ~(ADCSR_ADF | ADCSR_ADST);
30 ctrl_outb(csr, ADCSR); 30 __raw_writeb(csr, ADCSR);
31 31
32 return (((ctrl_inb(ADDRAH + off) << 8) | 32 return (((__raw_readb(ADDRAH + off) << 8) |
33 ctrl_inb(ADDRAL + off)) >> 6); 33 __raw_readb(ADDRAL + off)) >> 6);
34} 34}
35 35
36EXPORT_SYMBOL(adc_single); 36EXPORT_SYMBOL(adc_single);
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index 6dfe2cced3fc..dd0e0f211359 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -2,223 +2,25 @@
2#include <linux/compiler.h> 2#include <linux/compiler.h>
3#include <linux/slab.h> 3#include <linux/slab.h>
4#include <linux/io.h> 4#include <linux/io.h>
5#include <linux/clkdev.h>
5#include <asm/clock.h> 6#include <asm/clock.h>
6 7
7static int sh_clk_mstp32_enable(struct clk *clk)
8{
9 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
10 clk->enable_reg);
11 return 0;
12}
13
14static void sh_clk_mstp32_disable(struct clk *clk)
15{
16 __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
17 clk->enable_reg);
18}
19
20static struct clk_ops sh_clk_mstp32_clk_ops = {
21 .enable = sh_clk_mstp32_enable,
22 .disable = sh_clk_mstp32_disable,
23 .recalc = followparent_recalc,
24};
25
26int __init sh_clk_mstp32_register(struct clk *clks, int nr)
27{
28 struct clk *clkp;
29 int ret = 0;
30 int k;
31
32 for (k = 0; !ret && (k < nr); k++) {
33 clkp = clks + k;
34 clkp->ops = &sh_clk_mstp32_clk_ops;
35 ret |= clk_register(clkp);
36 }
37
38 return ret;
39}
40
41static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
42{
43 return clk_rate_table_round(clk, clk->freq_table, rate);
44}
45
46static int sh_clk_div6_divisors[64] = {
47 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
48 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
49 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
50 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
51};
52
53static struct clk_div_mult_table sh_clk_div6_table = {
54 .divisors = sh_clk_div6_divisors,
55 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
56};
57
58static unsigned long sh_clk_div6_recalc(struct clk *clk)
59{
60 struct clk_div_mult_table *table = &sh_clk_div6_table;
61 unsigned int idx;
62
63 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
64 table, NULL);
65
66 idx = __raw_readl(clk->enable_reg) & 0x003f;
67
68 return clk->freq_table[idx].frequency;
69}
70
71static int sh_clk_div6_set_rate(struct clk *clk,
72 unsigned long rate, int algo_id)
73{
74 unsigned long value;
75 int idx;
76
77 idx = clk_rate_table_find(clk, clk->freq_table, rate);
78 if (idx < 0)
79 return idx;
80
81 value = __raw_readl(clk->enable_reg);
82 value &= ~0x3f;
83 value |= idx;
84 __raw_writel(value, clk->enable_reg);
85 return 0;
86}
87
88static int sh_clk_div6_enable(struct clk *clk)
89{
90 unsigned long value;
91 int ret;
92
93 ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
94 if (ret == 0) {
95 value = __raw_readl(clk->enable_reg);
96 value &= ~0x100; /* clear stop bit to enable clock */
97 __raw_writel(value, clk->enable_reg);
98 }
99 return ret;
100}
101
102static void sh_clk_div6_disable(struct clk *clk)
103{
104 unsigned long value;
105
106 value = __raw_readl(clk->enable_reg);
107 value |= 0x100; /* stop clock */
108 value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
109 __raw_writel(value, clk->enable_reg);
110}
111
112static struct clk_ops sh_clk_div6_clk_ops = {
113 .recalc = sh_clk_div6_recalc,
114 .round_rate = sh_clk_div_round_rate,
115 .set_rate = sh_clk_div6_set_rate,
116 .enable = sh_clk_div6_enable,
117 .disable = sh_clk_div6_disable,
118};
119
120int __init sh_clk_div6_register(struct clk *clks, int nr)
121{
122 struct clk *clkp;
123 void *freq_table;
124 int nr_divs = sh_clk_div6_table.nr_divisors;
125 int freq_table_size = sizeof(struct cpufreq_frequency_table);
126 int ret = 0;
127 int k;
128
129 freq_table_size *= (nr_divs + 1);
130 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
131 if (!freq_table) {
132 pr_err("sh_clk_div6_register: unable to alloc memory\n");
133 return -ENOMEM;
134 }
135
136 for (k = 0; !ret && (k < nr); k++) {
137 clkp = clks + k;
138
139 clkp->ops = &sh_clk_div6_clk_ops;
140 clkp->id = -1;
141 clkp->freq_table = freq_table + (k * freq_table_size);
142 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
143
144 ret = clk_register(clkp);
145 }
146
147 return ret;
148}
149
150static unsigned long sh_clk_div4_recalc(struct clk *clk)
151{
152 struct clk_div_mult_table *table = clk->priv;
153 unsigned int idx;
154
155 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
156 table, &clk->arch_flags);
157
158 idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
159
160 return clk->freq_table[idx].frequency;
161}
162
163static struct clk_ops sh_clk_div4_clk_ops = {
164 .recalc = sh_clk_div4_recalc,
165 .round_rate = sh_clk_div_round_rate,
166};
167
168int __init sh_clk_div4_register(struct clk *clks, int nr,
169 struct clk_div_mult_table *table)
170{
171 struct clk *clkp;
172 void *freq_table;
173 int nr_divs = table->nr_divisors;
174 int freq_table_size = sizeof(struct cpufreq_frequency_table);
175 int ret = 0;
176 int k;
177
178 freq_table_size *= (nr_divs + 1);
179 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
180 if (!freq_table) {
181 pr_err("sh_clk_div4_register: unable to alloc memory\n");
182 return -ENOMEM;
183 }
184
185 for (k = 0; !ret && (k < nr); k++) {
186 clkp = clks + k;
187
188 clkp->ops = &sh_clk_div4_clk_ops;
189 clkp->id = -1;
190 clkp->priv = table;
191
192 clkp->freq_table = freq_table + (k * freq_table_size);
193 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
194
195 ret = clk_register(clkp);
196 }
197
198 return ret;
199}
200
201#ifdef CONFIG_SH_CLK_CPG_LEGACY
202static struct clk master_clk = { 8static struct clk master_clk = {
203 .name = "master_clk",
204 .flags = CLK_ENABLE_ON_INIT, 9 .flags = CLK_ENABLE_ON_INIT,
205 .rate = CONFIG_SH_PCLK_FREQ, 10 .rate = CONFIG_SH_PCLK_FREQ,
206}; 11};
207 12
208static struct clk peripheral_clk = { 13static struct clk peripheral_clk = {
209 .name = "peripheral_clk",
210 .parent = &master_clk, 14 .parent = &master_clk,
211 .flags = CLK_ENABLE_ON_INIT, 15 .flags = CLK_ENABLE_ON_INIT,
212}; 16};
213 17
214static struct clk bus_clk = { 18static struct clk bus_clk = {
215 .name = "bus_clk",
216 .parent = &master_clk, 19 .parent = &master_clk,
217 .flags = CLK_ENABLE_ON_INIT, 20 .flags = CLK_ENABLE_ON_INIT,
218}; 21};
219 22
220static struct clk cpu_clk = { 23static struct clk cpu_clk = {
221 .name = "cpu_clk",
222 .parent = &master_clk, 24 .parent = &master_clk,
223 .flags = CLK_ENABLE_ON_INIT, 25 .flags = CLK_ENABLE_ON_INIT,
224}; 26};
@@ -233,6 +35,16 @@ static struct clk *onchip_clocks[] = {
233 &cpu_clk, 35 &cpu_clk,
234}; 36};
235 37
38#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
39
40static struct clk_lookup lookups[] = {
41 /* main clocks */
42 CLKDEV_CON_ID("master_clk", &master_clk),
43 CLKDEV_CON_ID("peripheral_clk", &peripheral_clk),
44 CLKDEV_CON_ID("bus_clk", &bus_clk),
45 CLKDEV_CON_ID("cpu_clk", &cpu_clk),
46};
47
236int __init __deprecated cpg_clk_init(void) 48int __init __deprecated cpg_clk_init(void)
237{ 49{
238 int i, ret = 0; 50 int i, ret = 0;
@@ -244,6 +56,13 @@ int __init __deprecated cpg_clk_init(void)
244 ret |= clk_register(clk); 56 ret |= clk_register(clk);
245 } 57 }
246 58
59 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
60
61 clk_add_alias("tmu_fck", NULL, "peripheral_clk", NULL);
62 clk_add_alias("mtu2_fck", NULL, "peripheral_clk", NULL);
63 clk_add_alias("cmt_fck", NULL, "peripheral_clk", NULL);
64 clk_add_alias("sci_ick", NULL, "peripheral_clk", NULL);
65
247 return ret; 66 return ret;
248} 67}
249 68
@@ -255,4 +74,3 @@ int __init __weak arch_clk_init(void)
255{ 74{
256 return cpg_clk_init(); 75 return cpg_clk_init();
257} 76}
258#endif /* CONFIG_SH_CPG_CLK_LEGACY */
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index f3a46be2ae81..4187cf4fe185 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -10,558 +10,16 @@
10 * 10 *
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> 11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12 * 12 *
13 * With clkdev bits:
14 *
15 * Copyright (C) 2008 Russell King.
16 *
17 * This file is subject to the terms and conditions of the GNU General Public 13 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive 14 * License. See the file "COPYING" in the main directory of this archive
19 * for more details. 15 * for more details.
20 */ 16 */
21#include <linux/kernel.h> 17#include <linux/kernel.h>
22#include <linux/init.h> 18#include <linux/init.h>
23#include <linux/module.h> 19#include <linux/clk.h>
24#include <linux/mutex.h>
25#include <linux/list.h>
26#include <linux/kobject.h>
27#include <linux/sysdev.h>
28#include <linux/seq_file.h>
29#include <linux/err.h>
30#include <linux/platform_device.h>
31#include <linux/debugfs.h>
32#include <linux/cpufreq.h>
33#include <asm/clock.h> 20#include <asm/clock.h>
34#include <asm/machvec.h> 21#include <asm/machvec.h>
35 22
36static LIST_HEAD(clock_list);
37static DEFINE_SPINLOCK(clock_lock);
38static DEFINE_MUTEX(clock_list_sem);
39
40void clk_rate_table_build(struct clk *clk,
41 struct cpufreq_frequency_table *freq_table,
42 int nr_freqs,
43 struct clk_div_mult_table *src_table,
44 unsigned long *bitmap)
45{
46 unsigned long mult, div;
47 unsigned long freq;
48 int i;
49
50 for (i = 0; i < nr_freqs; i++) {
51 div = 1;
52 mult = 1;
53
54 if (src_table->divisors && i < src_table->nr_divisors)
55 div = src_table->divisors[i];
56
57 if (src_table->multipliers && i < src_table->nr_multipliers)
58 mult = src_table->multipliers[i];
59
60 if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
61 freq = CPUFREQ_ENTRY_INVALID;
62 else
63 freq = clk->parent->rate * mult / div;
64
65 freq_table[i].index = i;
66 freq_table[i].frequency = freq;
67 }
68
69 /* Termination entry */
70 freq_table[i].index = i;
71 freq_table[i].frequency = CPUFREQ_TABLE_END;
72}
73
74long clk_rate_table_round(struct clk *clk,
75 struct cpufreq_frequency_table *freq_table,
76 unsigned long rate)
77{
78 unsigned long rate_error, rate_error_prev = ~0UL;
79 unsigned long rate_best_fit = rate;
80 unsigned long highest, lowest;
81 int i;
82
83 highest = lowest = 0;
84
85 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
86 unsigned long freq = freq_table[i].frequency;
87
88 if (freq == CPUFREQ_ENTRY_INVALID)
89 continue;
90
91 if (freq > highest)
92 highest = freq;
93 if (freq < lowest)
94 lowest = freq;
95
96 rate_error = abs(freq - rate);
97 if (rate_error < rate_error_prev) {
98 rate_best_fit = freq;
99 rate_error_prev = rate_error;
100 }
101
102 if (rate_error == 0)
103 break;
104 }
105
106 if (rate >= highest)
107 rate_best_fit = highest;
108 if (rate <= lowest)
109 rate_best_fit = lowest;
110
111 return rate_best_fit;
112}
113
114int clk_rate_table_find(struct clk *clk,
115 struct cpufreq_frequency_table *freq_table,
116 unsigned long rate)
117{
118 int i;
119
120 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
121 unsigned long freq = freq_table[i].frequency;
122
123 if (freq == CPUFREQ_ENTRY_INVALID)
124 continue;
125
126 if (freq == rate)
127 return i;
128 }
129
130 return -ENOENT;
131}
132
133/* Used for clocks that always have same value as the parent clock */
134unsigned long followparent_recalc(struct clk *clk)
135{
136 return clk->parent ? clk->parent->rate : 0;
137}
138
139int clk_reparent(struct clk *child, struct clk *parent)
140{
141 list_del_init(&child->sibling);
142 if (parent)
143 list_add(&child->sibling, &parent->children);
144 child->parent = parent;
145
146 /* now do the debugfs renaming to reattach the child
147 to the proper parent */
148
149 return 0;
150}
151
152/* Propagate rate to children */
153void propagate_rate(struct clk *tclk)
154{
155 struct clk *clkp;
156
157 list_for_each_entry(clkp, &tclk->children, sibling) {
158 if (clkp->ops && clkp->ops->recalc)
159 clkp->rate = clkp->ops->recalc(clkp);
160
161 propagate_rate(clkp);
162 }
163}
164
165static void __clk_disable(struct clk *clk)
166{
167 if (clk->usecount == 0) {
168 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
169 clk->name);
170 WARN_ON(1);
171 return;
172 }
173
174 if (!(--clk->usecount)) {
175 if (likely(clk->ops && clk->ops->disable))
176 clk->ops->disable(clk);
177 if (likely(clk->parent))
178 __clk_disable(clk->parent);
179 }
180}
181
182void clk_disable(struct clk *clk)
183{
184 unsigned long flags;
185
186 if (!clk)
187 return;
188
189 spin_lock_irqsave(&clock_lock, flags);
190 __clk_disable(clk);
191 spin_unlock_irqrestore(&clock_lock, flags);
192}
193EXPORT_SYMBOL_GPL(clk_disable);
194
195static int __clk_enable(struct clk *clk)
196{
197 int ret = 0;
198
199 if (clk->usecount++ == 0) {
200 if (clk->parent) {
201 ret = __clk_enable(clk->parent);
202 if (unlikely(ret))
203 goto err;
204 }
205
206 if (clk->ops && clk->ops->enable) {
207 ret = clk->ops->enable(clk);
208 if (ret) {
209 if (clk->parent)
210 __clk_disable(clk->parent);
211 goto err;
212 }
213 }
214 }
215
216 return ret;
217err:
218 clk->usecount--;
219 return ret;
220}
221
222int clk_enable(struct clk *clk)
223{
224 unsigned long flags;
225 int ret;
226
227 if (!clk)
228 return -EINVAL;
229
230 spin_lock_irqsave(&clock_lock, flags);
231 ret = __clk_enable(clk);
232 spin_unlock_irqrestore(&clock_lock, flags);
233
234 return ret;
235}
236EXPORT_SYMBOL_GPL(clk_enable);
237
238static LIST_HEAD(root_clks);
239
240/**
241 * recalculate_root_clocks - recalculate and propagate all root clocks
242 *
243 * Recalculates all root clocks (clocks with no parent), which if the
244 * clock's .recalc is set correctly, should also propagate their rates.
245 * Called at init.
246 */
247void recalculate_root_clocks(void)
248{
249 struct clk *clkp;
250
251 list_for_each_entry(clkp, &root_clks, sibling) {
252 if (clkp->ops && clkp->ops->recalc)
253 clkp->rate = clkp->ops->recalc(clkp);
254 propagate_rate(clkp);
255 }
256}
257
258int clk_register(struct clk *clk)
259{
260 if (clk == NULL || IS_ERR(clk))
261 return -EINVAL;
262
263 /*
264 * trap out already registered clocks
265 */
266 if (clk->node.next || clk->node.prev)
267 return 0;
268
269 mutex_lock(&clock_list_sem);
270
271 INIT_LIST_HEAD(&clk->children);
272 clk->usecount = 0;
273
274 if (clk->parent)
275 list_add(&clk->sibling, &clk->parent->children);
276 else
277 list_add(&clk->sibling, &root_clks);
278
279 list_add(&clk->node, &clock_list);
280 if (clk->ops && clk->ops->init)
281 clk->ops->init(clk);
282 mutex_unlock(&clock_list_sem);
283
284 return 0;
285}
286EXPORT_SYMBOL_GPL(clk_register);
287
288void clk_unregister(struct clk *clk)
289{
290 mutex_lock(&clock_list_sem);
291 list_del(&clk->sibling);
292 list_del(&clk->node);
293 mutex_unlock(&clock_list_sem);
294}
295EXPORT_SYMBOL_GPL(clk_unregister);
296
297static void clk_enable_init_clocks(void)
298{
299 struct clk *clkp;
300
301 list_for_each_entry(clkp, &clock_list, node)
302 if (clkp->flags & CLK_ENABLE_ON_INIT)
303 clk_enable(clkp);
304}
305
306unsigned long clk_get_rate(struct clk *clk)
307{
308 return clk->rate;
309}
310EXPORT_SYMBOL_GPL(clk_get_rate);
311
312int clk_set_rate(struct clk *clk, unsigned long rate)
313{
314 return clk_set_rate_ex(clk, rate, 0);
315}
316EXPORT_SYMBOL_GPL(clk_set_rate);
317
318int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
319{
320 int ret = -EOPNOTSUPP;
321 unsigned long flags;
322
323 spin_lock_irqsave(&clock_lock, flags);
324
325 if (likely(clk->ops && clk->ops->set_rate)) {
326 ret = clk->ops->set_rate(clk, rate, algo_id);
327 if (ret != 0)
328 goto out_unlock;
329 } else {
330 clk->rate = rate;
331 ret = 0;
332 }
333
334 if (clk->ops && clk->ops->recalc)
335 clk->rate = clk->ops->recalc(clk);
336
337 propagate_rate(clk);
338
339out_unlock:
340 spin_unlock_irqrestore(&clock_lock, flags);
341
342 return ret;
343}
344EXPORT_SYMBOL_GPL(clk_set_rate_ex);
345
346int clk_set_parent(struct clk *clk, struct clk *parent)
347{
348 unsigned long flags;
349 int ret = -EINVAL;
350
351 if (!parent || !clk)
352 return ret;
353 if (clk->parent == parent)
354 return 0;
355
356 spin_lock_irqsave(&clock_lock, flags);
357 if (clk->usecount == 0) {
358 if (clk->ops->set_parent)
359 ret = clk->ops->set_parent(clk, parent);
360 else
361 ret = clk_reparent(clk, parent);
362
363 if (ret == 0) {
364 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
365 clk->name, clk->parent->name, clk->rate);
366 if (clk->ops->recalc)
367 clk->rate = clk->ops->recalc(clk);
368 propagate_rate(clk);
369 }
370 } else
371 ret = -EBUSY;
372 spin_unlock_irqrestore(&clock_lock, flags);
373
374 return ret;
375}
376EXPORT_SYMBOL_GPL(clk_set_parent);
377
378struct clk *clk_get_parent(struct clk *clk)
379{
380 return clk->parent;
381}
382EXPORT_SYMBOL_GPL(clk_get_parent);
383
384long clk_round_rate(struct clk *clk, unsigned long rate)
385{
386 if (likely(clk->ops && clk->ops->round_rate)) {
387 unsigned long flags, rounded;
388
389 spin_lock_irqsave(&clock_lock, flags);
390 rounded = clk->ops->round_rate(clk, rate);
391 spin_unlock_irqrestore(&clock_lock, flags);
392
393 return rounded;
394 }
395
396 return clk_get_rate(clk);
397}
398EXPORT_SYMBOL_GPL(clk_round_rate);
399
400/*
401 * Find the correct struct clk for the device and connection ID.
402 * We do slightly fuzzy matching here:
403 * An entry with a NULL ID is assumed to be a wildcard.
404 * If an entry has a device ID, it must match
405 * If an entry has a connection ID, it must match
406 * Then we take the most specific entry - with the following
407 * order of precidence: dev+con > dev only > con only.
408 */
409static struct clk *clk_find(const char *dev_id, const char *con_id)
410{
411 struct clk_lookup *p;
412 struct clk *clk = NULL;
413 int match, best = 0;
414
415 list_for_each_entry(p, &clock_list, node) {
416 match = 0;
417 if (p->dev_id) {
418 if (!dev_id || strcmp(p->dev_id, dev_id))
419 continue;
420 match += 2;
421 }
422 if (p->con_id) {
423 if (!con_id || strcmp(p->con_id, con_id))
424 continue;
425 match += 1;
426 }
427 if (match == 0)
428 continue;
429
430 if (match > best) {
431 clk = p->clk;
432 best = match;
433 }
434 }
435 return clk;
436}
437
438struct clk *clk_get_sys(const char *dev_id, const char *con_id)
439{
440 struct clk *clk;
441
442 mutex_lock(&clock_list_sem);
443 clk = clk_find(dev_id, con_id);
444 mutex_unlock(&clock_list_sem);
445
446 return clk ? clk : ERR_PTR(-ENOENT);
447}
448EXPORT_SYMBOL_GPL(clk_get_sys);
449
450/*
451 * Returns a clock. Note that we first try to use device id on the bus
452 * and clock name. If this fails, we try to use clock name only.
453 */
454struct clk *clk_get(struct device *dev, const char *id)
455{
456 const char *dev_id = dev ? dev_name(dev) : NULL;
457 struct clk *p, *clk = ERR_PTR(-ENOENT);
458 int idno;
459
460 clk = clk_get_sys(dev_id, id);
461 if (clk && !IS_ERR(clk))
462 return clk;
463
464 if (dev == NULL || dev->bus != &platform_bus_type)
465 idno = -1;
466 else
467 idno = to_platform_device(dev)->id;
468
469 mutex_lock(&clock_list_sem);
470 list_for_each_entry(p, &clock_list, node) {
471 if (p->id == idno &&
472 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
473 clk = p;
474 goto found;
475 }
476 }
477
478 list_for_each_entry(p, &clock_list, node) {
479 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
480 clk = p;
481 break;
482 }
483 }
484
485found:
486 mutex_unlock(&clock_list_sem);
487
488 return clk;
489}
490EXPORT_SYMBOL_GPL(clk_get);
491
492void clk_put(struct clk *clk)
493{
494 if (clk && !IS_ERR(clk))
495 module_put(clk->owner);
496}
497EXPORT_SYMBOL_GPL(clk_put);
498
499#ifdef CONFIG_PM
500static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
501{
502 static pm_message_t prev_state;
503 struct clk *clkp;
504
505 switch (state.event) {
506 case PM_EVENT_ON:
507 /* Resumeing from hibernation */
508 if (prev_state.event != PM_EVENT_FREEZE)
509 break;
510
511 list_for_each_entry(clkp, &clock_list, node) {
512 if (likely(clkp->ops)) {
513 unsigned long rate = clkp->rate;
514
515 if (likely(clkp->ops->set_parent))
516 clkp->ops->set_parent(clkp,
517 clkp->parent);
518 if (likely(clkp->ops->set_rate))
519 clkp->ops->set_rate(clkp,
520 rate, NO_CHANGE);
521 else if (likely(clkp->ops->recalc))
522 clkp->rate = clkp->ops->recalc(clkp);
523 }
524 }
525 break;
526 case PM_EVENT_FREEZE:
527 break;
528 case PM_EVENT_SUSPEND:
529 break;
530 }
531
532 prev_state = state;
533 return 0;
534}
535
536static int clks_sysdev_resume(struct sys_device *dev)
537{
538 return clks_sysdev_suspend(dev, PMSG_ON);
539}
540
541static struct sysdev_class clks_sysdev_class = {
542 .name = "clks",
543};
544
545static struct sysdev_driver clks_sysdev_driver = {
546 .suspend = clks_sysdev_suspend,
547 .resume = clks_sysdev_resume,
548};
549
550static struct sys_device clks_sysdev_dev = {
551 .cls = &clks_sysdev_class,
552};
553
554static int __init clk_sysdev_init(void)
555{
556 sysdev_class_register(&clks_sysdev_class);
557 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
558 sysdev_register(&clks_sysdev_dev);
559
560 return 0;
561}
562subsys_initcall(clk_sysdev_init);
563#endif
564
565int __init clk_init(void) 23int __init clk_init(void)
566{ 24{
567 int ret; 25 int ret;
@@ -590,90 +48,4 @@ int __init clk_init(void)
590 return ret; 48 return ret;
591} 49}
592 50
593/*
594 * debugfs support to trace clock tree hierarchy and attributes
595 */
596static struct dentry *clk_debugfs_root;
597
598static int clk_debugfs_register_one(struct clk *c)
599{
600 int err;
601 struct dentry *d, *child;
602 struct clk *pa = c->parent;
603 char s[255];
604 char *p = s;
605
606 p += sprintf(p, "%s", c->name);
607 if (c->id >= 0)
608 sprintf(p, ":%d", c->id);
609 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
610 if (!d)
611 return -ENOMEM;
612 c->dentry = d;
613
614 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
615 if (!d) {
616 err = -ENOMEM;
617 goto err_out;
618 }
619 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
620 if (!d) {
621 err = -ENOMEM;
622 goto err_out;
623 }
624 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
625 if (!d) {
626 err = -ENOMEM;
627 goto err_out;
628 }
629 return 0;
630
631err_out:
632 d = c->dentry;
633 list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
634 debugfs_remove(child);
635 debugfs_remove(c->dentry);
636 return err;
637}
638
639static int clk_debugfs_register(struct clk *c)
640{
641 int err;
642 struct clk *pa = c->parent;
643 51
644 if (pa && !pa->dentry) {
645 err = clk_debugfs_register(pa);
646 if (err)
647 return err;
648 }
649
650 if (!c->dentry) {
651 err = clk_debugfs_register_one(c);
652 if (err)
653 return err;
654 }
655 return 0;
656}
657
658static int __init clk_debugfs_init(void)
659{
660 struct clk *c;
661 struct dentry *d;
662 int err;
663
664 d = debugfs_create_dir("clock", NULL);
665 if (!d)
666 return -ENOMEM;
667 clk_debugfs_root = d;
668
669 list_for_each_entry(c, &clock_list, node) {
670 err = clk_debugfs_register(c);
671 if (err)
672 goto err_out;
673 }
674 return 0;
675err_out:
676 debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
677 return err;
678}
679late_initcall(clk_debugfs_init);
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 000000000000..7f1b70cace35
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,85 @@
1#include <linux/sched.h>
2#include <linux/slab.h>
3#include <asm/processor.h>
4#include <asm/fpu.h>
5
6int init_fpu(struct task_struct *tsk)
7{
8 if (tsk_used_math(tsk)) {
9 if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
10 unlazy_fpu(tsk, task_pt_regs(tsk));
11 return 0;
12 }
13
14 /*
15 * Memory allocation at the first usage of the FPU and other state.
16 */
17 if (!tsk->thread.xstate) {
18 tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
19 GFP_KERNEL);
20 if (!tsk->thread.xstate)
21 return -ENOMEM;
22 }
23
24 if (boot_cpu_data.flags & CPU_HAS_FPU) {
25 struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
26 memset(fp, 0, xstate_size);
27 fp->fpscr = FPSCR_INIT;
28 } else {
29 struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
30 memset(fp, 0, xstate_size);
31 fp->fpscr = FPSCR_INIT;
32 }
33
34 set_stopped_child_used_math(tsk);
35 return 0;
36}
37
38#ifdef CONFIG_SH_FPU
39void __fpu_state_restore(void)
40{
41 struct task_struct *tsk = current;
42
43 restore_fpu(tsk);
44
45 task_thread_info(tsk)->status |= TS_USEDFPU;
46 tsk->fpu_counter++;
47}
48
49void fpu_state_restore(struct pt_regs *regs)
50{
51 struct task_struct *tsk = current;
52
53 if (unlikely(!user_mode(regs))) {
54 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
55 BUG();
56 return;
57 }
58
59 if (!tsk_used_math(tsk)) {
60 local_irq_enable();
61 /*
62 * does a slab alloc which can sleep
63 */
64 if (init_fpu(tsk)) {
65 /*
66 * ran out of memory!
67 */
68 do_group_exit(SIGKILL);
69 return;
70 }
71 local_irq_disable();
72 }
73
74 grab_fpu(regs);
75
76 __fpu_state_restore();
77}
78
79BUILD_TRAP_HANDLER(fpu_state_restore)
80{
81 TRAP_HANDLER_DECL;
82
83 fpu_state_restore(regs);
84}
85#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
new file mode 100644
index 000000000000..3e985aae5d91
--- /dev/null
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -0,0 +1,159 @@
1#include <linux/clk.h>
2#include <linux/compiler.h>
3#include <linux/io.h>
4#include <linux/spinlock.h>
5#include <asm/suspend.h>
6#include <asm/hwblk.h>
7#include <asm/clock.h>
8
9static DEFINE_SPINLOCK(hwblk_lock);
10
11static void hwblk_area_mod_cnt(struct hwblk_info *info,
12 int area, int counter, int value, int goal)
13{
14 struct hwblk_area *hap = info->areas + area;
15
16 hap->cnt[counter] += value;
17
18 if (hap->cnt[counter] != goal)
19 return;
20
21 if (hap->flags & HWBLK_AREA_FLAG_PARENT)
22 hwblk_area_mod_cnt(info, hap->parent, counter, value, goal);
23}
24
25
26static int __hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
27 int counter, int value, int goal)
28{
29 struct hwblk *hp = info->hwblks + hwblk;
30
31 hp->cnt[counter] += value;
32 if (hp->cnt[counter] == goal)
33 hwblk_area_mod_cnt(info, hp->area, counter, value, goal);
34
35 return hp->cnt[counter];
36}
37
38static void hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
39 int counter, int value, int goal)
40{
41 unsigned long flags;
42
43 spin_lock_irqsave(&hwblk_lock, flags);
44 __hwblk_mod_cnt(info, hwblk, counter, value, goal);
45 spin_unlock_irqrestore(&hwblk_lock, flags);
46}
47
48void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int counter)
49{
50 hwblk_mod_cnt(info, hwblk, counter, 1, 1);
51}
52
53void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int counter)
54{
55 hwblk_mod_cnt(info, hwblk, counter, -1, 0);
56}
57
58void hwblk_enable(struct hwblk_info *info, int hwblk)
59{
60 struct hwblk *hp = info->hwblks + hwblk;
61 unsigned long tmp;
62 unsigned long flags;
63 int ret;
64
65 spin_lock_irqsave(&hwblk_lock, flags);
66
67 ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, 1, 1);
68 if (ret == 1) {
69 tmp = __raw_readl(hp->mstp);
70 tmp &= ~(1 << hp->bit);
71 __raw_writel(tmp, hp->mstp);
72 }
73
74 spin_unlock_irqrestore(&hwblk_lock, flags);
75}
76
77void hwblk_disable(struct hwblk_info *info, int hwblk)
78{
79 struct hwblk *hp = info->hwblks + hwblk;
80 unsigned long tmp;
81 unsigned long flags;
82 int ret;
83
84 spin_lock_irqsave(&hwblk_lock, flags);
85
86 ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, -1, 0);
87 if (ret == 0) {
88 tmp = __raw_readl(hp->mstp);
89 tmp |= 1 << hp->bit;
90 __raw_writel(tmp, hp->mstp);
91 }
92
93 spin_unlock_irqrestore(&hwblk_lock, flags);
94}
95
96struct hwblk_info *hwblk_info;
97
98int __init hwblk_register(struct hwblk_info *info)
99{
100 hwblk_info = info;
101 return 0;
102}
103
104int __init __weak arch_hwblk_init(void)
105{
106 return 0;
107}
108
109int __weak arch_hwblk_sleep_mode(void)
110{
111 return SUSP_SH_SLEEP;
112}
113
114int __init hwblk_init(void)
115{
116 return arch_hwblk_init();
117}
118
119/* allow clocks to enable and disable hardware blocks */
120static int sh_hwblk_clk_enable(struct clk *clk)
121{
122 if (!hwblk_info)
123 return -ENOENT;
124
125 hwblk_enable(hwblk_info, clk->arch_flags);
126 return 0;
127}
128
129static void sh_hwblk_clk_disable(struct clk *clk)
130{
131 if (hwblk_info)
132 hwblk_disable(hwblk_info, clk->arch_flags);
133}
134
135static struct clk_ops sh_hwblk_clk_ops = {
136 .enable = sh_hwblk_clk_enable,
137 .disable = sh_hwblk_clk_disable,
138 .recalc = followparent_recalc,
139};
140
141int __init sh_hwblk_clk_register(struct clk *clks, int nr)
142{
143 struct clk *clkp;
144 int ret = 0;
145 int k;
146
147 for (k = 0; !ret && (k < nr); k++) {
148 clkp = clks + k;
149
150 /* skip over clocks using hwblk 0 (HWBLK_UNKNOWN) */
151 if (!clkp->arch_flags)
152 continue;
153
154 clkp->ops = &sh_hwblk_clk_ops;
155 ret |= clk_register(clkp);
156 }
157
158 return ret;
159}
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421099cd..fac742e514ee 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * CPU init code 4 * CPU init code
5 * 5 *
6 * Copyright (C) 2002 - 2007 Paul Mundt 6 * Copyright (C) 2002 - 2009 Paul Mundt
7 * Copyright (C) 2003 Richard Curnow 7 * Copyright (C) 2003 Richard Curnow
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
@@ -24,22 +24,32 @@
24#include <asm/elf.h> 24#include <asm/elf.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27#ifdef CONFIG_SUPERH32 27#include <asm/sh_bios.h>
28#include <asm/ubc.h> 28
29#ifdef CONFIG_SH_FPU
30#define cpu_has_fpu 1
31#else
32#define cpu_has_fpu 0
33#endif
34
35#ifdef CONFIG_SH_DSP
36#define cpu_has_dsp 1
37#else
38#define cpu_has_dsp 0
29#endif 39#endif
30 40
31/* 41/*
32 * Generic wrapper for command line arguments to disable on-chip 42 * Generic wrapper for command line arguments to disable on-chip
33 * peripherals (nofpu, nodsp, and so forth). 43 * peripherals (nofpu, nodsp, and so forth).
34 */ 44 */
35#define onchip_setup(x) \ 45#define onchip_setup(x) \
36static int x##_disabled __initdata = 0; \ 46static int x##_disabled __cpuinitdata = !cpu_has_##x; \
37 \ 47 \
38static int __init x##_setup(char *opts) \ 48static int __cpuinit x##_setup(char *opts) \
39{ \ 49{ \
40 x##_disabled = 1; \ 50 x##_disabled = 1; \
41 return 1; \ 51 return 1; \
42} \ 52} \
43__setup("no" __stringify(x), x##_setup); 53__setup("no" __stringify(x), x##_setup);
44 54
45onchip_setup(fpu); 55onchip_setup(fpu);
@@ -49,21 +59,47 @@ onchip_setup(dsp);
49#define CPUOPM 0xff2f0000 59#define CPUOPM 0xff2f0000
50#define CPUOPM_RABD (1 << 5) 60#define CPUOPM_RABD (1 << 5)
51 61
52static void __init speculative_execution_init(void) 62static void __cpuinit speculative_execution_init(void)
53{ 63{
54 /* Clear RABD */ 64 /* Clear RABD */
55 ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); 65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
56 66
57 /* Flush the update */ 67 /* Flush the update */
58 (void)ctrl_inl(CPUOPM); 68 (void)__raw_readl(CPUOPM);
59 ctrl_barrier(); 69 ctrl_barrier();
60} 70}
61#else 71#else
62#define speculative_execution_init() do { } while (0) 72#define speculative_execution_init() do { } while (0)
63#endif 73#endif
64 74
75#ifdef CONFIG_CPU_SH4A
76#define EXPMASK 0xff2f0004
77#define EXPMASK_RTEDS (1 << 0)
78#define EXPMASK_BRDSSLP (1 << 1)
79#define EXPMASK_MMCAW (1 << 4)
80
81static void __cpuinit expmask_init(void)
82{
83 unsigned long expmask = __raw_readl(EXPMASK);
84
85 /*
86 * Future proofing.
87 *
88 * Disable support for slottable sleep instruction, non-nop
89 * instructions in the rte delay slot, and associative writes to
90 * the memory-mapped cache array.
91 */
92 expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
93
94 __raw_writel(expmask, EXPMASK);
95 ctrl_barrier();
96}
97#else
98#define expmask_init() do { } while (0)
99#endif
100
65/* 2nd-level cache init */ 101/* 2nd-level cache init */
66void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void) 102void __attribute__ ((weak)) l2_cache_init(void)
67{ 103{
68} 104}
69 105
@@ -71,12 +107,12 @@ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
71 * Generic first-level cache init 107 * Generic first-level cache init
72 */ 108 */
73#ifdef CONFIG_SUPERH32 109#ifdef CONFIG_SUPERH32
74static void __uses_jump_to_uncached cache_init(void) 110static void cache_init(void)
75{ 111{
76 unsigned long ccr, flags; 112 unsigned long ccr, flags;
77 113
78 jump_to_uncached(); 114 jump_to_uncached();
79 ccr = ctrl_inl(CCR); 115 ccr = __raw_readl(CCR);
80 116
81 /* 117 /*
82 * At this point we don't know whether the cache is enabled or not - a 118 * At this point we don't know whether the cache is enabled or not - a
@@ -120,7 +156,7 @@ static void __uses_jump_to_uncached cache_init(void)
120 for (addr = addrstart; 156 for (addr = addrstart;
121 addr < addrstart + waysize; 157 addr < addrstart + waysize;
122 addr += current_cpu_data.dcache.linesz) 158 addr += current_cpu_data.dcache.linesz)
123 ctrl_outl(0, addr); 159 __raw_writel(0, addr);
124 160
125 addrstart += current_cpu_data.dcache.way_incr; 161 addrstart += current_cpu_data.dcache.way_incr;
126 } while (--ways); 162 } while (--ways);
@@ -153,7 +189,7 @@ static void __uses_jump_to_uncached cache_init(void)
153 189
154 l2_cache_init(); 190 l2_cache_init();
155 191
156 ctrl_outl(flags, CCR); 192 __raw_writel(flags, CCR);
157 back_to_cached(); 193 back_to_cached();
158} 194}
159#else 195#else
@@ -181,8 +217,20 @@ static void detect_cache_shape(void)
181 l2_cache_shape = -1; /* No S-cache */ 217 l2_cache_shape = -1; /* No S-cache */
182} 218}
183 219
220static void __cpuinit fpu_init(void)
221{
222 /* Disable the FPU */
223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
224 printk("FPU Disabled\n");
225 current_cpu_data.flags &= ~CPU_HAS_FPU;
226 }
227
228 disable_fpu();
229 clear_used_math();
230}
231
184#ifdef CONFIG_SH_DSP 232#ifdef CONFIG_SH_DSP
185static void __init release_dsp(void) 233static void __cpuinit release_dsp(void)
186{ 234{
187 unsigned long sr; 235 unsigned long sr;
188 236
@@ -196,7 +244,7 @@ static void __init release_dsp(void)
196 ); 244 );
197} 245}
198 246
199static void __init dsp_init(void) 247static void __cpuinit dsp_init(void)
200{ 248{
201 unsigned long sr; 249 unsigned long sr;
202 250
@@ -218,34 +266,41 @@ static void __init dsp_init(void)
218 if (sr & SR_DSP) 266 if (sr & SR_DSP)
219 current_cpu_data.flags |= CPU_HAS_DSP; 267 current_cpu_data.flags |= CPU_HAS_DSP;
220 268
269 /* Disable the DSP */
270 if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
271 printk("DSP Disabled\n");
272 current_cpu_data.flags &= ~CPU_HAS_DSP;
273 }
274
221 /* Now that we've determined the DSP status, clear the DSP bit. */ 275 /* Now that we've determined the DSP status, clear the DSP bit. */
222 release_dsp(); 276 release_dsp();
223} 277}
278#else
279static inline void __cpuinit dsp_init(void) { }
224#endif /* CONFIG_SH_DSP */ 280#endif /* CONFIG_SH_DSP */
225 281
226/** 282/**
227 * sh_cpu_init 283 * cpu_init
228 * 284 *
229 * This is our initial entry point for each CPU, and is invoked on the boot 285 * This is our initial entry point for each CPU, and is invoked on the
230 * CPU prior to calling start_kernel(). For SMP, a combination of this and 286 * boot CPU prior to calling start_kernel(). For SMP, a combination of
231 * start_secondary() will bring up each processor to a ready state prior 287 * this and start_secondary() will bring up each processor to a ready
232 * to hand forking the idle loop. 288 * state prior to hand forking the idle loop.
233 * 289 *
234 * We do all of the basic processor init here, including setting up the 290 * We do all of the basic processor init here, including setting up
235 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is 291 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
236 * hit (and subsequently platform_setup()) things like determining the 292 * subsequently platform_setup()) things like determining the CPU
237 * CPU subtype and initial configuration will all be done. 293 * subtype and initial configuration will all be done.
238 * 294 *
239 * Each processor family is still responsible for doing its own probing 295 * Each processor family is still responsible for doing its own probing
240 * and cache configuration in detect_cpu_and_cache_system(). 296 * and cache configuration in cpu_probe().
241 */ 297 */
242 298asmlinkage void __cpuinit cpu_init(void)
243asmlinkage void __init sh_cpu_init(void)
244{ 299{
245 current_thread_info()->cpu = hard_smp_processor_id(); 300 current_thread_info()->cpu = hard_smp_processor_id();
246 301
247 /* First, probe the CPU */ 302 /* First, probe the CPU */
248 detect_cpu_and_cache_system(); 303 cpu_probe();
249 304
250 if (current_cpu_data.type == CPU_SH_NONE) 305 if (current_cpu_data.type == CPU_SH_NONE)
251 panic("Unknown CPU"); 306 panic("Unknown CPU");
@@ -268,28 +323,16 @@ asmlinkage void __init sh_cpu_init(void)
268 cache_init(); 323 cache_init();
269 324
270 if (raw_smp_processor_id() == 0) { 325 if (raw_smp_processor_id() == 0) {
271#ifdef CONFIG_MMU
272 shm_align_mask = max_t(unsigned long, 326 shm_align_mask = max_t(unsigned long,
273 current_cpu_data.dcache.way_size - 1, 327 current_cpu_data.dcache.way_size - 1,
274 PAGE_SIZE - 1); 328 PAGE_SIZE - 1);
275#endif
276 329
277 /* Boot CPU sets the cache shape */ 330 /* Boot CPU sets the cache shape */
278 detect_cache_shape(); 331 detect_cache_shape();
279 } 332 }
280 333
281 /* Disable the FPU */ 334 fpu_init();
282 if (fpu_disabled) { 335 dsp_init();
283 printk("FPU Disabled\n");
284 current_cpu_data.flags &= ~CPU_HAS_FPU;
285 disable_fpu();
286 }
287
288 /* FPU initialization */
289 if ((current_cpu_data.flags & CPU_HAS_FPU)) {
290 clear_thread_flag(TIF_USEDFPU);
291 clear_used_math();
292 }
293 336
294 /* 337 /*
295 * Initialize the per-CPU ASID cache very early, since the 338 * Initialize the per-CPU ASID cache very early, since the
@@ -297,28 +340,26 @@ asmlinkage void __init sh_cpu_init(void)
297 */ 340 */
298 current_cpu_data.asid_cache = NO_CONTEXT; 341 current_cpu_data.asid_cache = NO_CONTEXT;
299 342
300#ifdef CONFIG_SH_DSP 343 current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
301 /* Probe for DSP */
302 dsp_init();
303 344
304 /* Disable the DSP */ 345 speculative_execution_init();
305 if (dsp_disabled) { 346 expmask_init();
306 printk("DSP Disabled\n");
307 current_cpu_data.flags &= ~CPU_HAS_DSP;
308 release_dsp();
309 }
310#endif
311 347
312 /* 348 /* Do the rest of the boot processor setup */
313 * Some brain-damaged loaders decided it would be a good idea to put 349 if (raw_smp_processor_id() == 0) {
314 * the UBC to sleep. This causes some issues when it comes to things 350 /* Save off the BIOS VBR, if there is one */
315 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So .. 351 sh_bios_vbr_init();
316 * we wake it up and hope that all is well.
317 */
318#ifdef CONFIG_SUPERH32
319 if (raw_smp_processor_id() == 0)
320 ubc_wakeup();
321#endif
322 352
323 speculative_execution_init(); 353 /*
354 * Setup VBR for boot CPU. Secondary CPUs do this through
355 * start_secondary().
356 */
357 per_cpu_trap_init();
358
359 /*
360 * Boot processor to setup the FP and extended state
361 * context info.
362 */
363 init_thread_xstate();
364 }
324} 365}
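
The rewritten cpu_init() above keeps the one-off work (cache shape detection, VBR setup through per_cpu_trap_init(), init_thread_xstate()) behind a boot-CPU check while the per-CPU steps run everywhere. A minimal stand-alone C sketch of that ordering, with stub bodies standing in for the real routines:

/* Model of the cpu_init() call sequence above; all bodies are stubs,
 * only the ordering and the boot-CPU gate mirror the kernel code. */
#include <stdio.h>

static const int boot_cpu = 0;

static void cpu_probe(void)          { puts("probe CPU type"); }
static void cache_init(void)         { puts("init caches"); }
static void detect_cache_shape(void) { puts("record cache shape"); }
static void fpu_init(void)           { puts("init FPU"); }
static void dsp_init(void)           { puts("init DSP"); }
static void per_cpu_trap_init(void)  { puts("set up VBR"); }
static void init_thread_xstate(void) { puts("size FP/extended state"); }

static void cpu_init(int cpu)
{
	cpu_probe();
	cache_init();

	if (cpu == boot_cpu)
		detect_cache_shape();

	fpu_init();
	dsp_init();

	/* Boot processor finishes the one-off setup. */
	if (cpu == boot_cpu) {
		per_cpu_trap_init();
		init_thread_xstate();
	}
}

int main(void)
{
	cpu_init(0);	/* boot CPU */
	cpu_init(1);	/* a secondary CPU skips the one-off steps */
	return 0;
}
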
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index 6b5d191eec3a..32c825c9488e 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -51,16 +51,20 @@ static inline void set_interrupt_registers(int ip)
51 : "t"); 51 : "t");
52} 52}
53 53
54static void mask_imask_irq(unsigned int irq) 54static void mask_imask_irq(struct irq_data *data)
55{ 55{
56 unsigned int irq = data->irq;
57
56 clear_bit(irq, imask_mask); 58 clear_bit(irq, imask_mask);
57 if (interrupt_priority < IMASK_PRIORITY - irq) 59 if (interrupt_priority < IMASK_PRIORITY - irq)
58 interrupt_priority = IMASK_PRIORITY - irq; 60 interrupt_priority = IMASK_PRIORITY - irq;
59 set_interrupt_registers(interrupt_priority); 61 set_interrupt_registers(interrupt_priority);
60} 62}
61 63
62static void unmask_imask_irq(unsigned int irq) 64static void unmask_imask_irq(struct irq_data *data)
63{ 65{
66 unsigned int irq = data->irq;
67
64 set_bit(irq, imask_mask); 68 set_bit(irq, imask_mask);
65 interrupt_priority = IMASK_PRIORITY - 69 interrupt_priority = IMASK_PRIORITY -
66 find_first_zero_bit(imask_mask, IMASK_PRIORITY); 70 find_first_zero_bit(imask_mask, IMASK_PRIORITY);
@@ -68,10 +72,10 @@ static void unmask_imask_irq(unsigned int irq)
68} 72}
69 73
70static struct irq_chip imask_irq_chip = { 74static struct irq_chip imask_irq_chip = {
71 .typename = "SR.IMASK", 75 .name = "SR.IMASK",
72 .mask = mask_imask_irq, 76 .irq_mask = mask_imask_irq,
73 .unmask = unmask_imask_irq, 77 .irq_unmask = unmask_imask_irq,
74 .mask_ack = mask_imask_irq, 78 .irq_mask_ack = mask_imask_irq,
75}; 79};
76 80
77void make_imask_irq(unsigned int irq) 81void make_imask_irq(unsigned int irq)
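
The imask chip moves from the old .mask/.unmask callbacks, which received a bare IRQ number, to the irq_data-based .irq_mask/.irq_unmask ones, which receive a handle carrying the number together with per-IRQ chip data. A stand-alone model of that signature change; struct irq_data here is a local stand-in, not the kernel's definition:

#include <stdio.h>

/* Stand-in: the callback gets a handle, not just a number. */
struct irq_data {
	unsigned int irq;
	void *chip_data;	/* per-IRQ driver data */
};

/* Old style: only the number is available, context must be looked up. */
static void mask_irq_old(unsigned int irq)
{
	printf("mask irq %u (no context)\n", irq);
}

/* New style: number and context travel together. */
static void mask_irq_new(struct irq_data *data)
{
	printf("mask irq %u, chip_data=%p\n", data->irq, data->chip_data);
}

int main(void)
{
	int priv = 42;
	struct irq_data d = { .irq = 5, .chip_data = &priv };

	mask_irq_old(d.irq);
	mask_irq_new(&d);
	return 0;
}
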
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 6c092f1f5557..5af48f8357e5 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
76}; 76};
77 77
78static unsigned long intc_virt; 78static unsigned long intc_virt;
79
80static unsigned int startup_intc_irq(unsigned int irq);
81static void shutdown_intc_irq(unsigned int irq);
82static void enable_intc_irq(unsigned int irq);
83static void disable_intc_irq(unsigned int irq);
84static void mask_and_ack_intc(unsigned int);
85static void end_intc_irq(unsigned int irq);
86
87static struct irq_chip intc_irq_type = {
88 .typename = "INTC",
89 .startup = startup_intc_irq,
90 .shutdown = shutdown_intc_irq,
91 .enable = enable_intc_irq,
92 .disable = disable_intc_irq,
93 .ack = mask_and_ack_intc,
94 .end = end_intc_irq
95};
96
97static int irlm; /* IRL mode */ 79static int irlm; /* IRL mode */
98 80
99static unsigned int startup_intc_irq(unsigned int irq) 81static void enable_intc_irq(struct irq_data *data)
100{
101 enable_intc_irq(irq);
102 return 0; /* never anything pending */
103}
104
105static void shutdown_intc_irq(unsigned int irq)
106{
107 disable_intc_irq(irq);
108}
109
110static void enable_intc_irq(unsigned int irq)
111{ 82{
83 unsigned int irq = data->irq;
112 unsigned long reg; 84 unsigned long reg;
113 unsigned long bitmask; 85 unsigned long bitmask;
114 86
@@ -123,11 +95,12 @@ static void enable_intc_irq(unsigned int irq)
123 bitmask = 1 << (irq - 32); 95 bitmask = 1 << (irq - 32);
124 } 96 }
125 97
126 ctrl_outl(bitmask, reg); 98 __raw_writel(bitmask, reg);
127} 99}
128 100
129static void disable_intc_irq(unsigned int irq) 101static void disable_intc_irq(struct irq_data *data)
130{ 102{
103 unsigned int irq = data->irq;
131 unsigned long reg; 104 unsigned long reg;
132 unsigned long bitmask; 105 unsigned long bitmask;
133 106
@@ -139,18 +112,14 @@ static void disable_intc_irq(unsigned int irq)
139 bitmask = 1 << (irq - 32); 112 bitmask = 1 << (irq - 32);
140 } 113 }
141 114
142 ctrl_outl(bitmask, reg); 115 __raw_writel(bitmask, reg);
143}
144
145static void mask_and_ack_intc(unsigned int irq)
146{
147 disable_intc_irq(irq);
148} 116}
149 117
150static void end_intc_irq(unsigned int irq) 118static struct irq_chip intc_irq_type = {
151{ 119 .name = "INTC",
152 enable_intc_irq(irq); 120 .irq_enable = enable_intc_irq,
153} 121 .irq_disable = disable_intc_irq,
122};
154 123
155void __init plat_irq_setup(void) 124void __init plat_irq_setup(void)
156{ 125{
@@ -170,11 +139,11 @@ void __init plat_irq_setup(void)
170 139
171 140
172 /* Disable all interrupts and set all priorities to 0 to avoid trouble */ 141 /* Disable all interrupts and set all priorities to 0 to avoid trouble */
173 ctrl_outl(-1, INTC_INTDSB_0); 142 __raw_writel(-1, INTC_INTDSB_0);
174 ctrl_outl(-1, INTC_INTDSB_1); 143 __raw_writel(-1, INTC_INTDSB_1);
175 144
176 for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8) 145 for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
177 ctrl_outl( NO_PRIORITY, reg); 146 __raw_writel( NO_PRIORITY, reg);
178 147
179 148
180#ifdef CONFIG_SH_CAYMAN 149#ifdef CONFIG_SH_CAYMAN
@@ -199,7 +168,7 @@ void __init plat_irq_setup(void)
199 reg = INTC_ICR_SET; 168 reg = INTC_ICR_SET;
200 i = IRQ_IRL0; 169 i = IRQ_IRL0;
201 } 170 }
202 ctrl_outl(INTC_ICR_IRLM, reg); 171 __raw_writel(INTC_ICR_IRLM, reg);
203 172
204 /* Set interrupt priorities according to platform description */ 173 /* Set interrupt priorities according to platform description */
205 for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) { 174 for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
@@ -207,7 +176,7 @@ void __init plat_irq_setup(void)
207 ((i % INTC_INTPRI_PPREG) * 4); 176 ((i % INTC_INTPRI_PPREG) * 4);
208 if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) { 177 if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
209 /* Upon the 7th, set Priority Register */ 178 /* Upon the 7th, set Priority Register */
210 ctrl_outl(data, reg); 179 __raw_writel(data, reg);
211 data = 0; 180 data = 0;
212 reg += 8; 181 reg += 8;
213 } 182 }
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 808d99a48efb..7516c35ee514 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -17,30 +17,32 @@
17 * for more details. 17 * for more details.
18 */ 18 */
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
20#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/kernel.h>
21#include <linux/module.h> 24#include <linux/module.h>
22#include <linux/io.h>
23#include <linux/interrupt.h>
24#include <linux/topology.h> 25#include <linux/topology.h>
25 26
26static inline struct ipr_desc *get_ipr_desc(unsigned int irq) 27static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
27{ 28{
28 struct irq_chip *chip = get_irq_chip(irq); 29 struct irq_chip *chip = irq_data_get_irq_chip(data);
29 return (void *)((char *)chip - offsetof(struct ipr_desc, chip)); 30 return container_of(chip, struct ipr_desc, chip);
30} 31}
31 32
32static void disable_ipr_irq(unsigned int irq) 33static void disable_ipr_irq(struct irq_data *data)
33{ 34{
34 struct ipr_data *p = get_irq_chip_data(irq); 35 struct ipr_data *p = irq_data_get_irq_chip_data(data);
35 unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx]; 36 unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
36 /* Set the priority in IPR to 0 */ 37 /* Set the priority in IPR to 0 */
37 __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr); 38 __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
39 (void)__raw_readw(addr); /* Read back to flush write posting */
38} 40}
39 41
40static void enable_ipr_irq(unsigned int irq) 42static void enable_ipr_irq(struct irq_data *data)
41{ 43{
42 struct ipr_data *p = get_irq_chip_data(irq); 44 struct ipr_data *p = irq_data_get_irq_chip_data(data);
43 unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx]; 45 unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
44 /* Set priority in IPR back to original value */ 46 /* Set priority in IPR back to original value */
45 __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr); 47 __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
46} 48}
@@ -54,19 +56,18 @@ void register_ipr_controller(struct ipr_desc *desc)
54{ 56{
55 int i; 57 int i;
56 58
57 desc->chip.mask = disable_ipr_irq; 59 desc->chip.irq_mask = disable_ipr_irq;
58 desc->chip.unmask = enable_ipr_irq; 60 desc->chip.irq_unmask = enable_ipr_irq;
59 desc->chip.mask_ack = disable_ipr_irq;
60 61
61 for (i = 0; i < desc->nr_irqs; i++) { 62 for (i = 0; i < desc->nr_irqs; i++) {
62 struct ipr_data *p = desc->ipr_data + i; 63 struct ipr_data *p = desc->ipr_data + i;
63 struct irq_desc *irq_desc; 64 int res;
64 65
65 BUG_ON(p->ipr_idx >= desc->nr_offsets); 66 BUG_ON(p->ipr_idx >= desc->nr_offsets);
66 BUG_ON(!desc->ipr_offsets[p->ipr_idx]); 67 BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
67 68
68 irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id()); 69 res = irq_alloc_desc_at(p->irq, numa_node_id());
69 if (unlikely(!irq_desc)) { 70 if (unlikely(res != p->irq && res != -EEXIST)) {
70 printk(KERN_INFO "can not get irq_desc for %d\n", 71 printk(KERN_INFO "can not get irq_desc for %d\n",
71 p->irq); 72 p->irq);
72 continue; 73 continue;
@@ -76,7 +77,7 @@ void register_ipr_controller(struct ipr_desc *desc)
76 set_irq_chip_and_handler_name(p->irq, &desc->chip, 77 set_irq_chip_and_handler_name(p->irq, &desc->chip,
77 handle_level_irq, "level"); 78 handle_level_irq, "level");
78 set_irq_chip_data(p->irq, p); 79 set_irq_chip_data(p->irq, p);
79 disable_ipr_irq(p->irq); 80 disable_ipr_irq(irq_get_irq_data(p->irq));
80 } 81 }
81} 82}
82EXPORT_SYMBOL(register_ipr_controller); 83EXPORT_SYMBOL(register_ipr_controller);
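
get_ipr_desc() now recovers the enclosing ipr_desc from its embedded irq_chip with container_of() instead of open-coded offsetof() arithmetic. The idiom in isolation, with local stand-in structs and a simplified copy of the macro so the example builds in user space:

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel macro, enough for this example. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_chip { const char *name; };

struct ipr_desc {
	unsigned long ipr_offsets[4];
	struct irq_chip chip;		/* embedded member */
};

static struct ipr_desc *to_ipr_desc(struct irq_chip *chip)
{
	return container_of(chip, struct ipr_desc, chip);
}

int main(void)
{
	struct ipr_desc desc = { .chip = { .name = "ipr" } };

	/* Given only a pointer to the member, recover the parent. */
	printf("recovered %p, expected %p\n",
	       (void *)to_ipr_desc(&desc.chip), (void *)&desc);
	return 0;
}
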
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
new file mode 100644
index 000000000000..e80a936f409a
--- /dev/null
+++ b/arch/sh/kernel/cpu/proc.c
@@ -0,0 +1,148 @@
1#include <linux/seq_file.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <asm/machvec.h>
5#include <asm/processor.h>
6
7static const char *cpu_name[] = {
8 [CPU_SH7201] = "SH7201",
9 [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
10 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
11 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
12 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
13 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
14 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
15 [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
16 [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
17 [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
18 [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
19 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
20 [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
21 [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
22 [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
23 [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
24 [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
25 [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
26 [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
27 [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
28 [CPU_SH_NONE] = "Unknown"
29};
30
31const char *get_cpu_subtype(struct sh_cpuinfo *c)
32{
33 return cpu_name[c->type];
34}
35EXPORT_SYMBOL(get_cpu_subtype);
36
37#ifdef CONFIG_PROC_FS
38/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
39static const char *cpu_flags[] = {
40 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
41 "ptea", "llsc", "l2", "op32", "pteaex", NULL
42};
43
44static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
45{
46 unsigned long i;
47
48 seq_printf(m, "cpu flags\t:");
49
50 if (!c->flags) {
51 seq_printf(m, " %s\n", cpu_flags[0]);
52 return;
53 }
54
55 for (i = 0; cpu_flags[i]; i++)
56 if ((c->flags & (1 << i)))
57 seq_printf(m, " %s", cpu_flags[i+1]);
58
59 seq_printf(m, "\n");
60}
61
62static void show_cacheinfo(struct seq_file *m, const char *type,
63 struct cache_info info)
64{
65 unsigned int cache_size;
66
67 cache_size = info.ways * info.sets * info.linesz;
68
69 seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
70 type, cache_size >> 10, info.ways);
71}
72
73/*
74 * Get CPU information for use by the procfs.
75 */
76static int show_cpuinfo(struct seq_file *m, void *v)
77{
78 struct sh_cpuinfo *c = v;
79 unsigned int cpu = c - cpu_data;
80
81 if (!cpu_online(cpu))
82 return 0;
83
84 if (cpu == 0)
85 seq_printf(m, "machine\t\t: %s\n", get_system_type());
86 else
87 seq_printf(m, "\n");
88
89 seq_printf(m, "processor\t: %d\n", cpu);
90 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
91 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
92 if (c->cut_major == -1)
93 seq_printf(m, "cut\t\t: unknown\n");
94 else if (c->cut_minor == -1)
95 seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
96 else
97 seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
98
99 show_cpuflags(m, c);
100
101 seq_printf(m, "cache type\t: ");
102
103 /*
104 * Check for what type of cache we have, we support both the
105 * unified cache on the SH-2 and SH-3, as well as the harvard
106 * style cache on the SH-4.
107 */
108 if (c->icache.flags & SH_CACHE_COMBINED) {
109 seq_printf(m, "unified\n");
110 show_cacheinfo(m, "cache", c->icache);
111 } else {
112 seq_printf(m, "split (harvard)\n");
113 show_cacheinfo(m, "icache", c->icache);
114 show_cacheinfo(m, "dcache", c->dcache);
115 }
116
117 /* Optional secondary cache */
118 if (c->flags & CPU_HAS_L2_CACHE)
119 show_cacheinfo(m, "scache", c->scache);
120
121 seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
122
123 seq_printf(m, "bogomips\t: %lu.%02lu\n",
124 c->loops_per_jiffy/(500000/HZ),
125 (c->loops_per_jiffy/(5000/HZ)) % 100);
126
127 return 0;
128}
129
130static void *c_start(struct seq_file *m, loff_t *pos)
131{
132 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
133}
134static void *c_next(struct seq_file *m, void *v, loff_t *pos)
135{
136 ++*pos;
137 return c_start(m, pos);
138}
139static void c_stop(struct seq_file *m, void *v)
140{
141}
142const struct seq_operations cpuinfo_op = {
143 .start = c_start,
144 .next = c_next,
145 .stop = c_stop,
146 .show = show_cpuinfo,
147};
148#endif /* CONFIG_PROC_FS */
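
show_cpuflags() above prints one name per set bit and indexes cpu_flags[i + 1] because entry 0 is the "none" placeholder. A user-space model of that decode and of the bogomips arithmetic from show_cpuinfo(), with seq_file replaced by printf and the HZ and loops_per_jiffy values made up for illustration:

#include <stdio.h>

#define HZ 100	/* illustrative, not the kernel's configured value */

/* Bit i set in flags maps to cpu_flags[i + 1]; index 0 is "none". */
static const char *cpu_flags[] = {
	"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
	"ptea", "llsc", "l2", "op32", "pteaex", NULL
};

static void show_cpuflags(unsigned long flags)
{
	unsigned long i;

	printf("cpu flags\t:");
	if (!flags) {
		printf(" %s\n", cpu_flags[0]);
		return;
	}
	for (i = 0; cpu_flags[i]; i++)
		if (flags & (1UL << i))
			printf(" %s", cpu_flags[i + 1]);
	printf("\n");
}

int main(void)
{
	unsigned long loops_per_jiffy = 498073;	/* made-up sample */

	show_cpuflags((1 << 0) | (1 << 3));	/* fpu + dsp */
	printf("bogomips\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000 / HZ),
	       (loops_per_jiffy / (5000 / HZ)) % 100);
	return 0;
}
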
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index 4fe863170e31..5b7f12e58a8d 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -14,24 +14,18 @@
14 */ 14 */
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/io.h>
17#include <asm/clock.h> 18#include <asm/clock.h>
18#include <asm/freq.h> 19#include <asm/freq.h>
19#include <asm/io.h> 20#include <asm/processor.h>
20 21
21static const int pll1rate[] = {1,2}; 22static const int pll1rate[] = {1,2};
22static const int pfc_divisors[] = {1,2,0,4}; 23static const int pfc_divisors[] = {1,2,0,4};
23 24static unsigned int pll2_mult;
24#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
25#define PLL2 (4)
26#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
27#define PLL2 (2)
28#else
29#error "Illigal Clock Mode!"
30#endif
31 25
32static void master_clk_init(struct clk *clk) 26static void master_clk_init(struct clk *clk)
33{ 27{
34 clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; 28 clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
35} 29}
36 30
37static struct clk_ops sh7619_master_clk_ops = { 31static struct clk_ops sh7619_master_clk_ops = {
@@ -40,7 +34,7 @@ static struct clk_ops sh7619_master_clk_ops = {
40 34
41static unsigned long module_clk_recalc(struct clk *clk) 35static unsigned long module_clk_recalc(struct clk *clk)
42{ 36{
43 int idx = (ctrl_inw(FREQCR) & 0x0007); 37 int idx = (__raw_readw(FREQCR) & 0x0007);
44 return clk->parent->rate / pfc_divisors[idx]; 38 return clk->parent->rate / pfc_divisors[idx];
45} 39}
46 40
@@ -50,7 +44,7 @@ static struct clk_ops sh7619_module_clk_ops = {
50 44
51static unsigned long bus_clk_recalc(struct clk *clk) 45static unsigned long bus_clk_recalc(struct clk *clk)
52{ 46{
53 return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; 47 return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
54} 48}
55 49
56static struct clk_ops sh7619_bus_clk_ops = { 50static struct clk_ops sh7619_bus_clk_ops = {
@@ -70,6 +64,14 @@ static struct clk_ops *sh7619_clk_ops[] = {
70 64
71void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 65void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
72{ 66{
67 if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
68 test_mode_pin(MODE_PIN2 | MODE_PIN1))
69 pll2_mult = 2;
70 else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
71 pll2_mult = 4;
72
73 BUG_ON(!pll2_mult);
74
73 if (idx < ARRAY_SIZE(sh7619_clk_ops)) 75 if (idx < ARRAY_SIZE(sh7619_clk_ops))
74 *ops = sh7619_clk_ops[idx]; 76 *ops = sh7619_clk_ops[idx];
75} 77}
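
The compile-time PLL2 constant derived from CONFIG_SH_CLK_MD becomes a runtime choice read from the mode pins through test_mode_pin(). The sketch below models that selection as a pure function so the mapping in the hunk can be exercised in user space; the pin bit values and the "all listed pins asserted" semantics of the helper are assumptions of the model, not taken from the SoC documentation:

#include <stdio.h>

/* Illustrative pin bits only. */
#define MODE_PIN0 (1u << 0)
#define MODE_PIN1 (1u << 1)
#define MODE_PIN2 (1u << 2)

/* Assumption: "test" means every pin in the mask is asserted. */
static int test_mode_pin(unsigned int pins, unsigned int mask)
{
	return (pins & mask) == mask;
}

/* Mirrors the sh7619 mapping in the hunk above. */
static unsigned int pll2_mult_for(unsigned int pins)
{
	if (test_mode_pin(pins, MODE_PIN2 | MODE_PIN0) ||
	    test_mode_pin(pins, MODE_PIN2 | MODE_PIN1))
		return 2;
	if (test_mode_pin(pins, MODE_PIN0) || test_mode_pin(pins, MODE_PIN1))
		return 4;
	return 0;	/* unknown mode; the kernel BUG()s on this */
}

int main(void)
{
	printf("pins 2+0 -> x%u\n", pll2_mult_for(MODE_PIN2 | MODE_PIN0));
	printf("pin 1    -> x%u\n", pll2_mult_for(MODE_PIN1));
	printf("no pins  -> x%u\n", pll2_mult_for(0));
	return 0;
}
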
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index becc54c45692..c8a4331d9b8d 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -227,8 +227,9 @@ ENTRY(sh_bios_handler)
227 mov.l @r15+, r14 227 mov.l @r15+, r14
228 add #8,r15 228 add #8,r15
229 lds.l @r15+, pr 229 lds.l @r15+, pr
230 mov.l @r15+,r15
230 rte 231 rte
231 mov.l @r15+,r15 232 nop
232 .align 2 233 .align 2
2331: .long gdb_vbr_vector 2341: .long gdb_vbr_vector
234#endif /* CONFIG_SH_STANDARD_BIOS */ 235#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 5916d9096b99..bab8e75958ae 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16int __init detect_cpu_and_cache_system(void) 16void __cpuinit cpu_probe(void)
17{ 17{
18#if defined(CONFIG_CPU_SUBTYPE_SH7619) 18#if defined(CONFIG_CPU_SUBTYPE_SH7619)
19 boot_cpu_data.type = CPU_SH7619; 19 boot_cpu_data.type = CPU_SH7619;
@@ -29,7 +29,5 @@ int __init detect_cpu_and_cache_system(void)
29 */ 29 */
30 boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; 30 boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
31 boot_cpu_data.icache = boot_cpu_data.dcache; 31 boot_cpu_data.icache = boot_cpu_data.dcache;
32 32 boot_cpu_data.family = CPU_FAMILY_SH2;
33 return 0;
34} 33}
35
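
cpu_probe() replaces detect_cpu_and_cache_system(): it returns void and is expected to fill in the subtype, cache geometry and, now, the family on boot_cpu_data. A stand-in model of that contract; the struct, enums and cache numbers below are illustrative, not the kernel's:

#include <assert.h>
#include <stdio.h>

enum cpu_type   { CPU_SH_NONE, CPU_SH7619 };
enum cpu_family { CPU_FAMILY_UNKNOWN, CPU_FAMILY_SH2 };

struct cpuinfo {
	enum cpu_type type;
	enum cpu_family family;
	unsigned int dcache_ways, dcache_linesz;	/* illustrative */
};

static struct cpuinfo boot_cpu_data;

/* The probe fills in boot_cpu_data and returns nothing. */
static void cpu_probe(void)
{
	boot_cpu_data.type = CPU_SH7619;
	boot_cpu_data.dcache_ways = 4;
	boot_cpu_data.dcache_linesz = 16;
	boot_cpu_data.family = CPU_FAMILY_SH2;
}

int main(void)
{
	cpu_probe();
	assert(boot_cpu_data.type != CPU_SH_NONE);
	assert(boot_cpu_data.family == CPU_FAMILY_SH2);
	printf("probed type %d, family %d\n",
	       boot_cpu_data.type, boot_cpu_data.family);
	return 0;
}
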
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 86acede777b9..0f8befccf9fa 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -59,38 +59,54 @@ static struct intc_prio_reg prio_registers[] __initdata = {
59static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, 59static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
60 NULL, prio_registers, NULL); 60 NULL, prio_registers, NULL);
61 61
62static struct plat_sci_port sci_platform_data[] = { 62static struct plat_sci_port scif0_platform_data = {
63 { 63 .mapbase = 0xf8400000,
64 .mapbase = 0xf8400000, 64 .flags = UPF_BOOT_AUTOCONF,
65 .flags = UPF_BOOT_AUTOCONF, 65 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
66 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 66 .scbrr_algo_id = SCBRR_ALGO_2,
67 .scbrr_algo_id = SCBRR_ALGO_2, 67 .type = PORT_SCIF,
68 .type = PORT_SCIF, 68 .irqs = { 88, 88, 88, 88 },
69 .irqs = { 88, 88, 88, 88 }, 69};
70 }, { 70
71 .mapbase = 0xf8410000, 71static struct platform_device scif0_device = {
72 .flags = UPF_BOOT_AUTOCONF, 72 .name = "sh-sci",
73 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 73 .id = 0,
74 .scbrr_algo_id = SCBRR_ALGO_2, 74 .dev = {
75 .type = PORT_SCIF, 75 .platform_data = &scif0_platform_data,
76 .irqs = { 92, 92, 92, 92 }, 76 },
77 }, { 77};
78 .mapbase = 0xf8420000, 78
79 .flags = UPF_BOOT_AUTOCONF, 79static struct plat_sci_port scif1_platform_data = {
80 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 80 .mapbase = 0xf8410000,
81 .scbrr_algo_id = SCBRR_ALGO_2, 81 .flags = UPF_BOOT_AUTOCONF,
82 .type = PORT_SCIF, 82 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
83 .irqs = { 96, 96, 96, 96 }, 83 .scbrr_algo_id = SCBRR_ALGO_2,
84 }, { 84 .type = PORT_SCIF,
85 .flags = 0, 85 .irqs = { 92, 92, 92, 92 },
86 } 86};
87}; 87
88 88static struct platform_device scif1_device = {
89static struct platform_device sci_device = { 89 .name = "sh-sci",
90 .id = 1,
91 .dev = {
92 .platform_data = &scif1_platform_data,
93 },
94};
95
96static struct plat_sci_port scif2_platform_data = {
97 .mapbase = 0xf8420000,
98 .flags = UPF_BOOT_AUTOCONF,
99 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
100 .scbrr_algo_id = SCBRR_ALGO_2,
101 .type = PORT_SCIF,
102 .irqs = { 96, 96, 96, 96 },
103};
104
105static struct platform_device scif2_device = {
90 .name = "sh-sci", 106 .name = "sh-sci",
91 .id = -1, 107 .id = 2,
92 .dev = { 108 .dev = {
93 .platform_data = sci_platform_data, 109 .platform_data = &scif2_platform_data,
94 }, 110 },
95}; 111};
96 112
@@ -118,17 +134,14 @@ static struct platform_device eth_device = {
118}; 134};
119 135
120static struct sh_timer_config cmt0_platform_data = { 136static struct sh_timer_config cmt0_platform_data = {
121 .name = "CMT0",
122 .channel_offset = 0x02, 137 .channel_offset = 0x02,
123 .timer_bit = 0, 138 .timer_bit = 0,
124 .clk = "peripheral_clk",
125 .clockevent_rating = 125, 139 .clockevent_rating = 125,
126 .clocksource_rating = 0, /* disabled due to code generation issues */ 140 .clocksource_rating = 0, /* disabled due to code generation issues */
127}; 141};
128 142
129static struct resource cmt0_resources[] = { 143static struct resource cmt0_resources[] = {
130 [0] = { 144 [0] = {
131 .name = "CMT0",
132 .start = 0xf84a0072, 145 .start = 0xf84a0072,
133 .end = 0xf84a0077, 146 .end = 0xf84a0077,
134 .flags = IORESOURCE_MEM, 147 .flags = IORESOURCE_MEM,
@@ -150,17 +163,14 @@ static struct platform_device cmt0_device = {
150}; 163};
151 164
152static struct sh_timer_config cmt1_platform_data = { 165static struct sh_timer_config cmt1_platform_data = {
153 .name = "CMT1",
154 .channel_offset = 0x08, 166 .channel_offset = 0x08,
155 .timer_bit = 1, 167 .timer_bit = 1,
156 .clk = "peripheral_clk",
157 .clockevent_rating = 125, 168 .clockevent_rating = 125,
158 .clocksource_rating = 0, /* disabled due to code generation issues */ 169 .clocksource_rating = 0, /* disabled due to code generation issues */
159}; 170};
160 171
161static struct resource cmt1_resources[] = { 172static struct resource cmt1_resources[] = {
162 [0] = { 173 [0] = {
163 .name = "CMT1",
164 .start = 0xf84a0078, 174 .start = 0xf84a0078,
165 .end = 0xf84a007d, 175 .end = 0xf84a007d,
166 .flags = IORESOURCE_MEM, 176 .flags = IORESOURCE_MEM,
@@ -182,7 +192,9 @@ static struct platform_device cmt1_device = {
182}; 192};
183 193
184static struct platform_device *sh7619_devices[] __initdata = { 194static struct platform_device *sh7619_devices[] __initdata = {
185 &sci_device, 195 &scif0_device,
196 &scif1_device,
197 &scif2_device,
186 &eth_device, 198 &eth_device,
187 &cmt0_device, 199 &cmt0_device,
188 &cmt1_device, 200 &cmt1_device,
@@ -193,7 +205,7 @@ static int __init sh7619_devices_setup(void)
193 return platform_add_devices(sh7619_devices, 205 return platform_add_devices(sh7619_devices,
194 ARRAY_SIZE(sh7619_devices)); 206 ARRAY_SIZE(sh7619_devices));
195} 207}
196__initcall(sh7619_devices_setup); 208arch_initcall(sh7619_devices_setup);
197 209
198void __init plat_irq_setup(void) 210void __init plat_irq_setup(void)
199{ 211{
@@ -201,6 +213,9 @@ void __init plat_irq_setup(void)
201} 213}
202 214
203static struct platform_device *sh7619_early_devices[] __initdata = { 215static struct platform_device *sh7619_early_devices[] __initdata = {
216 &scif0_device,
217 &scif1_device,
218 &scif2_device,
204 &cmt0_device, 219 &cmt0_device,
205 &cmt1_device, 220 &cmt1_device,
206}; 221};
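
The sentinel-terminated sci_platform_data[] array registered as a single device with id -1 becomes one plat_sci_port plus one platform_device per port, with ids 0 to 2, so each SCIF is bound and addressed individually. A stand-alone model of that shape, using local stand-in types rather than the platform-device API; the mapbase and IRQ numbers are the ones from the hunk above:

#include <stdio.h>

/* Local stand-ins for plat_sci_port / platform_device. */
struct port_cfg { unsigned long mapbase; unsigned int irq; };
struct device   { const char *name; int id; const struct port_cfg *cfg; };

static const struct port_cfg scif0_cfg = { 0xf8400000, 88 };
static const struct port_cfg scif1_cfg = { 0xf8410000, 92 };
static const struct port_cfg scif2_cfg = { 0xf8420000, 96 };

/* One device per port, each with its own id, instead of one device
 * carrying a sentinel-terminated array of ports. */
static const struct device devices[] = {
	{ "sh-sci", 0, &scif0_cfg },
	{ "sh-sci", 1, &scif1_cfg },
	{ "sh-sci", 2, &scif2_cfg },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		printf("register %s.%d  mapbase=0x%08lx irq=%u\n",
		       devices[i].name, devices[i].id,
		       devices[i].cfg->mapbase, devices[i].cfg->irq);
	return 0;
}
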
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 7814c76159a7..1174e2d96c03 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -22,19 +22,12 @@ static const int pll1rate[]={1,2,3,4,6,8};
22static const int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 0) 25static unsigned int pll2_mult;
26#define PLL2 (4)
27#elif (CONFIG_SH_CLK_MD == 2)
28#define PLL2 (2)
29#elif (CONFIG_SH_CLK_MD == 3)
30#define PLL2 (1)
31#else
32#error "Illegal Clock Mode!"
33#endif
34 26
35static void master_clk_init(struct clk *clk) 27static void master_clk_init(struct clk *clk)
36{ 28{
37 return 10000000 * PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; 29 clk->rate = 10000000 * pll2_mult *
30 pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
38} 31}
39 32
40static struct clk_ops sh7201_master_clk_ops = { 33static struct clk_ops sh7201_master_clk_ops = {
@@ -43,7 +36,7 @@ static struct clk_ops sh7201_master_clk_ops = {
43 36
44static unsigned long module_clk_recalc(struct clk *clk) 37static unsigned long module_clk_recalc(struct clk *clk)
45{ 38{
46 int idx = (ctrl_inw(FREQCR) & 0x0007); 39 int idx = (__raw_readw(FREQCR) & 0x0007);
47 return clk->parent->rate / pfc_divisors[idx]; 40 return clk->parent->rate / pfc_divisors[idx];
48} 41}
49 42
@@ -53,7 +46,7 @@ static struct clk_ops sh7201_module_clk_ops = {
53 46
54static unsigned long bus_clk_recalc(struct clk *clk) 47static unsigned long bus_clk_recalc(struct clk *clk)
55{ 48{
56 int idx = (ctrl_inw(FREQCR) & 0x0007); 49 int idx = (__raw_readw(FREQCR) & 0x0007);
57 return clk->parent->rate / pfc_divisors[idx]; 50 return clk->parent->rate / pfc_divisors[idx];
58} 51}
59 52
@@ -63,7 +56,7 @@ static struct clk_ops sh7201_bus_clk_ops = {
63 56
64static unsigned long cpu_clk_recalc(struct clk *clk) 57static unsigned long cpu_clk_recalc(struct clk *clk)
65{ 58{
66 int idx = ((ctrl_inw(FREQCR) >> 4) & 0x0007); 59 int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
67 return clk->parent->rate / ifc_divisors[idx]; 60 return clk->parent->rate / ifc_divisors[idx];
68} 61}
69 62
@@ -80,6 +73,13 @@ static struct clk_ops *sh7201_clk_ops[] = {
80 73
81void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 74void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
82{ 75{
76 if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
77 pll2_mult = 1;
78 else if (test_mode_pin(MODE_PIN1))
79 pll2_mult = 2;
80 else
81 pll2_mult = 4;
82
83 if (idx < ARRAY_SIZE(sh7201_clk_ops)) 83 if (idx < ARRAY_SIZE(sh7201_clk_ops))
84 *ops = sh7201_clk_ops[idx]; 84 *ops = sh7201_clk_ops[idx];
85} 85}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index 940986965102..95a008e8b735 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -25,21 +25,11 @@ static const int pll1rate[]={8,12,16,0};
25static const int pfc_divisors[]={1,2,3,4,6,8,12}; 25static const int pfc_divisors[]={1,2,3,4,6,8,12};
26#define ifc_divisors pfc_divisors 26#define ifc_divisors pfc_divisors
27 27
28#if (CONFIG_SH_CLK_MD == 0) 28static unsigned int pll2_mult;
29#define PLL2 (1)
30#elif (CONFIG_SH_CLK_MD == 1)
31#define PLL2 (2)
32#elif (CONFIG_SH_CLK_MD == 2)
33#define PLL2 (4)
34#elif (CONFIG_SH_CLK_MD == 3)
35#define PLL2 (4)
36#else
37#error "Illegal Clock Mode!"
38#endif
39 29
40static void master_clk_init(struct clk *clk) 30static void master_clk_init(struct clk *clk)
41{ 31{
42 clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ; 32 clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult;
43} 33}
44 34
45static struct clk_ops sh7203_master_clk_ops = { 35static struct clk_ops sh7203_master_clk_ops = {
@@ -48,7 +38,7 @@ static struct clk_ops sh7203_master_clk_ops = {
48 38
49static unsigned long module_clk_recalc(struct clk *clk) 39static unsigned long module_clk_recalc(struct clk *clk)
50{ 40{
51 int idx = (ctrl_inw(FREQCR) & 0x0007); 41 int idx = (__raw_readw(FREQCR) & 0x0007);
52 return clk->parent->rate / pfc_divisors[idx]; 42 return clk->parent->rate / pfc_divisors[idx];
53} 43}
54 44
@@ -58,7 +48,7 @@ static struct clk_ops sh7203_module_clk_ops = {
58 48
59static unsigned long bus_clk_recalc(struct clk *clk) 49static unsigned long bus_clk_recalc(struct clk *clk)
60{ 50{
61 int idx = (ctrl_inw(FREQCR) & 0x0007); 51 int idx = (__raw_readw(FREQCR) & 0x0007);
62 return clk->parent->rate / pfc_divisors[idx-2]; 52 return clk->parent->rate / pfc_divisors[idx-2];
63} 53}
64 54
@@ -79,6 +69,13 @@ static struct clk_ops *sh7203_clk_ops[] = {
79 69
80void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 70void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
81{ 71{
72 if (test_mode_pin(MODE_PIN1))
73 pll2_mult = 4;
74 else if (test_mode_pin(MODE_PIN0))
75 pll2_mult = 2;
76 else
77 pll2_mult = 1;
78
82 if (idx < ARRAY_SIZE(sh7203_clk_ops)) 79 if (idx < ARRAY_SIZE(sh7203_clk_ops))
83 *ops = sh7203_clk_ops[idx]; 80 *ops = sh7203_clk_ops[idx];
84} 81}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index c2268bdeceeb..3c314d7cd6e6 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -22,19 +22,11 @@ static const int pll1rate[]={1,2,3,4,6,8};
22static const int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 2) 25static unsigned int pll2_mult;
26#define PLL2 (4)
27#elif (CONFIG_SH_CLK_MD == 6)
28#define PLL2 (2)
29#elif (CONFIG_SH_CLK_MD == 7)
30#define PLL2 (1)
31#else
32#error "Illigal Clock Mode!"
33#endif
34 26
35static void master_clk_init(struct clk *clk) 27static void master_clk_init(struct clk *clk)
36{ 28{
37 clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; 29 clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
38} 30}
39 31
40static struct clk_ops sh7206_master_clk_ops = { 32static struct clk_ops sh7206_master_clk_ops = {
@@ -43,7 +35,7 @@ static struct clk_ops sh7206_master_clk_ops = {
43 35
44static unsigned long module_clk_recalc(struct clk *clk) 36static unsigned long module_clk_recalc(struct clk *clk)
45{ 37{
46 int idx = (ctrl_inw(FREQCR) & 0x0007); 38 int idx = (__raw_readw(FREQCR) & 0x0007);
47 return clk->parent->rate / pfc_divisors[idx]; 39 return clk->parent->rate / pfc_divisors[idx];
48} 40}
49 41
@@ -53,7 +45,7 @@ static struct clk_ops sh7206_module_clk_ops = {
53 45
54static unsigned long bus_clk_recalc(struct clk *clk) 46static unsigned long bus_clk_recalc(struct clk *clk)
55{ 47{
56 return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; 48 return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
57} 49}
58 50
59static struct clk_ops sh7206_bus_clk_ops = { 51static struct clk_ops sh7206_bus_clk_ops = {
@@ -62,7 +54,7 @@ static struct clk_ops sh7206_bus_clk_ops = {
62 54
63static unsigned long cpu_clk_recalc(struct clk *clk) 55static unsigned long cpu_clk_recalc(struct clk *clk)
64{ 56{
65 int idx = (ctrl_inw(FREQCR) & 0x0007); 57 int idx = (__raw_readw(FREQCR) & 0x0007);
66 return clk->parent->rate / ifc_divisors[idx]; 58 return clk->parent->rate / ifc_divisors[idx];
67} 59}
68 60
@@ -79,7 +71,13 @@ static struct clk_ops *sh7206_clk_ops[] = {
79 71
80void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 72void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
81{ 73{
74 if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0))
75 pll2_mult = 1;
76 else if (test_mode_pin(MODE_PIN2 | MODE_PIN1))
77 pll2_mult = 2;
78 else if (test_mode_pin(MODE_PIN1))
79 pll2_mult = 4;
80
82 if (idx < ARRAY_SIZE(sh7206_clk_ops)) 81 if (idx < ARRAY_SIZE(sh7206_clk_ops))
83 *ops = sh7206_clk_ops[idx]; 82 *ops = sh7206_clk_ops[idx];
84} 83}
85
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
index ab3903eeda5c..222742ddc0d6 100644
--- a/arch/sh/kernel/cpu/sh2a/entry.S
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -176,8 +176,9 @@ ENTRY(sh_bios_handler)
176 movml.l @r15+,r14 176 movml.l @r15+,r14
177 add #8,r15 177 add #8,r15
178 lds.l @r15+, pr 178 lds.l @r15+, pr
179 mov.l @r15+,r15
179 rte 180 rte
180 mov.l @r15+,r15 181 nop
181 .align 2 182 .align 2
1821: .long gdb_vbr_vector 1831: .long gdb_vbr_vector
183#endif /* CONFIG_SH_STANDARD_BIOS */ 184#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 6df2fb98eb30..488d24e0cdf0 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -25,14 +25,11 @@
25 25
26/* 26/*
27 * Save FPU registers onto task structure. 27 * Save FPU registers onto task structure.
28 * Assume called with FPU enabled (SR.FD=0).
29 */ 28 */
30void 29void save_fpu(struct task_struct *tsk)
31save_fpu(struct task_struct *tsk, struct pt_regs *regs)
32{ 30{
33 unsigned long dummy; 31 unsigned long dummy;
34 32
35 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
36 enable_fpu(); 33 enable_fpu();
37 asm volatile("sts.l fpul, @-%0\n\t" 34 asm volatile("sts.l fpul, @-%0\n\t"
38 "sts.l fpscr, @-%0\n\t" 35 "sts.l fpscr, @-%0\n\t"
@@ -54,17 +51,15 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
54 "fmov.s fr0, @-%0\n\t" 51 "fmov.s fr0, @-%0\n\t"
55 "lds %3, fpscr\n\t" 52 "lds %3, fpscr\n\t"
56 : "=r" (dummy) 53 : "=r" (dummy)
57 : "0" ((char *)(&tsk->thread.fpu.hard.status)), 54 : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
58 "r" (FPSCR_RCHG), 55 "r" (FPSCR_RCHG),
59 "r" (FPSCR_INIT) 56 "r" (FPSCR_INIT)
60 : "memory"); 57 : "memory");
61 58
62 disable_fpu(); 59 disable_fpu();
63 release_fpu(regs);
64} 60}
65 61
66static void 62void restore_fpu(struct task_struct *tsk)
67restore_fpu(struct task_struct *tsk)
68{ 63{
69 unsigned long dummy; 64 unsigned long dummy;
70 65
@@ -88,45 +83,12 @@ restore_fpu(struct task_struct *tsk)
88 "lds.l @%0+, fpscr\n\t" 83 "lds.l @%0+, fpscr\n\t"
89 "lds.l @%0+, fpul\n\t" 84 "lds.l @%0+, fpul\n\t"
90 : "=r" (dummy) 85 : "=r" (dummy)
91 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG) 86 : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
92 : "memory"); 87 : "memory");
93 disable_fpu(); 88 disable_fpu();
94} 89}
95 90
96/* 91/*
97 * Load the FPU with signaling NaNs. This bit pattern we're using
98 * has the property that, no matter whether it is considered as single
99 * or as double precision, it represents signaling NaNs.
100 */
101
102static void
103fpu_init(void)
104{
105 enable_fpu();
106 asm volatile("lds %0, fpul\n\t"
107 "fsts fpul, fr0\n\t"
108 "fsts fpul, fr1\n\t"
109 "fsts fpul, fr2\n\t"
110 "fsts fpul, fr3\n\t"
111 "fsts fpul, fr4\n\t"
112 "fsts fpul, fr5\n\t"
113 "fsts fpul, fr6\n\t"
114 "fsts fpul, fr7\n\t"
115 "fsts fpul, fr8\n\t"
116 "fsts fpul, fr9\n\t"
117 "fsts fpul, fr10\n\t"
118 "fsts fpul, fr11\n\t"
119 "fsts fpul, fr12\n\t"
120 "fsts fpul, fr13\n\t"
121 "fsts fpul, fr14\n\t"
122 "fsts fpul, fr15\n\t"
123 "lds %2, fpscr\n\t"
124 : /* no output */
125 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
126 disable_fpu();
127}
128
129/*
130 * Emulate arithmetic ops on denormalized number for some FPU insns. 92 * Emulate arithmetic ops on denormalized number for some FPU insns.
131 */ 93 */
132 94
@@ -493,9 +455,9 @@ ieee_fpe_handler (struct pt_regs *regs)
493 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ 455 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
494 struct task_struct *tsk = current; 456 struct task_struct *tsk = current;
495 457
496 if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) { 458 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
497 /* FPU error */ 459 /* FPU error */
498 denormal_to_double (&tsk->thread.fpu.hard, 460 denormal_to_double (&tsk->thread.xstate->hardfpu,
499 (finsn >> 8) & 0xf); 461 (finsn >> 8) & 0xf);
500 } else 462 } else
501 return 0; 463 return 0;
@@ -510,9 +472,9 @@ ieee_fpe_handler (struct pt_regs *regs)
510 472
511 n = (finsn >> 8) & 0xf; 473 n = (finsn >> 8) & 0xf;
512 m = (finsn >> 4) & 0xf; 474 m = (finsn >> 4) & 0xf;
513 hx = tsk->thread.fpu.hard.fp_regs[n]; 475 hx = tsk->thread.xstate->hardfpu.fp_regs[n];
514 hy = tsk->thread.fpu.hard.fp_regs[m]; 476 hy = tsk->thread.xstate->hardfpu.fp_regs[m];
515 fpscr = tsk->thread.fpu.hard.fpscr; 477 fpscr = tsk->thread.xstate->hardfpu.fpscr;
516 prec = fpscr & (1 << 19); 478 prec = fpscr & (1 << 19);
517 479
518 if ((fpscr & FPSCR_FPU_ERROR) 480 if ((fpscr & FPSCR_FPU_ERROR)
@@ -522,15 +484,15 @@ ieee_fpe_handler (struct pt_regs *regs)
522 484
523 /* FPU error because of denormal */ 485 /* FPU error because of denormal */
524 llx = ((long long) hx << 32) 486 llx = ((long long) hx << 32)
525 | tsk->thread.fpu.hard.fp_regs[n+1]; 487 | tsk->thread.xstate->hardfpu.fp_regs[n+1];
526 lly = ((long long) hy << 32) 488 lly = ((long long) hy << 32)
527 | tsk->thread.fpu.hard.fp_regs[m+1]; 489 | tsk->thread.xstate->hardfpu.fp_regs[m+1];
528 if ((hx & 0x7fffffff) >= 0x00100000) 490 if ((hx & 0x7fffffff) >= 0x00100000)
529 llx = denormal_muld(lly, llx); 491 llx = denormal_muld(lly, llx);
530 else 492 else
531 llx = denormal_muld(llx, lly); 493 llx = denormal_muld(llx, lly);
532 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; 494 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
533 tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff; 495 tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
534 } else if ((fpscr & FPSCR_FPU_ERROR) 496 } else if ((fpscr & FPSCR_FPU_ERROR)
535 && (!prec && ((hx & 0x7fffffff) < 0x00800000 497 && (!prec && ((hx & 0x7fffffff) < 0x00800000
536 || (hy & 0x7fffffff) < 0x00800000))) { 498 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -539,7 +501,7 @@ ieee_fpe_handler (struct pt_regs *regs)
539 hx = denormal_mulf(hy, hx); 501 hx = denormal_mulf(hy, hx);
540 else 502 else
541 hx = denormal_mulf(hx, hy); 503 hx = denormal_mulf(hx, hy);
542 tsk->thread.fpu.hard.fp_regs[n] = hx; 504 tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
543 } else 505 } else
544 return 0; 506 return 0;
545 507
@@ -553,9 +515,9 @@ ieee_fpe_handler (struct pt_regs *regs)
553 515
554 n = (finsn >> 8) & 0xf; 516 n = (finsn >> 8) & 0xf;
555 m = (finsn >> 4) & 0xf; 517 m = (finsn >> 4) & 0xf;
556 hx = tsk->thread.fpu.hard.fp_regs[n]; 518 hx = tsk->thread.xstate->hardfpu.fp_regs[n];
557 hy = tsk->thread.fpu.hard.fp_regs[m]; 519 hy = tsk->thread.xstate->hardfpu.fp_regs[m];
558 fpscr = tsk->thread.fpu.hard.fpscr; 520 fpscr = tsk->thread.xstate->hardfpu.fpscr;
559 prec = fpscr & (1 << 19); 521 prec = fpscr & (1 << 19);
560 522
561 if ((fpscr & FPSCR_FPU_ERROR) 523 if ((fpscr & FPSCR_FPU_ERROR)
@@ -565,15 +527,15 @@ ieee_fpe_handler (struct pt_regs *regs)
565 527
566 /* FPU error because of denormal */ 528 /* FPU error because of denormal */
567 llx = ((long long) hx << 32) 529 llx = ((long long) hx << 32)
568 | tsk->thread.fpu.hard.fp_regs[n+1]; 530 | tsk->thread.xstate->hardfpu.fp_regs[n+1];
569 lly = ((long long) hy << 32) 531 lly = ((long long) hy << 32)
570 | tsk->thread.fpu.hard.fp_regs[m+1]; 532 | tsk->thread.xstate->hardfpu.fp_regs[m+1];
571 if ((finsn & 0xf00f) == 0xf000) 533 if ((finsn & 0xf00f) == 0xf000)
572 llx = denormal_addd(llx, lly); 534 llx = denormal_addd(llx, lly);
573 else 535 else
574 llx = denormal_addd(llx, lly ^ (1LL << 63)); 536 llx = denormal_addd(llx, lly ^ (1LL << 63));
575 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; 537 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
576 tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff; 538 tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
577 } else if ((fpscr & FPSCR_FPU_ERROR) 539 } else if ((fpscr & FPSCR_FPU_ERROR)
578 && (!prec && ((hx & 0x7fffffff) < 0x00800000 540 && (!prec && ((hx & 0x7fffffff) < 0x00800000
579 || (hy & 0x7fffffff) < 0x00800000))) { 541 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -582,7 +544,7 @@ ieee_fpe_handler (struct pt_regs *regs)
582 hx = denormal_addf(hx, hy); 544 hx = denormal_addf(hx, hy);
583 else 545 else
584 hx = denormal_addf(hx, hy ^ 0x80000000); 546 hx = denormal_addf(hx, hy ^ 0x80000000);
585 tsk->thread.fpu.hard.fp_regs[n] = hx; 547 tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
586 } else 548 } else
587 return 0; 549 return 0;
588 550
@@ -598,37 +560,15 @@ BUILD_TRAP_HANDLER(fpu_error)
598 struct task_struct *tsk = current; 560 struct task_struct *tsk = current;
599 TRAP_HANDLER_DECL; 561 TRAP_HANDLER_DECL;
600 562
601 save_fpu(tsk, regs); 563 __unlazy_fpu(tsk, regs);
602 if (ieee_fpe_handler(regs)) { 564 if (ieee_fpe_handler(regs)) {
603 tsk->thread.fpu.hard.fpscr &= 565 tsk->thread.xstate->hardfpu.fpscr &=
604 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); 566 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
605 grab_fpu(regs); 567 grab_fpu(regs);
606 restore_fpu(tsk); 568 restore_fpu(tsk);
607 set_tsk_thread_flag(tsk, TIF_USEDFPU); 569 task_thread_info(tsk)->status |= TS_USEDFPU;
608 return; 570 return;
609 } 571 }
610 572
611 force_sig(SIGFPE, tsk); 573 force_sig(SIGFPE, tsk);
612} 574}
613
614BUILD_TRAP_HANDLER(fpu_state_restore)
615{
616 struct task_struct *tsk = current;
617 TRAP_HANDLER_DECL;
618
619 grab_fpu(regs);
620 if (!user_mode(regs)) {
621 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
622 return;
623 }
624
625 if (used_math()) {
626 /* Using the FPU again. */
627 restore_fpu(tsk);
628 } else {
629 /* First time FPU user. */
630 fpu_init();
631 set_used_math();
632 }
633 set_tsk_thread_flag(tsk, TIF_USEDFPU);
634}
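
The FPU paths now reach the hardware FP context through tsk->thread.xstate->hardfpu instead of the embedded tsk->thread.fpu.hard union, so the state sits behind a pointer that init_thread_xstate() sizes at boot. A user-space sketch of that indirection; the types, the lazy allocation policy and the register layout are stand-ins for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];
	unsigned long fpscr;
};

/* Extended state behind a pointer instead of embedded in the thread. */
union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
};

struct thread_struct {
	union thread_xstate *xstate;
};

/* Allocate the context lazily, the first time the FPU is used. */
static int xstate_alloc(struct thread_struct *t)
{
	if (t->xstate)
		return 0;
	t->xstate = calloc(1, sizeof(*t->xstate));
	return t->xstate ? 0 : -1;
}

static void save_fpu(struct thread_struct *t, const unsigned long *regs,
		     unsigned long fpscr)
{
	memcpy(t->xstate->hardfpu.fp_regs, regs,
	       sizeof(t->xstate->hardfpu.fp_regs));
	t->xstate->hardfpu.fpscr = fpscr;
}

int main(void)
{
	struct thread_struct t = { 0 };
	unsigned long regs[16] = { 1, 2, 3 };

	if (xstate_alloc(&t))
		return 1;
	save_fpu(&t, regs, 0x00040001);
	printf("fpscr=0x%08lx fr0=%lu\n",
	       t.xstate->hardfpu.fpscr, t.xstate->hardfpu.fp_regs[0]);
	free(t.xstate);
	return 0;
}
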
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index e098e2f6aa08..48e97a2a0c8d 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,8 +13,10 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16int __init detect_cpu_and_cache_system(void) 16void __cpuinit cpu_probe(void)
17{ 17{
18 boot_cpu_data.family = CPU_FAMILY_SH2A;
19
18 /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */ 20 /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
19 boot_cpu_data.flags |= CPU_HAS_OP32; 21 boot_cpu_data.flags |= CPU_HAS_OP32;
20 22
@@ -49,6 +51,4 @@ int __init detect_cpu_and_cache_system(void)
49 * on the cache info. 51 * on the cache info.
50 */ 52 */
51 boot_cpu_data.icache = boot_cpu_data.dcache; 53 boot_cpu_data.icache = boot_cpu_data.dcache;
52
53 return 0;
54} 54}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index b2c3bcc01190..949bf2bac28c 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -115,16 +115,13 @@ static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
115 mask_registers, prio_registers, NULL); 115 mask_registers, prio_registers, NULL);
116 116
117static struct sh_timer_config mtu2_0_platform_data = { 117static struct sh_timer_config mtu2_0_platform_data = {
118 .name = "MTU2_0",
119 .channel_offset = -0x80, 118 .channel_offset = -0x80,
120 .timer_bit = 0, 119 .timer_bit = 0,
121 .clk = "peripheral_clk",
122 .clockevent_rating = 200, 120 .clockevent_rating = 200,
123}; 121};
124 122
125static struct resource mtu2_0_resources[] = { 123static struct resource mtu2_0_resources[] = {
126 [0] = { 124 [0] = {
127 .name = "MTU2_0",
128 .start = 0xff801300, 125 .start = 0xff801300,
129 .end = 0xff801326, 126 .end = 0xff801326,
130 .flags = IORESOURCE_MEM, 127 .flags = IORESOURCE_MEM,
@@ -146,16 +143,13 @@ static struct platform_device mtu2_0_device = {
146}; 143};
147 144
148static struct sh_timer_config mtu2_1_platform_data = { 145static struct sh_timer_config mtu2_1_platform_data = {
149 .name = "MTU2_1",
150 .channel_offset = -0x100, 146 .channel_offset = -0x100,
151 .timer_bit = 1, 147 .timer_bit = 1,
152 .clk = "peripheral_clk",
153 .clockevent_rating = 200, 148 .clockevent_rating = 200,
154}; 149};
155 150
156static struct resource mtu2_1_resources[] = { 151static struct resource mtu2_1_resources[] = {
157 [0] = { 152 [0] = {
158 .name = "MTU2_1",
159 .start = 0xff801380, 153 .start = 0xff801380,
160 .end = 0xff801390, 154 .end = 0xff801390,
161 .flags = IORESOURCE_MEM, 155 .flags = IORESOURCE_MEM,
@@ -177,16 +171,13 @@ static struct platform_device mtu2_1_device = {
177}; 171};
178 172
179static struct sh_timer_config mtu2_2_platform_data = { 173static struct sh_timer_config mtu2_2_platform_data = {
180 .name = "MTU2_2",
181 .channel_offset = 0x80, 174 .channel_offset = 0x80,
182 .timer_bit = 2, 175 .timer_bit = 2,
183 .clk = "peripheral_clk",
184 .clockevent_rating = 200, 176 .clockevent_rating = 200,
185}; 177};
186 178
187static struct resource mtu2_2_resources[] = { 179static struct resource mtu2_2_resources[] = {
188 [0] = { 180 [0] = {
189 .name = "MTU2_2",
190 .start = 0xff801000, 181 .start = 0xff801000,
191 .end = 0xff80100a, 182 .end = 0xff80100a,
192 .flags = IORESOURCE_MEM, 183 .flags = IORESOURCE_MEM,
@@ -207,29 +198,25 @@ static struct platform_device mtu2_2_device = {
207 .num_resources = ARRAY_SIZE(mtu2_2_resources), 198 .num_resources = ARRAY_SIZE(mtu2_2_resources),
208}; 199};
209 200
210static struct plat_sci_port sci_platform_data[] = { 201static struct plat_sci_port scif0_platform_data = {
211 { 202 .mapbase = 0xff804000,
212 .mapbase = 0xff804000, 203 .flags = UPF_BOOT_AUTOCONF,
213 .flags = UPF_BOOT_AUTOCONF, 204 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
214 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 205 .scbrr_algo_id = SCBRR_ALGO_2,
215 .scbrr_algo_id = SCBRR_ALGO_2, 206 .type = PORT_SCIF,
216 .type = PORT_SCIF, 207 .irqs = { 220, 220, 220, 220 },
217 .irqs = { 220, 220, 220, 220 },
218 }, {
219 .flags = 0,
220 }
221}; 208};
222 209
223static struct platform_device sci_device = { 210static struct platform_device scif0_device = {
224 .name = "sh-sci", 211 .name = "sh-sci",
225 .id = -1, 212 .id = 0,
226 .dev = { 213 .dev = {
227 .platform_data = sci_platform_data, 214 .platform_data = &scif0_platform_data,
228 }, 215 },
229}; 216};
230 217
231static struct platform_device *mxg_devices[] __initdata = { 218static struct platform_device *mxg_devices[] __initdata = {
232 &sci_device, 219 &scif0_device,
233 &mtu2_0_device, 220 &mtu2_0_device,
234 &mtu2_1_device, 221 &mtu2_1_device,
235 &mtu2_2_device, 222 &mtu2_2_device,
@@ -240,7 +227,7 @@ static int __init mxg_devices_setup(void)
240 return platform_add_devices(mxg_devices, 227 return platform_add_devices(mxg_devices,
241 ARRAY_SIZE(mxg_devices)); 228 ARRAY_SIZE(mxg_devices));
242} 229}
243__initcall(mxg_devices_setup); 230arch_initcall(mxg_devices_setup);
244 231
245void __init plat_irq_setup(void) 232void __init plat_irq_setup(void)
246{ 233{
@@ -248,6 +235,7 @@ void __init plat_irq_setup(void)
248} 235}
249 236
250static struct platform_device *mxg_early_devices[] __initdata = { 237static struct platform_device *mxg_early_devices[] __initdata = {
238 &scif0_device,
251 &mtu2_0_device, 239 &mtu2_0_device,
252 &mtu2_1_device, 240 &mtu2_1_device,
253 &mtu2_2_device, 241 &mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index 8d44917ce50b..9df558dcdb86 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -177,73 +177,139 @@ static struct intc_mask_reg mask_registers[] __initdata = {
177static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, 177static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
178 mask_registers, prio_registers, NULL); 178 mask_registers, prio_registers, NULL);
179 179
180static struct plat_sci_port sci_platform_data[] = { 180static struct plat_sci_port scif0_platform_data = {
181 { 181 .mapbase = 0xfffe8000,
182 .mapbase = 0xfffe8000, 182 .flags = UPF_BOOT_AUTOCONF,
183 .flags = UPF_BOOT_AUTOCONF, 183 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
184 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 184 .scbrr_algo_id = SCBRR_ALGO_2,
185 .scbrr_algo_id = SCBRR_ALGO_2, 185 .type = PORT_SCIF,
186 .type = PORT_SCIF, 186 .irqs = { 180, 180, 180, 180 }
187 .irqs = { 180, 180, 180, 180 } 187};
188 }, { 188
189 .mapbase = 0xfffe8800, 189static struct platform_device scif0_device = {
190 .flags = UPF_BOOT_AUTOCONF,
191 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
192 .scbrr_algo_id = SCBRR_ALGO_2,
193 .type = PORT_SCIF,
194 .irqs = { 184, 184, 184, 184 }
195 }, {
196 .mapbase = 0xfffe9000,
197 .flags = UPF_BOOT_AUTOCONF,
198 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
199 .scbrr_algo_id = SCBRR_ALGO_2,
200 .type = PORT_SCIF,
201 .irqs = { 188, 188, 188, 188 }
202 }, {
203 .mapbase = 0xfffe9800,
204 .flags = UPF_BOOT_AUTOCONF,
205 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
206 .scbrr_algo_id = SCBRR_ALGO_2,
207 .type = PORT_SCIF,
208 .irqs = { 192, 192, 192, 192 }
209 }, {
210 .mapbase = 0xfffea000,
211 .flags = UPF_BOOT_AUTOCONF,
212 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
213 .scbrr_algo_id = SCBRR_ALGO_2,
214 .type = PORT_SCIF,
215 .irqs = { 196, 196, 196, 196 }
216 }, {
217 .mapbase = 0xfffea800,
218 .flags = UPF_BOOT_AUTOCONF,
219 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
220 .scbrr_algo_id = SCBRR_ALGO_2,
221 .type = PORT_SCIF,
222 .irqs = { 200, 200, 200, 200 }
223 }, {
224 .mapbase = 0xfffeb000,
225 .flags = UPF_BOOT_AUTOCONF,
226 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
227 .scbrr_algo_id = SCBRR_ALGO_2,
228 .type = PORT_SCIF,
229 .irqs = { 204, 204, 204, 204 }
230 }, {
231 .mapbase = 0xfffeb800,
232 .flags = UPF_BOOT_AUTOCONF,
233 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
234 .scbrr_algo_id = SCBRR_ALGO_2,
235 .type = PORT_SCIF,
236 .irqs = { 208, 208, 208, 208 }
237 }, {
238 .flags = 0,
239 }
240};
241
242static struct platform_device sci_device = {
243 .name = "sh-sci", 190 .name = "sh-sci",
244 .id = -1, 191 .id = 0,
192 .dev = {
193 .platform_data = &scif0_platform_data,
194 },
195};
196
197static struct plat_sci_port scif1_platform_data = {
198 .mapbase = 0xfffe8800,
199 .flags = UPF_BOOT_AUTOCONF,
200 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
201 .scbrr_algo_id = SCBRR_ALGO_2,
202 .type = PORT_SCIF,
203 .irqs = { 184, 184, 184, 184 }
204};
205
206static struct platform_device scif1_device = {
207 .name = "sh-sci",
208 .id = 1,
209 .dev = {
210 .platform_data = &scif1_platform_data,
211 },
212};
213
214static struct plat_sci_port scif2_platform_data = {
215 .mapbase = 0xfffe9000,
216 .flags = UPF_BOOT_AUTOCONF,
217 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
218 .scbrr_algo_id = SCBRR_ALGO_2,
219 .type = PORT_SCIF,
220 .irqs = { 188, 188, 188, 188 }
221};
222
223static struct platform_device scif2_device = {
224 .name = "sh-sci",
225 .id = 2,
226 .dev = {
227 .platform_data = &scif2_platform_data,
228 },
229};
230
231static struct plat_sci_port scif3_platform_data = {
232 .mapbase = 0xfffe9800,
233 .flags = UPF_BOOT_AUTOCONF,
234 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
235 .scbrr_algo_id = SCBRR_ALGO_2,
236 .type = PORT_SCIF,
237 .irqs = { 192, 192, 192, 192 }
238};
239
240static struct platform_device scif3_device = {
241 .name = "sh-sci",
242 .id = 3,
243 .dev = {
244 .platform_data = &scif3_platform_data,
245 },
246};
247
248static struct plat_sci_port scif4_platform_data = {
249 .mapbase = 0xfffea000,
250 .flags = UPF_BOOT_AUTOCONF,
251 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
252 .scbrr_algo_id = SCBRR_ALGO_2,
253 .type = PORT_SCIF,
254 .irqs = { 196, 196, 196, 196 }
255};
256
257static struct platform_device scif4_device = {
258 .name = "sh-sci",
259 .id = 4,
260 .dev = {
261 .platform_data = &scif4_platform_data,
262 },
263};
264
265static struct plat_sci_port scif5_platform_data = {
266 .mapbase = 0xfffea800,
267 .flags = UPF_BOOT_AUTOCONF,
268 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
269 .scbrr_algo_id = SCBRR_ALGO_2,
270 .type = PORT_SCIF,
271 .irqs = { 200, 200, 200, 200 }
272};
273
274static struct platform_device scif5_device = {
275 .name = "sh-sci",
276 .id = 5,
277 .dev = {
278 .platform_data = &scif5_platform_data,
279 },
280};
281
282static struct plat_sci_port scif6_platform_data = {
283 .mapbase = 0xfffeb000,
284 .flags = UPF_BOOT_AUTOCONF,
285 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
286 .scbrr_algo_id = SCBRR_ALGO_2,
287 .type = PORT_SCIF,
288 .irqs = { 204, 204, 204, 204 }
289};
290
291static struct platform_device scif6_device = {
292 .name = "sh-sci",
293 .id = 6,
294 .dev = {
295 .platform_data = &scif6_platform_data,
296 },
297};
298
299static struct plat_sci_port scif7_platform_data = {
300 .mapbase = 0xfffeb800,
301 .flags = UPF_BOOT_AUTOCONF,
302 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
303 .scbrr_algo_id = SCBRR_ALGO_2,
304 .type = PORT_SCIF,
305 .irqs = { 208, 208, 208, 208 }
306};
307
308static struct platform_device scif7_device = {
309 .name = "sh-sci",
310 .id = 7,
245 .dev = { 311 .dev = {
246 .platform_data = sci_platform_data, 312 .platform_data = &scif7_platform_data,
247 }, 313 },
248}; 314};
249 315
@@ -268,16 +334,13 @@ static struct platform_device rtc_device = {
268}; 334};
269 335
270static struct sh_timer_config mtu2_0_platform_data = { 336static struct sh_timer_config mtu2_0_platform_data = {
271 .name = "MTU2_0",
272 .channel_offset = -0x80, 337 .channel_offset = -0x80,
273 .timer_bit = 0, 338 .timer_bit = 0,
274 .clk = "peripheral_clk",
275 .clockevent_rating = 200, 339 .clockevent_rating = 200,
276}; 340};
277 341
278static struct resource mtu2_0_resources[] = { 342static struct resource mtu2_0_resources[] = {
279 [0] = { 343 [0] = {
280 .name = "MTU2_0",
281 .start = 0xfffe4300, 344 .start = 0xfffe4300,
282 .end = 0xfffe4326, 345 .end = 0xfffe4326,
283 .flags = IORESOURCE_MEM, 346 .flags = IORESOURCE_MEM,
@@ -299,16 +362,13 @@ static struct platform_device mtu2_0_device = {
299}; 362};
300 363
301static struct sh_timer_config mtu2_1_platform_data = { 364static struct sh_timer_config mtu2_1_platform_data = {
302 .name = "MTU2_1",
303 .channel_offset = -0x100, 365 .channel_offset = -0x100,
304 .timer_bit = 1, 366 .timer_bit = 1,
305 .clk = "peripheral_clk",
306 .clockevent_rating = 200, 367 .clockevent_rating = 200,
307}; 368};
308 369
309static struct resource mtu2_1_resources[] = { 370static struct resource mtu2_1_resources[] = {
310 [0] = { 371 [0] = {
311 .name = "MTU2_1",
312 .start = 0xfffe4380, 372 .start = 0xfffe4380,
313 .end = 0xfffe4390, 373 .end = 0xfffe4390,
314 .flags = IORESOURCE_MEM, 374 .flags = IORESOURCE_MEM,
@@ -330,16 +390,13 @@ static struct platform_device mtu2_1_device = {
330}; 390};
331 391
332static struct sh_timer_config mtu2_2_platform_data = { 392static struct sh_timer_config mtu2_2_platform_data = {
333 .name = "MTU2_2",
334 .channel_offset = 0x80, 393 .channel_offset = 0x80,
335 .timer_bit = 2, 394 .timer_bit = 2,
336 .clk = "peripheral_clk",
337 .clockevent_rating = 200, 395 .clockevent_rating = 200,
338}; 396};
339 397
340static struct resource mtu2_2_resources[] = { 398static struct resource mtu2_2_resources[] = {
341 [0] = { 399 [0] = {
342 .name = "MTU2_2",
343 .start = 0xfffe4000, 400 .start = 0xfffe4000,
344 .end = 0xfffe400a, 401 .end = 0xfffe400a,
345 .flags = IORESOURCE_MEM, 402 .flags = IORESOURCE_MEM,
@@ -361,7 +418,14 @@ static struct platform_device mtu2_2_device = {
361}; 418};
362 419
363static struct platform_device *sh7201_devices[] __initdata = { 420static struct platform_device *sh7201_devices[] __initdata = {
364 &sci_device, 421 &scif0_device,
422 &scif1_device,
423 &scif2_device,
424 &scif3_device,
425 &scif4_device,
426 &scif5_device,
427 &scif6_device,
428 &scif7_device,
365 &rtc_device, 429 &rtc_device,
366 &mtu2_0_device, 430 &mtu2_0_device,
367 &mtu2_1_device, 431 &mtu2_1_device,
@@ -373,7 +437,7 @@ static int __init sh7201_devices_setup(void)
373 return platform_add_devices(sh7201_devices, 437 return platform_add_devices(sh7201_devices,
374 ARRAY_SIZE(sh7201_devices)); 438 ARRAY_SIZE(sh7201_devices));
375} 439}
376__initcall(sh7201_devices_setup); 440arch_initcall(sh7201_devices_setup);
377 441
378void __init plat_irq_setup(void) 442void __init plat_irq_setup(void)
379{ 443{
@@ -381,6 +445,14 @@ void __init plat_irq_setup(void)
381} 445}
382 446
383static struct platform_device *sh7201_early_devices[] __initdata = { 447static struct platform_device *sh7201_early_devices[] __initdata = {
448 &scif0_device,
449 &scif1_device,
450 &scif2_device,
451 &scif3_device,
452 &scif4_device,
453 &scif5_device,
454 &scif6_device,
455 &scif7_device,
384 &mtu2_0_device, 456 &mtu2_0_device,
385 &mtu2_1_device, 457 &mtu2_1_device,
386 &mtu2_2_device, 458 &mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index a78d2a219f3b..a43124e608c3 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -173,60 +173,83 @@ static struct intc_mask_reg mask_registers[] __initdata = {
173static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups, 173static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
174 mask_registers, prio_registers, NULL); 174 mask_registers, prio_registers, NULL);
175 175
176static struct plat_sci_port sci_platform_data[] = { 176static struct plat_sci_port scif0_platform_data = {
177 { 177 .mapbase = 0xfffe8000,
178 .mapbase = 0xfffe8000, 178 .flags = UPF_BOOT_AUTOCONF,
179 .flags = UPF_BOOT_AUTOCONF, 179 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
180 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 180 .scbrr_algo_id = SCBRR_ALGO_2,
181 .scbrr_algo_id = SCBRR_ALGO_2, 181 .type = PORT_SCIF,
182 .type = PORT_SCIF, 182 .irqs = { 192, 192, 192, 192 },
183 .irqs = { 192, 192, 192, 192 },
184 }, {
185 .mapbase = 0xfffe8800,
186 .flags = UPF_BOOT_AUTOCONF,
187 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
188 .scbrr_algo_id = SCBRR_ALGO_2,
189 .type = PORT_SCIF,
190 .irqs = { 196, 196, 196, 196 },
191 }, {
192 .mapbase = 0xfffe9000,
193 .flags = UPF_BOOT_AUTOCONF,
194 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
195 .scbrr_algo_id = SCBRR_ALGO_2,
196 .type = PORT_SCIF,
197 .irqs = { 200, 200, 200, 200 },
198 }, {
199 .mapbase = 0xfffe9800,
200 .flags = UPF_BOOT_AUTOCONF,
201 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
202 .scbrr_algo_id = SCBRR_ALGO_2,
203 .type = PORT_SCIF,
204 .irqs = { 204, 204, 204, 204 },
205 }, {
206 .flags = 0,
207 }
208}; 183};
209 184
210static struct platform_device sci_device = { 185static struct platform_device scif0_device = {
211 .name = "sh-sci", 186 .name = "sh-sci",
212 .id = -1, 187 .id = 0,
188 .dev = {
189 .platform_data = &scif0_platform_data,
190 },
191};
192
193static struct plat_sci_port scif1_platform_data = {
194 .mapbase = 0xfffe8800,
195 .flags = UPF_BOOT_AUTOCONF,
196 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
197 .scbrr_algo_id = SCBRR_ALGO_2,
198 .type = PORT_SCIF,
199 .irqs = { 196, 196, 196, 196 },
200};
201
202static struct platform_device scif1_device = {
203 .name = "sh-sci",
204 .id = 1,
205 .dev = {
206 .platform_data = &scif1_platform_data,
207 },
208};
209
210static struct plat_sci_port scif2_platform_data = {
211 .mapbase = 0xfffe9000,
212 .flags = UPF_BOOT_AUTOCONF,
213 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
214 .scbrr_algo_id = SCBRR_ALGO_2,
215 .type = PORT_SCIF,
216 .irqs = { 200, 200, 200, 200 },
217};
218
219static struct platform_device scif2_device = {
220 .name = "sh-sci",
221 .id = 2,
222 .dev = {
223 .platform_data = &scif2_platform_data,
224 },
225};
226
227static struct plat_sci_port scif3_platform_data = {
228 .mapbase = 0xfffe9800,
229 .flags = UPF_BOOT_AUTOCONF,
230 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
231 .scbrr_algo_id = SCBRR_ALGO_2,
232 .type = PORT_SCIF,
233 .irqs = { 204, 204, 204, 204 },
234};
235
236static struct platform_device scif3_device = {
237 .name = "sh-sci",
238 .id = 3,
213 .dev = { 239 .dev = {
214 .platform_data = sci_platform_data, 240 .platform_data = &scif3_platform_data,
215 }, 241 },
216}; 242};
217 243
218static struct sh_timer_config cmt0_platform_data = { 244static struct sh_timer_config cmt0_platform_data = {
219 .name = "CMT0",
220 .channel_offset = 0x02, 245 .channel_offset = 0x02,
221 .timer_bit = 0, 246 .timer_bit = 0,
222 .clk = "peripheral_clk",
223 .clockevent_rating = 125, 247 .clockevent_rating = 125,
224 .clocksource_rating = 0, /* disabled due to code generation issues */ 248 .clocksource_rating = 0, /* disabled due to code generation issues */
225}; 249};
226 250
227static struct resource cmt0_resources[] = { 251static struct resource cmt0_resources[] = {
228 [0] = { 252 [0] = {
229 .name = "CMT0",
230 .start = 0xfffec002, 253 .start = 0xfffec002,
231 .end = 0xfffec007, 254 .end = 0xfffec007,
232 .flags = IORESOURCE_MEM, 255 .flags = IORESOURCE_MEM,
@@ -248,17 +271,14 @@ static struct platform_device cmt0_device = {
248}; 271};
249 272
250static struct sh_timer_config cmt1_platform_data = { 273static struct sh_timer_config cmt1_platform_data = {
251 .name = "CMT1",
252 .channel_offset = 0x08, 274 .channel_offset = 0x08,
253 .timer_bit = 1, 275 .timer_bit = 1,
254 .clk = "peripheral_clk",
255 .clockevent_rating = 125, 276 .clockevent_rating = 125,
256 .clocksource_rating = 0, /* disabled due to code generation issues */ 277 .clocksource_rating = 0, /* disabled due to code generation issues */
257}; 278};
258 279
259static struct resource cmt1_resources[] = { 280static struct resource cmt1_resources[] = {
260 [0] = { 281 [0] = {
261 .name = "CMT1",
262 .start = 0xfffec008, 282 .start = 0xfffec008,
263 .end = 0xfffec00d, 283 .end = 0xfffec00d,
264 .flags = IORESOURCE_MEM, 284 .flags = IORESOURCE_MEM,
@@ -280,16 +300,13 @@ static struct platform_device cmt1_device = {
280}; 300};
281 301
282static struct sh_timer_config mtu2_0_platform_data = { 302static struct sh_timer_config mtu2_0_platform_data = {
283 .name = "MTU2_0",
284 .channel_offset = -0x80, 303 .channel_offset = -0x80,
285 .timer_bit = 0, 304 .timer_bit = 0,
286 .clk = "peripheral_clk",
287 .clockevent_rating = 200, 305 .clockevent_rating = 200,
288}; 306};
289 307
290static struct resource mtu2_0_resources[] = { 308static struct resource mtu2_0_resources[] = {
291 [0] = { 309 [0] = {
292 .name = "MTU2_0",
293 .start = 0xfffe4300, 310 .start = 0xfffe4300,
294 .end = 0xfffe4326, 311 .end = 0xfffe4326,
295 .flags = IORESOURCE_MEM, 312 .flags = IORESOURCE_MEM,
@@ -311,16 +328,13 @@ static struct platform_device mtu2_0_device = {
311}; 328};
312 329
313static struct sh_timer_config mtu2_1_platform_data = { 330static struct sh_timer_config mtu2_1_platform_data = {
314 .name = "MTU2_1",
315 .channel_offset = -0x100, 331 .channel_offset = -0x100,
316 .timer_bit = 1, 332 .timer_bit = 1,
317 .clk = "peripheral_clk",
318 .clockevent_rating = 200, 333 .clockevent_rating = 200,
319}; 334};
320 335
321static struct resource mtu2_1_resources[] = { 336static struct resource mtu2_1_resources[] = {
322 [0] = { 337 [0] = {
323 .name = "MTU2_1",
324 .start = 0xfffe4380, 338 .start = 0xfffe4380,
325 .end = 0xfffe4390, 339 .end = 0xfffe4390,
326 .flags = IORESOURCE_MEM, 340 .flags = IORESOURCE_MEM,
@@ -362,7 +376,10 @@ static struct platform_device rtc_device = {
362}; 376};
363 377
364static struct platform_device *sh7203_devices[] __initdata = { 378static struct platform_device *sh7203_devices[] __initdata = {
365 &sci_device, 379 &scif0_device,
380 &scif1_device,
381 &scif2_device,
382 &scif3_device,
366 &cmt0_device, 383 &cmt0_device,
367 &cmt1_device, 384 &cmt1_device,
368 &mtu2_0_device, 385 &mtu2_0_device,
@@ -375,7 +392,7 @@ static int __init sh7203_devices_setup(void)
375 return platform_add_devices(sh7203_devices, 392 return platform_add_devices(sh7203_devices,
376 ARRAY_SIZE(sh7203_devices)); 393 ARRAY_SIZE(sh7203_devices));
377} 394}
378__initcall(sh7203_devices_setup); 395arch_initcall(sh7203_devices_setup);
379 396
380void __init plat_irq_setup(void) 397void __init plat_irq_setup(void)
381{ 398{
@@ -383,6 +400,10 @@ void __init plat_irq_setup(void)
383} 400}
384 401
385static struct platform_device *sh7203_early_devices[] __initdata = { 402static struct platform_device *sh7203_early_devices[] __initdata = {
403 &scif0_device,
404 &scif1_device,
405 &scif2_device,
406 &scif3_device,
386 &cmt0_device, 407 &cmt0_device,
387 &cmt1_device, 408 &cmt1_device,
388 &mtu2_0_device, 409 &mtu2_0_device,
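The recurring change in these setup files is the split of the single sci_device (id = -1, driven by a zero-terminated array of plat_sci_port entries) into one platform device per SCIF port, each carrying its own plat_sci_port and a sequential id. A minimal sketch of the per-port pattern, using the SH7203 SCIF0 values from the hunk above (other ports differ only in mapbase, irqs and id):

	static struct plat_sci_port scif0_platform_data = {
		.mapbase	= 0xfffe8000,
		.flags		= UPF_BOOT_AUTOCONF,
		.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
		.scbrr_algo_id	= SCBRR_ALGO_2,
		.type		= PORT_SCIF,
		.irqs		= { 192, 192, 192, 192 },	/* all four SCIF interrupt sources on one vector */
	};

	static struct platform_device scif0_device = {
		.name	= "sh-sci",			/* matched by the sh-sci serial driver */
		.id	= 0,				/* one device per port instead of id = -1 */
		.dev	= {
			.platform_data	= &scif0_platform_data,
		},
	};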
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 68b93ed44cc2..5d14f849aea3 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -133,60 +133,83 @@ static struct intc_mask_reg mask_registers[] __initdata = {
133static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, 133static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
134 mask_registers, prio_registers, NULL); 134 mask_registers, prio_registers, NULL);
135 135
136static struct plat_sci_port sci_platform_data[] = { 136static struct plat_sci_port scif0_platform_data = {
137 { 137 .mapbase = 0xfffe8000,
138 .mapbase = 0xfffe8000, 138 .flags = UPF_BOOT_AUTOCONF,
139 .flags = UPF_BOOT_AUTOCONF, 139 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
140 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 140 .scbrr_algo_id = SCBRR_ALGO_2,
141 .scbrr_algo_id = SCBRR_ALGO_2, 141 .type = PORT_SCIF,
142 .type = PORT_SCIF, 142 .irqs = { 240, 240, 240, 240 },
143 .irqs = { 240, 240, 240, 240 },
144 }, {
145 .mapbase = 0xfffe8800,
146 .flags = UPF_BOOT_AUTOCONF,
147 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
148 .scbrr_algo_id = SCBRR_ALGO_2,
149 .type = PORT_SCIF,
150 .irqs = { 244, 244, 244, 244 },
151 }, {
152 .mapbase = 0xfffe9000,
153 .flags = UPF_BOOT_AUTOCONF,
154 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
155 .scbrr_algo_id = SCBRR_ALGO_2,
156 .type = PORT_SCIF,
157 .irqs = { 248, 248, 248, 248 },
158 }, {
159 .mapbase = 0xfffe9800,
160 .flags = UPF_BOOT_AUTOCONF,
161 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
162 .scbrr_algo_id = SCBRR_ALGO_2,
163 .type = PORT_SCIF,
164 .irqs = { 252, 252, 252, 252 },
165 }, {
166 .flags = 0,
167 }
168}; 143};
169 144
170static struct platform_device sci_device = { 145static struct platform_device scif0_device = {
171 .name = "sh-sci", 146 .name = "sh-sci",
172 .id = -1, 147 .id = 0,
148 .dev = {
149 .platform_data = &scif0_platform_data,
150 },
151};
152
153static struct plat_sci_port scif1_platform_data = {
154 .mapbase = 0xfffe8800,
155 .flags = UPF_BOOT_AUTOCONF,
156 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
157 .scbrr_algo_id = SCBRR_ALGO_2,
158 .type = PORT_SCIF,
159 .irqs = { 244, 244, 244, 244 },
160};
161
162static struct platform_device scif1_device = {
163 .name = "sh-sci",
164 .id = 1,
165 .dev = {
166 .platform_data = &scif1_platform_data,
167 },
168};
169
170static struct plat_sci_port scif2_platform_data = {
171 .mapbase = 0xfffe9000,
172 .flags = UPF_BOOT_AUTOCONF,
173 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
174 .scbrr_algo_id = SCBRR_ALGO_2,
175 .type = PORT_SCIF,
176 .irqs = { 248, 248, 248, 248 },
177};
178
179static struct platform_device scif2_device = {
180 .name = "sh-sci",
181 .id = 2,
182 .dev = {
183 .platform_data = &scif2_platform_data,
184 },
185};
186
187static struct plat_sci_port scif3_platform_data = {
188 .mapbase = 0xfffe9800,
189 .flags = UPF_BOOT_AUTOCONF,
190 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
191 .scbrr_algo_id = SCBRR_ALGO_2,
192 .type = PORT_SCIF,
193 .irqs = { 252, 252, 252, 252 },
194};
195
196static struct platform_device scif3_device = {
197 .name = "sh-sci",
198 .id = 3,
173 .dev = { 199 .dev = {
174 .platform_data = sci_platform_data, 200 .platform_data = &scif3_platform_data,
175 }, 201 },
176}; 202};
177 203
178static struct sh_timer_config cmt0_platform_data = { 204static struct sh_timer_config cmt0_platform_data = {
179 .name = "CMT0",
180 .channel_offset = 0x02, 205 .channel_offset = 0x02,
181 .timer_bit = 0, 206 .timer_bit = 0,
182 .clk = "peripheral_clk",
183 .clockevent_rating = 125, 207 .clockevent_rating = 125,
184 .clocksource_rating = 0, /* disabled due to code generation issues */ 208 .clocksource_rating = 0, /* disabled due to code generation issues */
185}; 209};
186 210
187static struct resource cmt0_resources[] = { 211static struct resource cmt0_resources[] = {
188 [0] = { 212 [0] = {
189 .name = "CMT0",
190 .start = 0xfffec002, 213 .start = 0xfffec002,
191 .end = 0xfffec007, 214 .end = 0xfffec007,
192 .flags = IORESOURCE_MEM, 215 .flags = IORESOURCE_MEM,
@@ -208,17 +231,14 @@ static struct platform_device cmt0_device = {
208}; 231};
209 232
210static struct sh_timer_config cmt1_platform_data = { 233static struct sh_timer_config cmt1_platform_data = {
211 .name = "CMT1",
212 .channel_offset = 0x08, 234 .channel_offset = 0x08,
213 .timer_bit = 1, 235 .timer_bit = 1,
214 .clk = "peripheral_clk",
215 .clockevent_rating = 125, 236 .clockevent_rating = 125,
216 .clocksource_rating = 0, /* disabled due to code generation issues */ 237 .clocksource_rating = 0, /* disabled due to code generation issues */
217}; 238};
218 239
219static struct resource cmt1_resources[] = { 240static struct resource cmt1_resources[] = {
220 [0] = { 241 [0] = {
221 .name = "CMT1",
222 .start = 0xfffec008, 242 .start = 0xfffec008,
223 .end = 0xfffec00d, 243 .end = 0xfffec00d,
224 .flags = IORESOURCE_MEM, 244 .flags = IORESOURCE_MEM,
@@ -240,16 +260,13 @@ static struct platform_device cmt1_device = {
240}; 260};
241 261
242static struct sh_timer_config mtu2_0_platform_data = { 262static struct sh_timer_config mtu2_0_platform_data = {
243 .name = "MTU2_0",
244 .channel_offset = -0x80, 263 .channel_offset = -0x80,
245 .timer_bit = 0, 264 .timer_bit = 0,
246 .clk = "peripheral_clk",
247 .clockevent_rating = 200, 265 .clockevent_rating = 200,
248}; 266};
249 267
250static struct resource mtu2_0_resources[] = { 268static struct resource mtu2_0_resources[] = {
251 [0] = { 269 [0] = {
252 .name = "MTU2_0",
253 .start = 0xfffe4300, 270 .start = 0xfffe4300,
254 .end = 0xfffe4326, 271 .end = 0xfffe4326,
255 .flags = IORESOURCE_MEM, 272 .flags = IORESOURCE_MEM,
@@ -271,16 +288,13 @@ static struct platform_device mtu2_0_device = {
271}; 288};
272 289
273static struct sh_timer_config mtu2_1_platform_data = { 290static struct sh_timer_config mtu2_1_platform_data = {
274 .name = "MTU2_1",
275 .channel_offset = -0x100, 291 .channel_offset = -0x100,
276 .timer_bit = 1, 292 .timer_bit = 1,
277 .clk = "peripheral_clk",
278 .clockevent_rating = 200, 293 .clockevent_rating = 200,
279}; 294};
280 295
281static struct resource mtu2_1_resources[] = { 296static struct resource mtu2_1_resources[] = {
282 [0] = { 297 [0] = {
283 .name = "MTU2_1",
284 .start = 0xfffe4380, 298 .start = 0xfffe4380,
285 .end = 0xfffe4390, 299 .end = 0xfffe4390,
286 .flags = IORESOURCE_MEM, 300 .flags = IORESOURCE_MEM,
@@ -302,16 +316,13 @@ static struct platform_device mtu2_1_device = {
302}; 316};
303 317
304static struct sh_timer_config mtu2_2_platform_data = { 318static struct sh_timer_config mtu2_2_platform_data = {
305 .name = "MTU2_2",
306 .channel_offset = 0x80, 319 .channel_offset = 0x80,
307 .timer_bit = 2, 320 .timer_bit = 2,
308 .clk = "peripheral_clk",
309 .clockevent_rating = 200, 321 .clockevent_rating = 200,
310}; 322};
311 323
312static struct resource mtu2_2_resources[] = { 324static struct resource mtu2_2_resources[] = {
313 [0] = { 325 [0] = {
314 .name = "MTU2_2",
315 .start = 0xfffe4000, 326 .start = 0xfffe4000,
316 .end = 0xfffe400a, 327 .end = 0xfffe400a,
317 .flags = IORESOURCE_MEM, 328 .flags = IORESOURCE_MEM,
@@ -333,7 +344,10 @@ static struct platform_device mtu2_2_device = {
333}; 344};
334 345
335static struct platform_device *sh7206_devices[] __initdata = { 346static struct platform_device *sh7206_devices[] __initdata = {
336 &sci_device, 347 &scif0_device,
348 &scif1_device,
349 &scif2_device,
350 &scif3_device,
337 &cmt0_device, 351 &cmt0_device,
338 &cmt1_device, 352 &cmt1_device,
339 &mtu2_0_device, 353 &mtu2_0_device,
@@ -346,7 +360,7 @@ static int __init sh7206_devices_setup(void)
346 return platform_add_devices(sh7206_devices, 360 return platform_add_devices(sh7206_devices,
347 ARRAY_SIZE(sh7206_devices)); 361 ARRAY_SIZE(sh7206_devices));
348} 362}
349__initcall(sh7206_devices_setup); 363arch_initcall(sh7206_devices_setup);
350 364
351void __init plat_irq_setup(void) 365void __init plat_irq_setup(void)
352{ 366{
@@ -354,6 +368,10 @@ void __init plat_irq_setup(void)
354} 368}
355 369
356static struct platform_device *sh7206_early_devices[] __initdata = { 370static struct platform_device *sh7206_early_devices[] __initdata = {
371 &scif0_device,
372 &scif1_device,
373 &scif2_device,
374 &scif3_device,
357 &cmt0_device, 375 &cmt0_device,
358 &cmt1_device, 376 &cmt1_device,
359 &mtu2_0_device, 377 &mtu2_0_device,
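Another change repeated across these files is the switch of the devices_setup() hooks from __initcall() to arch_initcall(). __initcall() is an alias for device_initcall() (level 6), while arch_initcall() runs at level 3, so the SCIF and timer platform devices are now registered before ordinary drivers probe. A sketch, assuming the standard include/linux/init.h level ordering:

	/*
	 * Initcall levels (include/linux/init.h):
	 *   arch_initcall()      level 3
	 *   subsys_initcall()    level 4
	 *   fs_initcall()        level 5
	 *   device_initcall()    level 6  (__initcall() maps here)
	 */
	static int __init sh7206_devices_setup(void)
	{
		return platform_add_devices(sh7206_devices,
					    ARRAY_SIZE(sh7206_devices));
	}
	arch_initcall(sh7206_devices_setup);	/* was: __initcall(sh7206_devices_setup) */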
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
index 27b8738f0b09..b78384afac09 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
28 28
29static void master_clk_init(struct clk *clk) 29static void master_clk_init(struct clk *clk)
30{ 30{
31 int frqcr = ctrl_inw(FRQCR); 31 int frqcr = __raw_readw(FRQCR);
32 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 32 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
33 33
34 clk->rate *= pfc_divisors[idx]; 34 clk->rate *= pfc_divisors[idx];
@@ -40,7 +40,7 @@ static struct clk_ops sh3_master_clk_ops = {
40 40
41static unsigned long module_clk_recalc(struct clk *clk) 41static unsigned long module_clk_recalc(struct clk *clk)
42{ 42{
43 int frqcr = ctrl_inw(FRQCR); 43 int frqcr = __raw_readw(FRQCR);
44 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 44 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
45 45
46 return clk->parent->rate / pfc_divisors[idx]; 46 return clk->parent->rate / pfc_divisors[idx];
@@ -52,7 +52,7 @@ static struct clk_ops sh3_module_clk_ops = {
52 52
53static unsigned long bus_clk_recalc(struct clk *clk) 53static unsigned long bus_clk_recalc(struct clk *clk)
54{ 54{
55 int frqcr = ctrl_inw(FRQCR); 55 int frqcr = __raw_readw(FRQCR);
56 int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4); 56 int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
57 57
58 return clk->parent->rate / stc_multipliers[idx]; 58 return clk->parent->rate / stc_multipliers[idx];
@@ -64,7 +64,7 @@ static struct clk_ops sh3_bus_clk_ops = {
64 64
65static unsigned long cpu_clk_recalc(struct clk *clk) 65static unsigned long cpu_clk_recalc(struct clk *clk)
66{ 66{
67 int frqcr = ctrl_inw(FRQCR); 67 int frqcr = __raw_readw(FRQCR);
68 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); 68 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
69 69
70 return clk->parent->rate / ifc_divisors[idx]; 70 return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
index 0ca8f2c3646c..0ecea1451c6f 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -32,7 +32,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
32 32
33static void master_clk_init(struct clk *clk) 33static void master_clk_init(struct clk *clk)
34{ 34{
35 clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0003]; 35 clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
36} 36}
37 37
38static struct clk_ops sh7705_master_clk_ops = { 38static struct clk_ops sh7705_master_clk_ops = {
@@ -41,7 +41,7 @@ static struct clk_ops sh7705_master_clk_ops = {
41 41
42static unsigned long module_clk_recalc(struct clk *clk) 42static unsigned long module_clk_recalc(struct clk *clk)
43{ 43{
44 int idx = ctrl_inw(FRQCR) & 0x0003; 44 int idx = __raw_readw(FRQCR) & 0x0003;
45 return clk->parent->rate / pfc_divisors[idx]; 45 return clk->parent->rate / pfc_divisors[idx];
46} 46}
47 47
@@ -51,7 +51,7 @@ static struct clk_ops sh7705_module_clk_ops = {
51 51
52static unsigned long bus_clk_recalc(struct clk *clk) 52static unsigned long bus_clk_recalc(struct clk *clk)
53{ 53{
54 int idx = (ctrl_inw(FRQCR) & 0x0300) >> 8; 54 int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
55 return clk->parent->rate / stc_multipliers[idx]; 55 return clk->parent->rate / stc_multipliers[idx];
56} 56}
57 57
@@ -61,7 +61,7 @@ static struct clk_ops sh7705_bus_clk_ops = {
61 61
62static unsigned long cpu_clk_recalc(struct clk *clk) 62static unsigned long cpu_clk_recalc(struct clk *clk)
63{ 63{
64 int idx = (ctrl_inw(FRQCR) & 0x0030) >> 4; 64 int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
65 return clk->parent->rate / ifc_divisors[idx]; 65 return clk->parent->rate / ifc_divisors[idx];
66} 66}
67 67
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
index 4bf7887d310a..6f9ff8b57dd6 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
24 24
25static void master_clk_init(struct clk *clk) 25static void master_clk_init(struct clk *clk)
26{ 26{
27 int frqcr = ctrl_inw(FRQCR); 27 int frqcr = __raw_readw(FRQCR);
28 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 28 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
29 29
30 clk->rate *= pfc_divisors[idx]; 30 clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@ static struct clk_ops sh7706_master_clk_ops = {
36 36
37static unsigned long module_clk_recalc(struct clk *clk) 37static unsigned long module_clk_recalc(struct clk *clk)
38{ 38{
39 int frqcr = ctrl_inw(FRQCR); 39 int frqcr = __raw_readw(FRQCR);
40 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 40 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
41 41
42 return clk->parent->rate / pfc_divisors[idx]; 42 return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@ static struct clk_ops sh7706_module_clk_ops = {
48 48
49static unsigned long bus_clk_recalc(struct clk *clk) 49static unsigned long bus_clk_recalc(struct clk *clk)
50{ 50{
51 int frqcr = ctrl_inw(FRQCR); 51 int frqcr = __raw_readw(FRQCR);
52 int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4); 52 int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
53 53
54 return clk->parent->rate / stc_multipliers[idx]; 54 return clk->parent->rate / stc_multipliers[idx];
@@ -60,7 +60,7 @@ static struct clk_ops sh7706_bus_clk_ops = {
60 60
61static unsigned long cpu_clk_recalc(struct clk *clk) 61static unsigned long cpu_clk_recalc(struct clk *clk)
62{ 62{
63 int frqcr = ctrl_inw(FRQCR); 63 int frqcr = __raw_readw(FRQCR);
64 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); 64 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
65 65
66 return clk->parent->rate / ifc_divisors[idx]; 66 return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index fa30b6017730..f302ba09e681 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -22,16 +22,9 @@ static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
22static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 }; 22static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
23static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; 23static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
24 24
25static void set_bus_parent(struct clk *clk)
26{
27 struct clk *bus_clk = clk_get(NULL, "bus_clk");
28 clk->parent = bus_clk;
29 clk_put(bus_clk);
30}
31
32static void master_clk_init(struct clk *clk) 25static void master_clk_init(struct clk *clk)
33{ 26{
34 int frqcr = ctrl_inw(FRQCR); 27 int frqcr = __raw_readw(FRQCR);
35 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 28 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
36 29
37 clk->rate *= pfc_divisors[idx]; 30 clk->rate *= pfc_divisors[idx];
@@ -43,22 +36,19 @@ static struct clk_ops sh7709_master_clk_ops = {
43 36
44static unsigned long module_clk_recalc(struct clk *clk) 37static unsigned long module_clk_recalc(struct clk *clk)
45{ 38{
46 int frqcr = ctrl_inw(FRQCR); 39 int frqcr = __raw_readw(FRQCR);
47 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); 40 int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
48 41
49 return clk->parent->rate / pfc_divisors[idx]; 42 return clk->parent->rate / pfc_divisors[idx];
50} 43}
51 44
52static struct clk_ops sh7709_module_clk_ops = { 45static struct clk_ops sh7709_module_clk_ops = {
53#ifdef CLOCK_MODE_0_1_2_7
54 .init = set_bus_parent,
55#endif
56 .recalc = module_clk_recalc, 46 .recalc = module_clk_recalc,
57}; 47};
58 48
59static unsigned long bus_clk_recalc(struct clk *clk) 49static unsigned long bus_clk_recalc(struct clk *clk)
60{ 50{
61 int frqcr = ctrl_inw(FRQCR); 51 int frqcr = __raw_readw(FRQCR);
62 int idx = (frqcr & 0x0080) ? 52 int idx = (frqcr & 0x0080) ?
63 ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1; 53 ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
64 54
@@ -71,14 +61,13 @@ static struct clk_ops sh7709_bus_clk_ops = {
71 61
72static unsigned long cpu_clk_recalc(struct clk *clk) 62static unsigned long cpu_clk_recalc(struct clk *clk)
73{ 63{
74 int frqcr = ctrl_inw(FRQCR); 64 int frqcr = __raw_readw(FRQCR);
75 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); 65 int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
76 66
77 return clk->parent->rate / ifc_divisors[idx]; 67 return clk->parent->rate / ifc_divisors[idx];
78} 68}
79 69
80static struct clk_ops sh7709_cpu_clk_ops = { 70static struct clk_ops sh7709_cpu_clk_ops = {
81 .init = set_bus_parent,
82 .recalc = cpu_clk_recalc, 71 .recalc = cpu_clk_recalc,
83}; 72};
84 73
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
index 030a58ba18a5..29a87d8946a4 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -26,7 +26,7 @@ static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
26 26
27static void master_clk_init(struct clk *clk) 27static void master_clk_init(struct clk *clk)
28{ 28{
29 clk->rate *= md_table[ctrl_inw(FRQCR) & 0x0007]; 29 clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
30} 30}
31 31
32static struct clk_ops sh7710_master_clk_ops = { 32static struct clk_ops sh7710_master_clk_ops = {
@@ -35,7 +35,7 @@ static struct clk_ops sh7710_master_clk_ops = {
35 35
36static unsigned long module_clk_recalc(struct clk *clk) 36static unsigned long module_clk_recalc(struct clk *clk)
37{ 37{
38 int idx = (ctrl_inw(FRQCR) & 0x0007); 38 int idx = (__raw_readw(FRQCR) & 0x0007);
39 return clk->parent->rate / md_table[idx]; 39 return clk->parent->rate / md_table[idx];
40} 40}
41 41
@@ -45,7 +45,7 @@ static struct clk_ops sh7710_module_clk_ops = {
45 45
46static unsigned long bus_clk_recalc(struct clk *clk) 46static unsigned long bus_clk_recalc(struct clk *clk)
47{ 47{
48 int idx = (ctrl_inw(FRQCR) & 0x0700) >> 8; 48 int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
49 return clk->parent->rate / md_table[idx]; 49 return clk->parent->rate / md_table[idx];
50} 50}
51 51
@@ -55,7 +55,7 @@ static struct clk_ops sh7710_bus_clk_ops = {
55 55
56static unsigned long cpu_clk_recalc(struct clk *clk) 56static unsigned long cpu_clk_recalc(struct clk *clk)
57{ 57{
58 int idx = (ctrl_inw(FRQCR) & 0x0070) >> 4; 58 int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
59 return clk->parent->rate / md_table[idx]; 59 return clk->parent->rate / md_table[idx];
60} 60}
61 61
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
index 6428ee6c77ed..b0d0c5203996 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -23,7 +23,7 @@ static int divisors[] = { 1, 2, 3, 4, 6 };
23 23
24static void master_clk_init(struct clk *clk) 24static void master_clk_init(struct clk *clk)
25{ 25{
26 int frqcr = ctrl_inw(FRQCR); 26 int frqcr = __raw_readw(FRQCR);
27 int idx = (frqcr & 0x0300) >> 8; 27 int idx = (frqcr & 0x0300) >> 8;
28 28
29 clk->rate *= multipliers[idx]; 29 clk->rate *= multipliers[idx];
@@ -35,7 +35,7 @@ static struct clk_ops sh7712_master_clk_ops = {
35 35
36static unsigned long module_clk_recalc(struct clk *clk) 36static unsigned long module_clk_recalc(struct clk *clk)
37{ 37{
38 int frqcr = ctrl_inw(FRQCR); 38 int frqcr = __raw_readw(FRQCR);
39 int idx = frqcr & 0x0007; 39 int idx = frqcr & 0x0007;
40 40
41 return clk->parent->rate / divisors[idx]; 41 return clk->parent->rate / divisors[idx];
@@ -47,7 +47,7 @@ static struct clk_ops sh7712_module_clk_ops = {
47 47
48static unsigned long cpu_clk_recalc(struct clk *clk) 48static unsigned long cpu_clk_recalc(struct clk *clk)
49{ 49{
50 int frqcr = ctrl_inw(FRQCR); 50 int frqcr = __raw_readw(FRQCR);
51 int idx = (frqcr & 0x0030) >> 4; 51 int idx = (frqcr & 0x0030) >> 4;
52 52
53 return clk->parent->rate / divisors[idx]; 53 return clk->parent->rate / divisors[idx];
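The clock files above are all the same mechanical conversion: the SH-specific ctrl_inw()/ctrl_outw()/ctrl_inl()/ctrl_outl() helpers are replaced by the generic __raw_readw()/__raw_writew()/__raw_readl()/__raw_writel() MMIO accessors, with register layouts and bit masks left untouched. A minimal sketch of the pattern; the FRQCR decode mirrors the sh7712 hunk above, the read-modify-write form mirrors the setup-sh3.c hunk further down, and the wrapper name is illustrative only:

	static unsigned long cpu_clk_recalc(struct clk *clk)
	{
		int frqcr = __raw_readw(FRQCR);		/* was: ctrl_inw(FRQCR) */
		int idx = (frqcr & 0x0030) >> 4;

		return clk->parent->rate / divisors[idx];
	}

	/* Read-modify-write form, e.g. dropping IRQLVL mode in setup-sh3.c: */
	static void __init irq_mode_setup(void)		/* illustrative wrapper name */
	{
		__raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
	}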
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3cb531f233f2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -53,10 +53,6 @@
53 * syscall # 53 * syscall #
54 * 54 *
55 */ 55 */
56#if defined(CONFIG_KGDB)
57NMI_VEC = 0x1c0 ! Must catch early for debounce
58#endif
59
60/* Offsets to the stack */ 56/* Offsets to the stack */
61OFF_R0 = 0 /* Return value. New ABI also arg4 */ 57OFF_R0 = 0 /* Return value. New ABI also arg4 */
62OFF_R1 = 4 /* New ABI: arg5 */ 58OFF_R1 = 4 /* New ABI: arg5 */
@@ -71,7 +67,6 @@ OFF_PC = (16*4)
71OFF_SR = (16*4+8) 67OFF_SR = (16*4+8)
72OFF_TRA = (16*4+6*4) 68OFF_TRA = (16*4+6*4)
73 69
74
75#define k0 r0 70#define k0 r0
76#define k1 r1 71#define k1 r1
77#define k2 r2 72#define k2 r2
@@ -113,34 +108,33 @@ OFF_TRA = (16*4+6*4)
113#if defined(CONFIG_MMU) 108#if defined(CONFIG_MMU)
114 .align 2 109 .align 2
115ENTRY(tlb_miss_load) 110ENTRY(tlb_miss_load)
116 bra call_dpf 111 bra call_handle_tlbmiss
117 mov #0, r5 112 mov #0, r5
118 113
119 .align 2 114 .align 2
120ENTRY(tlb_miss_store) 115ENTRY(tlb_miss_store)
121 bra call_dpf 116 bra call_handle_tlbmiss
122 mov #1, r5 117 mov #1, r5
123 118
124 .align 2 119 .align 2
125ENTRY(initial_page_write) 120ENTRY(initial_page_write)
126 bra call_dpf 121 bra call_handle_tlbmiss
127 mov #1, r5 122 mov #2, r5
128 123
129 .align 2 124 .align 2
130ENTRY(tlb_protection_violation_load) 125ENTRY(tlb_protection_violation_load)
131 bra call_dpf 126 bra call_do_page_fault
132 mov #0, r5 127 mov #0, r5
133 128
134 .align 2 129 .align 2
135ENTRY(tlb_protection_violation_store) 130ENTRY(tlb_protection_violation_store)
136 bra call_dpf 131 bra call_do_page_fault
137 mov #1, r5 132 mov #1, r5
138 133
139call_dpf: 134call_handle_tlbmiss:
140 mov.l 1f, r0 135 mov.l 1f, r0
141 mov r5, r8 136 mov r5, r8
142 mov.l @r0, r6 137 mov.l @r0, r6
143 mov r6, r9
144 mov.l 2f, r0 138 mov.l 2f, r0
145 sts pr, r10 139 sts pr, r10
146 jsr @r0 140 jsr @r0
@@ -151,16 +145,23 @@ call_dpf:
151 lds r10, pr 145 lds r10, pr
152 rts 146 rts
153 nop 147 nop
1540: mov.l 3f, r0 1480:
155 mov r9, r6
156 mov r8, r5 149 mov r8, r5
150call_do_page_fault:
151 mov.l 1f, r0
152 mov.l @r0, r6
153
154 mov.l 3f, r0
155 mov.l 4f, r1
156 mov r15, r4
157 jmp @r0 157 jmp @r0
158 mov r15, r4 158 lds r1, pr
159 159
160 .align 2 160 .align 2
1611: .long MMU_TEA 1611: .long MMU_TEA
1622: .long __do_page_fault 1622: .long handle_tlbmiss
1633: .long do_page_fault 1633: .long do_page_fault
1644: .long ret_from_exception
164 165
165 .align 2 166 .align 2
166ENTRY(address_error_load) 167ENTRY(address_error_load)
@@ -256,7 +257,7 @@ restore_all:
256 ! 257 !
257 ! Calculate new SR value 258 ! Calculate new SR value
258 mov k3, k2 ! original SR value 259 mov k3, k2 ! original SR value
259 mov #0xf0, k1 260 mov #0xfffffff0, k1
260 extu.b k1, k1 261 extu.b k1, k1
261 not k1, k1 262 not k1, k1
262 and k1, k2 ! Mask original SR value 263 and k1, k2 ! Mask original SR value
@@ -272,21 +273,12 @@ restore_all:
2726: or k0, k2 ! Set the IMASK-bits 2736: or k0, k2 ! Set the IMASK-bits
273 ldc k2, ssr 274 ldc k2, ssr
274 ! 275 !
275#if defined(CONFIG_KGDB)
276 ! Clear in_nmi
277 mov.l 6f, k0
278 mov #0, k1
279 mov.b k1, @k0
280#endif
281 mov k4, r15 276 mov k4, r15
282 rte 277 rte
283 nop 278 nop
284 279
285 .align 2 280 .align 2
2865: .long 0x00001000 ! DSP 2815: .long 0x00001000 ! DSP
287#ifdef CONFIG_KGDB
2886: .long in_nmi
289#endif
2907: .long 0x30000000 2827: .long 0x30000000
291 283
292! common exception handler 284! common exception handler
@@ -304,41 +296,8 @@ ENTRY(vbr_base)
304! 296!
305 .balign 256,0,256 297 .balign 256,0,256
306general_exception: 298general_exception:
307#ifndef CONFIG_CPU_SUBTYPE_SHX3
308 bra handle_exception 299 bra handle_exception
309 sts pr, k3 ! save original pr value in k3 300 sts pr, k3 ! save original pr value in k3
310#else
311 mov.l 1f, k4
312 mov.l @k4, k4
313
314 ! Is EXPEVT larger than 0x800?
315 mov #0x8, k0
316 shll8 k0
317 cmp/hs k0, k4
318 bf 0f
319
320 ! then add 0x580 (k2 is 0xd80 or 0xda0)
321 mov #0x58, k0
322 shll2 k0
323 shll2 k0
324 add k0, k4
3250:
326 ! Setup stack and save DSP context (k0 contains original r15 on return)
327 bsr prepare_stack
328 nop
329
330 ! Save registers / Switch to bank 0
331 mov k4, k2 ! keep vector in k2
332 mov.l 1f, k4 ! SR bits to clear in k4
333 bsr save_regs ! needs original pr value in k3
334 nop
335
336 bra handle_exception_special
337 nop
338
339 .align 2
3401: .long EXPEVT
341#endif
342 301
343! prepare_stack() 302! prepare_stack()
344! - roll back gRB 303! - roll back gRB
@@ -405,6 +364,8 @@ handle_exception:
405 mov.l @k2, k2 ! read out vector and keep in k2 364 mov.l @k2, k2 ! read out vector and keep in k2
406 365
407handle_exception_special: 366handle_exception_special:
367 setup_frame_reg
368
408 ! Setup return address and jump to exception handler 369 ! Setup return address and jump to exception handler
409 mov.l 7f, r9 ! fetch return address 370 mov.l 7f, r9 ! fetch return address
410 stc r2_bank, r0 ! k2 (vector) 371 stc r2_bank, r0 ! k2 (vector)
@@ -478,23 +439,6 @@ ENTRY(save_low_regs)
478! 439!
479 .balign 512,0,512 440 .balign 512,0,512
480ENTRY(handle_interrupt) 441ENTRY(handle_interrupt)
481#if defined(CONFIG_KGDB)
482 mov.l 2f, k2
483 ! Debounce (filter nested NMI)
484 mov.l @k2, k0
485 mov.l 9f, k1
486 cmp/eq k1, k0
487 bf 11f
488 mov.l 10f, k1
489 tas.b @k1
490 bt 11f
491 rte
492 nop
493 .align 2
4949: .long NMI_VEC
49510: .long in_nmi
49611:
497#endif /* defined(CONFIG_KGDB) */
498 sts pr, k3 ! save original pr value in k3 442 sts pr, k3 ! save original pr value in k3
499 mova exception_data, k0 443 mova exception_data, k0
500 444
@@ -507,13 +451,49 @@ ENTRY(handle_interrupt)
507 bsr save_regs ! needs original pr value in k3 451 bsr save_regs ! needs original pr value in k3
508 mov #-1, k2 ! default vector kept in k2 452 mov #-1, k2 ! default vector kept in k2
509 453
454 setup_frame_reg
455
456 stc sr, r0 ! get status register
457 shlr2 r0
458 and #0x3c, r0
459 cmp/eq #0x3c, r0
460 bf 9f
461 TRACE_IRQS_OFF
4629:
463
510 ! Setup return address and jump to do_IRQ 464 ! Setup return address and jump to do_IRQ
511 mov.l 4f, r9 ! fetch return address 465 mov.l 4f, r9 ! fetch return address
512 lds r9, pr ! put return address in pr 466 lds r9, pr ! put return address in pr
513 mov.l 2f, r4 467 mov.l 2f, r4
514 mov.l 3f, r9 468 mov.l 3f, r9
515 mov.l @r4, r4 ! pass INTEVT vector as arg0 469 mov.l @r4, r4 ! pass INTEVT vector as arg0
470
471 shlr2 r4
472 shlr r4
473 mov r4, r0 ! save vector->jmp table offset for later
474
475 shlr2 r4 ! vector to IRQ# conversion
476 add #-0x10, r4
477
478 cmp/pz r4 ! is it a valid IRQ?
479 bt 10f
480
481 /*
482 * We got here as a result of taking the INTEVT path for something
483 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
484 * path and special case the event dispatch instead. This is the
485 * expected path for the NMI (and any other brilliantly implemented
486 * exception), which effectively wants regular exception dispatch
487 * but is unfortunately reported through INTEVT rather than
488 * EXPEVT. Grr.
489 */
490 mov.l 6f, r9
491 mov.l @(r0, r9), r9
516 jmp @r9 492 jmp @r9
493 mov r15, r8 ! trap handlers take saved regs in r8
494
49510:
496 jmp @r9 ! Off to do_IRQ() we go.
517 mov r15, r5 ! pass saved registers as arg1 497 mov r15, r5 ! pass saved registers as arg1
518 498
519ENTRY(exception_none) 499ENTRY(exception_none)
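The new block in handle_interrupt() above converts the INTEVT vector to an IRQ number instead of unconditionally calling do_IRQ(): vectors are spaced 0x20 apart and hard IRQs begin at vector 0x200, so the shlr2/shlr/shlr2/add #-0x10 sequence computes (INTEVT >> 5) - 16, while (INTEVT >> 3) is kept in r0 as the byte offset into the table of .long handler entries. Roughly, in C (a control-flow sketch only; the table name and handler typedef are illustrative stand-ins for the assembly-level jump table, which passes the saved register frame in r8):

	struct pt_regs;
	extern int do_IRQ(unsigned int irq, struct pt_regs *regs);
	extern void (*exception_dispatch_table[])(void);	/* illustrative stand-in */

	static inline int evt2irq(unsigned long intevt)
	{
		return (intevt >> 5) - 16;	/* shlr2; shlr; shlr2; add #-0x10 */
	}

	void dispatch_intevt(unsigned long intevt, struct pt_regs *regs)
	{
		int irq = evt2irq(intevt);

		if (irq >= 0) {			/* cmp/pz: a genuine hard IRQ */
			do_IRQ(irq, regs);
			return;
		}

		/*
		 * NMI and similar events are reported through INTEVT but want
		 * regular exception dispatch; index the handler table directly
		 * with the saved (intevt >> 3) offset.
		 */
		exception_dispatch_table[intevt >> 3]();
	}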
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index e5a0de39a2db..99b4d020179a 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -48,10 +48,8 @@ ENTRY(exception_handling_table)
48 .long system_call ! Unconditional Trap /* 160 */ 48 .long system_call ! Unconditional Trap /* 160 */
49 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ 49 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
50 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ 50 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
51ENTRY(nmi_slot) 51 .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
52 .long kgdb_handle_exception /* 1C0 */ ! Allow trap to debugger 52 .long breakpoint_trap_handler /* 1E0 */
53ENTRY(user_break_point_trap)
54 .long break_point_trap /* 1E0 */
55 53
56 /* 54 /*
57 * Pad the remainder of the table out, exceptions residing in far 55 * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 10f2a760c5ee..bf23c322e164 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19int __uses_jump_to_uncached detect_cpu_and_cache_system(void) 19void __cpuinit cpu_probe(void)
20{ 20{
21 unsigned long addr0, addr1, data0, data1, data2, data3; 21 unsigned long addr0, addr1, data0, data1, data2, data3;
22 22
@@ -30,23 +30,23 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
30 addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12); 30 addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
31 31
32 /* First, write back & invalidate */ 32 /* First, write back & invalidate */
33 data0 = ctrl_inl(addr0); 33 data0 = __raw_readl(addr0);
34 ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0); 34 __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
35 data1 = ctrl_inl(addr1); 35 data1 = __raw_readl(addr1);
36 ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1); 36 __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
37 37
38 /* Next, check if there's shadow or not */ 38 /* Next, check if there's shadow or not */
39 data0 = ctrl_inl(addr0); 39 data0 = __raw_readl(addr0);
40 data0 ^= SH_CACHE_VALID; 40 data0 ^= SH_CACHE_VALID;
41 ctrl_outl(data0, addr0); 41 __raw_writel(data0, addr0);
42 data1 = ctrl_inl(addr1); 42 data1 = __raw_readl(addr1);
43 data2 = data1 ^ SH_CACHE_VALID; 43 data2 = data1 ^ SH_CACHE_VALID;
44 ctrl_outl(data2, addr1); 44 __raw_writel(data2, addr1);
45 data3 = ctrl_inl(addr0); 45 data3 = __raw_readl(addr0);
46 46
 47 /* Lastly, invalidate them. */ 47 /* Lastly, invalidate them. */
48 ctrl_outl(data0&~SH_CACHE_VALID, addr0); 48 __raw_writel(data0&~SH_CACHE_VALID, addr0);
49 ctrl_outl(data2&~SH_CACHE_VALID, addr1); 49 __raw_writel(data2&~SH_CACHE_VALID, addr1);
50 50
51 back_to_cached(); 51 back_to_cached();
52 52
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
94 boot_cpu_data.dcache.way_incr = (1 << 13); 94 boot_cpu_data.dcache.way_incr = (1 << 13);
95 boot_cpu_data.dcache.entry_mask = 0x1ff0; 95 boot_cpu_data.dcache.entry_mask = 0x1ff0;
96 boot_cpu_data.dcache.sets = 512; 96 boot_cpu_data.dcache.sets = 512;
97 ctrl_outl(CCR_CACHE_32KB, CCR3_REG); 97 __raw_writel(CCR_CACHE_32KB, CCR3_REG);
98#else 98#else
99 ctrl_outl(CCR_CACHE_16KB, CCR3_REG); 99 __raw_writel(CCR_CACHE_16KB, CCR3_REG);
100#endif 100#endif
101#endif 101#endif
102 } 102 }
@@ -107,5 +107,5 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
107 boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; 107 boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
108 boot_cpu_data.icache = boot_cpu_data.dcache; 108 boot_cpu_data.icache = boot_cpu_data.dcache;
109 109
110 return 0; 110 boot_cpu_data.family = CPU_FAMILY_SH3;
111} 111}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
index c98846857855..53be70b98116 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -58,7 +58,7 @@ static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45",
58void __init plat_irq_setup_pins(int mode) 58void __init plat_irq_setup_pins(int mode)
59{ 59{
60 if (mode == IRQ_MODE_IRQ) { 60 if (mode == IRQ_MODE_IRQ) {
61 ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); 61 __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
62 register_intc_controller(&intc_desc_irq0123); 62 register_intc_controller(&intc_desc_irq0123);
63 return; 63 return;
64 } 64 }
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index 27d03d836056..cd2e702feb7e 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -67,32 +67,38 @@ static struct intc_prio_reg prio_registers[] __initdata = {
67static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL, 67static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
68 NULL, prio_registers, NULL); 68 NULL, prio_registers, NULL);
69 69
70static struct plat_sci_port sci_platform_data[] = { 70static struct plat_sci_port scif0_platform_data = {
71 { 71 .mapbase = 0xa4410000,
72 .mapbase = 0xa4410000, 72 .flags = UPF_BOOT_AUTOCONF,
73 .flags = UPF_BOOT_AUTOCONF, 73 .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE |
74 .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | 74 SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0,
75 SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0, 75 .scbrr_algo_id = SCBRR_ALGO_4,
76 .scbrr_algo_id = SCBRR_ALGO_4, 76 .type = PORT_SCIF,
77 .type = PORT_SCIF, 77 .irqs = { 56, 56, 56 },
78 .irqs = { 56, 56, 56 }, 78};
79 }, { 79
80 .mapbase = 0xa4400000, 80static struct platform_device scif0_device = {
81 .flags = UPF_BOOT_AUTOCONF,
82 .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
83 .scbrr_algo_id = SCBRR_ALGO_4,
84 .type = PORT_SCIF,
85 .irqs = { 52, 52, 52 },
86 }, {
87 .flags = 0,
88 }
89};
90
91static struct platform_device sci_device = {
92 .name = "sh-sci", 81 .name = "sh-sci",
93 .id = -1, 82 .id = 0,
83 .dev = {
84 .platform_data = &scif0_platform_data,
85 },
86};
87
88static struct plat_sci_port scif1_platform_data = {
89 .mapbase = 0xa4400000,
90 .flags = UPF_BOOT_AUTOCONF,
91 .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
92 .scbrr_algo_id = SCBRR_ALGO_4,
93 .type = PORT_SCIF,
94 .irqs = { 52, 52, 52 },
95};
96
97static struct platform_device scif1_device = {
98 .name = "sh-sci",
99 .id = 1,
94 .dev = { 100 .dev = {
95 .platform_data = sci_platform_data, 101 .platform_data = &scif1_platform_data,
96 }, 102 },
97}; 103};
98 104
@@ -123,16 +129,13 @@ static struct platform_device rtc_device = {
123}; 129};
124 130
125static struct sh_timer_config tmu0_platform_data = { 131static struct sh_timer_config tmu0_platform_data = {
126 .name = "TMU0",
127 .channel_offset = 0x02, 132 .channel_offset = 0x02,
128 .timer_bit = 0, 133 .timer_bit = 0,
129 .clk = "peripheral_clk",
130 .clockevent_rating = 200, 134 .clockevent_rating = 200,
131}; 135};
132 136
133static struct resource tmu0_resources[] = { 137static struct resource tmu0_resources[] = {
134 [0] = { 138 [0] = {
135 .name = "TMU0",
136 .start = 0xfffffe94, 139 .start = 0xfffffe94,
137 .end = 0xfffffe9f, 140 .end = 0xfffffe9f,
138 .flags = IORESOURCE_MEM, 141 .flags = IORESOURCE_MEM,
@@ -154,16 +157,13 @@ static struct platform_device tmu0_device = {
154}; 157};
155 158
156static struct sh_timer_config tmu1_platform_data = { 159static struct sh_timer_config tmu1_platform_data = {
157 .name = "TMU1",
158 .channel_offset = 0xe, 160 .channel_offset = 0xe,
159 .timer_bit = 1, 161 .timer_bit = 1,
160 .clk = "peripheral_clk",
161 .clocksource_rating = 200, 162 .clocksource_rating = 200,
162}; 163};
163 164
164static struct resource tmu1_resources[] = { 165static struct resource tmu1_resources[] = {
165 [0] = { 166 [0] = {
166 .name = "TMU1",
167 .start = 0xfffffea0, 167 .start = 0xfffffea0,
168 .end = 0xfffffeab, 168 .end = 0xfffffeab,
169 .flags = IORESOURCE_MEM, 169 .flags = IORESOURCE_MEM,
@@ -185,15 +185,12 @@ static struct platform_device tmu1_device = {
185}; 185};
186 186
187static struct sh_timer_config tmu2_platform_data = { 187static struct sh_timer_config tmu2_platform_data = {
188 .name = "TMU2",
189 .channel_offset = 0x1a, 188 .channel_offset = 0x1a,
190 .timer_bit = 2, 189 .timer_bit = 2,
191 .clk = "peripheral_clk",
192}; 190};
193 191
194static struct resource tmu2_resources[] = { 192static struct resource tmu2_resources[] = {
195 [0] = { 193 [0] = {
196 .name = "TMU2",
197 .start = 0xfffffeac, 194 .start = 0xfffffeac,
198 .end = 0xfffffebb, 195 .end = 0xfffffebb,
199 .flags = IORESOURCE_MEM, 196 .flags = IORESOURCE_MEM,
@@ -215,10 +212,11 @@ static struct platform_device tmu2_device = {
215}; 212};
216 213
217static struct platform_device *sh7705_devices[] __initdata = { 214static struct platform_device *sh7705_devices[] __initdata = {
215 &scif0_device,
216 &scif1_device,
218 &tmu0_device, 217 &tmu0_device,
219 &tmu1_device, 218 &tmu1_device,
220 &tmu2_device, 219 &tmu2_device,
221 &sci_device,
222 &rtc_device, 220 &rtc_device,
223}; 221};
224 222
@@ -227,9 +225,11 @@ static int __init sh7705_devices_setup(void)
227 return platform_add_devices(sh7705_devices, 225 return platform_add_devices(sh7705_devices,
228 ARRAY_SIZE(sh7705_devices)); 226 ARRAY_SIZE(sh7705_devices));
229} 227}
230__initcall(sh7705_devices_setup); 228arch_initcall(sh7705_devices_setup);
231 229
232static struct platform_device *sh7705_early_devices[] __initdata = { 230static struct platform_device *sh7705_early_devices[] __initdata = {
231 &scif0_device,
232 &scif1_device,
233 &tmu0_device, 233 &tmu0_device,
234 &tmu1_device, 234 &tmu1_device,
235 &tmu2_device, 235 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 83c9a5a39685..4551ad647c2c 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -106,62 +106,70 @@ static struct platform_device rtc_device = {
106 .resource = rtc_resources, 106 .resource = rtc_resources,
107}; 107};
108 108
109static struct plat_sci_port sci_platform_data[] = { 109static struct plat_sci_port scif0_platform_data = {
110 { 110 .mapbase = 0xfffffe80,
111 .mapbase = 0xfffffe80, 111 .flags = UPF_BOOT_AUTOCONF,
112 .flags = UPF_BOOT_AUTOCONF, 112 .scscr = SCSCR_TE | SCSCR_RE,
113 .scscr = SCSCR_TE | SCSCR_RE, 113 .scbrr_algo_id = SCBRR_ALGO_2,
114 .scbrr_algo_id = SCBRR_ALGO_2, 114 .type = PORT_SCI,
115 .type = PORT_SCI, 115 .irqs = { 23, 23, 23, 0 },
116 .irqs = { 23, 23, 23, 0 }, 116};
117
118static struct platform_device scif0_device = {
119 .name = "sh-sci",
120 .id = 0,
121 .dev = {
122 .platform_data = &scif0_platform_data,
117 }, 123 },
124};
118#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ 125#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
119 defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 126 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
120 defined(CONFIG_CPU_SUBTYPE_SH7709) 127 defined(CONFIG_CPU_SUBTYPE_SH7709)
121 { 128static struct plat_sci_port scif1_platform_data = {
122 .mapbase = 0xa4000150, 129 .mapbase = 0xa4000150,
123 .flags = UPF_BOOT_AUTOCONF, 130 .flags = UPF_BOOT_AUTOCONF,
124 .scscr = SCSCR_TE | SCSCR_RE, 131 .scscr = SCSCR_TE | SCSCR_RE,
125 .scbrr_algo_id = SCBRR_ALGO_2, 132 .scbrr_algo_id = SCBRR_ALGO_2,
126 .type = PORT_SCIF, 133 .type = PORT_SCIF,
127 .irqs = { 56, 56, 56, 56 }, 134 .irqs = { 56, 56, 56, 56 },
135};
136
137static struct platform_device scif1_device = {
138 .name = "sh-sci",
139 .id = 1,
140 .dev = {
141 .platform_data = &scif1_platform_data,
128 }, 142 },
143};
129#endif 144#endif
130#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 145#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
131 defined(CONFIG_CPU_SUBTYPE_SH7709) 146 defined(CONFIG_CPU_SUBTYPE_SH7709)
132 { 147static struct plat_sci_port scif2_platform_data = {
133 .mapbase = 0xa4000140, 148 .mapbase = 0xa4000140,
134 .flags = UPF_BOOT_AUTOCONF, 149 .flags = UPF_BOOT_AUTOCONF,
135 .scscr = SCSCR_TE | SCSCR_RE, 150 .scscr = SCSCR_TE | SCSCR_RE,
136 .scbrr_algo_id = SCBRR_ALGO_2, 151 .scbrr_algo_id = SCBRR_ALGO_2,
137 .type = PORT_IRDA, 152 .type = PORT_IRDA,
138 .irqs = { 52, 52, 52, 52 }, 153 .irqs = { 52, 52, 52, 52 },
139 },
140#endif
141 {
142 .flags = 0,
143 }
144}; 154};
145 155
146static struct platform_device sci_device = { 156static struct platform_device scif2_device = {
147 .name = "sh-sci", 157 .name = "sh-sci",
148 .id = -1, 158 .id = 2,
149 .dev = { 159 .dev = {
150 .platform_data = sci_platform_data, 160 .platform_data = &scif2_platform_data,
151 }, 161 },
152}; 162};
163#endif
153 164
154static struct sh_timer_config tmu0_platform_data = { 165static struct sh_timer_config tmu0_platform_data = {
155 .name = "TMU0",
156 .channel_offset = 0x02, 166 .channel_offset = 0x02,
157 .timer_bit = 0, 167 .timer_bit = 0,
158 .clk = "peripheral_clk",
159 .clockevent_rating = 200, 168 .clockevent_rating = 200,
160}; 169};
161 170
162static struct resource tmu0_resources[] = { 171static struct resource tmu0_resources[] = {
163 [0] = { 172 [0] = {
164 .name = "TMU0",
165 .start = 0xfffffe94, 173 .start = 0xfffffe94,
166 .end = 0xfffffe9f, 174 .end = 0xfffffe9f,
167 .flags = IORESOURCE_MEM, 175 .flags = IORESOURCE_MEM,
@@ -183,16 +191,13 @@ static struct platform_device tmu0_device = {
183}; 191};
184 192
185static struct sh_timer_config tmu1_platform_data = { 193static struct sh_timer_config tmu1_platform_data = {
186 .name = "TMU1",
187 .channel_offset = 0xe, 194 .channel_offset = 0xe,
188 .timer_bit = 1, 195 .timer_bit = 1,
189 .clk = "peripheral_clk",
190 .clocksource_rating = 200, 196 .clocksource_rating = 200,
191}; 197};
192 198
193static struct resource tmu1_resources[] = { 199static struct resource tmu1_resources[] = {
194 [0] = { 200 [0] = {
195 .name = "TMU1",
196 .start = 0xfffffea0, 201 .start = 0xfffffea0,
197 .end = 0xfffffeab, 202 .end = 0xfffffeab,
198 .flags = IORESOURCE_MEM, 203 .flags = IORESOURCE_MEM,
@@ -214,15 +219,12 @@ static struct platform_device tmu1_device = {
214}; 219};
215 220
216static struct sh_timer_config tmu2_platform_data = { 221static struct sh_timer_config tmu2_platform_data = {
217 .name = "TMU2",
218 .channel_offset = 0x1a, 222 .channel_offset = 0x1a,
219 .timer_bit = 2, 223 .timer_bit = 2,
220 .clk = "peripheral_clk",
221}; 224};
222 225
223static struct resource tmu2_resources[] = { 226static struct resource tmu2_resources[] = {
224 [0] = { 227 [0] = {
225 .name = "TMU2",
226 .start = 0xfffffeac, 228 .start = 0xfffffeac,
227 .end = 0xfffffebb, 229 .end = 0xfffffebb,
228 .flags = IORESOURCE_MEM, 230 .flags = IORESOURCE_MEM,
@@ -244,10 +246,19 @@ static struct platform_device tmu2_device = {
244}; 246};
245 247
246static struct platform_device *sh770x_devices[] __initdata = { 248static struct platform_device *sh770x_devices[] __initdata = {
249 &scif0_device,
250#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
251 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
252 defined(CONFIG_CPU_SUBTYPE_SH7709)
253 &scif1_device,
254#endif
255#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
256 defined(CONFIG_CPU_SUBTYPE_SH7709)
257 &scif2_device,
258#endif
247 &tmu0_device, 259 &tmu0_device,
248 &tmu1_device, 260 &tmu1_device,
249 &tmu2_device, 261 &tmu2_device,
250 &sci_device,
251 &rtc_device, 262 &rtc_device,
252}; 263};
253 264
@@ -256,9 +267,19 @@ static int __init sh770x_devices_setup(void)
256 return platform_add_devices(sh770x_devices, 267 return platform_add_devices(sh770x_devices,
257 ARRAY_SIZE(sh770x_devices)); 268 ARRAY_SIZE(sh770x_devices));
258} 269}
259__initcall(sh770x_devices_setup); 270arch_initcall(sh770x_devices_setup);
260 271
261static struct platform_device *sh770x_early_devices[] __initdata = { 272static struct platform_device *sh770x_early_devices[] __initdata = {
273 &scif0_device,
274#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
275 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
276 defined(CONFIG_CPU_SUBTYPE_SH7709)
277 &scif1_device,
278#endif
279#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
280 defined(CONFIG_CPU_SUBTYPE_SH7709)
281 &scif2_device,
282#endif
262 &tmu0_device, 283 &tmu0_device,
263 &tmu1_device, 284 &tmu1_device,
264 &tmu2_device, 285 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 9a60ffd34a9f..78f6b01d42c3 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -96,48 +96,50 @@ static struct platform_device rtc_device = {
96 }, 96 },
97}; 97};
98 98
99static struct plat_sci_port sci_platform_data[] = { 99static struct plat_sci_port scif0_platform_data = {
100 { 100 .mapbase = 0xa4400000,
101 .mapbase = 0xa4400000, 101 .flags = UPF_BOOT_AUTOCONF,
102 .flags = UPF_BOOT_AUTOCONF, 102 .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
103 .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE | 103 SCSCR_CKE1 | SCSCR_CKE0,
104 SCSCR_CKE1 | SCSCR_CKE0, 104 .scbrr_algo_id = SCBRR_ALGO_2,
105 .scbrr_algo_id = SCBRR_ALGO_2, 105 .type = PORT_SCIF,
106 .type = PORT_SCIF, 106 .irqs = { 52, 52, 52, 52 },
107 .irqs = { 52, 52, 52, 52 }, 107};
108 }, { 108
109 .mapbase = 0xa4410000, 109static struct platform_device scif0_device = {
110 .flags = UPF_BOOT_AUTOCONF,
111 .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
112 SCSCR_CKE1 | SCSCR_CKE0,
113 .scbrr_algo_id = SCBRR_ALGO_2,
114 .type = PORT_SCIF,
115 .irqs = { 56, 56, 56, 56 },
116 }, {
117
118 .flags = 0,
119 }
120};
121
122static struct platform_device sci_device = {
123 .name = "sh-sci", 110 .name = "sh-sci",
124 .id = -1, 111 .id = 0,
112 .dev = {
113 .platform_data = &scif0_platform_data,
114 },
115};
116
117static struct plat_sci_port scif1_platform_data = {
118 .mapbase = 0xa4410000,
119 .flags = UPF_BOOT_AUTOCONF,
120 .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
121 SCSCR_CKE1 | SCSCR_CKE0,
122 .scbrr_algo_id = SCBRR_ALGO_2,
123 .type = PORT_SCIF,
124 .irqs = { 56, 56, 56, 56 },
125};
126
127static struct platform_device scif1_device = {
128 .name = "sh-sci",
129 .id = 1,
125 .dev = { 130 .dev = {
126 .platform_data = sci_platform_data, 131 .platform_data = &scif1_platform_data,
127 }, 132 },
128}; 133};
129 134
130static struct sh_timer_config tmu0_platform_data = { 135static struct sh_timer_config tmu0_platform_data = {
131 .name = "TMU0",
132 .channel_offset = 0x02, 136 .channel_offset = 0x02,
133 .timer_bit = 0, 137 .timer_bit = 0,
134 .clk = "peripheral_clk",
135 .clockevent_rating = 200, 138 .clockevent_rating = 200,
136}; 139};
137 140
138static struct resource tmu0_resources[] = { 141static struct resource tmu0_resources[] = {
139 [0] = { 142 [0] = {
140 .name = "TMU0",
141 .start = 0xa412fe94, 143 .start = 0xa412fe94,
142 .end = 0xa412fe9f, 144 .end = 0xa412fe9f,
143 .flags = IORESOURCE_MEM, 145 .flags = IORESOURCE_MEM,
@@ -159,16 +161,13 @@ static struct platform_device tmu0_device = {
159}; 161};
160 162
161static struct sh_timer_config tmu1_platform_data = { 163static struct sh_timer_config tmu1_platform_data = {
162 .name = "TMU1",
163 .channel_offset = 0xe, 164 .channel_offset = 0xe,
164 .timer_bit = 1, 165 .timer_bit = 1,
165 .clk = "peripheral_clk",
166 .clocksource_rating = 200, 166 .clocksource_rating = 200,
167}; 167};
168 168
169static struct resource tmu1_resources[] = { 169static struct resource tmu1_resources[] = {
170 [0] = { 170 [0] = {
171 .name = "TMU1",
172 .start = 0xa412fea0, 171 .start = 0xa412fea0,
173 .end = 0xa412feab, 172 .end = 0xa412feab,
174 .flags = IORESOURCE_MEM, 173 .flags = IORESOURCE_MEM,
@@ -190,15 +189,12 @@ static struct platform_device tmu1_device = {
190}; 189};
191 190
192static struct sh_timer_config tmu2_platform_data = { 191static struct sh_timer_config tmu2_platform_data = {
193 .name = "TMU2",
194 .channel_offset = 0x1a, 192 .channel_offset = 0x1a,
195 .timer_bit = 2, 193 .timer_bit = 2,
196 .clk = "peripheral_clk",
197}; 194};
198 195
199static struct resource tmu2_resources[] = { 196static struct resource tmu2_resources[] = {
200 [0] = { 197 [0] = {
201 .name = "TMU2",
202 .start = 0xa412feac, 198 .start = 0xa412feac,
203 .end = 0xa412feb5, 199 .end = 0xa412feb5,
204 .flags = IORESOURCE_MEM, 200 .flags = IORESOURCE_MEM,
@@ -220,10 +216,11 @@ static struct platform_device tmu2_device = {
220}; 216};
221 217
222static struct platform_device *sh7710_devices[] __initdata = { 218static struct platform_device *sh7710_devices[] __initdata = {
219 &scif0_device,
220 &scif1_device,
223 &tmu0_device, 221 &tmu0_device,
224 &tmu1_device, 222 &tmu1_device,
225 &tmu2_device, 223 &tmu2_device,
226 &sci_device,
227 &rtc_device, 224 &rtc_device,
228}; 225};
229 226
@@ -232,9 +229,11 @@ static int __init sh7710_devices_setup(void)
232 return platform_add_devices(sh7710_devices, 229 return platform_add_devices(sh7710_devices,
233 ARRAY_SIZE(sh7710_devices)); 230 ARRAY_SIZE(sh7710_devices));
234} 231}
235__initcall(sh7710_devices_setup); 232arch_initcall(sh7710_devices_setup);
236 233
237static struct platform_device *sh7710_early_devices[] __initdata = { 234static struct platform_device *sh7710_early_devices[] __initdata = {
235 &scif0_device,
236 &scif1_device,
238 &tmu0_device, 237 &tmu0_device,
239 &tmu1_device, 238 &tmu1_device,
240 &tmu2_device, 239 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 48d50a65db32..365b94a6fcb7 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -48,31 +48,37 @@ static struct platform_device rtc_device = {
48 }, 48 },
49}; 49};
50 50
51static struct plat_sci_port sci_platform_data[] = { 51static struct plat_sci_port scif0_platform_data = {
52 { 52 .mapbase = 0xa4430000,
53 .mapbase = 0xa4430000, 53 .flags = UPF_BOOT_AUTOCONF,
54 .flags = UPF_BOOT_AUTOCONF, 54 .scscr = SCSCR_RE | SCSCR_TE,
55 .scscr = SCSCR_RE | SCSCR_TE, 55 .scbrr_algo_id = SCBRR_ALGO_4,
56 .scbrr_algo_id = SCBRR_ALGO_4, 56 .type = PORT_SCIF,
57 .type = PORT_SCIF, 57 .irqs = { 80, 80, 80, 80 },
58 .irqs = { 80, 80, 80, 80 }, 58};
59 }, { 59
60 .mapbase = 0xa4438000, 60static struct platform_device scif0_device = {
61 .flags = UPF_BOOT_AUTOCONF,
62 .scscr = SCSCR_RE | SCSCR_TE,
63 .scbrr_algo_id = SCBRR_ALGO_4,
64 .type = PORT_SCIF,
65 .irqs = { 81, 81, 81, 81 },
66 }, {
67 .flags = 0,
68 }
69};
70
71static struct platform_device sci_device = {
72 .name = "sh-sci", 61 .name = "sh-sci",
73 .id = -1, 62 .id = 0,
63 .dev = {
64 .platform_data = &scif0_platform_data,
65 },
66};
67
68static struct plat_sci_port scif1_platform_data = {
69 .mapbase = 0xa4438000,
70 .flags = UPF_BOOT_AUTOCONF,
71 .scscr = SCSCR_RE | SCSCR_TE,
72 .scbrr_algo_id = SCBRR_ALGO_4,
73 .type = PORT_SCIF,
74 .irqs = { 81, 81, 81, 81 },
75};
76
77static struct platform_device scif1_device = {
78 .name = "sh-sci",
79 .id = 1,
74 .dev = { 80 .dev = {
75 .platform_data = sci_platform_data, 81 .platform_data = &scif1_platform_data,
76 }, 82 },
77}; 83};
78 84
@@ -128,17 +134,14 @@ static struct platform_device usbf_device = {
128}; 134};
129 135
130static struct sh_timer_config cmt0_platform_data = { 136static struct sh_timer_config cmt0_platform_data = {
131 .name = "CMT0",
132 .channel_offset = 0x10, 137 .channel_offset = 0x10,
133 .timer_bit = 0, 138 .timer_bit = 0,
134 .clk = "peripheral_clk",
135 .clockevent_rating = 125, 139 .clockevent_rating = 125,
136 .clocksource_rating = 125, 140 .clocksource_rating = 125,
137}; 141};
138 142
139static struct resource cmt0_resources[] = { 143static struct resource cmt0_resources[] = {
140 [0] = { 144 [0] = {
141 .name = "CMT0",
142 .start = 0x044a0010, 145 .start = 0x044a0010,
143 .end = 0x044a001b, 146 .end = 0x044a001b,
144 .flags = IORESOURCE_MEM, 147 .flags = IORESOURCE_MEM,
@@ -160,15 +163,12 @@ static struct platform_device cmt0_device = {
160}; 163};
161 164
162static struct sh_timer_config cmt1_platform_data = { 165static struct sh_timer_config cmt1_platform_data = {
163 .name = "CMT1",
164 .channel_offset = 0x20, 166 .channel_offset = 0x20,
165 .timer_bit = 1, 167 .timer_bit = 1,
166 .clk = "peripheral_clk",
167}; 168};
168 169
169static struct resource cmt1_resources[] = { 170static struct resource cmt1_resources[] = {
170 [0] = { 171 [0] = {
171 .name = "CMT1",
172 .start = 0x044a0020, 172 .start = 0x044a0020,
173 .end = 0x044a002b, 173 .end = 0x044a002b,
174 .flags = IORESOURCE_MEM, 174 .flags = IORESOURCE_MEM,
@@ -190,15 +190,12 @@ static struct platform_device cmt1_device = {
190}; 190};
191 191
192static struct sh_timer_config cmt2_platform_data = { 192static struct sh_timer_config cmt2_platform_data = {
193 .name = "CMT2",
194 .channel_offset = 0x30, 193 .channel_offset = 0x30,
195 .timer_bit = 2, 194 .timer_bit = 2,
196 .clk = "peripheral_clk",
197}; 195};
198 196
199static struct resource cmt2_resources[] = { 197static struct resource cmt2_resources[] = {
200 [0] = { 198 [0] = {
201 .name = "CMT2",
202 .start = 0x044a0030, 199 .start = 0x044a0030,
203 .end = 0x044a003b, 200 .end = 0x044a003b,
204 .flags = IORESOURCE_MEM, 201 .flags = IORESOURCE_MEM,
@@ -220,15 +217,12 @@ static struct platform_device cmt2_device = {
220}; 217};
221 218
222static struct sh_timer_config cmt3_platform_data = { 219static struct sh_timer_config cmt3_platform_data = {
223 .name = "CMT3",
224 .channel_offset = 0x40, 220 .channel_offset = 0x40,
225 .timer_bit = 3, 221 .timer_bit = 3,
226 .clk = "peripheral_clk",
227}; 222};
228 223
229static struct resource cmt3_resources[] = { 224static struct resource cmt3_resources[] = {
230 [0] = { 225 [0] = {
231 .name = "CMT3",
232 .start = 0x044a0040, 226 .start = 0x044a0040,
233 .end = 0x044a004b, 227 .end = 0x044a004b,
234 .flags = IORESOURCE_MEM, 228 .flags = IORESOURCE_MEM,
@@ -250,15 +244,12 @@ static struct platform_device cmt3_device = {
250}; 244};
251 245
252static struct sh_timer_config cmt4_platform_data = { 246static struct sh_timer_config cmt4_platform_data = {
253 .name = "CMT4",
254 .channel_offset = 0x50, 247 .channel_offset = 0x50,
255 .timer_bit = 4, 248 .timer_bit = 4,
256 .clk = "peripheral_clk",
257}; 249};
258 250
259static struct resource cmt4_resources[] = { 251static struct resource cmt4_resources[] = {
260 [0] = { 252 [0] = {
261 .name = "CMT4",
262 .start = 0x044a0050, 253 .start = 0x044a0050,
263 .end = 0x044a005b, 254 .end = 0x044a005b,
264 .flags = IORESOURCE_MEM, 255 .flags = IORESOURCE_MEM,
@@ -280,16 +271,13 @@ static struct platform_device cmt4_device = {
280}; 271};
281 272
282static struct sh_timer_config tmu0_platform_data = { 273static struct sh_timer_config tmu0_platform_data = {
283 .name = "TMU0",
284 .channel_offset = 0x02, 274 .channel_offset = 0x02,
285 .timer_bit = 0, 275 .timer_bit = 0,
286 .clk = "peripheral_clk",
287 .clockevent_rating = 200, 276 .clockevent_rating = 200,
288}; 277};
289 278
290static struct resource tmu0_resources[] = { 279static struct resource tmu0_resources[] = {
291 [0] = { 280 [0] = {
292 .name = "TMU0",
293 .start = 0xa412fe94, 281 .start = 0xa412fe94,
294 .end = 0xa412fe9f, 282 .end = 0xa412fe9f,
295 .flags = IORESOURCE_MEM, 283 .flags = IORESOURCE_MEM,
@@ -311,16 +299,13 @@ static struct platform_device tmu0_device = {
311}; 299};
312 300
313static struct sh_timer_config tmu1_platform_data = { 301static struct sh_timer_config tmu1_platform_data = {
314 .name = "TMU1",
315 .channel_offset = 0xe, 302 .channel_offset = 0xe,
316 .timer_bit = 1, 303 .timer_bit = 1,
317 .clk = "peripheral_clk",
318 .clocksource_rating = 200, 304 .clocksource_rating = 200,
319}; 305};
320 306
321static struct resource tmu1_resources[] = { 307static struct resource tmu1_resources[] = {
322 [0] = { 308 [0] = {
323 .name = "TMU1",
324 .start = 0xa412fea0, 309 .start = 0xa412fea0,
325 .end = 0xa412feab, 310 .end = 0xa412feab,
326 .flags = IORESOURCE_MEM, 311 .flags = IORESOURCE_MEM,
@@ -342,15 +327,12 @@ static struct platform_device tmu1_device = {
342}; 327};
343 328
344static struct sh_timer_config tmu2_platform_data = { 329static struct sh_timer_config tmu2_platform_data = {
345 .name = "TMU2",
346 .channel_offset = 0x1a, 330 .channel_offset = 0x1a,
347 .timer_bit = 2, 331 .timer_bit = 2,
348 .clk = "peripheral_clk",
349}; 332};
350 333
351static struct resource tmu2_resources[] = { 334static struct resource tmu2_resources[] = {
352 [0] = { 335 [0] = {
353 .name = "TMU2",
354 .start = 0xa412feac, 336 .start = 0xa412feac,
355 .end = 0xa412feb5, 337 .end = 0xa412feb5,
356 .flags = IORESOURCE_MEM, 338 .flags = IORESOURCE_MEM,
@@ -372,6 +354,8 @@ static struct platform_device tmu2_device = {
372}; 354};
373 355
374static struct platform_device *sh7720_devices[] __initdata = { 356static struct platform_device *sh7720_devices[] __initdata = {
357 &scif0_device,
358 &scif1_device,
375 &cmt0_device, 359 &cmt0_device,
376 &cmt1_device, 360 &cmt1_device,
377 &cmt2_device, 361 &cmt2_device,
@@ -381,7 +365,6 @@ static struct platform_device *sh7720_devices[] __initdata = {
381 &tmu1_device, 365 &tmu1_device,
382 &tmu2_device, 366 &tmu2_device,
383 &rtc_device, 367 &rtc_device,
384 &sci_device,
385 &usb_ohci_device, 368 &usb_ohci_device,
386 &usbf_device, 369 &usbf_device,
387}; 370};
@@ -391,9 +374,11 @@ static int __init sh7720_devices_setup(void)
391 return platform_add_devices(sh7720_devices, 374 return platform_add_devices(sh7720_devices,
392 ARRAY_SIZE(sh7720_devices)); 375 ARRAY_SIZE(sh7720_devices));
393} 376}
394__initcall(sh7720_devices_setup); 377arch_initcall(sh7720_devices_setup);
395 378
396static struct platform_device *sh7720_early_devices[] __initdata = { 379static struct platform_device *sh7720_early_devices[] __initdata = {
380 &scif0_device,
381 &scif1_device,
397 &cmt0_device, 382 &cmt0_device,
398 &cmt1_device, 383 &cmt1_device,
399 &cmt2_device, 384 &cmt2_device,
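The SCIF rework visible in the sh7710 and sh7720 hunks above repeats across the whole series: the single "sh-sci" device that carried an array of plat_sci_port entries (closed by a .flags = 0 sentinel) is split into one plat_sci_port plus one platform_device per channel, each with its own .id, and the per-port devices are added to both the normal and the early device lists so the ports are registered by the early platform code as well. A minimal sketch of one such pair follows; the mapbase, SCSCR bits and IRQ numbers are simply copied from the SH7720 SCIF0 hunk above and stand in for whatever the target SoC really uses.

#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>

/* One port's data; values lifted from the SH7720 SCIF0 hunk above. */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xa4430000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIF,
	.irqs		= { 80, 80, 80, 80 },
};

/* One platform device per port, with a unique id instead of id = -1. */
static struct platform_device scif0_device = {
	.name	= "sh-sci",
	.id	= 0,
	.dev	= {
		.platform_data = &scif0_platform_data,
	},
};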
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 203b18347b83..3a1dbc709831 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -9,6 +9,11 @@ obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o)
9obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o 9obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
10obj-$(CONFIG_SH_STORE_QUEUES) += sq.o 10obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
11 11
12# Perf events
13perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o
14perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o
15perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o
16
12# CPU subtype setup 17# CPU subtype setup
13obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o 18obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
14obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o 19obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o
@@ -27,4 +32,5 @@ endif
27# Additional clocks by subtype 32# Additional clocks by subtype
28clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o 33clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o
29 34
30obj-y += $(clock-y) 35obj-y += $(clock-y)
36obj-$(CONFIG_PERF_EVENTS) += $(perf-y)
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 21421e34e7d5..3f6f8e98635c 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -12,9 +12,10 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/clkdev.h>
15#include <asm/clock.h> 17#include <asm/clock.h>
16#include <asm/freq.h> 18#include <asm/freq.h>
17#include <asm/io.h>
18 19
19#define CPG2_FRQCR3 0xfe0a0018 20#define CPG2_FRQCR3 0xfe0a0018
20 21
@@ -23,7 +24,7 @@ static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 };
23 24
24static unsigned long emi_clk_recalc(struct clk *clk) 25static unsigned long emi_clk_recalc(struct clk *clk)
25{ 26{
26 int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007; 27 int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
27 return clk->parent->rate / frqcr3_divisors[idx]; 28 return clk->parent->rate / frqcr3_divisors[idx];
28} 29}
29 30
@@ -45,14 +46,13 @@ static struct clk_ops sh4202_emi_clk_ops = {
45}; 46};
46 47
47static struct clk sh4202_emi_clk = { 48static struct clk sh4202_emi_clk = {
48 .name = "emi_clk",
49 .flags = CLK_ENABLE_ON_INIT, 49 .flags = CLK_ENABLE_ON_INIT,
50 .ops = &sh4202_emi_clk_ops, 50 .ops = &sh4202_emi_clk_ops,
51}; 51};
52 52
53static unsigned long femi_clk_recalc(struct clk *clk) 53static unsigned long femi_clk_recalc(struct clk *clk)
54{ 54{
55 int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007; 55 int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
56 return clk->parent->rate / frqcr3_divisors[idx]; 56 return clk->parent->rate / frqcr3_divisors[idx];
57} 57}
58 58
@@ -61,7 +61,6 @@ static struct clk_ops sh4202_femi_clk_ops = {
61}; 61};
62 62
63static struct clk sh4202_femi_clk = { 63static struct clk sh4202_femi_clk = {
64 .name = "femi_clk",
65 .flags = CLK_ENABLE_ON_INIT, 64 .flags = CLK_ENABLE_ON_INIT,
66 .ops = &sh4202_femi_clk_ops, 65 .ops = &sh4202_femi_clk_ops,
67}; 66};
@@ -82,8 +81,7 @@ static void shoc_clk_init(struct clk *clk)
82 for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) { 81 for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
83 int divisor = frqcr3_divisors[i]; 82 int divisor = frqcr3_divisors[i];
84 83
85 if (clk->ops->set_rate(clk, clk->parent->rate / 84 if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
86 divisor, 0) == 0)
87 break; 85 break;
88 } 86 }
89 87
@@ -92,7 +90,7 @@ static void shoc_clk_init(struct clk *clk)
92 90
93static unsigned long shoc_clk_recalc(struct clk *clk) 91static unsigned long shoc_clk_recalc(struct clk *clk)
94{ 92{
95 int idx = (ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007; 93 int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
96 return clk->parent->rate / frqcr3_divisors[idx]; 94 return clk->parent->rate / frqcr3_divisors[idx];
97} 95}
98 96
@@ -111,7 +109,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
111 return 0; 109 return 0;
112} 110}
113 111
114static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id) 112static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
115{ 113{
116 unsigned long frqcr3; 114 unsigned long frqcr3;
117 unsigned int tmp; 115 unsigned int tmp;
@@ -122,10 +120,10 @@ static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
122 120
123 tmp = frqcr3_lookup(clk, rate); 121 tmp = frqcr3_lookup(clk, rate);
124 122
125 frqcr3 = ctrl_inl(CPG2_FRQCR3); 123 frqcr3 = __raw_readl(CPG2_FRQCR3);
126 frqcr3 &= ~(0x0007 << 6); 124 frqcr3 &= ~(0x0007 << 6);
127 frqcr3 |= tmp << 6; 125 frqcr3 |= tmp << 6;
128 ctrl_outl(frqcr3, CPG2_FRQCR3); 126 __raw_writel(frqcr3, CPG2_FRQCR3);
129 127
130 clk->rate = clk->parent->rate / frqcr3_divisors[tmp]; 128 clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
131 129
@@ -139,7 +137,6 @@ static struct clk_ops sh4202_shoc_clk_ops = {
139}; 137};
140 138
141static struct clk sh4202_shoc_clk = { 139static struct clk sh4202_shoc_clk = {
142 .name = "shoc_clk",
143 .flags = CLK_ENABLE_ON_INIT, 140 .flags = CLK_ENABLE_ON_INIT,
144 .ops = &sh4202_shoc_clk_ops, 141 .ops = &sh4202_shoc_clk_ops,
145}; 142};
@@ -150,6 +147,15 @@ static struct clk *sh4202_onchip_clocks[] = {
150 &sh4202_shoc_clk, 147 &sh4202_shoc_clk,
151}; 148};
152 149
150#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
151
152static struct clk_lookup lookups[] = {
153 /* main clocks */
154 CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
155 CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk),
156 CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk),
157};
158
153int __init arch_clk_init(void) 159int __init arch_clk_init(void)
154{ 160{
155 struct clk *clk; 161 struct clk *clk;
@@ -167,5 +173,7 @@ int __init arch_clk_init(void)
167 173
168 clk_put(clk); 174 clk_put(clk);
169 175
176 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
177
170 return ret; 178 return ret;
171} 179}
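The clock-sh4-202.c hunk drops the .name fields from the static struct clk definitions and instead publishes the clocks through a clkdev lookup table registered with clkdev_add_table(). Once that table is in place, consumers find a clock by its connection id rather than by the old name field. A hedged sketch of such a lookup follows; the probe-style function is purely illustrative, and only the "emi_clk" con_id comes from the table above.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Illustrative consumer: look up one of the clocks registered above. */
static int sh4202_emi_report(void)
{
	struct clk *emi;

	/* NULL dev_id: match purely on the connection id from the table. */
	emi = clk_get(NULL, "emi_clk");
	if (IS_ERR(emi))
		return PTR_ERR(emi);

	pr_info("EMI clock: %lu Hz\n", clk_get_rate(emi));

	clk_put(emi);
	return 0;
}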
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c
index 73294d9cd049..5add75c1f539 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
28 28
29static void master_clk_init(struct clk *clk) 29static void master_clk_init(struct clk *clk)
30{ 30{
31 clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007]; 31 clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
32} 32}
33 33
34static struct clk_ops sh4_master_clk_ops = { 34static struct clk_ops sh4_master_clk_ops = {
@@ -37,7 +37,7 @@ static struct clk_ops sh4_master_clk_ops = {
37 37
38static unsigned long module_clk_recalc(struct clk *clk) 38static unsigned long module_clk_recalc(struct clk *clk)
39{ 39{
40 int idx = (ctrl_inw(FRQCR) & 0x0007); 40 int idx = (__raw_readw(FRQCR) & 0x0007);
41 return clk->parent->rate / pfc_divisors[idx]; 41 return clk->parent->rate / pfc_divisors[idx];
42} 42}
43 43
@@ -47,7 +47,7 @@ static struct clk_ops sh4_module_clk_ops = {
47 47
48static unsigned long bus_clk_recalc(struct clk *clk) 48static unsigned long bus_clk_recalc(struct clk *clk)
49{ 49{
50 int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007; 50 int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
51 return clk->parent->rate / bfc_divisors[idx]; 51 return clk->parent->rate / bfc_divisors[idx];
52} 52}
53 53
@@ -57,7 +57,7 @@ static struct clk_ops sh4_bus_clk_ops = {
57 57
58static unsigned long cpu_clk_recalc(struct clk *clk) 58static unsigned long cpu_clk_recalc(struct clk *clk)
59{ 59{
60 int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007; 60 int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
61 return clk->parent->rate / ifc_divisors[idx]; 61 return clk->parent->rate / ifc_divisors[idx];
62} 62}
63 63
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e3ea5411da6d..447482d7f65e 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags;
41 41
42/* 42/*
43 * Save FPU registers onto task structure. 43 * Save FPU registers onto task structure.
44 * Assume called with FPU enabled (SR.FD=0).
45 */ 44 */
46void save_fpu(struct task_struct *tsk, struct pt_regs *regs) 45void save_fpu(struct task_struct *tsk)
47{ 46{
48 unsigned long dummy; 47 unsigned long dummy;
49 48
50 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
51 enable_fpu(); 49 enable_fpu();
52 asm volatile ("sts.l fpul, @-%0\n\t" 50 asm volatile ("sts.l fpul, @-%0\n\t"
53 "sts.l fpscr, @-%0\n\t" 51 "sts.l fpscr, @-%0\n\t"
@@ -87,15 +85,14 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
87 "fmov.s fr1, @-%0\n\t" 85 "fmov.s fr1, @-%0\n\t"
88 "fmov.s fr0, @-%0\n\t" 86 "fmov.s fr0, @-%0\n\t"
89 "lds %3, fpscr\n\t":"=r" (dummy) 87 "lds %3, fpscr\n\t":"=r" (dummy)
90 :"0"((char *)(&tsk->thread.fpu.hard.status)), 88 :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
91 "r"(FPSCR_RCHG), "r"(FPSCR_INIT) 89 "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
92 :"memory"); 90 :"memory");
93 91
94 disable_fpu(); 92 disable_fpu();
95 release_fpu(regs);
96} 93}
97 94
98static void restore_fpu(struct task_struct *tsk) 95void restore_fpu(struct task_struct *tsk)
99{ 96{
100 unsigned long dummy; 97 unsigned long dummy;
101 98
@@ -138,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk)
138 "lds.l @%0+, fpscr\n\t" 135 "lds.l @%0+, fpscr\n\t"
139 "lds.l @%0+, fpul\n\t" 136 "lds.l @%0+, fpul\n\t"
140 :"=r" (dummy) 137 :"=r" (dummy)
141 :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG) 138 :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
142 :"memory"); 139 :"memory");
143 disable_fpu(); 140 disable_fpu();
144} 141}
145 142
146/*
147 * Load the FPU with signalling NANS. This bit pattern we're using
148 * has the property that no matter wether considered as single or as
149 * double precision represents signaling NANS.
150 */
151
152static void fpu_init(void)
153{
154 enable_fpu();
155 asm volatile ( "lds %0, fpul\n\t"
156 "lds %1, fpscr\n\t"
157 "fsts fpul, fr0\n\t"
158 "fsts fpul, fr1\n\t"
159 "fsts fpul, fr2\n\t"
160 "fsts fpul, fr3\n\t"
161 "fsts fpul, fr4\n\t"
162 "fsts fpul, fr5\n\t"
163 "fsts fpul, fr6\n\t"
164 "fsts fpul, fr7\n\t"
165 "fsts fpul, fr8\n\t"
166 "fsts fpul, fr9\n\t"
167 "fsts fpul, fr10\n\t"
168 "fsts fpul, fr11\n\t"
169 "fsts fpul, fr12\n\t"
170 "fsts fpul, fr13\n\t"
171 "fsts fpul, fr14\n\t"
172 "fsts fpul, fr15\n\t"
173 "frchg\n\t"
174 "fsts fpul, fr0\n\t"
175 "fsts fpul, fr1\n\t"
176 "fsts fpul, fr2\n\t"
177 "fsts fpul, fr3\n\t"
178 "fsts fpul, fr4\n\t"
179 "fsts fpul, fr5\n\t"
180 "fsts fpul, fr6\n\t"
181 "fsts fpul, fr7\n\t"
182 "fsts fpul, fr8\n\t"
183 "fsts fpul, fr9\n\t"
184 "fsts fpul, fr10\n\t"
185 "fsts fpul, fr11\n\t"
186 "fsts fpul, fr12\n\t"
187 "fsts fpul, fr13\n\t"
188 "fsts fpul, fr14\n\t"
189 "fsts fpul, fr15\n\t"
190 "frchg\n\t"
191 "lds %2, fpscr\n\t"
192 : /* no output */
193 :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
194 disable_fpu();
195}
196
197/** 143/**
198 * denormal_to_double - Given denormalized float number, 144 * denormal_to_double - Given denormalized float number,
199 * store double float 145 * store double float
@@ -285,10 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
285 /* fcnvsd */ 231 /* fcnvsd */
286 struct task_struct *tsk = current; 232 struct task_struct *tsk = current;
287 233
288 save_fpu(tsk, regs); 234 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
289 if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
290 /* FPU error */ 235 /* FPU error */
291 denormal_to_double(&tsk->thread.fpu.hard, 236 denormal_to_double(&tsk->thread.xstate->hardfpu,
292 (finsn >> 8) & 0xf); 237 (finsn >> 8) & 0xf);
293 else 238 else
294 return 0; 239 return 0;
@@ -304,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
304 249
305 n = (finsn >> 8) & 0xf; 250 n = (finsn >> 8) & 0xf;
306 m = (finsn >> 4) & 0xf; 251 m = (finsn >> 4) & 0xf;
307 hx = tsk->thread.fpu.hard.fp_regs[n]; 252 hx = tsk->thread.xstate->hardfpu.fp_regs[n];
308 hy = tsk->thread.fpu.hard.fp_regs[m]; 253 hy = tsk->thread.xstate->hardfpu.fp_regs[m];
309 fpscr = tsk->thread.fpu.hard.fpscr; 254 fpscr = tsk->thread.xstate->hardfpu.fpscr;
310 prec = fpscr & FPSCR_DBL_PRECISION; 255 prec = fpscr & FPSCR_DBL_PRECISION;
311 256
312 if ((fpscr & FPSCR_CAUSE_ERROR) 257 if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -316,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs)
316 261
317 /* FPU error because of denormal (doubles) */ 262 /* FPU error because of denormal (doubles) */
318 llx = ((long long)hx << 32) 263 llx = ((long long)hx << 32)
319 | tsk->thread.fpu.hard.fp_regs[n + 1]; 264 | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
320 lly = ((long long)hy << 32) 265 lly = ((long long)hy << 32)
321 | tsk->thread.fpu.hard.fp_regs[m + 1]; 266 | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
322 llx = float64_mul(llx, lly); 267 llx = float64_mul(llx, lly);
323 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; 268 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
324 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; 269 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
325 } else if ((fpscr & FPSCR_CAUSE_ERROR) 270 } else if ((fpscr & FPSCR_CAUSE_ERROR)
326 && (!prec && ((hx & 0x7fffffff) < 0x00800000 271 && (!prec && ((hx & 0x7fffffff) < 0x00800000
327 || (hy & 0x7fffffff) < 0x00800000))) { 272 || (hy & 0x7fffffff) < 0x00800000))) {
328 /* FPU error because of denormal (floats) */ 273 /* FPU error because of denormal (floats) */
329 hx = float32_mul(hx, hy); 274 hx = float32_mul(hx, hy);
330 tsk->thread.fpu.hard.fp_regs[n] = hx; 275 tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
331 } else 276 } else
332 return 0; 277 return 0;
333 278
@@ -342,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
342 287
343 n = (finsn >> 8) & 0xf; 288 n = (finsn >> 8) & 0xf;
344 m = (finsn >> 4) & 0xf; 289 m = (finsn >> 4) & 0xf;
345 hx = tsk->thread.fpu.hard.fp_regs[n]; 290 hx = tsk->thread.xstate->hardfpu.fp_regs[n];
346 hy = tsk->thread.fpu.hard.fp_regs[m]; 291 hy = tsk->thread.xstate->hardfpu.fp_regs[m];
347 fpscr = tsk->thread.fpu.hard.fpscr; 292 fpscr = tsk->thread.xstate->hardfpu.fpscr;
348 prec = fpscr & FPSCR_DBL_PRECISION; 293 prec = fpscr & FPSCR_DBL_PRECISION;
349 294
350 if ((fpscr & FPSCR_CAUSE_ERROR) 295 if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -354,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs)
354 299
355 /* FPU error because of denormal (doubles) */ 300 /* FPU error because of denormal (doubles) */
356 llx = ((long long)hx << 32) 301 llx = ((long long)hx << 32)
357 | tsk->thread.fpu.hard.fp_regs[n + 1]; 302 | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
358 lly = ((long long)hy << 32) 303 lly = ((long long)hy << 32)
359 | tsk->thread.fpu.hard.fp_regs[m + 1]; 304 | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
360 if ((finsn & 0xf00f) == 0xf000) 305 if ((finsn & 0xf00f) == 0xf000)
361 llx = float64_add(llx, lly); 306 llx = float64_add(llx, lly);
362 else 307 else
363 llx = float64_sub(llx, lly); 308 llx = float64_sub(llx, lly);
364 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; 309 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
365 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; 310 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
366 } else if ((fpscr & FPSCR_CAUSE_ERROR) 311 } else if ((fpscr & FPSCR_CAUSE_ERROR)
367 && (!prec && ((hx & 0x7fffffff) < 0x00800000 312 && (!prec && ((hx & 0x7fffffff) < 0x00800000
368 || (hy & 0x7fffffff) < 0x00800000))) { 313 || (hy & 0x7fffffff) < 0x00800000))) {
@@ -371,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
371 hx = float32_add(hx, hy); 316 hx = float32_add(hx, hy);
372 else 317 else
373 hx = float32_sub(hx, hy); 318 hx = float32_sub(hx, hy);
374 tsk->thread.fpu.hard.fp_regs[n] = hx; 319 tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
375 } else 320 } else
376 return 0; 321 return 0;
377 322
@@ -386,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
386 331
387 n = (finsn >> 8) & 0xf; 332 n = (finsn >> 8) & 0xf;
388 m = (finsn >> 4) & 0xf; 333 m = (finsn >> 4) & 0xf;
389 hx = tsk->thread.fpu.hard.fp_regs[n]; 334 hx = tsk->thread.xstate->hardfpu.fp_regs[n];
390 hy = tsk->thread.fpu.hard.fp_regs[m]; 335 hy = tsk->thread.xstate->hardfpu.fp_regs[m];
391 fpscr = tsk->thread.fpu.hard.fpscr; 336 fpscr = tsk->thread.xstate->hardfpu.fpscr;
392 prec = fpscr & FPSCR_DBL_PRECISION; 337 prec = fpscr & FPSCR_DBL_PRECISION;
393 338
394 if ((fpscr & FPSCR_CAUSE_ERROR) 339 if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -398,20 +343,20 @@ static int ieee_fpe_handler(struct pt_regs *regs)
398 343
399 /* FPU error because of denormal (doubles) */ 344 /* FPU error because of denormal (doubles) */
400 llx = ((long long)hx << 32) 345 llx = ((long long)hx << 32)
401 | tsk->thread.fpu.hard.fp_regs[n + 1]; 346 | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
402 lly = ((long long)hy << 32) 347 lly = ((long long)hy << 32)
403 | tsk->thread.fpu.hard.fp_regs[m + 1]; 348 | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
404 349
405 llx = float64_div(llx, lly); 350 llx = float64_div(llx, lly);
406 351
407 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; 352 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
408 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; 353 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
409 } else if ((fpscr & FPSCR_CAUSE_ERROR) 354 } else if ((fpscr & FPSCR_CAUSE_ERROR)
410 && (!prec && ((hx & 0x7fffffff) < 0x00800000 355 && (!prec && ((hx & 0x7fffffff) < 0x00800000
411 || (hy & 0x7fffffff) < 0x00800000))) { 356 || (hy & 0x7fffffff) < 0x00800000))) {
412 /* FPU error because of denormal (floats) */ 357 /* FPU error because of denormal (floats) */
413 hx = float32_div(hx, hy); 358 hx = float32_div(hx, hy);
414 tsk->thread.fpu.hard.fp_regs[n] = hx; 359 tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
415 } else 360 } else
416 return 0; 361 return 0;
417 362
@@ -424,17 +369,17 @@ static int ieee_fpe_handler(struct pt_regs *regs)
424 unsigned int hx; 369 unsigned int hx;
425 370
426 m = (finsn >> 8) & 0x7; 371 m = (finsn >> 8) & 0x7;
427 hx = tsk->thread.fpu.hard.fp_regs[m]; 372 hx = tsk->thread.xstate->hardfpu.fp_regs[m];
428 373
429 if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR) 374 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
430 && ((hx & 0x7fffffff) < 0x00100000)) { 375 && ((hx & 0x7fffffff) < 0x00100000)) {
431 /* subnormal double to float conversion */ 376 /* subnormal double to float conversion */
432 long long llx; 377 long long llx;
433 378
434 llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32) 379 llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
435 | tsk->thread.fpu.hard.fp_regs[m + 1]; 380 | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
436 381
437 tsk->thread.fpu.hard.fpul = float64_to_float32(llx); 382 tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
438 } else 383 } else
439 return 0; 384 return 0;
440 385
@@ -453,7 +398,7 @@ void float_raise(unsigned int flags)
453int float_rounding_mode(void) 398int float_rounding_mode(void)
454{ 399{
455 struct task_struct *tsk = current; 400 struct task_struct *tsk = current;
456 int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr); 401 int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
457 return roundingMode; 402 return roundingMode;
458} 403}
459 404
@@ -462,19 +407,19 @@ BUILD_TRAP_HANDLER(fpu_error)
462 struct task_struct *tsk = current; 407 struct task_struct *tsk = current;
463 TRAP_HANDLER_DECL; 408 TRAP_HANDLER_DECL;
464 409
465 save_fpu(tsk, regs); 410 __unlazy_fpu(tsk, regs);
466 fpu_exception_flags = 0; 411 fpu_exception_flags = 0;
467 if (ieee_fpe_handler(regs)) { 412 if (ieee_fpe_handler(regs)) {
468 tsk->thread.fpu.hard.fpscr &= 413 tsk->thread.xstate->hardfpu.fpscr &=
469 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); 414 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
470 tsk->thread.fpu.hard.fpscr |= fpu_exception_flags; 415 tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
471 /* Set the FPSCR flag as well as cause bits - simply 416 /* Set the FPSCR flag as well as cause bits - simply
472 * replicate the cause */ 417 * replicate the cause */
473 tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); 418 tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
474 grab_fpu(regs); 419 grab_fpu(regs);
475 restore_fpu(tsk); 420 restore_fpu(tsk);
476 set_tsk_thread_flag(tsk, TIF_USEDFPU); 421 task_thread_info(tsk)->status |= TS_USEDFPU;
477 if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & 422 if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
478 (fpu_exception_flags >> 2)) == 0) { 423 (fpu_exception_flags >> 2)) == 0) {
479 return; 424 return;
480 } 425 }
@@ -482,25 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error)
482 427
483 force_sig(SIGFPE, tsk); 428 force_sig(SIGFPE, tsk);
484} 429}
485
486BUILD_TRAP_HANDLER(fpu_state_restore)
487{
488 struct task_struct *tsk = current;
489 TRAP_HANDLER_DECL;
490
491 grab_fpu(regs);
492 if (!user_mode(regs)) {
493 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
494 return;
495 }
496
497 if (used_math()) {
498 /* Using the FPU again. */
499 restore_fpu(tsk);
500 } else {
501 /* First time FPU user. */
502 fpu_init();
503 set_used_math();
504 }
505 set_tsk_thread_flag(tsk, TIF_USEDFPU);
506}
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
new file mode 100644
index 000000000000..748955df018d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -0,0 +1,253 @@
1/*
2 * Performance events support for SH7750-style performance counters
3 *
4 * Copyright (C) 2009 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/io.h>
13#include <linux/irq.h>
14#include <linux/perf_event.h>
15#include <asm/processor.h>
16
17#define PM_CR_BASE 0xff000084 /* 16-bit */
18#define PM_CTR_BASE 0xff100004 /* 32-bit */
19
20#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
21#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
22#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
23
24#define PMCR_PMM_MASK 0x0000003f
25
26#define PMCR_CLKF 0x00000100
27#define PMCR_PMCLR 0x00002000
28#define PMCR_PMST 0x00004000
29#define PMCR_PMEN 0x00008000
30
31static struct sh_pmu sh7750_pmu;
32
33/*
34 * There are a number of events supported by each counter (33 in total).
35 * Since we have 2 counters, each counter will take the event code as it
36 * corresponds to the PMCR PMM setting. Each counter can be configured
37 * independently.
38 *
39 * Event Code Description
40 * ---------- -----------
41 *
42 * 0x01 Operand read access
43 * 0x02 Operand write access
44 * 0x03 UTLB miss
45 * 0x04 Operand cache read miss
46 * 0x05 Operand cache write miss
47 * 0x06 Instruction fetch (w/ cache)
48 * 0x07 Instruction TLB miss
49 * 0x08 Instruction cache miss
50 * 0x09 All operand accesses
51 * 0x0a All instruction accesses
52 * 0x0b OC RAM operand access
53 * 0x0d On-chip I/O space access
54 * 0x0e Operand access (r/w)
55 * 0x0f Operand cache miss (r/w)
56 * 0x10 Branch instruction
57 * 0x11 Branch taken
58 * 0x12 BSR/BSRF/JSR
59 * 0x13 Instruction execution
60 * 0x14 Instruction execution in parallel
61 * 0x15 FPU Instruction execution
62 * 0x16 Interrupt
63 * 0x17 NMI
64 * 0x18 trapa instruction execution
65 * 0x19 UBCA match
66 * 0x1a UBCB match
67 * 0x21 Instruction cache fill
68 * 0x22 Operand cache fill
69 * 0x23 Elapsed time
70 * 0x24 Pipeline freeze by I-cache miss
71 * 0x25 Pipeline freeze by D-cache miss
72 * 0x27 Pipeline freeze by branch instruction
73 * 0x28 Pipeline freeze by CPU register
74 * 0x29 Pipeline freeze by FPU
75 */
76
77static const int sh7750_general_events[] = {
78 [PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
79 [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a,
80 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
81 [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
82 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010,
83 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
84 [PERF_COUNT_HW_BUS_CYCLES] = -1,
85};
86
87#define C(x) PERF_COUNT_HW_CACHE_##x
88
89static const int sh7750_cache_events
90 [PERF_COUNT_HW_CACHE_MAX]
91 [PERF_COUNT_HW_CACHE_OP_MAX]
92 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
93{
94 [ C(L1D) ] = {
95 [ C(OP_READ) ] = {
96 [ C(RESULT_ACCESS) ] = 0x0001,
97 [ C(RESULT_MISS) ] = 0x0004,
98 },
99 [ C(OP_WRITE) ] = {
100 [ C(RESULT_ACCESS) ] = 0x0002,
101 [ C(RESULT_MISS) ] = 0x0005,
102 },
103 [ C(OP_PREFETCH) ] = {
104 [ C(RESULT_ACCESS) ] = 0,
105 [ C(RESULT_MISS) ] = 0,
106 },
107 },
108
109 [ C(L1I) ] = {
110 [ C(OP_READ) ] = {
111 [ C(RESULT_ACCESS) ] = 0x0006,
112 [ C(RESULT_MISS) ] = 0x0008,
113 },
114 [ C(OP_WRITE) ] = {
115 [ C(RESULT_ACCESS) ] = -1,
116 [ C(RESULT_MISS) ] = -1,
117 },
118 [ C(OP_PREFETCH) ] = {
119 [ C(RESULT_ACCESS) ] = 0,
120 [ C(RESULT_MISS) ] = 0,
121 },
122 },
123
124 [ C(LL) ] = {
125 [ C(OP_READ) ] = {
126 [ C(RESULT_ACCESS) ] = 0,
127 [ C(RESULT_MISS) ] = 0,
128 },
129 [ C(OP_WRITE) ] = {
130 [ C(RESULT_ACCESS) ] = 0,
131 [ C(RESULT_MISS) ] = 0,
132 },
133 [ C(OP_PREFETCH) ] = {
134 [ C(RESULT_ACCESS) ] = 0,
135 [ C(RESULT_MISS) ] = 0,
136 },
137 },
138
139 [ C(DTLB) ] = {
140 [ C(OP_READ) ] = {
141 [ C(RESULT_ACCESS) ] = 0,
142 [ C(RESULT_MISS) ] = 0x0003,
143 },
144 [ C(OP_WRITE) ] = {
145 [ C(RESULT_ACCESS) ] = 0,
146 [ C(RESULT_MISS) ] = 0,
147 },
148 [ C(OP_PREFETCH) ] = {
149 [ C(RESULT_ACCESS) ] = 0,
150 [ C(RESULT_MISS) ] = 0,
151 },
152 },
153
154 [ C(ITLB) ] = {
155 [ C(OP_READ) ] = {
156 [ C(RESULT_ACCESS) ] = 0,
157 [ C(RESULT_MISS) ] = 0x0007,
158 },
159 [ C(OP_WRITE) ] = {
160 [ C(RESULT_ACCESS) ] = -1,
161 [ C(RESULT_MISS) ] = -1,
162 },
163 [ C(OP_PREFETCH) ] = {
164 [ C(RESULT_ACCESS) ] = -1,
165 [ C(RESULT_MISS) ] = -1,
166 },
167 },
168
169 [ C(BPU) ] = {
170 [ C(OP_READ) ] = {
171 [ C(RESULT_ACCESS) ] = -1,
172 [ C(RESULT_MISS) ] = -1,
173 },
174 [ C(OP_WRITE) ] = {
175 [ C(RESULT_ACCESS) ] = -1,
176 [ C(RESULT_MISS) ] = -1,
177 },
178 [ C(OP_PREFETCH) ] = {
179 [ C(RESULT_ACCESS) ] = -1,
180 [ C(RESULT_MISS) ] = -1,
181 },
182 },
183};
184
185static int sh7750_event_map(int event)
186{
187 return sh7750_general_events[event];
188}
189
190static u64 sh7750_pmu_read(int idx)
191{
192 return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
193 __raw_readl(PMCTRL(idx));
194}
195
196static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
197{
198 unsigned int tmp;
199
200 tmp = __raw_readw(PMCR(idx));
201 tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
202 __raw_writew(tmp, PMCR(idx));
203}
204
205static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
206{
207 __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
208 __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
209}
210
211static void sh7750_pmu_disable_all(void)
212{
213 int i;
214
215 for (i = 0; i < sh7750_pmu.num_events; i++)
216 __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
217}
218
219static void sh7750_pmu_enable_all(void)
220{
221 int i;
222
223 for (i = 0; i < sh7750_pmu.num_events; i++)
224 __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
225}
226
227static struct sh_pmu sh7750_pmu = {
228 .name = "sh7750",
229 .num_events = 2,
230 .event_map = sh7750_event_map,
231 .max_events = ARRAY_SIZE(sh7750_general_events),
232 .raw_event_mask = PMCR_PMM_MASK,
233 .cache_events = &sh7750_cache_events,
234 .read = sh7750_pmu_read,
235 .disable = sh7750_pmu_disable,
236 .enable = sh7750_pmu_enable,
237 .disable_all = sh7750_pmu_disable_all,
238 .enable_all = sh7750_pmu_enable_all,
239};
240
241static int __init sh7750_pmu_init(void)
242{
243 /*
244 * Make sure this CPU actually has perf counters.
245 */
246 if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
247 pr_notice("HW perf events unsupported, software events only.\n");
248 return -ENODEV;
249 }
250
251 return register_sh_pmu(&sh7750_pmu);
252}
253early_initcall(sh7750_pmu_init);
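The new perf_event.c ties the generic perf event ids to SH7750 event codes through sh7750_general_events and sh7750_cache_events, and only the low six PMM bits ever reach PMCR. The fragment below is a hedged illustration of that translation step, not code from the file: it reuses the sh7750_general_events table and PMCR_PMM_MASK defined above, and the helper name is made up.

#include <linux/errno.h>
#include <linux/perf_event.h>

/*
 * Illustrative only: translate a generic hardware event id into the
 * PMM value that the enable callback above would program into PMCR.
 */
static int sh7750_pmm_for(int generic_event)
{
	int code = sh7750_general_events[generic_event];

	if (code < 0)
		return -EINVAL;		/* unsupported, e.g. BRANCH_MISSES */

	return code & PMCR_PMM_MASK;
}

/* sh7750_pmm_for(PERF_COUNT_HW_CPU_CYCLES) yields 0x23, "elapsed time". */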
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 6c78d0a9c857..b93458f33b74 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17 17
18int __init detect_cpu_and_cache_system(void) 18void __cpuinit cpu_probe(void)
19{ 19{
20 unsigned long pvr, prr, cvr; 20 unsigned long pvr, prr, cvr;
21 unsigned long size; 21 unsigned long size;
@@ -28,9 +28,9 @@ int __init detect_cpu_and_cache_system(void)
28 [9] = (1 << 16) 28 [9] = (1 << 16)
29 }; 29 };
30 30
31 pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff; 31 pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
32 prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff; 32 prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
33 cvr = (ctrl_inl(CCN_CVR)); 33 cvr = (__raw_readl(CCN_CVR));
34 34
35 /* 35 /*
36 * Setup some sane SH-4 defaults for the icache 36 * Setup some sane SH-4 defaults for the icache
@@ -57,8 +57,12 @@ int __init detect_cpu_and_cache_system(void)
57 * Setup some generic flags we can probe on SH-4A parts 57 * Setup some generic flags we can probe on SH-4A parts
58 */ 58 */
59 if (((pvr >> 16) & 0xff) == 0x10) { 59 if (((pvr >> 16) & 0xff) == 0x10) {
60 if ((cvr & 0x10000000) == 0) 60 boot_cpu_data.family = CPU_FAMILY_SH4A;
61
62 if ((cvr & 0x10000000) == 0) {
61 boot_cpu_data.flags |= CPU_HAS_DSP; 63 boot_cpu_data.flags |= CPU_HAS_DSP;
64 boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
65 }
62 66
63 boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER; 67 boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
64 boot_cpu_data.cut_major = pvr & 0x7f; 68 boot_cpu_data.cut_major = pvr & 0x7f;
@@ -67,10 +71,11 @@ int __init detect_cpu_and_cache_system(void)
67 boot_cpu_data.dcache.ways = 4; 71 boot_cpu_data.dcache.ways = 4;
68 } else { 72 } else {
69 /* And some SH-4 defaults.. */ 73 /* And some SH-4 defaults.. */
70 boot_cpu_data.flags |= CPU_HAS_PTEA; 74 boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
75 boot_cpu_data.family = CPU_FAMILY_SH4;
71 } 76 }
72 77
73 /* FPU detection works for everyone */ 78 /* FPU detection works for almost everyone */
74 if ((cvr & 0x20000000)) 79 if ((cvr & 0x20000000))
75 boot_cpu_data.flags |= CPU_HAS_FPU; 80 boot_cpu_data.flags |= CPU_HAS_FPU;
76 81
@@ -119,6 +124,7 @@ int __init detect_cpu_and_cache_system(void)
119 boot_cpu_data.type = CPU_SH7785; 124 boot_cpu_data.type = CPU_SH7785;
120 break; 125 break;
121 case 0x4004: 126 case 0x4004:
127 case 0x4005:
122 boot_cpu_data.type = CPU_SH7786; 128 boot_cpu_data.type = CPU_SH7786;
123 boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE; 129 boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
124 break; 130 break;
@@ -139,8 +145,15 @@ int __init detect_cpu_and_cache_system(void)
139 } 145 }
140 break; 146 break;
141 case 0x300b: 147 case 0x300b:
142 boot_cpu_data.type = CPU_SH7724; 148 switch (prr) {
143 boot_cpu_data.flags |= CPU_HAS_L2_CACHE; 149 case 0x20:
150 boot_cpu_data.type = CPU_SH7724;
151 boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
152 break;
153 case 0x10:
154 boot_cpu_data.type = CPU_SH7757;
155 break;
156 }
144 break; 157 break;
145 case 0x4000: /* 1st cut */ 158 case 0x4000: /* 1st cut */
146 case 0x4001: /* 2nd cut */ 159 case 0x4001: /* 2nd cut */
@@ -148,6 +161,7 @@ int __init detect_cpu_and_cache_system(void)
148 break; 161 break;
149 case 0x700: 162 case 0x700:
150 boot_cpu_data.type = CPU_SH4_501; 163 boot_cpu_data.type = CPU_SH4_501;
164 boot_cpu_data.flags &= ~CPU_HAS_FPU;
151 boot_cpu_data.icache.ways = 2; 165 boot_cpu_data.icache.ways = 2;
152 boot_cpu_data.dcache.ways = 2; 166 boot_cpu_data.dcache.ways = 2;
153 break; 167 break;
@@ -173,9 +187,6 @@ int __init detect_cpu_and_cache_system(void)
173 boot_cpu_data.dcache.ways = 2; 187 boot_cpu_data.dcache.ways = 2;
174 188
175 break; 189 break;
176 default:
177 boot_cpu_data.type = CPU_SH_NONE;
178 break;
179 } 190 }
180 191
181 /* 192 /*
@@ -218,7 +229,7 @@ int __init detect_cpu_and_cache_system(void)
218 * Size calculation is much more sensible 229 * Size calculation is much more sensible
219 * than it is for the L1. 230 * than it is for the L1.
220 * 231 *
221 * Sizes are 128KB, 258KB, 512KB, and 1MB. 232 * Sizes are 128KB, 256KB, 512KB, and 1MB.
222 */ 233 */
223 size = (cvr & 0xf) << 17; 234 size = (cvr & 0xf) << 17;
224 235
@@ -240,6 +251,4 @@ int __init detect_cpu_and_cache_system(void)
240 boot_cpu_data.scache.linesz); 251 boot_cpu_data.scache.linesz);
241 } 252 }
242 } 253 }
243
244 return 0;
245} 254}
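One small detail in the probe.c hunk worth spelling out: the secondary-cache comment fix (258KB corrected to 256KB) matches the decode a few lines earlier, where the low nibble of CVR is shifted left by 17 bits. Assuming the nibble carries the power-of-two values 1, 2, 4 and 8, the listed sizes fall straight out of the arithmetic, as the standalone check below shows.

#include <stdio.h>

int main(void)
{
	/* Candidate low-nibble values of CVR, assumed to be powers of two. */
	unsigned long nibble[] = { 0x1, 0x2, 0x4, 0x8 };

	for (int i = 0; i < 4; i++)
		printf("cvr & 0xf = 0x%lx -> %lu KB\n",
		       nibble[i], (nibble[i] << 17) >> 10);

	return 0;
}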
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index ec2104b49ef7..5b2833159b7d 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -15,38 +15,31 @@
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <linux/io.h> 16#include <linux/io.h>
17 17
18static struct plat_sci_port sci_platform_data[] = { 18static struct plat_sci_port scif0_platform_data = {
19 { 19 .mapbase = 0xffe80000,
20 .mapbase = 0xffe80000, 20 .flags = UPF_BOOT_AUTOCONF,
21 .flags = UPF_BOOT_AUTOCONF, 21 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 22 .scbrr_algo_id = SCBRR_ALGO_2,
23 .scbrr_algo_id = SCBRR_ALGO_2, 23 .type = PORT_SCIF,
24 .type = PORT_SCIF, 24 .irqs = { 40, 41, 43, 42 },
25 .irqs = { 40, 41, 43, 42 },
26 }, {
27 .flags = 0,
28 }
29}; 25};
30 26
31static struct platform_device sci_device = { 27static struct platform_device scif0_device = {
32 .name = "sh-sci", 28 .name = "sh-sci",
33 .id = -1, 29 .id = 0,
34 .dev = { 30 .dev = {
35 .platform_data = sci_platform_data, 31 .platform_data = &scif0_platform_data,
36 }, 32 },
37}; 33};
38 34
39static struct sh_timer_config tmu0_platform_data = { 35static struct sh_timer_config tmu0_platform_data = {
40 .name = "TMU0",
41 .channel_offset = 0x04, 36 .channel_offset = 0x04,
42 .timer_bit = 0, 37 .timer_bit = 0,
43 .clk = "peripheral_clk",
44 .clockevent_rating = 200, 38 .clockevent_rating = 200,
45}; 39};
46 40
47static struct resource tmu0_resources[] = { 41static struct resource tmu0_resources[] = {
48 [0] = { 42 [0] = {
49 .name = "TMU0",
50 .start = 0xffd80008, 43 .start = 0xffd80008,
51 .end = 0xffd80013, 44 .end = 0xffd80013,
52 .flags = IORESOURCE_MEM, 45 .flags = IORESOURCE_MEM,
@@ -68,16 +61,13 @@ static struct platform_device tmu0_device = {
68}; 61};
69 62
70static struct sh_timer_config tmu1_platform_data = { 63static struct sh_timer_config tmu1_platform_data = {
71 .name = "TMU1",
72 .channel_offset = 0x10, 64 .channel_offset = 0x10,
73 .timer_bit = 1, 65 .timer_bit = 1,
74 .clk = "peripheral_clk",
75 .clocksource_rating = 200, 66 .clocksource_rating = 200,
76}; 67};
77 68
78static struct resource tmu1_resources[] = { 69static struct resource tmu1_resources[] = {
79 [0] = { 70 [0] = {
80 .name = "TMU1",
81 .start = 0xffd80014, 71 .start = 0xffd80014,
82 .end = 0xffd8001f, 72 .end = 0xffd8001f,
83 .flags = IORESOURCE_MEM, 73 .flags = IORESOURCE_MEM,
@@ -99,15 +89,12 @@ static struct platform_device tmu1_device = {
99}; 89};
100 90
101static struct sh_timer_config tmu2_platform_data = { 91static struct sh_timer_config tmu2_platform_data = {
102 .name = "TMU2",
103 .channel_offset = 0x1c, 92 .channel_offset = 0x1c,
104 .timer_bit = 2, 93 .timer_bit = 2,
105 .clk = "peripheral_clk",
106}; 94};
107 95
108static struct resource tmu2_resources[] = { 96static struct resource tmu2_resources[] = {
109 [0] = { 97 [0] = {
110 .name = "TMU2",
111 .start = 0xffd80020, 98 .start = 0xffd80020,
112 .end = 0xffd8002f, 99 .end = 0xffd8002f,
113 .flags = IORESOURCE_MEM, 100 .flags = IORESOURCE_MEM,
@@ -129,7 +116,7 @@ static struct platform_device tmu2_device = {
129}; 116};
130 117
131static struct platform_device *sh4202_devices[] __initdata = { 118static struct platform_device *sh4202_devices[] __initdata = {
132 &sci_device, 119 &scif0_device,
133 &tmu0_device, 120 &tmu0_device,
134 &tmu1_device, 121 &tmu1_device,
135 &tmu2_device, 122 &tmu2_device,
@@ -140,9 +127,10 @@ static int __init sh4202_devices_setup(void)
140 return platform_add_devices(sh4202_devices, 127 return platform_add_devices(sh4202_devices,
141 ARRAY_SIZE(sh4202_devices)); 128 ARRAY_SIZE(sh4202_devices));
142} 129}
143__initcall(sh4202_devices_setup); 130arch_initcall(sh4202_devices_setup);
144 131
145static struct platform_device *sh4202_early_devices[] __initdata = { 132static struct platform_device *sh4202_early_devices[] __initdata = {
133 &scif0_device,
146 &tmu0_device, 134 &tmu0_device,
147 &tmu1_device, 135 &tmu1_device,
148 &tmu2_device, 136 &tmu2_device,
@@ -203,7 +191,7 @@ void __init plat_irq_setup_pins(int mode)
203{ 191{
204 switch (mode) { 192 switch (mode) {
205 case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ 193 case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
206 ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); 194 __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
207 register_intc_controller(&intc_desc_irlm); 195 register_intc_controller(&intc_desc_irlm);
208 break; 196 break;
209 default: 197 default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 51a945e0d72c..c2b0aaaedcae 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -39,16 +39,17 @@ static struct platform_device rtc_device = {
39static struct plat_sci_port sci_platform_data = { 39static struct plat_sci_port sci_platform_data = {
40 .mapbase = 0xffe00000, 40 .mapbase = 0xffe00000,
41 .flags = UPF_BOOT_AUTOCONF, 41 .flags = UPF_BOOT_AUTOCONF,
42 .type = PORT_SCI,
43 .scscr = SCSCR_TE | SCSCR_RE, 42 .scscr = SCSCR_TE | SCSCR_RE,
44 .scbrr_algo_id = SCBRR_ALGO_2, 43 .scbrr_algo_id = SCBRR_ALGO_2,
44 .type = PORT_SCI,
45 .irqs = { 23, 23, 23, 0 }, 45 .irqs = { 23, 23, 23, 0 },
46}; 46};
47 47
48static struct platform_device sci_device = { 48static struct platform_device sci_device = {
49 .name = "sh-sci", 49 .name = "sh-sci",
50 .id = 0,
50 .dev = { 51 .dev = {
51 .platform_data = sci_platform_data, 52 .platform_data = &sci_platform_data,
52 }, 53 },
53}; 54};
54 55
@@ -63,22 +64,20 @@ static struct plat_sci_port scif_platform_data = {
63 64
64static struct platform_device scif_device = { 65static struct platform_device scif_device = {
65 .name = "sh-sci", 66 .name = "sh-sci",
67 .id = 1,
66 .dev = { 68 .dev = {
67 .platform_data = scif_platform_data, 69 .platform_data = &scif_platform_data,
68 }, 70 },
69}; 71};
70 72
71static struct sh_timer_config tmu0_platform_data = { 73static struct sh_timer_config tmu0_platform_data = {
72 .name = "TMU0",
73 .channel_offset = 0x04, 74 .channel_offset = 0x04,
74 .timer_bit = 0, 75 .timer_bit = 0,
75 .clk = "peripheral_clk",
76 .clockevent_rating = 200, 76 .clockevent_rating = 200,
77}; 77};
78 78
79static struct resource tmu0_resources[] = { 79static struct resource tmu0_resources[] = {
80 [0] = { 80 [0] = {
81 .name = "TMU0",
82 .start = 0xffd80008, 81 .start = 0xffd80008,
83 .end = 0xffd80013, 82 .end = 0xffd80013,
84 .flags = IORESOURCE_MEM, 83 .flags = IORESOURCE_MEM,
@@ -100,16 +99,13 @@ static struct platform_device tmu0_device = {
100}; 99};
101 100
102static struct sh_timer_config tmu1_platform_data = { 101static struct sh_timer_config tmu1_platform_data = {
103 .name = "TMU1",
104 .channel_offset = 0x10, 102 .channel_offset = 0x10,
105 .timer_bit = 1, 103 .timer_bit = 1,
106 .clk = "peripheral_clk",
107 .clocksource_rating = 200, 104 .clocksource_rating = 200,
108}; 105};
109 106
110static struct resource tmu1_resources[] = { 107static struct resource tmu1_resources[] = {
111 [0] = { 108 [0] = {
112 .name = "TMU1",
113 .start = 0xffd80014, 109 .start = 0xffd80014,
114 .end = 0xffd8001f, 110 .end = 0xffd8001f,
115 .flags = IORESOURCE_MEM, 111 .flags = IORESOURCE_MEM,
@@ -131,15 +127,12 @@ static struct platform_device tmu1_device = {
131}; 127};
132 128
133static struct sh_timer_config tmu2_platform_data = { 129static struct sh_timer_config tmu2_platform_data = {
134 .name = "TMU2",
135 .channel_offset = 0x1c, 130 .channel_offset = 0x1c,
136 .timer_bit = 2, 131 .timer_bit = 2,
137 .clk = "peripheral_clk",
138}; 132};
139 133
140static struct resource tmu2_resources[] = { 134static struct resource tmu2_resources[] = {
141 [0] = { 135 [0] = {
142 .name = "TMU2",
143 .start = 0xffd80020, 136 .start = 0xffd80020,
144 .end = 0xffd8002f, 137 .end = 0xffd8002f,
145 .flags = IORESOURCE_MEM, 138 .flags = IORESOURCE_MEM,
@@ -166,15 +159,12 @@ static struct platform_device tmu2_device = {
166 defined(CONFIG_CPU_SUBTYPE_SH7751R) 159 defined(CONFIG_CPU_SUBTYPE_SH7751R)
167 160
168static struct sh_timer_config tmu3_platform_data = { 161static struct sh_timer_config tmu3_platform_data = {
169 .name = "TMU3",
170 .channel_offset = 0x04, 162 .channel_offset = 0x04,
171 .timer_bit = 0, 163 .timer_bit = 0,
172 .clk = "peripheral_clk",
173}; 164};
174 165
175static struct resource tmu3_resources[] = { 166static struct resource tmu3_resources[] = {
176 [0] = { 167 [0] = {
177 .name = "TMU3",
178 .start = 0xfe100008, 168 .start = 0xfe100008,
179 .end = 0xfe100013, 169 .end = 0xfe100013,
180 .flags = IORESOURCE_MEM, 170 .flags = IORESOURCE_MEM,
@@ -196,15 +186,12 @@ static struct platform_device tmu3_device = {
196}; 186};
197 187
198static struct sh_timer_config tmu4_platform_data = { 188static struct sh_timer_config tmu4_platform_data = {
199 .name = "TMU4",
200 .channel_offset = 0x10, 189 .channel_offset = 0x10,
201 .timer_bit = 1, 190 .timer_bit = 1,
202 .clk = "peripheral_clk",
203}; 191};
204 192
205static struct resource tmu4_resources[] = { 193static struct resource tmu4_resources[] = {
206 [0] = { 194 [0] = {
207 .name = "TMU4",
208 .start = 0xfe100014, 195 .start = 0xfe100014,
209 .end = 0xfe10001f, 196 .end = 0xfe10001f,
210 .flags = IORESOURCE_MEM, 197 .flags = IORESOURCE_MEM,
@@ -243,7 +230,6 @@ static struct platform_device *sh7750_devices[] __initdata = {
243static int __init sh7750_devices_setup(void) 230static int __init sh7750_devices_setup(void)
244{ 231{
245 if (mach_is_rts7751r2d()) { 232 if (mach_is_rts7751r2d()) {
246 scif_platform_data.scscr |= SCSCR_CKE1;
247 platform_register_device(&scif_device); 233 platform_register_device(&scif_device);
248 } else { 234 } else {
249 platform_register_device(&sci_device); 235 platform_register_device(&sci_device);
@@ -253,7 +239,7 @@ static int __init sh7750_devices_setup(void)
253 return platform_add_devices(sh7750_devices, 239 return platform_add_devices(sh7750_devices,
254 ARRAY_SIZE(sh7750_devices)); 240 ARRAY_SIZE(sh7750_devices));
255} 241}
256__initcall(sh7750_devices_setup); 242arch_initcall(sh7750_devices_setup);
257 243
258static struct platform_device *sh7750_early_devices[] __initdata = { 244static struct platform_device *sh7750_early_devices[] __initdata = {
259 &tmu0_device, 245 &tmu0_device,
@@ -269,6 +255,14 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
269 255
270void __init plat_early_device_setup(void) 256void __init plat_early_device_setup(void)
271{ 257{
258 if (mach_is_rts7751r2d()) {
259 scif_platform_data.scscr |= SCSCR_CKE1;
260 early_platform_add_devices(&scif_device, 1);
261 } else {
262 early_platform_add_devices(&sci_device, 1);
263 early_platform_add_devices(&scif_device, 1);
264 }
265
272 early_platform_add_devices(sh7750_early_devices, 266 early_platform_add_devices(sh7750_early_devices,
273 ARRAY_SIZE(sh7750_early_devices)); 267 ARRAY_SIZE(sh7750_early_devices));
274} 268}
@@ -449,7 +443,7 @@ void __init plat_irq_setup_pins(int mode)
449 443
450 switch (mode) { 444 switch (mode) {
451 case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ 445 case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
452 ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); 446 __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
453 register_intc_controller(&intc_desc_irlm); 447 register_intc_controller(&intc_desc_irlm);
454 break; 448 break;
455 default: 449 default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index cee660fe1d90..78bbf232e391 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -126,59 +126,82 @@ static struct intc_vect vectors_irq[] __initdata = {
126static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, 126static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
127 mask_registers, prio_registers, NULL); 127 mask_registers, prio_registers, NULL);
128 128
129static struct plat_sci_port sci_platform_data[] = { 129static struct plat_sci_port scif0_platform_data = {
130 { 130 .mapbase = 0xfe600000,
131 .mapbase = 0xfe600000, 131 .flags = UPF_BOOT_AUTOCONF,
132 .flags = UPF_BOOT_AUTOCONF, 132 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
133 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 133 .scbrr_algo_id = SCBRR_ALGO_2,
134 .scbrr_algo_id = SCBRR_ALGO_2, 134 .type = PORT_SCIF,
135 .type = PORT_SCIF, 135 .irqs = { 52, 53, 55, 54 },
136 .irqs = { 52, 53, 55, 54 }, 136};
137 }, { 137
138 .mapbase = 0xfe610000, 138static struct platform_device scif0_device = {
139 .flags = UPF_BOOT_AUTOCONF, 139 .name = "sh-sci",
140 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 140 .id = 0,
141 .scbrr_algo_id = SCBRR_ALGO_2, 141 .dev = {
142 .type = PORT_SCIF, 142 .platform_data = &scif0_platform_data,
143 .irqs = { 72, 73, 75, 74 }, 143 },
144 }, { 144};
145 .mapbase = 0xfe620000, 145
146 .flags = UPF_BOOT_AUTOCONF, 146static struct plat_sci_port scif1_platform_data = {
147 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 147 .mapbase = 0xfe610000,
148 .scbrr_algo_id = SCBRR_ALGO_2, 148 .flags = UPF_BOOT_AUTOCONF,
149 .type = PORT_SCIF, 149 .type = PORT_SCIF,
150 .irqs = { 76, 77, 79, 78 }, 150 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
151 }, { 151 .scbrr_algo_id = SCBRR_ALGO_2,
152 .mapbase = 0xfe480000, 152 .irqs = { 72, 73, 75, 74 },
153 .flags = UPF_BOOT_AUTOCONF, 153};
154 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 154
155 .scbrr_algo_id = SCBRR_ALGO_2, 155static struct platform_device scif1_device = {
156 .type = PORT_SCI, 156 .name = "sh-sci",
157 .irqs = { 80, 81, 82, 0 }, 157 .id = 1,
158 }, { 158 .dev = {
159 .flags = 0, 159 .platform_data = &scif1_platform_data,
160 } 160 },
161};
162
163static struct plat_sci_port scif2_platform_data = {
164 .mapbase = 0xfe620000,
165 .flags = UPF_BOOT_AUTOCONF,
166 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
167 .scbrr_algo_id = SCBRR_ALGO_2,
168 .type = PORT_SCIF,
169 .irqs = { 76, 77, 79, 78 },
170};
171
172static struct platform_device scif2_device = {
173 .name = "sh-sci",
174 .id = 2,
175 .dev = {
176 .platform_data = &scif2_platform_data,
177 },
178};
179
180static struct plat_sci_port scif3_platform_data = {
181 .mapbase = 0xfe480000,
182 .flags = UPF_BOOT_AUTOCONF,
183 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
184 .scbrr_algo_id = SCBRR_ALGO_2,
185 .type = PORT_SCI,
186 .irqs = { 80, 81, 82, 0 },
161}; 187};
162 188
163static struct platform_device sci_device = { 189static struct platform_device scif3_device = {
164 .name = "sh-sci", 190 .name = "sh-sci",
165 .id = -1, 191 .id = 3,
166 .dev = { 192 .dev = {
167 .platform_data = sci_platform_data, 193 .platform_data = &scif3_platform_data,
168 }, 194 },
169}; 195};
170 196
171static struct sh_timer_config tmu0_platform_data = { 197static struct sh_timer_config tmu0_platform_data = {
172 .name = "TMU0",
173 .channel_offset = 0x04, 198 .channel_offset = 0x04,
174 .timer_bit = 0, 199 .timer_bit = 0,
175 .clk = "peripheral_clk",
176 .clockevent_rating = 200, 200 .clockevent_rating = 200,
177}; 201};
178 202
179static struct resource tmu0_resources[] = { 203static struct resource tmu0_resources[] = {
180 [0] = { 204 [0] = {
181 .name = "TMU0",
182 .start = 0xffd80008, 205 .start = 0xffd80008,
183 .end = 0xffd80013, 206 .end = 0xffd80013,
184 .flags = IORESOURCE_MEM, 207 .flags = IORESOURCE_MEM,
@@ -200,16 +223,13 @@ static struct platform_device tmu0_device = {
200}; 223};
201 224
202static struct sh_timer_config tmu1_platform_data = { 225static struct sh_timer_config tmu1_platform_data = {
203 .name = "TMU1",
204 .channel_offset = 0x10, 226 .channel_offset = 0x10,
205 .timer_bit = 1, 227 .timer_bit = 1,
206 .clk = "peripheral_clk",
207 .clocksource_rating = 200, 228 .clocksource_rating = 200,
208}; 229};
209 230
210static struct resource tmu1_resources[] = { 231static struct resource tmu1_resources[] = {
211 [0] = { 232 [0] = {
212 .name = "TMU1",
213 .start = 0xffd80014, 233 .start = 0xffd80014,
214 .end = 0xffd8001f, 234 .end = 0xffd8001f,
215 .flags = IORESOURCE_MEM, 235 .flags = IORESOURCE_MEM,
@@ -231,15 +251,12 @@ static struct platform_device tmu1_device = {
231}; 251};
232 252
233static struct sh_timer_config tmu2_platform_data = { 253static struct sh_timer_config tmu2_platform_data = {
234 .name = "TMU2",
235 .channel_offset = 0x1c, 254 .channel_offset = 0x1c,
236 .timer_bit = 2, 255 .timer_bit = 2,
237 .clk = "peripheral_clk",
238}; 256};
239 257
240static struct resource tmu2_resources[] = { 258static struct resource tmu2_resources[] = {
241 [0] = { 259 [0] = {
242 .name = "TMU2",
243 .start = 0xffd80020, 260 .start = 0xffd80020,
244 .end = 0xffd8002f, 261 .end = 0xffd8002f,
245 .flags = IORESOURCE_MEM, 262 .flags = IORESOURCE_MEM,
@@ -262,7 +279,10 @@ static struct platform_device tmu2_device = {
262 279
263 280
264static struct platform_device *sh7760_devices[] __initdata = { 281static struct platform_device *sh7760_devices[] __initdata = {
265 &sci_device, 282 &scif0_device,
283 &scif1_device,
284 &scif2_device,
285 &scif3_device,
266 &tmu0_device, 286 &tmu0_device,
267 &tmu1_device, 287 &tmu1_device,
268 &tmu2_device, 288 &tmu2_device,
@@ -273,9 +293,13 @@ static int __init sh7760_devices_setup(void)
273 return platform_add_devices(sh7760_devices, 293 return platform_add_devices(sh7760_devices,
274 ARRAY_SIZE(sh7760_devices)); 294 ARRAY_SIZE(sh7760_devices));
275} 295}
276__initcall(sh7760_devices_setup); 296arch_initcall(sh7760_devices_setup);
277 297
278static struct platform_device *sh7760_early_devices[] __initdata = { 298static struct platform_device *sh7760_early_devices[] __initdata = {
299 &scif0_device,
300 &scif1_device,
301 &scif2_device,
302 &scif3_device,
279 &tmu0_device, 303 &tmu0_device,
280 &tmu1_device, 304 &tmu1_device,
281 &tmu2_device, 305 &tmu2_device,
@@ -294,7 +318,7 @@ void __init plat_irq_setup_pins(int mode)
294{ 318{
295 switch (mode) { 319 switch (mode) {
296 case IRQ_MODE_IRQ: 320 case IRQ_MODE_IRQ:
297 ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); 321 __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
298 register_intc_controller(&intc_desc_irq); 322 register_intc_controller(&intc_desc_irq);
299 break; 323 break;
300 default: 324 default:
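
Note on the setup-sh7760.c hunk above: the single sci_device (id -1, carrying an array of port descriptions) is split into one plat_sci_port / platform_device pair per port, so every SCIF gets its own device ("sh-sci.0" … "sh-sci.3"). A minimal sketch of that per-port pattern follows, using a hypothetical extra port — the register base and IRQ numbers are illustrative only, not taken from the SH7760 manual:

static struct plat_sci_port scif4_platform_data = {
	.mapbase	= 0xfe630000,			/* illustrative base address */
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 84, 85, 87, 86 },		/* illustrative IRQ numbers */
};

static struct platform_device scif4_device = {
	.name		= "sh-sci",
	.id		= 4,				/* device ends up named "sh-sci.4" */
	.dev		= {
		.platform_data	= &scif4_platform_data,
	},
};

Each such device is then listed both in sh7760_devices[] (registered with platform_add_devices() at arch_initcall time) and in sh7760_early_devices[], so the early platform code can probe it (e.g. for an early serial console) before the regular initcalls run.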
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 8a8a993f55ea..14726eef1ce0 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -43,9 +43,9 @@ static unsigned long *sq_bitmap;
43 43
44#define store_queue_barrier() \ 44#define store_queue_barrier() \
45do { \ 45do { \
46 (void)ctrl_inl(P4SEG_STORE_QUE); \ 46 (void)__raw_readl(P4SEG_STORE_QUE); \
47 ctrl_outl(0, P4SEG_STORE_QUE + 0); \ 47 __raw_writel(0, P4SEG_STORE_QUE + 0); \
48 ctrl_outl(0, P4SEG_STORE_QUE + 8); \ 48 __raw_writel(0, P4SEG_STORE_QUE + 8); \
49} while (0); 49} while (0);
50 50
51/** 51/**
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
100 spin_unlock_irq(&sq_mapping_lock); 100 spin_unlock_irq(&sq_mapping_lock);
101} 101}
102 102
103static int __sq_remap(struct sq_mapping *map, unsigned long flags) 103static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
104{ 104{
105#if defined(CONFIG_MMU) 105#if defined(CONFIG_MMU)
106 struct vm_struct *vma; 106 struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
113 113
114 if (ioremap_page_range((unsigned long)vma->addr, 114 if (ioremap_page_range((unsigned long)vma->addr,
115 (unsigned long)vma->addr + map->size, 115 (unsigned long)vma->addr + map->size,
116 vma->phys_addr, __pgprot(flags))) { 116 vma->phys_addr, prot)) {
117 vunmap(vma->addr); 117 vunmap(vma->addr);
118 return -EAGAIN; 118 return -EAGAIN;
119 } 119 }
@@ -123,8 +123,8 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
123 * straightforward, as we can just load up each queue's QACR with 123 * straightforward, as we can just load up each queue's QACR with
124 * the physical address appropriately masked. 124 * the physical address appropriately masked.
125 */ 125 */
126 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); 126 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
127 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); 127 __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
128#endif 128#endif
129 129
130 return 0; 130 return 0;
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
135 * @phys: Physical address of mapping. 135 * @phys: Physical address of mapping.
136 * @size: Length of mapping. 136 * @size: Length of mapping.
137 * @name: User invoking mapping. 137 * @name: User invoking mapping.
138 * @flags: Protection flags. 138 * @prot: Protection bits.
139 * 139 *
140 * Remaps the physical address @phys through the next available store queue 140 * Remaps the physical address @phys through the next available store queue
141 * address of @size length. @name is logged at boot time as well as through 141 * address of @size length. @name is logged at boot time as well as through
142 * the sysfs interface. 142 * the sysfs interface.
143 */ 143 */
144unsigned long sq_remap(unsigned long phys, unsigned int size, 144unsigned long sq_remap(unsigned long phys, unsigned int size,
145 const char *name, unsigned long flags) 145 const char *name, pgprot_t prot)
146{ 146{
147 struct sq_mapping *map; 147 struct sq_mapping *map;
148 unsigned long end; 148 unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
177 177
178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); 178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
179 179
180 ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); 180 ret = __sq_remap(map, prot);
181 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
182 goto out; 182 goto out;
183 183
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
309 return -EIO; 309 return -EIO;
310 310
311 if (likely(len)) { 311 if (likely(len)) {
312 int ret = sq_remap(base, len, "Userspace", 312 int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
313 pgprot_val(PAGE_SHARED));
314 if (ret < 0) 313 if (ret < 0)
315 return ret; 314 return ret;
316 } else 315 } else
@@ -327,7 +326,7 @@ static struct attribute *sq_sysfs_attrs[] = {
327 NULL, 326 NULL,
328}; 327};
329 328
330static struct sysfs_ops sq_sysfs_ops = { 329static const struct sysfs_ops sq_sysfs_ops = {
331 .show = sq_sysfs_show, 330 .show = sq_sysfs_show,
332 .store = sq_sysfs_store, 331 .store = sq_sysfs_store,
333}; 332};
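
Note on the sq.c hunk above: besides moving from the ctrl_*() accessors to __raw_readl()/__raw_writel(), sq_remap() now takes a complete pgprot_t rather than raw flag bits that used to be OR'ed with pgprot_val(PAGE_KERNEL_NOCACHE) inside __sq_remap(), so callers are responsible for the full protection themselves. A minimal caller sketch under the new signature — the name string and phys/len values are placeholders:

/* Before this change the protection was passed as a raw value: */
sq = sq_remap(phys, len, "example-driver", pgprot_val(PAGE_SHARED));

/* After: the caller hands over the pgprot_t itself, as the sysfs
 * store path above now does with PAGE_SHARED. */
sq = sq_remap(phys, len, "example-driver", PAGE_SHARED);
if (IS_ERR_VALUE(sq))		/* negative errno cast to unsigned long */
	return (int)sq;
/* ... use the store queue mapping at 'sq', later released with sq_unmap(sq) */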
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index ebdd391d5f42..cc122b1d3035 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,31 +3,33 @@
3# 3#
4 4
5# CPU subtype setup 5# CPU subtype setup
6obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o
6obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o 7obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
7obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o 8obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
8obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o 9obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
9obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o 10obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
10obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o 11obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o
11obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o 12obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
12obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o 13obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
13obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o 14obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o
14obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o 15obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o
15obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o 16obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
16obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o 17obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o intc-shx3.o
17 18
18# SMP setup 19# SMP setup
19smp-$(CONFIG_CPU_SHX3) := smp-shx3.o 20smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
20 21
21# Primary on-chip clocks (common) 22# Primary on-chip clocks (common)
23clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o
22clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o 24clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
23clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o 25clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
24clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o 26clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
25clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o 27clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
26clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o 28clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
27clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o 29clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
28clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o 30clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o
29clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o 31clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o
30clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o 32clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o
31clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o 33clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
32clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o 34clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
33 35
@@ -35,9 +37,13 @@ clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
35pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o 37pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o
36pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o 38pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o
37pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o 39pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o
40pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
38pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o 41pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
39pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o 42pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
43pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o
40 44
41obj-y += $(clock-y) 45obj-y += $(clock-y)
42obj-$(CONFIG_SMP) += $(smp-y) 46obj-$(CONFIG_SMP) += $(smp-y)
43obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) 47obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
48obj-$(CONFIG_PERF_EVENTS) += perf_event.o
49obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 0ee3ee861252..93c646072c1b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/clkdev.h>
24#include <asm/clock.h> 25#include <asm/clock.h>
25 26
26/* SH7343 registers */ 27/* SH7343 registers */
@@ -36,8 +37,6 @@
36 37
37/* Fixed 32 KHz root clock for RTC and Power Management purposes */ 38/* Fixed 32 KHz root clock for RTC and Power Management purposes */
38static struct clk r_clk = { 39static struct clk r_clk = {
39 .name = "rclk",
40 .id = -1,
41 .rate = 32768, 40 .rate = 32768,
42}; 41};
43 42
@@ -46,8 +45,6 @@ static struct clk r_clk = {
46 * from the platform code. 45 * from the platform code.
47 */ 46 */
48struct clk extal_clk = { 47struct clk extal_clk = {
49 .name = "extal",
50 .id = -1,
51 .rate = 33333333, 48 .rate = 33333333,
52}; 49};
53 50
@@ -69,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
69}; 66};
70 67
71static struct clk dll_clk = { 68static struct clk dll_clk = {
72 .name = "dll_clk",
73 .id = -1,
74 .ops = &dll_clk_ops, 69 .ops = &dll_clk_ops,
75 .parent = &r_clk, 70 .parent = &r_clk,
76 .flags = CLK_ENABLE_ON_INIT, 71 .flags = CLK_ENABLE_ON_INIT,
@@ -91,8 +86,6 @@ static struct clk_ops pll_clk_ops = {
91}; 86};
92 87
93static struct clk pll_clk = { 88static struct clk pll_clk = {
94 .name = "pll_clk",
95 .id = -1,
96 .ops = &pll_clk_ops, 89 .ops = &pll_clk_ops,
97 .flags = CLK_ENABLE_ON_INIT, 90 .flags = CLK_ENABLE_ON_INIT,
98}; 91};
@@ -107,82 +100,182 @@ struct clk *main_clks[] = {
107static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; 100static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
108static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; 101static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
109 102
110static struct clk_div_mult_table div4_table = { 103static struct clk_div_mult_table div4_div_mult_table = {
111 .divisors = divisors, 104 .divisors = divisors,
112 .nr_divisors = ARRAY_SIZE(divisors), 105 .nr_divisors = ARRAY_SIZE(divisors),
113 .multipliers = multipliers, 106 .multipliers = multipliers,
114 .nr_multipliers = ARRAY_SIZE(multipliers), 107 .nr_multipliers = ARRAY_SIZE(multipliers),
115}; 108};
116 109
110static struct clk_div4_table div4_table = {
111 .div_mult_table = &div4_div_mult_table,
112};
113
117enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, 114enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
118 DIV4_SIUA, DIV4_SIUB, DIV4_NR }; 115 DIV4_SIUA, DIV4_SIUB, DIV4_NR };
119 116
120#define DIV4(_str, _reg, _bit, _mask, _flags) \ 117#define DIV4(_reg, _bit, _mask, _flags) \
121 SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) 118 SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
122 119
123struct clk div4_clks[DIV4_NR] = { 120struct clk div4_clks[DIV4_NR] = {
124 [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT), 121 [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
125 [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), 122 [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
126 [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), 123 [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
127 [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), 124 [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
128 [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), 125 [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
129 [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0), 126 [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
130 [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0), 127 [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
131 [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0), 128 [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
132}; 129};
133 130
134struct clk div6_clks[] = { 131enum { DIV6_V, DIV6_NR };
135 SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), 132
133struct clk div6_clks[DIV6_NR] = {
134 [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
135};
136
137#define MSTP(_parent, _reg, _bit, _flags) \
138 SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
139
140enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
141 MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
142 MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
143 MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
144 MSTP109, MSTP108, MSTP100,
145 MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
146 MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
147 MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
148 MSTP_NR };
149
150static struct clk mstp_clks[MSTP_NR] = {
151 [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
152 [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
153 [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
154 [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
155 [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
156 [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
157 [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
158 [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
159 [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
160 [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
161 [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
162 [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
163 [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
164 [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
165 [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
166 [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
167 [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
168 [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
169 [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
170 [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
171 [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
172 [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
173 [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
174
175 [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
176 [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
177
178 [MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
179 [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
180 [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
181 [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
182 [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
183 [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
184 [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
185 [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
186 [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
187 [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
188 [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
189 [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
190 [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
191 [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
192 [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
193 [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
194 [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
136}; 195};
137 196
138#define MSTP(_str, _parent, _reg, _bit, _flags) \ 197#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
139 SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags) 198
140 199static struct clk_lookup lookups[] = {
141static struct clk mstp_clks[] = { 200 /* main clocks */
142 MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), 201 CLKDEV_CON_ID("rclk", &r_clk),
143 MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), 202 CLKDEV_CON_ID("extal", &extal_clk),
144 MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), 203 CLKDEV_CON_ID("dll_clk", &dll_clk),
145 MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), 204 CLKDEV_CON_ID("pll_clk", &pll_clk),
146 MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), 205
147 MSTP("intc3", &div4_clks[DIV4_P], MSTPCR0, 23, 0), 206 /* DIV4 clocks */
148 MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 0), 207 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
149 MSTP("dmac0", &div4_clks[DIV4_P], MSTPCR0, 21, 0), 208 CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
150 MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0), 209 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
151 MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0), 210 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
152 MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0), 211 CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
153 MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0), 212 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
154 MSTP("cmt0", &r_clk, MSTPCR0, 14, 0), 213 CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
155 MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0), 214 CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
156 MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0), 215
157 MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0), 216 /* DIV6 clocks */
158 MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0), 217 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
159 MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0), 218
160 MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0), 219 /* MSTP32 clocks */
161 MSTP("scif3", &div4_clks[DIV4_P], MSTPCR0, 4, 0), 220 CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
162 MSTP("sio0", &div4_clks[DIV4_P], MSTPCR0, 3, 0), 221 CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
163 MSTP("siof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0), 222 CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
164 MSTP("siof1", &div4_clks[DIV4_P], MSTPCR0, 1, 0), 223 CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
165 224 CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
166 MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0), 225 CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
167 MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0), 226 CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
168 227 CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
169 MSTP("tpu0", &div4_clks[DIV4_P], MSTPCR2, 25, 0), 228 CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
170 MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0), 229 CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
171 MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0), 230 CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
172 MSTP("mmcif0", &div4_clks[DIV4_P], MSTPCR2, 17, 0), 231 CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
173 MSTP("sim0", &div4_clks[DIV4_P], MSTPCR2, 16, 0), 232 CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
174 MSTP("keysc0", &r_clk, MSTPCR2, 14, 0), 233 CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
175 MSTP("tsif0", &div4_clks[DIV4_P], MSTPCR2, 13, 0), 234 CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
176 MSTP("s3d40", &div4_clks[DIV4_P], MSTPCR2, 12, 0), 235 CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
177 MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0), 236 {
178 MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0), 237 /* SCIF0 */
179 MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), 238 .dev_id = "sh-sci.0",
180 MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0), 239 .con_id = "sci_fck",
181 MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0), 240 .clk = &mstp_clks[MSTP007],
182 MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0), 241 }, {
183 MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), 242 /* SCIF1 */
184 MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), 243 .dev_id = "sh-sci.1",
185 MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0), 244 .con_id = "sci_fck",
245 .clk = &mstp_clks[MSTP006],
246 }, {
247 /* SCIF2 */
248 .dev_id = "sh-sci.2",
249 .con_id = "sci_fck",
250 .clk = &mstp_clks[MSTP005],
251 }, {
252 /* SCIF3 */
253 .dev_id = "sh-sci.3",
254 .con_id = "sci_fck",
255 .clk = &mstp_clks[MSTP004],
256 },
257 CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
258 CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
259 CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
260 CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
261 CLKDEV_CON_ID("i2c1", &mstp_clks[MSTP108]),
262 CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
263 CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
264 CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
265 CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
266 CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
267 CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
268 CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
269 CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
270 CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
271 CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
272 CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
273 CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
274 CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
275 CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
276 CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
277 CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
278 CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
186}; 279};
187 280
188int __init arch_clk_init(void) 281int __init arch_clk_init(void)
@@ -198,14 +291,16 @@ int __init arch_clk_init(void)
198 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 291 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
199 ret = clk_register(main_clks[k]); 292 ret = clk_register(main_clks[k]);
200 293
294 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
295
201 if (!ret) 296 if (!ret)
202 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); 297 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
203 298
204 if (!ret) 299 if (!ret)
205 ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); 300 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
206 301
207 if (!ret) 302 if (!ret)
208 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 303 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
209 304
210 return ret; 305 return ret;
211} 306}
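
Note on the clock-sh7343.c hunk above: with the .name/.id fields gone from struct clk, consumers can no longer look a clock up by the clock's own name; resolution now goes through the clkdev table installed with clkdev_add_table(). A minimal consumer-side sketch (the probe function is hypothetical, but the "sh-sci.0"/"sci_fck" strings are exactly the ones registered above):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *fck;

	/* pdev is "sh-sci.0" here, so this matches the
	 * { .dev_id = "sh-sci.0", .con_id = "sci_fck" } entry above. */
	fck = clk_get(&pdev->dev, "sci_fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);

	/* ... hardware access ... */

	clk_disable(fck);
	clk_put(fck);
	return 0;
}

Entries declared with CLKDEV_CON_ID() carry no dev_id and therefore match any device, so clk_get(NULL, "peripheral_clk") resolves to div4_clks[DIV4_P] through the same table.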
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index a95ebaba095c..049dc0628ccc 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/clkdev.h>
24#include <asm/clock.h> 25#include <asm/clock.h>
25 26
26/* SH7366 registers */ 27/* SH7366 registers */
@@ -36,8 +37,6 @@
36 37
37/* Fixed 32 KHz root clock for RTC and Power Management purposes */ 38/* Fixed 32 KHz root clock for RTC and Power Management purposes */
38static struct clk r_clk = { 39static struct clk r_clk = {
39 .name = "rclk",
40 .id = -1,
41 .rate = 32768, 40 .rate = 32768,
42}; 41};
43 42
@@ -46,8 +45,6 @@ static struct clk r_clk = {
46 * from the platform code. 45 * from the platform code.
47 */ 46 */
48struct clk extal_clk = { 47struct clk extal_clk = {
49 .name = "extal",
50 .id = -1,
51 .rate = 33333333, 48 .rate = 33333333,
52}; 49};
53 50
@@ -69,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
69}; 66};
70 67
71static struct clk dll_clk = { 68static struct clk dll_clk = {
72 .name = "dll_clk",
73 .id = -1,
74 .ops = &dll_clk_ops, 69 .ops = &dll_clk_ops,
75 .parent = &r_clk, 70 .parent = &r_clk,
76 .flags = CLK_ENABLE_ON_INIT, 71 .flags = CLK_ENABLE_ON_INIT,
@@ -94,8 +89,6 @@ static struct clk_ops pll_clk_ops = {
94}; 89};
95 90
96static struct clk pll_clk = { 91static struct clk pll_clk = {
97 .name = "pll_clk",
98 .id = -1,
99 .ops = &pll_clk_ops, 92 .ops = &pll_clk_ops,
100 .flags = CLK_ENABLE_ON_INIT, 93 .flags = CLK_ENABLE_ON_INIT,
101}; 94};
@@ -110,79 +103,168 @@ struct clk *main_clks[] = {
110static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; 103static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
111static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; 104static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
112 105
113static struct clk_div_mult_table div4_table = { 106static struct clk_div_mult_table div4_div_mult_table = {
114 .divisors = divisors, 107 .divisors = divisors,
115 .nr_divisors = ARRAY_SIZE(divisors), 108 .nr_divisors = ARRAY_SIZE(divisors),
116 .multipliers = multipliers, 109 .multipliers = multipliers,
117 .nr_multipliers = ARRAY_SIZE(multipliers), 110 .nr_multipliers = ARRAY_SIZE(multipliers),
118}; 111};
119 112
113static struct clk_div4_table div4_table = {
114 .div_mult_table = &div4_div_mult_table,
115};
116
120enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, 117enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
121 DIV4_SIUA, DIV4_SIUB, DIV4_NR }; 118 DIV4_SIUA, DIV4_SIUB, DIV4_NR };
122 119
123#define DIV4(_str, _reg, _bit, _mask, _flags) \ 120#define DIV4(_reg, _bit, _mask, _flags) \
124 SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) 121 SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
125 122
126struct clk div4_clks[DIV4_NR] = { 123struct clk div4_clks[DIV4_NR] = {
127 [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), 124 [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
128 [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), 125 [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
129 [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), 126 [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
130 [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), 127 [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
131 [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), 128 [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
132 [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0), 129 [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
133 [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0), 130 [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
134 [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0), 131 [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
135}; 132};
136 133
137struct clk div6_clks[] = { 134enum { DIV6_V, DIV6_NR };
138 SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), 135
136struct clk div6_clks[DIV6_NR] = {
137 [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
139}; 138};
140 139
141#define MSTP(_str, _parent, _reg, _bit, _flags) \ 140#define MSTP(_parent, _reg, _bit, _flags) \
142 SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags) 141 SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
142
143enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
144 MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
145 MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
146 MSTP007, MSTP006, MSTP005, MSTP002, MSTP001,
147 MSTP109, MSTP100,
148 MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217,
149 MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
150 MSTP_NR };
143 151
144static struct clk mstp_clks[] = { 152static struct clk mstp_clks[MSTP_NR] = {
145 /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */ 153 /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */
146 MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), 154 [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
147 MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), 155 [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
148 MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), 156 [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
149 MSTP("rsmem0", &div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT), 157 [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
150 MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), 158 [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
151 MSTP("intc3", &div4_clks[DIV4_P], MSTPCR0, 23, 0), 159 [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
152 MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 0), 160 [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
153 MSTP("dmac0", &div4_clks[DIV4_P], MSTPCR0, 21, 0), 161 [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
154 MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0), 162 [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
155 MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0), 163 [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
156 MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0), 164 [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
157 MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0), 165 [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
158 MSTP("cmt0", &r_clk, MSTPCR0, 14, 0), 166 [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
159 MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0), 167 [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
160 MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0), 168 [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
161 MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0), 169 [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
162 MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0), 170 [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
163 MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0), 171 [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
164 MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0), 172 [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
165 MSTP("msiof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0), 173 [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
166 MSTP("sbr0", &div4_clks[DIV4_P], MSTPCR0, 1, 0), 174 [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
167 175
168 MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0), 176 [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
169 177
170 MSTP("icb0", &div4_clks[DIV4_P], MSTPCR2, 27, 0), 178 [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
171 MSTP("meram0", &div4_clks[DIV4_P], MSTPCR2, 26, 0), 179 [MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0),
172 MSTP("dacy1", &div4_clks[DIV4_P], MSTPCR2, 24, 0), 180 [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
173 MSTP("dacy0", &div4_clks[DIV4_P], MSTPCR2, 23, 0), 181 [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0),
174 MSTP("tsif0", &div4_clks[DIV4_P], MSTPCR2, 22, 0), 182 [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0),
175 MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0), 183 [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
176 MSTP("mmcif0", &div4_clks[DIV4_P], MSTPCR2, 17, 0), 184 [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
177 MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0), 185 [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
178 MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0), 186 [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
179 MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT), 187 [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
180 MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0), 188 [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
181 MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0), 189 [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
182 MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0), 190 [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
183 MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), 191 [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
184 MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), 192 [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
185 MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0), 193};
194
195#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
196
197static struct clk_lookup lookups[] = {
198 /* main clocks */
199 CLKDEV_CON_ID("rclk", &r_clk),
200 CLKDEV_CON_ID("extal", &extal_clk),
201 CLKDEV_CON_ID("dll_clk", &dll_clk),
202 CLKDEV_CON_ID("pll_clk", &pll_clk),
203
204 /* DIV4 clocks */
205 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
206 CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
207 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
208 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
209 CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
210 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
211 CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
212 CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
213
214 /* DIV6 clocks */
215 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
216
217 /* MSTP32 clocks */
218 CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
219 CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
220 CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
221 CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]),
222 CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
223 CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
224 CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
225 CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
226 CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
227 CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
228 CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
229 CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
230 CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
231 CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
232 CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
233 CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
234 {
235 /* SCIF0 */
236 .dev_id = "sh-sci.0",
237 .con_id = "sci_fck",
238 .clk = &mstp_clks[MSTP007],
239 }, {
240 /* SCIF1 */
241 .dev_id = "sh-sci.1",
242 .con_id = "sci_fck",
243 .clk = &mstp_clks[MSTP006],
244 }, {
245 /* SCIF2 */
246 .dev_id = "sh-sci.2",
247 .con_id = "sci_fck",
248 .clk = &mstp_clks[MSTP005],
249 },
250 CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
251 CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
252 CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
253 CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
254 CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
255 CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
256 CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
257 CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
258 CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
259 CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
260 CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
261 CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
262 CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
263 CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
264 CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
265 CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
266 CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
267 CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
186}; 268};
187 269
188int __init arch_clk_init(void) 270int __init arch_clk_init(void)
@@ -198,14 +280,16 @@ int __init arch_clk_init(void)
198 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 280 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
199 ret = clk_register(main_clks[k]); 281 ret = clk_register(main_clks[k]);
200 282
283 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
284
201 if (!ret) 285 if (!ret)
202 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); 286 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
203 287
204 if (!ret) 288 if (!ret)
205 ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); 289 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
206 290
207 if (!ret) 291 if (!ret)
208 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 292 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
209 293
210 return ret; 294 return ret;
211} 295}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 40f859354f79..9d23a36f0647 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -21,7 +21,10 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/clkdev.h>
24#include <asm/clock.h> 25#include <asm/clock.h>
26#include <asm/hwblk.h>
27#include <cpu/sh7722.h>
25 28
26/* SH7722 registers */ 29/* SH7722 registers */
27#define FRQCR 0xa4150000 30#define FRQCR 0xa4150000
@@ -30,15 +33,10 @@
30#define SCLKBCR 0xa415000c 33#define SCLKBCR 0xa415000c
31#define IRDACLKCR 0xa4150018 34#define IRDACLKCR 0xa4150018
32#define PLLCR 0xa4150024 35#define PLLCR 0xa4150024
33#define MSTPCR0 0xa4150030
34#define MSTPCR1 0xa4150034
35#define MSTPCR2 0xa4150038
36#define DLLFRQ 0xa4150050 36#define DLLFRQ 0xa4150050
37 37
38/* Fixed 32 KHz root clock for RTC and Power Management purposes */ 38/* Fixed 32 KHz root clock for RTC and Power Management purposes */
39static struct clk r_clk = { 39static struct clk r_clk = {
40 .name = "rclk",
41 .id = -1,
42 .rate = 32768, 40 .rate = 32768,
43}; 41};
44 42
@@ -47,8 +45,6 @@ static struct clk r_clk = {
47 * from the platform code. 45 * from the platform code.
48 */ 46 */
49struct clk extal_clk = { 47struct clk extal_clk = {
50 .name = "extal",
51 .id = -1,
52 .rate = 33333333, 48 .rate = 33333333,
53}; 49};
54 50
@@ -70,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
70}; 66};
71 67
72static struct clk dll_clk = { 68static struct clk dll_clk = {
73 .name = "dll_clk",
74 .id = -1,
75 .ops = &dll_clk_ops, 69 .ops = &dll_clk_ops,
76 .parent = &r_clk, 70 .parent = &r_clk,
77 .flags = CLK_ENABLE_ON_INIT, 71 .flags = CLK_ENABLE_ON_INIT,
@@ -95,8 +89,6 @@ static struct clk_ops pll_clk_ops = {
95}; 89};
96 90
97static struct clk pll_clk = { 91static struct clk pll_clk = {
98 .name = "pll_clk",
99 .id = -1,
100 .ops = &pll_clk_ops, 92 .ops = &pll_clk_ops,
101 .flags = CLK_ENABLE_ON_INIT, 93 .flags = CLK_ENABLE_ON_INIT,
102}; 94};
@@ -111,64 +103,153 @@ struct clk *main_clks[] = {
111static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; 103static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
112static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; 104static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
113 105
114static struct clk_div_mult_table div4_table = { 106static struct clk_div_mult_table div4_div_mult_table = {
115 .divisors = divisors, 107 .divisors = divisors,
116 .nr_divisors = ARRAY_SIZE(divisors), 108 .nr_divisors = ARRAY_SIZE(divisors),
117 .multipliers = multipliers, 109 .multipliers = multipliers,
118 .nr_multipliers = ARRAY_SIZE(multipliers), 110 .nr_multipliers = ARRAY_SIZE(multipliers),
119}; 111};
120 112
121enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, 113static struct clk_div4_table div4_table = {
122 DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR }; 114 .div_mult_table = &div4_div_mult_table,
115};
116
117#define DIV4(_reg, _bit, _mask, _flags) \
118 SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
123 119
124#define DIV4(_str, _reg, _bit, _mask, _flags) \ 120enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
125 SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
126 121
127struct clk div4_clks[DIV4_NR] = { 122struct clk div4_clks[DIV4_NR] = {
128 [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), 123 [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
129 [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), 124 [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
130 [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), 125 [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
131 [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), 126 [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
132 [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), 127 [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
133 [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0), 128 [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
134 [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0), 129};
135 [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0), 130
136 [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0), 131enum { DIV4_IRDA, DIV4_ENABLE_NR };
132
133struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
134 [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0),
135};
136
137enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
138
139struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
140 [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
141 [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
142};
143
144enum { DIV6_V, DIV6_NR };
145
146struct clk div6_clks[DIV6_NR] = {
147 [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
137}; 148};
138 149
139struct clk div6_clks[] = { 150static struct clk mstp_clks[HWBLK_NR] = {
140 SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), 151 SH_HWBLK_CLK(HWBLK_URAM, &div4_clks[DIV4_U], CLK_ENABLE_ON_INIT),
152 SH_HWBLK_CLK(HWBLK_XYMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
153 SH_HWBLK_CLK(HWBLK_TMU, &div4_clks[DIV4_P], 0),
154 SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
155 SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
156 SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
157 SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
158 SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
159 SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
160
161 SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
162 SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
163
164 SH_HWBLK_CLK(HWBLK_SDHI, &div4_clks[DIV4_P], 0),
165 SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
166 SH_HWBLK_CLK(HWBLK_USBF, &div4_clks[DIV4_P], 0),
167 SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
168 SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
169 SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
170 SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
171 SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
172 SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
173 SH_HWBLK_CLK(HWBLK_VEU, &div4_clks[DIV4_B], 0),
174 SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
175 SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_P], 0),
141}; 176};
142 177
143#define MSTP(_str, _parent, _reg, _bit, _flags) \ 178#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
144 SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags) 179
145 180static struct clk_lookup lookups[] = {
146static struct clk mstp_clks[] = { 181 /* main clocks */
147 MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), 182 CLKDEV_CON_ID("rclk", &r_clk),
148 MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), 183 CLKDEV_CON_ID("extal", &extal_clk),
149 MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0), 184 CLKDEV_CON_ID("dll_clk", &dll_clk),
150 MSTP("cmt0", &r_clk, MSTPCR0, 14, 0), 185 CLKDEV_CON_ID("pll_clk", &pll_clk),
151 MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0), 186
152 MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0), 187 /* DIV4 clocks */
153 MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0), 188 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
154 MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0), 189 CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
155 MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0), 190 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
156 191 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
157 MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0), 192 CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
158 MSTP("rtc0", &r_clk, MSTPCR1, 8, 0), 193 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
159 194 CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
160 MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0), 195 CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
161 MSTP("keysc0", &r_clk, MSTPCR2, 14, 0), 196 CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
162 MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0), 197
163 MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0), 198 /* DIV6 clocks */
164 MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0), 199 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
165 MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0), 200
166 MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), 201 /* MSTP clocks */
167 MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0), 202 CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
168 MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0), 203 CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
169 MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), 204 {
170 MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), 205 /* TMU0 */
171 MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0), 206 .dev_id = "sh_tmu.0",
207 .con_id = "tmu_fck",
208 .clk = &mstp_clks[HWBLK_TMU],
209 }, {
210 /* TMU1 */
211 .dev_id = "sh_tmu.1",
212 .con_id = "tmu_fck",
213 .clk = &mstp_clks[HWBLK_TMU],
214 }, {
215 /* TMU2 */
216 .dev_id = "sh_tmu.2",
217 .con_id = "tmu_fck",
218 .clk = &mstp_clks[HWBLK_TMU],
219 },
220 CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
221 CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
222 CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
223 {
224 /* SCIF0 */
225 .dev_id = "sh-sci.0",
226 .con_id = "sci_fck",
227 .clk = &mstp_clks[HWBLK_SCIF0],
228 }, {
229 /* SCIF1 */
230 .dev_id = "sh-sci.1",
231 .con_id = "sci_fck",
232 .clk = &mstp_clks[HWBLK_SCIF1],
233 }, {
234 /* SCIF2 */
235 .dev_id = "sh-sci.2",
236 .con_id = "sci_fck",
237 .clk = &mstp_clks[HWBLK_SCIF2],
238 },
239 CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
240 CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
241 CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI]),
242 CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
243 CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
244 CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
245 CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
246 CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
247 CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
248 CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
249 CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
250 CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
251 CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
252 CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
172}; 253};
173 254
174int __init arch_clk_init(void) 255int __init arch_clk_init(void)
@@ -184,14 +265,24 @@ int __init arch_clk_init(void)
184 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 265 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
185 ret = clk_register(main_clks[k]); 266 ret = clk_register(main_clks[k]);
186 267
268 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
269
187 if (!ret) 270 if (!ret)
188 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); 271 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
189 272
190 if (!ret) 273 if (!ret)
191 ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); 274 ret = sh_clk_div4_enable_register(div4_enable_clks,
275 DIV4_ENABLE_NR, &div4_table);
276
277 if (!ret)
278 ret = sh_clk_div4_reparent_register(div4_reparent_clks,
279 DIV4_REPARENT_NR, &div4_table);
280
281 if (!ret)
282 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
192 283
193 if (!ret) 284 if (!ret)
194 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 285 ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
195 286
196 return ret; 287 return ret;
197} 288}
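
Note on the clock-sh7722.c hunk above: the MSTP clocks are no longer described by explicit MSTPCRn register/bit pairs; mstp_clks[] is now indexed by HWBLK_* identifiers from <cpu/sh7722.h>, each entry built with SH_HWBLK_CLK() and registered via sh_hwblk_clk_register(mstp_clks, HWBLK_NR). One hardware block can back several consumers — the three TMU channels above all point at &mstp_clks[HWBLK_TMU]. A hedged sketch of wiring an additional consumer to an existing block, assuming the surrounding clock-sh7722.c context; the device and connection names are assumptions, not taken from this patch:

/* Hypothetical extra lookup sharing the SIU hardware block clock; the
 * dev_id and con_id strings are illustrative assumptions. */
static struct clk_lookup example_lookups[] = {
	{
		.dev_id	= "siu-pcm-audio",
		.con_id	= "siu_clk",
		.clk	= &mstp_clks[HWBLK_SIU],
	},
};

static void __init example_register_lookups(void)
{
	/* several lookups may reference the same per-hwblk clk */
	clkdev_add_table(example_lookups, ARRAY_SIZE(example_lookups));
}

Such a table would be added from arch_clk_init() alongside the main lookups[] table; enabling any of the clocks that share a block keeps the block's MSTP bit cleared until the last user disables it.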
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index e67c2678b8ae..55493cd5bd8f 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -21,7 +21,11 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/clkdev.h>
24#include <asm/clock.h> 26#include <asm/clock.h>
27#include <asm/hwblk.h>
28#include <cpu/sh7723.h>
25 29
26/* SH7723 registers */ 30/* SH7723 registers */
27#define FRQCR 0xa4150000 31#define FRQCR 0xa4150000
@@ -30,15 +34,10 @@
30#define SCLKBCR 0xa415000c 34#define SCLKBCR 0xa415000c
31#define IRDACLKCR 0xa4150018 35#define IRDACLKCR 0xa4150018
32#define PLLCR 0xa4150024 36#define PLLCR 0xa4150024
33#define MSTPCR0 0xa4150030
34#define MSTPCR1 0xa4150034
35#define MSTPCR2 0xa4150038
36#define DLLFRQ 0xa4150050 37#define DLLFRQ 0xa4150050
37 38
38/* Fixed 32 KHz root clock for RTC and Power Management purposes */ 39/* Fixed 32 KHz root clock for RTC and Power Management purposes */
39static struct clk r_clk = { 40static struct clk r_clk = {
40 .name = "rclk",
41 .id = -1,
42 .rate = 32768, 41 .rate = 32768,
43}; 42};
44 43
@@ -47,8 +46,6 @@ static struct clk r_clk = {
47 * from the platform code. 46 * from the platform code.
48 */ 47 */
49struct clk extal_clk = { 48struct clk extal_clk = {
50 .name = "extal",
51 .id = -1,
52 .rate = 33333333, 49 .rate = 33333333,
53}; 50};
54 51
@@ -70,8 +67,6 @@ static struct clk_ops dll_clk_ops = {
70}; 67};
71 68
72static struct clk dll_clk = { 69static struct clk dll_clk = {
73 .name = "dll_clk",
74 .id = -1,
75 .ops = &dll_clk_ops, 70 .ops = &dll_clk_ops,
76 .parent = &r_clk, 71 .parent = &r_clk,
77 .flags = CLK_ENABLE_ON_INIT, 72 .flags = CLK_ENABLE_ON_INIT,
@@ -95,8 +90,6 @@ static struct clk_ops pll_clk_ops = {
95}; 90};
96 91
97static struct clk pll_clk = { 92static struct clk pll_clk = {
98 .name = "pll_clk",
99 .id = -1,
100 .ops = &pll_clk_ops, 93 .ops = &pll_clk_ops,
101 .flags = CLK_ENABLE_ON_INIT, 94 .flags = CLK_ENABLE_ON_INIT,
102}; 95};
@@ -111,89 +104,228 @@ struct clk *main_clks[] = {
111static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; 104static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
112static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; 105static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
113 106
114static struct clk_div_mult_table div4_table = { 107static struct clk_div_mult_table div4_div_mult_table = {
115 .divisors = divisors, 108 .divisors = divisors,
116 .nr_divisors = ARRAY_SIZE(divisors), 109 .nr_divisors = ARRAY_SIZE(divisors),
117 .multipliers = multipliers, 110 .multipliers = multipliers,
118 .nr_multipliers = ARRAY_SIZE(multipliers), 111 .nr_multipliers = ARRAY_SIZE(multipliers),
119}; 112};
120 113
121enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, 114static struct clk_div4_table div4_table = {
122 DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR }; 115 .div_mult_table = &div4_div_mult_table,
116};
117
118enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
123 119
124#define DIV4(_str, _reg, _bit, _mask, _flags) \ 120#define DIV4(_reg, _bit, _mask, _flags) \
125 SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) 121 SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
126 122
127struct clk div4_clks[DIV4_NR] = { 123struct clk div4_clks[DIV4_NR] = {
128 [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT), 124 [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
129 [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT), 125 [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
130 [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT), 126 [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
131 [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT), 127 [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
132 [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT), 128 [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
133 [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0), 129 [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0),
134 [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0), 130};
135 [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0), 131
136 [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0), 132enum { DIV4_IRDA, DIV4_ENABLE_NR };
133
134struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
135 [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0),
137}; 136};
138 137
139struct clk div6_clks[] = { 138enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
140 SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), 139
140struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
141 [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0),
142 [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0),
141}; 143};
144enum { DIV6_V, DIV6_NR };
142 145
143#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ 146struct clk div6_clks[DIV6_NR] = {
144 SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) 147 [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
148};
145 149
146static struct clk mstp_clks[] = { 150static struct clk mstp_clks[] = {
147 /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ 151 /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
148 MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), 152 SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
149 MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), 153 SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
150 MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), 154 SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
151 MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 28, 1, 1, 0), 155 SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
152 MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), 156 SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
153 MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), 157 SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
154 MSTP("intc0", &div4_clks[DIV4_I], MSTPCR0, 22, 1, 1, 0), 158 SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
155 MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), 159 SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
156 MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), 160 SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
157 MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), 161 SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
158 MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), 162 SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
159 MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), 163 SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
160 MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), 164 SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
161 MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), 165 SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
162 MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), 166 SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
163 MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 11, 0, 1, 0), 167 SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
164 MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), 168 SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
165 MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), 169 SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
166 MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), 170 SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
167 MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), 171 SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
168 MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), 172 SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
169 MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), 173 SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
170 MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), 174 SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
171 MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), 175 SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
172 MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), 176 SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
173 MSTP("meram0", &div4_clks[DIV4_SH], MSTPCR0, 0, 1, 1, 0), 177 SH_HWBLK_CLK(HWBLK_MERAM, &div4_clks[DIV4_SH], 0),
174 178
175 MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), 179 SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
176 MSTP("rtc0", &r_clk, MSTPCR1, 8, 0, 0, 0), 180 SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
177 181
178 MSTP("atapi0", &div4_clks[DIV4_SH], MSTPCR2, 28, 0, 1, 0), 182 SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_SH], 0),
179 MSTP("adc0", &div4_clks[DIV4_P], MSTPCR2, 27, 0, 1, 0), 183 SH_HWBLK_CLK(HWBLK_ADC, &div4_clks[DIV4_P], 0),
180 MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), 184 SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
181 MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), 185 SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
182 MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), 186 SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
183 MSTP("icb0", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), 187 SH_HWBLK_CLK(HWBLK_ICB, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
184 MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), 188 SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
185 MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), 189 SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
186 MSTP("keysc0", &r_clk, MSTPCR2, 14, 0, 0, 0), 190 SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
187 MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 11, 0, 1, 0), 191 SH_HWBLK_CLK(HWBLK_USB, &div4_clks[DIV4_B], 0),
188 MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 10, 0, 1, 1), 192 SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
189 MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0, 1, 0), 193 SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
190 MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), 194 SH_HWBLK_CLK(HWBLK_VEU2H1, &div4_clks[DIV4_B], 0),
191 MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), 195 SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
192 MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), 196 SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
193 MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), 197 SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
194 MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), 198 SH_HWBLK_CLK(HWBLK_VEU2H0, &div4_clks[DIV4_B], 0),
195 MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), 199 SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
196 MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), 200 SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
201};
202
203#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
204
205static struct clk_lookup lookups[] = {
206 /* main clocks */
207 CLKDEV_CON_ID("rclk", &r_clk),
208 CLKDEV_CON_ID("extal", &extal_clk),
209 CLKDEV_CON_ID("dll_clk", &dll_clk),
210 CLKDEV_CON_ID("pll_clk", &pll_clk),
211
212 /* DIV4 clocks */
213 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
214 CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
215 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
216 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
217 CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
218 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
219 CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
220 CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
221 CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
222
223 /* DIV6 clocks */
224 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
225
226 /* MSTP clocks */
227 CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
228 CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
229 CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
230 CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
231 CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
232 CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
233 CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
234 CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
235 CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
236 CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
237 CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
238 {
239 /* TMU0 */
240 .dev_id = "sh_tmu.0",
241 .con_id = "tmu_fck",
242 .clk = &mstp_clks[HWBLK_TMU0],
243 }, {
244 /* TMU1 */
245 .dev_id = "sh_tmu.1",
246 .con_id = "tmu_fck",
247 .clk = &mstp_clks[HWBLK_TMU0],
248 }, {
249 /* TMU2 */
250 .dev_id = "sh_tmu.2",
251 .con_id = "tmu_fck",
252 .clk = &mstp_clks[HWBLK_TMU0],
253 },
254 CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
255 CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
256 CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
257 {
258 /* TMU3 */
259 .dev_id = "sh_tmu.3",
260 .con_id = "tmu_fck",
261 .clk = &mstp_clks[HWBLK_TMU1],
262 }, {
263 /* TMU4 */
264 .dev_id = "sh_tmu.4",
265 .con_id = "tmu_fck",
266 .clk = &mstp_clks[HWBLK_TMU1],
267 }, {
268 /* TMU5 */
269 .dev_id = "sh_tmu.5",
270 .con_id = "tmu_fck",
271 .clk = &mstp_clks[HWBLK_TMU1],
272 },
273 CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
274 {
275 /* SCIF0 */
276 .dev_id = "sh-sci.0",
277 .con_id = "sci_fck",
278 .clk = &mstp_clks[HWBLK_SCIF0],
279 }, {
280 /* SCIF1 */
281 .dev_id = "sh-sci.1",
282 .con_id = "sci_fck",
283 .clk = &mstp_clks[HWBLK_SCIF1],
284 }, {
285 /* SCIF2 */
286 .dev_id = "sh-sci.2",
287 .con_id = "sci_fck",
288 .clk = &mstp_clks[HWBLK_SCIF2],
289 }, {
290 /* SCIF3 */
291 .dev_id = "sh-sci.3",
292 .con_id = "sci_fck",
293 .clk = &mstp_clks[HWBLK_SCIF3],
294 }, {
295 /* SCIF4 */
296 .dev_id = "sh-sci.4",
297 .con_id = "sci_fck",
298 .clk = &mstp_clks[HWBLK_SCIF4],
299 }, {
300 /* SCIF5 */
301 .dev_id = "sh-sci.5",
302 .con_id = "sci_fck",
303 .clk = &mstp_clks[HWBLK_SCIF5],
304 },
305 CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
306 CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
307 CLKDEV_CON_ID("meram0", &mstp_clks[HWBLK_MERAM]),
308 CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
309 CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
310 CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
311 CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
312 CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
313 CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
314 CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
315 CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
316 CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
317 CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
318 CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
319 CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
320 CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
321 CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
322 CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
323 CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
324 CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
325 CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
326 CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
327 CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
328 CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
197}; 329};
198 330
199int __init arch_clk_init(void) 331int __init arch_clk_init(void)
@@ -207,16 +339,26 @@ int __init arch_clk_init(void)
207 pll_clk.parent = &extal_clk; 339 pll_clk.parent = &extal_clk;
208 340
209 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 341 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
210 ret = clk_register(main_clks[k]); 342 ret |= clk_register(main_clks[k]);
343
344 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
211 345
212 if (!ret) 346 if (!ret)
213 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); 347 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
214 348
215 if (!ret) 349 if (!ret)
216 ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); 350 ret = sh_clk_div4_enable_register(div4_enable_clks,
351 DIV4_ENABLE_NR, &div4_table);
352
353 if (!ret)
354 ret = sh_clk_div4_reparent_register(div4_reparent_clks,
355 DIV4_REPARENT_NR, &div4_table);
356
357 if (!ret)
358 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
217 359
218 if (!ret) 360 if (!ret)
219 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 361 ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
220 362
221 return ret; 363 return ret;
222} 364}
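
Consumer note on the sh7722 conversion above: the MSTP() entries lose their .name strings, and devices are instead matched through the clk_lookup table handed to clkdev_add_table(), so consumers resolve clocks with clk_get() against a con_id (and optionally a dev_id) rather than a clock name. The snippet below is only an illustrative sketch of such a lookup; the function, device pointer and error handling are assumptions, not part of this commit:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: look up the SIU gate clock through the
 * CLKDEV_CON_ID("siu0", ...) entry registered above, enable it and
 * report its rate. */
static int example_enable_siu_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "siu0");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	dev_info(dev, "siu0 running at %lu Hz\n", clk_get_rate(clk));
	return 0;
}
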
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 5d5c9b952883..d08fa953c88b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -21,7 +21,11 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/clkdev.h>
24#include <asm/clock.h> 26#include <asm/clock.h>
27#include <asm/hwblk.h>
28#include <cpu/sh7724.h>
25 29
26/* SH7724 registers */ 30/* SH7724 registers */
27#define FRQCRA 0xa4150000 31#define FRQCRA 0xa4150000
@@ -31,17 +35,12 @@
31#define FCLKBCR 0xa415000c 35#define FCLKBCR 0xa415000c
32#define IRDACLKCR 0xa4150018 36#define IRDACLKCR 0xa4150018
33#define PLLCR 0xa4150024 37#define PLLCR 0xa4150024
34#define MSTPCR0 0xa4150030
35#define MSTPCR1 0xa4150034
36#define MSTPCR2 0xa4150038
37#define SPUCLKCR 0xa415003c 38#define SPUCLKCR 0xa415003c
38#define FLLFRQ 0xa4150050 39#define FLLFRQ 0xa4150050
39#define LSTATS 0xa4150060 40#define LSTATS 0xa4150060
40 41
41/* Fixed 32 KHz root clock for RTC and Power Management purposes */ 42/* Fixed 32 KHz root clock for RTC and Power Management purposes */
42static struct clk r_clk = { 43static struct clk r_clk = {
43 .name = "rclk",
44 .id = -1,
45 .rate = 32768, 44 .rate = 32768,
46}; 45};
47 46
@@ -49,9 +48,7 @@ static struct clk r_clk = {
49 * Default rate for the root input clock, reset this with clk_set_rate() 48 * Default rate for the root input clock, reset this with clk_set_rate()
50 * from the platform code. 49 * from the platform code.
51 */ 50 */
52struct clk extal_clk = { 51static struct clk extal_clk = {
53 .name = "extal",
54 .id = -1,
55 .rate = 33333333, 52 .rate = 33333333,
56}; 53};
57 54
@@ -75,8 +72,6 @@ static struct clk_ops fll_clk_ops = {
75}; 72};
76 73
77static struct clk fll_clk = { 74static struct clk fll_clk = {
78 .name = "fll_clk",
79 .id = -1,
80 .ops = &fll_clk_ops, 75 .ops = &fll_clk_ops,
81 .parent = &r_clk, 76 .parent = &r_clk,
82 .flags = CLK_ENABLE_ON_INIT, 77 .flags = CLK_ENABLE_ON_INIT,
@@ -97,8 +92,6 @@ static struct clk_ops pll_clk_ops = {
97}; 92};
98 93
99static struct clk pll_clk = { 94static struct clk pll_clk = {
100 .name = "pll_clk",
101 .id = -1,
102 .ops = &pll_clk_ops, 95 .ops = &pll_clk_ops,
103 .flags = CLK_ENABLE_ON_INIT, 96 .flags = CLK_ENABLE_ON_INIT,
104}; 97};
@@ -114,106 +107,282 @@ static struct clk_ops div3_clk_ops = {
114}; 107};
115 108
116static struct clk div3_clk = { 109static struct clk div3_clk = {
117 .name = "div3_clk",
118 .id = -1,
119 .ops = &div3_clk_ops, 110 .ops = &div3_clk_ops,
120 .parent = &pll_clk, 111 .parent = &pll_clk,
121}; 112};
122 113
123struct clk *main_clks[] = { 114/* External input clock (pin name: FSIMCKA/FSIMCKB) */
115struct clk sh7724_fsimcka_clk = {
116};
117
118struct clk sh7724_fsimckb_clk = {
119};
120
121static struct clk *main_clks[] = {
124 &r_clk, 122 &r_clk,
125 &extal_clk, 123 &extal_clk,
126 &fll_clk, 124 &fll_clk,
127 &pll_clk, 125 &pll_clk,
128 &div3_clk, 126 &div3_clk,
127 &sh7724_fsimcka_clk,
128 &sh7724_fsimckb_clk,
129}; 129};
130 130
131static int divisors[] = { 2, 0, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; 131static void div4_kick(struct clk *clk)
132{
133 unsigned long value;
134
135 /* set KICK bit in FRQCRA to update hardware setting */
136 value = __raw_readl(FRQCRA);
137 value |= (1 << 31);
138 __raw_writel(value, FRQCRA);
139}
140
141static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
132 142
133static struct clk_div_mult_table div4_table = { 143static struct clk_div_mult_table div4_div_mult_table = {
134 .divisors = divisors, 144 .divisors = divisors,
135 .nr_divisors = ARRAY_SIZE(divisors), 145 .nr_divisors = ARRAY_SIZE(divisors),
136}; 146};
137 147
148static struct clk_div4_table div4_table = {
149 .div_mult_table = &div4_div_mult_table,
150 .kick = div4_kick,
151};
152
138enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR }; 153enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
139 154
140#define DIV4(_str, _reg, _bit, _mask, _flags) \ 155#define DIV4(_reg, _bit, _mask, _flags) \
141 SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) 156 SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
142 157
143struct clk div4_clks[DIV4_NR] = { 158struct clk div4_clks[DIV4_NR] = {
144 [DIV4_I] = DIV4("cpu_clk", FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT), 159 [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
145 [DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT), 160 [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
146 [DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT), 161 [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
147 [DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0), 162 [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
148 [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, 0), 163 [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
149}; 164};
150 165
151struct clk div6_clks[] = { 166enum { DIV6_V, DIV6_I, DIV6_S, DIV6_NR };
152 SH_CLK_DIV6("video_clk", &div3_clk, VCLKCR, 0), 167
153 SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0), 168static struct clk div6_clks[DIV6_NR] = {
154 SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0), 169 [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
155 SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0), 170 [DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
156 SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), 171 [DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
157}; 172};
158 173
159#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ 174enum { DIV6_FA, DIV6_FB, DIV6_REPARENT_NR };
160 SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) 175
161 176/* Indices are important - they are the actual src selecting values */
162static struct clk mstp_clks[] = { 177static struct clk *fclkacr_parent[] = {
163 MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), 178 [0] = &div3_clk,
164 MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), 179 [1] = NULL,
165 MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), 180 [2] = &sh7724_fsimcka_clk,
166 MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0), 181 [3] = NULL,
167 MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), 182};
168 MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0), 183
169 MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), 184static struct clk *fclkbcr_parent[] = {
170 MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0), 185 [0] = &div3_clk,
171 MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), 186 [1] = NULL,
172 MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), 187 [2] = &sh7724_fsimckb_clk,
173 MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), 188 [3] = NULL,
174 MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), 189};
175 MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), 190
176 MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), 191static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
177 MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), 192 [DIV6_FA] = SH_CLK_DIV6_EXT(&div3_clk, FCLKACR, 0,
178 MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), 193 fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
179 MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), 194 [DIV6_FB] = SH_CLK_DIV6_EXT(&div3_clk, FCLKBCR, 0,
180 MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), 195 fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
181 MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), 196};
182 MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), 197
183 MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), 198static struct clk mstp_clks[HWBLK_NR] = {
184 MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), 199 SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
185 MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), 200 SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
186 MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), 201 SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
187 MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), 202 SH_HWBLK_CLK(HWBLK_RSMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
188 203 SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
189 MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0), 204 SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
190 MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0), 205 SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
191 MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), 206 SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_P], CLK_ENABLE_ON_INIT),
192 MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0), 207 SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
193 208 SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
194 MSTP("mmc0", &div4_clks[DIV4_B], MSTPCR2, 29, 0, 1, 0), 209 SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
195 MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0), 210 SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
196 MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0), 211 SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
197 MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), 212 SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
198 MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), 213 SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
199 MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), 214 SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
200 MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), 215 SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
201 MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1), 216 SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
202 MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1), 217 SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
203 MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), 218 SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
204 MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), 219 SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
205 MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1), 220 SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
206 MSTP("ceu1", &div4_clks[DIV4_B], MSTPCR2, 13, 0, 1, 1), 221 SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
207 MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1), 222 SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
208 MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1), 223 SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
209 MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0), 224
210 MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), 225 SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
211 MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), 226 SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
212 MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), 227 SH_HWBLK_CLK(HWBLK_IIC0, &div4_clks[DIV4_P], 0),
213 MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), 228 SH_HWBLK_CLK(HWBLK_IIC1, &div4_clks[DIV4_P], 0),
214 MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), 229
215 MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), 230 SH_HWBLK_CLK(HWBLK_MMC, &div4_clks[DIV4_B], 0),
216 MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), 231 SH_HWBLK_CLK(HWBLK_ETHER, &div4_clks[DIV4_B], 0),
232 SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_B], 0),
233 SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
234 SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
235 SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
236 SH_HWBLK_CLK(HWBLK_USB1, &div4_clks[DIV4_B], 0),
237 SH_HWBLK_CLK(HWBLK_USB0, &div4_clks[DIV4_B], 0),
238 SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
239 SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
240 SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
241 SH_HWBLK_CLK(HWBLK_VEU1, &div4_clks[DIV4_B], 0),
242 SH_HWBLK_CLK(HWBLK_CEU1, &div4_clks[DIV4_B], 0),
243 SH_HWBLK_CLK(HWBLK_BEU1, &div4_clks[DIV4_B], 0),
244 SH_HWBLK_CLK(HWBLK_2DDMAC, &div4_clks[DIV4_SH], 0),
245 SH_HWBLK_CLK(HWBLK_SPU, &div4_clks[DIV4_B], 0),
246 SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
247 SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
248 SH_HWBLK_CLK(HWBLK_BEU0, &div4_clks[DIV4_B], 0),
249 SH_HWBLK_CLK(HWBLK_CEU0, &div4_clks[DIV4_B], 0),
250 SH_HWBLK_CLK(HWBLK_VEU0, &div4_clks[DIV4_B], 0),
251 SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
252 SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
253};
254
255#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
256
257static struct clk_lookup lookups[] = {
258 /* main clocks */
259 CLKDEV_CON_ID("rclk", &r_clk),
260 CLKDEV_CON_ID("extal", &extal_clk),
261 CLKDEV_CON_ID("fll_clk", &fll_clk),
262 CLKDEV_CON_ID("pll_clk", &pll_clk),
263 CLKDEV_CON_ID("div3_clk", &div3_clk),
264
265 /* DIV4 clocks */
266 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
267 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
268 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
269 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
270 CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),
271
272 /* DIV6 clocks */
273 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
274 CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FA]),
275 CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FB]),
276 CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
277 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
278
279 /* MSTP clocks */
280 CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
281 CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
282 CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
283 CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
284 CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
285 CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
286 CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
287 CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
288 CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
289 CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
290 CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
291 CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
292 {
293 /* TMU0 */
294 .dev_id = "sh_tmu.0",
295 .con_id = "tmu_fck",
296 .clk = &mstp_clks[HWBLK_TMU0],
297 }, {
298 /* TMU1 */
299 .dev_id = "sh_tmu.1",
300 .con_id = "tmu_fck",
301 .clk = &mstp_clks[HWBLK_TMU0],
302 }, {
303 /* TMU2 */
304 .dev_id = "sh_tmu.2",
305 .con_id = "tmu_fck",
306 .clk = &mstp_clks[HWBLK_TMU0],
307 }, {
308 /* TMU3 */
309 .dev_id = "sh_tmu.3",
310 .con_id = "tmu_fck",
311 .clk = &mstp_clks[HWBLK_TMU1],
312 },
313 CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
314 CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
315 CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
316 {
317 /* TMU4 */
318 .dev_id = "sh_tmu.4",
319 .con_id = "tmu_fck",
320 .clk = &mstp_clks[HWBLK_TMU1],
321 }, {
322 /* TMU5 */
323 .dev_id = "sh_tmu.5",
324 .con_id = "tmu_fck",
325 .clk = &mstp_clks[HWBLK_TMU1],
326 }, {
327 /* SCIF0 */
328 .dev_id = "sh-sci.0",
329 .con_id = "sci_fck",
330 .clk = &mstp_clks[HWBLK_SCIF0],
331 }, {
332 /* SCIF1 */
333 .dev_id = "sh-sci.1",
334 .con_id = "sci_fck",
335 .clk = &mstp_clks[HWBLK_SCIF1],
336 }, {
337 /* SCIF2 */
338 .dev_id = "sh-sci.2",
339 .con_id = "sci_fck",
340 .clk = &mstp_clks[HWBLK_SCIF2],
341 }, {
342 /* SCIF3 */
343 .dev_id = "sh-sci.3",
344 .con_id = "sci_fck",
345 .clk = &mstp_clks[HWBLK_SCIF3],
346 }, {
347 /* SCIF4 */
348 .dev_id = "sh-sci.4",
349 .con_id = "sci_fck",
350 .clk = &mstp_clks[HWBLK_SCIF4],
351 }, {
352 /* SCIF5 */
353 .dev_id = "sh-sci.5",
354 .con_id = "sci_fck",
355 .clk = &mstp_clks[HWBLK_SCIF5],
356 },
357 CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
358 CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
359 CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
360 CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
361 CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC0]),
362 CLKDEV_CON_ID("i2c1", &mstp_clks[HWBLK_IIC1]),
363 CLKDEV_CON_ID("mmc0", &mstp_clks[HWBLK_MMC]),
364 CLKDEV_CON_ID("eth0", &mstp_clks[HWBLK_ETHER]),
365 CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
366 CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
367 CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
368 CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
369 CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
370 CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
371 CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
372 CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
373 CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
374 CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
375 CLKDEV_CON_ID("ceu1", &mstp_clks[HWBLK_CEU1]),
376 CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
377 CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
378 CLKDEV_CON_ID("spu0", &mstp_clks[HWBLK_SPU]),
379 CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
380 CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
381 CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
382 CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU0]),
383 CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
384 CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
385 CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
217}; 386};
218 387
219int __init arch_clk_init(void) 388int __init arch_clk_init(void)
@@ -229,14 +398,19 @@ int __init arch_clk_init(void)
229 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 398 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
230 ret = clk_register(main_clks[k]); 399 ret = clk_register(main_clks[k]);
231 400
401 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
402
232 if (!ret) 403 if (!ret)
233 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); 404 ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
234 405
235 if (!ret) 406 if (!ret)
236 ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); 407 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
408
409 if (!ret)
410 ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
237 411
238 if (!ret) 412 if (!ret)
239 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 413 ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
240 414
241 return ret; 415 return ret;
242} 416}
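
The sh7724 hunk above splits the DIV6 clocks into plain and "reparent" variants: fclkacr_parent[]/fclkbcr_parent[] are indexed by the value of the two-bit source-select field in FCLKACR/FCLKBCR (NULL slots mark reserved settings), and the new sh7724_fsimcka_clk/sh7724_fsimckb_clk entries model the external FSIMCK pins. A rough sketch of how board code might route FSI port A to the external pin clock follows; the rate, the init hook and the error handling are illustrative assumptions, not taken from this commit:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <cpu/sh7724.h>		/* assumed to declare sh7724_fsimcka_clk */

static int __init example_route_fsia_to_fsimcka(void)
{
	struct clk *fsia;
	int ret;

	/* Tell the framework what the external FSIMCKA pin runs at
	 * (11.2896 MHz is only an example value). */
	clk_set_rate(&sh7724_fsimcka_clk, 11289600);

	fsia = clk_get(NULL, "fsia_clk");	/* con_id from the lookup table */
	if (IS_ERR(fsia))
		return PTR_ERR(fsia);

	/* Selects index 2 of fclkacr_parent[]; the framework updates the
	 * source-select bits in FCLKACR accordingly. */
	ret = clk_set_parent(fsia, &sh7724_fsimcka_clk);

	clk_put(fsia);
	return ret;
}
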
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
new file mode 100644
index 000000000000..e073e3eb4c3d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -0,0 +1,162 @@
1/*
 2 * arch/sh/kernel/cpu/sh4a/clock-sh7757.c
3 *
4 * SH7757 support for the clock framework
5 *
6 * Copyright (C) 2009-2010 Renesas Solutions Corp.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/io.h>
15#include <linux/clkdev.h>
16#include <asm/clock.h>
17#include <asm/freq.h>
18
19/*
20 * Default rate for the root input clock, reset this with clk_set_rate()
21 * from the platform code.
22 */
23static struct clk extal_clk = {
24 .rate = 48000000,
25};
26
27static unsigned long pll_recalc(struct clk *clk)
28{
29 int multiplier;
30
31 multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;
32
33 return clk->parent->rate * multiplier;
34}
35
36static struct clk_ops pll_clk_ops = {
37 .recalc = pll_recalc,
38};
39
40static struct clk pll_clk = {
41 .ops = &pll_clk_ops,
42 .parent = &extal_clk,
43 .flags = CLK_ENABLE_ON_INIT,
44};
45
46static struct clk *clks[] = {
47 &extal_clk,
48 &pll_clk,
49};
50
51static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
52 1, 1, 1, 16, 1, 24, 1, 1 };
53
54static struct clk_div_mult_table div4_div_mult_table = {
55 .divisors = div2,
56 .nr_divisors = ARRAY_SIZE(div2),
57};
58
59static struct clk_div4_table div4_table = {
60 .div_mult_table = &div4_div_mult_table,
61};
62
63enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };
64
65#define DIV4(_bit, _mask, _flags) \
66 SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)
67
68struct clk div4_clks[DIV4_NR] = {
69 /*
 70 * The P clock is always enabled, because some P clock modules are
 71 * used by the Host PC.
72 */
73 [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
74 [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
75 [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
76};
77
78#define MSTPCR0 0xffc80030
79#define MSTPCR1 0xffc80034
80
81enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112,
82 MSTP111, MSTP110, MSTP103, MSTP102,
83 MSTP_NR };
84
85static struct clk mstp_clks[MSTP_NR] = {
86 /* MSTPCR0 */
87 [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
88 [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
89
90 /* MSTPCR1 */
91 [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
92 [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
93 [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
94 [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
95 [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
96 [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
97 [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
98};
99
100#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
101
102static struct clk_lookup lookups[] = {
103 /* main clocks */
104 CLKDEV_CON_ID("extal", &extal_clk),
105 CLKDEV_CON_ID("pll_clk", &pll_clk),
106
107 /* DIV4 clocks */
108 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
109 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
110 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
111
112 /* MSTP32 clocks */
113 CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
114 CLKDEV_CON_ID("riic", &mstp_clks[MSTP000]),
115 {
116 /* TMU0 */
117 .dev_id = "sh_tmu.0",
118 .con_id = "tmu_fck",
119 .clk = &mstp_clks[MSTP113],
120 }, {
121 /* TMU1 */
122 .dev_id = "sh_tmu.1",
123 .con_id = "tmu_fck",
124 .clk = &mstp_clks[MSTP114],
125 },
126 {
127 /* SCIF4 (but its device ID is 2) */
128 .dev_id = "sh-sci.2",
129 .con_id = "sci_fck",
130 .clk = &mstp_clks[MSTP112],
131 }, {
132 /* SCIF3 */
133 .dev_id = "sh-sci.1",
134 .con_id = "sci_fck",
135 .clk = &mstp_clks[MSTP111],
136 }, {
137 /* SCIF2 */
138 .dev_id = "sh-sci.0",
139 .con_id = "sci_fck",
140 .clk = &mstp_clks[MSTP110],
141 },
142 CLKDEV_CON_ID("usb0", &mstp_clks[MSTP102]),
143};
144
145int __init arch_clk_init(void)
146{
147 int i, ret = 0;
148
149 for (i = 0; i < ARRAY_SIZE(clks); i++)
150 ret |= clk_register(clks[i]);
151 for (i = 0; i < ARRAY_SIZE(lookups); i++)
152 clkdev_add(&lookups[i]);
153
154 if (!ret)
155 ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
156 &div4_table);
157 if (!ret)
158 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
159
160 return ret;
161}
162
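
As the comment at the top of the new clock-sh7757.c says, the 48 MHz extal rate is only a default and is meant to be overridden from platform code with clk_set_rate(). A minimal sketch of that override, assuming a hypothetical board with a 33.33 MHz input clock:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static void __init example_fixup_extal_rate(void)
{
	/* Matches the CLKDEV_CON_ID("extal", ...) entry above. */
	struct clk *extal = clk_get(NULL, "extal");

	if (!IS_ERR(extal)) {
		clk_set_rate(extal, 33333333);	/* example crystal frequency */
		clk_put(extal);
	}
}
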
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 370cd47642ef..599630fc4d3b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -12,6 +12,8 @@
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/io.h>
16#include <linux/clkdev.h>
15#include <asm/clock.h> 17#include <asm/clock.h>
16#include <asm/freq.h> 18#include <asm/freq.h>
17#include <asm/io.h> 19#include <asm/io.h>
@@ -22,7 +24,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
22 24
23static void master_clk_init(struct clk *clk) 25static void master_clk_init(struct clk *clk)
24{ 26{
25 clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07]; 27 clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
26} 28}
27 29
28static struct clk_ops sh7763_master_clk_ops = { 30static struct clk_ops sh7763_master_clk_ops = {
@@ -31,7 +33,7 @@ static struct clk_ops sh7763_master_clk_ops = {
31 33
32static unsigned long module_clk_recalc(struct clk *clk) 34static unsigned long module_clk_recalc(struct clk *clk)
33{ 35{
34 int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07); 36 int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
35 return clk->parent->rate / p0fc_divisors[idx]; 37 return clk->parent->rate / p0fc_divisors[idx];
36} 38}
37 39
@@ -41,7 +43,7 @@ static struct clk_ops sh7763_module_clk_ops = {
41 43
42static unsigned long bus_clk_recalc(struct clk *clk) 44static unsigned long bus_clk_recalc(struct clk *clk)
43{ 45{
44 int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07); 46 int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
45 return clk->parent->rate / bfc_divisors[idx]; 47 return clk->parent->rate / bfc_divisors[idx];
46} 48}
47 49
@@ -68,7 +70,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
68 70
69static unsigned long shyway_clk_recalc(struct clk *clk) 71static unsigned long shyway_clk_recalc(struct clk *clk)
70{ 72{
71 int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07); 73 int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
72 return clk->parent->rate / cfc_divisors[idx]; 74 return clk->parent->rate / cfc_divisors[idx];
73} 75}
74 76
@@ -77,7 +79,6 @@ static struct clk_ops sh7763_shyway_clk_ops = {
77}; 79};
78 80
79static struct clk sh7763_shyway_clk = { 81static struct clk sh7763_shyway_clk = {
80 .name = "shyway_clk",
81 .flags = CLK_ENABLE_ON_INIT, 82 .flags = CLK_ENABLE_ON_INIT,
82 .ops = &sh7763_shyway_clk_ops, 83 .ops = &sh7763_shyway_clk_ops,
83}; 84};
@@ -90,6 +91,13 @@ static struct clk *sh7763_onchip_clocks[] = {
90 &sh7763_shyway_clk, 91 &sh7763_shyway_clk,
91}; 92};
92 93
94#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
95
96static struct clk_lookup lookups[] = {
97 /* main clocks */
98 CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
99};
100
93int __init arch_clk_init(void) 101int __init arch_clk_init(void)
94{ 102{
95 struct clk *clk; 103 struct clk *clk;
@@ -107,5 +115,7 @@ int __init arch_clk_init(void)
107 115
108 clk_put(clk); 116 clk_put(clk);
109 117
118 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
119
110 return ret; 120 return ret;
111} 121}
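
Two registration styles for the lookup tables appear in these files: clock-sh7763.c (like the sh7722/sh7724 conversions) passes the whole array to clkdev_add_table(), while clock-sh7757.c and clock-sh7785.c add the entries one by one with clkdev_add(). For a static table the two are equivalent; a sketch of the loop form, assuming a lookups[] array shaped like the ones above:

#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/types.h>

/* Entry-by-entry registration; functionally the same as a single
 * clkdev_add_table(lookups, ARRAY_SIZE(lookups)) call. */
static void __init example_register_lookups(struct clk_lookup *lookups,
					    size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++)
		clkdev_add(&lookups[i]);
}
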
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index e0b896769205..9e3354365d40 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -21,7 +21,7 @@ static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 };
21 21
22static void master_clk_init(struct clk *clk) 22static void master_clk_init(struct clk *clk)
23{ 23{
24 clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> 28) & 0x000f]; 24 clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
25} 25}
26 26
27static struct clk_ops sh7770_master_clk_ops = { 27static struct clk_ops sh7770_master_clk_ops = {
@@ -30,7 +30,7 @@ static struct clk_ops sh7770_master_clk_ops = {
30 30
31static unsigned long module_clk_recalc(struct clk *clk) 31static unsigned long module_clk_recalc(struct clk *clk)
32{ 32{
33 int idx = ((ctrl_inl(FRQCR) >> 28) & 0x000f); 33 int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
34 return clk->parent->rate / pfc_divisors[idx]; 34 return clk->parent->rate / pfc_divisors[idx];
35} 35}
36 36
@@ -40,7 +40,7 @@ static struct clk_ops sh7770_module_clk_ops = {
40 40
41static unsigned long bus_clk_recalc(struct clk *clk) 41static unsigned long bus_clk_recalc(struct clk *clk)
42{ 42{
43 int idx = (ctrl_inl(FRQCR) & 0x000f); 43 int idx = (__raw_readl(FRQCR) & 0x000f);
44 return clk->parent->rate / bfc_divisors[idx]; 44 return clk->parent->rate / bfc_divisors[idx];
45} 45}
46 46
@@ -50,7 +50,7 @@ static struct clk_ops sh7770_bus_clk_ops = {
50 50
51static unsigned long cpu_clk_recalc(struct clk *clk) 51static unsigned long cpu_clk_recalc(struct clk *clk)
52{ 52{
53 int idx = ((ctrl_inl(FRQCR) >> 24) & 0x000f); 53 int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
54 return clk->parent->rate / ifc_divisors[idx]; 54 return clk->parent->rate / ifc_divisors[idx];
55} 55}
56 56
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index a249d823578e..8894926479a6 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -11,6 +11,8 @@
11 */ 11 */
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/io.h>
15#include <linux/clkdev.h>
14#include <asm/clock.h> 16#include <asm/clock.h>
15#include <asm/freq.h> 17#include <asm/freq.h>
16#include <asm/io.h> 18#include <asm/io.h>
@@ -22,7 +24,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
22 24
23static void master_clk_init(struct clk *clk) 25static void master_clk_init(struct clk *clk)
24{ 26{
25 clk->rate *= pfc_divisors[ctrl_inl(FRQCR) & 0x0003]; 27 clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
26} 28}
27 29
28static struct clk_ops sh7780_master_clk_ops = { 30static struct clk_ops sh7780_master_clk_ops = {
@@ -31,7 +33,7 @@ static struct clk_ops sh7780_master_clk_ops = {
31 33
32static unsigned long module_clk_recalc(struct clk *clk) 34static unsigned long module_clk_recalc(struct clk *clk)
33{ 35{
34 int idx = (ctrl_inl(FRQCR) & 0x0003); 36 int idx = (__raw_readl(FRQCR) & 0x0003);
35 return clk->parent->rate / pfc_divisors[idx]; 37 return clk->parent->rate / pfc_divisors[idx];
36} 38}
37 39
@@ -41,7 +43,7 @@ static struct clk_ops sh7780_module_clk_ops = {
41 43
42static unsigned long bus_clk_recalc(struct clk *clk) 44static unsigned long bus_clk_recalc(struct clk *clk)
43{ 45{
44 int idx = ((ctrl_inl(FRQCR) >> 16) & 0x0007); 46 int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
45 return clk->parent->rate / bfc_divisors[idx]; 47 return clk->parent->rate / bfc_divisors[idx];
46} 48}
47 49
@@ -51,7 +53,7 @@ static struct clk_ops sh7780_bus_clk_ops = {
51 53
52static unsigned long cpu_clk_recalc(struct clk *clk) 54static unsigned long cpu_clk_recalc(struct clk *clk)
53{ 55{
54 int idx = ((ctrl_inl(FRQCR) >> 24) & 0x0001); 56 int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
55 return clk->parent->rate / ifc_divisors[idx]; 57 return clk->parent->rate / ifc_divisors[idx];
56} 58}
57 59
@@ -74,7 +76,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
74 76
75static unsigned long shyway_clk_recalc(struct clk *clk) 77static unsigned long shyway_clk_recalc(struct clk *clk)
76{ 78{
77 int idx = ((ctrl_inl(FRQCR) >> 20) & 0x0007); 79 int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
78 return clk->parent->rate / cfc_divisors[idx]; 80 return clk->parent->rate / cfc_divisors[idx];
79} 81}
80 82
@@ -83,7 +85,6 @@ static struct clk_ops sh7780_shyway_clk_ops = {
83}; 85};
84 86
85static struct clk sh7780_shyway_clk = { 87static struct clk sh7780_shyway_clk = {
86 .name = "shyway_clk",
87 .flags = CLK_ENABLE_ON_INIT, 88 .flags = CLK_ENABLE_ON_INIT,
88 .ops = &sh7780_shyway_clk_ops, 89 .ops = &sh7780_shyway_clk_ops,
89}; 90};
@@ -96,6 +97,13 @@ static struct clk *sh7780_onchip_clocks[] = {
96 &sh7780_shyway_clk, 97 &sh7780_shyway_clk,
97}; 98};
98 99
100#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
101
102static struct clk_lookup lookups[] = {
103 /* main clocks */
104 CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
105};
106
99int __init arch_clk_init(void) 107int __init arch_clk_init(void)
100{ 108{
101 struct clk *clk; 109 struct clk *clk;
@@ -113,5 +121,7 @@ int __init arch_clk_init(void)
113 121
114 clk_put(clk); 122 clk_put(clk);
115 123
124 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
125
116 return ret; 126 return ret;
117} 127}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index 73abfbf2f16d..2d960247f3eb 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * SH7785 support for the clock framework 4 * SH7785 support for the clock framework
5 * 5 *
6 * Copyright (C) 2007 - 2009 Paul Mundt 6 * Copyright (C) 2007 - 2010 Paul Mundt
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
@@ -14,6 +14,7 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
17#include <linux/clkdev.h>
17#include <asm/clock.h> 18#include <asm/clock.h>
18#include <asm/freq.h> 19#include <asm/freq.h>
19#include <cpu/sh7785.h> 20#include <cpu/sh7785.h>
@@ -23,8 +24,6 @@
23 * from the platform code. 24 * from the platform code.
24 */ 25 */
25static struct clk extal_clk = { 26static struct clk extal_clk = {
26 .name = "extal",
27 .id = -1,
28 .rate = 33333333, 27 .rate = 33333333,
29}; 28};
30 29
@@ -42,8 +41,6 @@ static struct clk_ops pll_clk_ops = {
42}; 41};
43 42
44static struct clk pll_clk = { 43static struct clk pll_clk = {
45 .name = "pll_clk",
46 .id = -1,
47 .ops = &pll_clk_ops, 44 .ops = &pll_clk_ops,
48 .parent = &extal_clk, 45 .parent = &extal_clk,
49 .flags = CLK_ENABLE_ON_INIT, 46 .flags = CLK_ENABLE_ON_INIT,
@@ -57,56 +54,161 @@ static struct clk *clks[] = {
57static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, 54static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
58 24, 32, 36, 48 }; 55 24, 32, 36, 48 };
59 56
60static struct clk_div_mult_table div4_table = { 57static struct clk_div_mult_table div4_div_mult_table = {
61 .divisors = div2, 58 .divisors = div2,
62 .nr_divisors = ARRAY_SIZE(div2), 59 .nr_divisors = ARRAY_SIZE(div2),
63}; 60};
64 61
62static struct clk_div4_table div4_table = {
63 .div_mult_table = &div4_div_mult_table,
64};
65
65enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, 66enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
66 DIV4_DU, DIV4_P, DIV4_NR }; 67 DIV4_DU, DIV4_P, DIV4_NR };
67 68
68#define DIV4(_str, _bit, _mask, _flags) \ 69#define DIV4(_bit, _mask, _flags) \
69 SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags) 70 SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
70 71
71struct clk div4_clks[DIV4_NR] = { 72struct clk div4_clks[DIV4_NR] = {
72 [DIV4_P] = DIV4("peripheral_clk", 0, 0x0f80, 0), 73 [DIV4_P] = DIV4(0, 0x0f80, 0),
73 [DIV4_DU] = DIV4("du_clk", 4, 0x0ff0, 0), 74 [DIV4_DU] = DIV4(4, 0x0ff0, 0),
74 [DIV4_GA] = DIV4("ga_clk", 8, 0x0030, 0), 75 [DIV4_GA] = DIV4(8, 0x0030, 0),
75 [DIV4_DDR] = DIV4("ddr_clk", 12, 0x000c, CLK_ENABLE_ON_INIT), 76 [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
76 [DIV4_B] = DIV4("bus_clk", 16, 0x0fe0, CLK_ENABLE_ON_INIT), 77 [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
77 [DIV4_SH] = DIV4("shyway_clk", 20, 0x000c, CLK_ENABLE_ON_INIT), 78 [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
78 [DIV4_U] = DIV4("umem_clk", 24, 0x000c, CLK_ENABLE_ON_INIT), 79 [DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT),
79 [DIV4_I] = DIV4("cpu_clk", 28, 0x000e, CLK_ENABLE_ON_INIT), 80 [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
80}; 81};
81 82
82#define MSTPCR0 0xffc80030 83#define MSTPCR0 0xffc80030
83#define MSTPCR1 0xffc80034 84#define MSTPCR1 0xffc80034
84 85
85static struct clk mstp_clks[] = { 86enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
87 MSTP021, MSTP020, MSTP017, MSTP016,
88 MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002,
89 MSTP119, MSTP117, MSTP105, MSTP104, MSTP100,
90 MSTP_NR };
91
92static struct clk mstp_clks[MSTP_NR] = {
86 /* MSTPCR0 */ 93 /* MSTPCR0 */
87 SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0), 94 [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
88 SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0), 95 [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
89 SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0), 96 [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
90 SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0), 97 [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
91 SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0), 98 [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
92 SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0), 99 [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
93 SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0), 100 [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
94 SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0), 101 [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
95 SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0), 102 [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
96 SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0), 103 [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
97 SH_CLK_MSTP32("mmcif_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 13, 0), 104 [MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0),
98 SH_CLK_MSTP32("flctl_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 12, 0), 105 [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
99 SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0), 106 [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
100 SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0), 107 [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
101 SH_CLK_MSTP32("siof_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 3, 0), 108 [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
102 SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0), 109 [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
103 110
104 /* MSTPCR1 */ 111 /* MSTPCR1 */
105 SH_CLK_MSTP32("hudi_fck", -1, NULL, MSTPCR1, 19, 0), 112 [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
106 SH_CLK_MSTP32("ubc_fck", -1, NULL, MSTPCR1, 17, 0), 113 [MSTP117] = SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0),
107 SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0), 114 [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
108 SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0), 115 [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
109 SH_CLK_MSTP32("gdta_fck", -1, NULL, MSTPCR1, 0, 0), 116 [MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
117};
118
119#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
120
121static struct clk_lookup lookups[] = {
122 /* main clocks */
123 CLKDEV_CON_ID("extal", &extal_clk),
124 CLKDEV_CON_ID("pll_clk", &pll_clk),
125
126 /* DIV4 clocks */
127 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
128 CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
129 CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]),
130 CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
131 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
132 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
133 CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
134 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
135
136 /* MSTP32 clocks */
137 {
138 /* SCIF5 */
139 .dev_id = "sh-sci.5",
140 .con_id = "sci_fck",
141 .clk = &mstp_clks[MSTP029],
142 }, {
143 /* SCIF4 */
144 .dev_id = "sh-sci.4",
145 .con_id = "sci_fck",
146 .clk = &mstp_clks[MSTP028],
147 }, {
148 /* SCIF3 */
149 .dev_id = "sh-sci.3",
150 .con_id = "sci_fck",
151 .clk = &mstp_clks[MSTP027],
152 }, {
153 /* SCIF2 */
154 .dev_id = "sh-sci.2",
155 .con_id = "sci_fck",
156 .clk = &mstp_clks[MSTP026],
157 }, {
158 /* SCIF1 */
159 .dev_id = "sh-sci.1",
160 .con_id = "sci_fck",
161 .clk = &mstp_clks[MSTP025],
162 }, {
163 /* SCIF0 */
164 .dev_id = "sh-sci.0",
165 .con_id = "sci_fck",
166 .clk = &mstp_clks[MSTP024],
167 },
168 CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
169 CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
170 CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
171 CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
172 CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
173 CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
174 {
175 /* TMU0 */
176 .dev_id = "sh_tmu.0",
177 .con_id = "tmu_fck",
178 .clk = &mstp_clks[MSTP008],
179 }, {
180 /* TMU1 */
181 .dev_id = "sh_tmu.1",
182 .con_id = "tmu_fck",
183 .clk = &mstp_clks[MSTP008],
184 }, {
185 /* TMU2 */
186 .dev_id = "sh_tmu.2",
187 .con_id = "tmu_fck",
188 .clk = &mstp_clks[MSTP008],
189 }, {
190 /* TMU3 */
191 .dev_id = "sh_tmu.3",
192 .con_id = "tmu_fck",
193 .clk = &mstp_clks[MSTP009],
194 }, {
195 /* TMU4 */
196 .dev_id = "sh_tmu.4",
197 .con_id = "tmu_fck",
198 .clk = &mstp_clks[MSTP009],
199 }, {
200 /* TMU5 */
201 .dev_id = "sh_tmu.5",
202 .con_id = "tmu_fck",
203 .clk = &mstp_clks[MSTP009],
204 },
205 CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
206 CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
207 CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
208 CLKDEV_CON_ID("ubc_fck", &mstp_clks[MSTP117]),
209 CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
210 CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
211 CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
110}; 212};
111 213
112int __init arch_clk_init(void) 214int __init arch_clk_init(void)
@@ -115,12 +217,14 @@ int __init arch_clk_init(void)
115 217
116 for (i = 0; i < ARRAY_SIZE(clks); i++) 218 for (i = 0; i < ARRAY_SIZE(clks); i++)
117 ret |= clk_register(clks[i]); 219 ret |= clk_register(clks[i]);
220 for (i = 0; i < ARRAY_SIZE(lookups); i++)
221 clkdev_add(&lookups[i]);
118 222
119 if (!ret) 223 if (!ret)
120 ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), 224 ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
121 &div4_table); 225 &div4_table);
122 if (!ret) 226 if (!ret)
123 ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); 227 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
124 228
125 return ret; 229 return ret;
126} 230}
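
In the sh7785 table above, several timer channels intentionally share one gate: sh_tmu.0 to sh_tmu.2 all map to MSTP008 and sh_tmu.3 to sh_tmu.5 to MSTP009, with the per-device disambiguation done by the dev_id field of the lookup entry. A small sketch of how a per-device lookup lands on the right entry (the helper below is illustrative and not the actual sh_tmu driver code):

#include <linux/clk.h>
#include <linux/platform_device.h>

/* For a platform device named "sh_tmu.1", clkdev matches both dev_id and
 * con_id, so this returns the MSTP008 clock shared by TMU channels 0-2. */
static struct clk *example_get_tmu_clock(struct platform_device *pdev)
{
	return clk_get(&pdev->dev, "tmu_fck");
}
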
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index a0e8869071ac..42e403be9076 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -3,11 +3,7 @@
3 * 3 *
4 * SH7786 support for the clock framework 4 * SH7786 support for the clock framework
5 * 5 *
6 * Copyright (C) 2008, 2009 Renesas Solutions Corp. 6 * Copyright (C) 2010 Paul Mundt
7 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
8 *
9 * Based on SH7785
10 * Copyright (C) 2007 Paul Mundt
11 * 7 *
12 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
@@ -15,127 +11,263 @@
15 */ 11 */
16#include <linux/init.h> 12#include <linux/init.h>
17#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/clkdev.h>
18#include <asm/clock.h> 17#include <asm/clock.h>
19#include <asm/freq.h> 18#include <asm/freq.h>
20#include <asm/io.h>
21 19
22static int ifc_divisors[] = { 1, 2, 4, 1 }; 20/*
23static int sfc_divisors[] = { 1, 1, 4, 1 }; 21 * Default rate for the root input clock, reset this with clk_set_rate()
24static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1, 22 * from the platform code.
25 24, 32, 1, 1, 1, 1, 1, 1 }; 23 */
26static int mfc_divisors[] = { 1, 1, 4, 1 }; 24static struct clk extal_clk = {
27static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1, 25 .rate = 33333333,
28 24, 32, 1, 48, 1, 1, 1, 1 }; 26};
29 27
30static void master_clk_init(struct clk *clk) 28static unsigned long pll_recalc(struct clk *clk)
31{ 29{
32 clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f]; 30 int multiplier;
33}
34 31
35static struct clk_ops sh7786_master_clk_ops = { 32 /*
36 .init = master_clk_init, 33 * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
37}; 34 * while modes 3, 4, and 5 use an x32.
35 */
36 multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
38 37
39static unsigned long module_clk_recalc(struct clk *clk) 38 return clk->parent->rate * multiplier;
40{
41 int idx = (ctrl_inl(FRQMR1) & 0x000f);
42 return clk->parent->rate / pfc_divisors[idx];
43} 39}
44 40
45static struct clk_ops sh7786_module_clk_ops = { 41static struct clk_ops pll_clk_ops = {
46 .recalc = module_clk_recalc, 42 .recalc = pll_recalc,
47}; 43};
48 44
49static unsigned long bus_clk_recalc(struct clk *clk) 45static struct clk pll_clk = {
50{ 46 .ops = &pll_clk_ops,
51 int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f); 47 .parent = &extal_clk,
52 return clk->parent->rate / bfc_divisors[idx]; 48 .flags = CLK_ENABLE_ON_INIT,
53} 49};
54 50
55static struct clk_ops sh7786_bus_clk_ops = { 51static struct clk *clks[] = {
56 .recalc = bus_clk_recalc, 52 &extal_clk,
53 &pll_clk,
57}; 54};
58 55
59static unsigned long cpu_clk_recalc(struct clk *clk) 56static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
60{ 57 24, 32, 36, 48 };
61 int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003);
62 return clk->parent->rate / ifc_divisors[idx];
63}
64 58
65static struct clk_ops sh7786_cpu_clk_ops = { 59static struct clk_div_mult_table div4_div_mult_table = {
66 .recalc = cpu_clk_recalc, 60 .divisors = div2,
61 .nr_divisors = ARRAY_SIZE(div2),
67}; 62};
68 63
69static struct clk_ops *sh7786_clk_ops[] = { 64static struct clk_div4_table div4_table = {
70 &sh7786_master_clk_ops, 65 .div_mult_table = &div4_div_mult_table,
71 &sh7786_module_clk_ops,
72 &sh7786_bus_clk_ops,
73 &sh7786_cpu_clk_ops,
74}; 66};
75 67
76void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 68enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
77{
78 if (idx < ARRAY_SIZE(sh7786_clk_ops))
79 *ops = sh7786_clk_ops[idx];
80}
81 69
82static unsigned long shyway_clk_recalc(struct clk *clk) 70#define DIV4(_bit, _mask, _flags) \
83{ 71 SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
84 int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003);
85 return clk->parent->rate / sfc_divisors[idx];
86}
87 72
88static struct clk_ops sh7786_shyway_clk_ops = { 73struct clk div4_clks[DIV4_NR] = {
89 .recalc = shyway_clk_recalc, 74 [DIV4_P] = DIV4(0, 0x0b40, 0),
75 [DIV4_DU] = DIV4(4, 0x0010, 0),
76 [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
77 [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
78 [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
79 [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
90}; 80};
91 81
92static struct clk sh7786_shyway_clk = { 82#define MSTPCR0 0xffc40030
93 .name = "shyway_clk", 83#define MSTPCR1 0xffc40034
94 .flags = CLK_ENABLE_ON_INIT,
95 .ops = &sh7786_shyway_clk_ops,
96};
97 84
98static unsigned long ddr_clk_recalc(struct clk *clk) 85enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
99{ 86 MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
100 int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003); 87 MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
101 return clk->parent->rate / mfc_divisors[idx]; 88 MSTP005, MSTP004, MSTP002,
102} 89 MSTP112, MSTP110, MSTP109, MSTP108,
90 MSTP105, MSTP104, MSTP103, MSTP102,
91 MSTP_NR };
103 92
104static struct clk_ops sh7786_ddr_clk_ops = { 93static struct clk mstp_clks[MSTP_NR] = {
105 .recalc = ddr_clk_recalc, 94 /* MSTPCR0 */
106}; 95 [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
96 [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
97 [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
98 [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
99 [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
100 [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
101 [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
102 [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
103 [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
104 [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
105 [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
106 [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
107 [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
108 [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
109 [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
110 [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
111 [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
112 [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
113 [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
114 [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
115 [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
107 116
108static struct clk sh7786_ddr_clk = { 117 /* MSTPCR1 */
109 .name = "ddr_clk", 118 [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
110 .flags = CLK_ENABLE_ON_INIT, 119 [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
111 .ops = &sh7786_ddr_clk_ops, 120 [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
121 [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
122 [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
123 [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
124 [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
125 [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
112}; 126};
113 127
114/* 128#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
115 * Additional SH7786-specific on-chip clocks that aren't already part of the 129
116 * clock framework 130static struct clk_lookup lookups[] = {
117 */ 131 /* main clocks */
118static struct clk *sh7786_onchip_clocks[] = { 132 CLKDEV_CON_ID("extal", &extal_clk),
119 &sh7786_shyway_clk, 133 CLKDEV_CON_ID("pll_clk", &pll_clk),
120 &sh7786_ddr_clk, 134
135 /* DIV4 clocks */
136 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
137 CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
138 CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
139 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
140 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
141 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
142
143 /* MSTP32 clocks */
144 {
145 /* SCIF5 */
146 .dev_id = "sh-sci.5",
147 .con_id = "sci_fck",
148 .clk = &mstp_clks[MSTP029],
149 }, {
150 /* SCIF4 */
151 .dev_id = "sh-sci.4",
152 .con_id = "sci_fck",
153 .clk = &mstp_clks[MSTP028],
154 }, {
155 /* SCIF3 */
156 .dev_id = "sh-sci.3",
157 .con_id = "sci_fck",
158 .clk = &mstp_clks[MSTP027],
159 }, {
160 /* SCIF2 */
161 .dev_id = "sh-sci.2",
162 .con_id = "sci_fck",
163 .clk = &mstp_clks[MSTP026],
164 }, {
165 /* SCIF1 */
166 .dev_id = "sh-sci.1",
167 .con_id = "sci_fck",
168 .clk = &mstp_clks[MSTP025],
169 }, {
170 /* SCIF0 */
171 .dev_id = "sh-sci.0",
172 .con_id = "sci_fck",
173 .clk = &mstp_clks[MSTP024],
174 },
175 CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
176 CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
177 CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
178 CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
179 CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
180 CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
181 CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
182 CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
183 {
184 /* TMU0 */
185 .dev_id = "sh_tmu.0",
186 .con_id = "tmu_fck",
187 .clk = &mstp_clks[MSTP008],
188 }, {
189 /* TMU1 */
190 .dev_id = "sh_tmu.1",
191 .con_id = "tmu_fck",
192 .clk = &mstp_clks[MSTP008],
193 }, {
194 /* TMU2 */
195 .dev_id = "sh_tmu.2",
196 .con_id = "tmu_fck",
197 .clk = &mstp_clks[MSTP008],
198 }, {
199 /* TMU3 */
200 .dev_id = "sh_tmu.3",
201 .con_id = "tmu_fck",
202 .clk = &mstp_clks[MSTP009],
203 }, {
204 /* TMU4 */
205 .dev_id = "sh_tmu.4",
206 .con_id = "tmu_fck",
207 .clk = &mstp_clks[MSTP009],
208 }, {
209 /* TMU5 */
210 .dev_id = "sh_tmu.5",
211 .con_id = "tmu_fck",
212 .clk = &mstp_clks[MSTP009],
213 }, {
214 /* TMU6 */
215 .dev_id = "sh_tmu.6",
216 .con_id = "tmu_fck",
217 .clk = &mstp_clks[MSTP010],
218 }, {
219 /* TMU7 */
220 .dev_id = "sh_tmu.7",
221 .con_id = "tmu_fck",
222 .clk = &mstp_clks[MSTP010],
223 }, {
224 /* TMU8 */
225 .dev_id = "sh_tmu.8",
226 .con_id = "tmu_fck",
227 .clk = &mstp_clks[MSTP010],
228 }, {
229 /* TMU9 */
230 .dev_id = "sh_tmu.9",
231 .con_id = "tmu_fck",
232 .clk = &mstp_clks[MSTP011],
233 }, {
234 /* TMU10 */
235 .dev_id = "sh_tmu.10",
236 .con_id = "tmu_fck",
237 .clk = &mstp_clks[MSTP011],
238 }, {
239 /* TMU11 */
240 .dev_id = "sh_tmu.11",
241 .con_id = "tmu_fck",
242 .clk = &mstp_clks[MSTP011],
243 },
244 CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
245 CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
246 CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
247 CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
248 CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
249 CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
250 CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
251 CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
252 CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
253 CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
254 CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
121}; 255};
122 256
123int __init arch_clk_init(void) 257int __init arch_clk_init(void)
124{ 258{
125 struct clk *clk;
126 int i, ret = 0; 259 int i, ret = 0;
127 260
128 cpg_clk_init(); 261 for (i = 0; i < ARRAY_SIZE(clks); i++)
129 262 ret |= clk_register(clks[i]);
130 clk = clk_get(NULL, "master_clk"); 263 for (i = 0; i < ARRAY_SIZE(lookups); i++)
131 for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) { 264 clkdev_add(&lookups[i]);
132 struct clk *clkp = sh7786_onchip_clocks[i];
133
134 clkp->parent = clk;
135 ret |= clk_register(clkp);
136 }
137 265
138 clk_put(clk); 266 if (!ret)
267 ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
268 &div4_table);
269 if (!ret)
270 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
139 271
140 return ret; 272 return ret;
141} 273}
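
The clkdev lookup table added above is what lets drivers resolve these clocks by device and connection ID instead of by global clock name. As a minimal consumer-side sketch (not part of the patch; example_scif_clk_setup() is a hypothetical helper, the calls are the generic <linux/clk.h> consumer API), this is roughly how the SCIF0 function clock registered under "sh-sci.0"/"sci_fck" would be picked up:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: resolve and probe the SCIF0 function clock. */
static int example_scif_clk_setup(struct device *dev)
{
	struct clk *fck;

	fck = clk_get(dev, "sci_fck");	/* matches the MSTP024 lookup above */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);		/* clears the module stop bit */
	dev_info(dev, "sci_fck at %lu Hz\n", clk_get_rate(fck));
	clk_disable(fck);
	clk_put(fck);

	return 0;
}

The same pattern applies to every entry in the table; the SCIF and TMU entries only differ in which MSTP bit backs them.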
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 23c27d32d982..1afdb93b8ccb 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2006-2007 Renesas Technology Corp. 6 * Copyright (C) 2006-2007 Renesas Technology Corp.
7 * Copyright (C) 2006-2007 Renesas Solutions Corp. 7 * Copyright (C) 2006-2007 Renesas Solutions Corp.
8 * Copyright (C) 2006-2007 Paul Mundt 8 * Copyright (C) 2006-2010 Paul Mundt
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive 11 * License. See the file "COPYING" in the main directory of this archive
@@ -13,116 +13,184 @@
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/io.h>
17#include <linux/clkdev.h>
16#include <asm/clock.h> 18#include <asm/clock.h>
17#include <asm/freq.h> 19#include <asm/freq.h>
18#include <asm/io.h>
19
20static int ifc_divisors[] = { 1, 2, 4 ,6 };
21static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 18, 24, 32, 36, 48 };
22static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 18, 24, 32, 36, 48 };
23static int cfc_divisors[] = { 1, 1, 4, 6 };
24
25#define IFC_POS 28
26#define IFC_MSK 0x0003
27#define BFC_MSK 0x000f
28#define PFC_MSK 0x000f
29#define CFC_MSK 0x0003
30#define BFC_POS 16
31#define PFC_POS 0
32#define CFC_POS 20
33
34static void master_clk_init(struct clk *clk)
35{
36 clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK];
37}
38 20
39static struct clk_ops shx3_master_clk_ops = { 21/*
40 .init = master_clk_init, 22 * Default rate for the root input clock, reset this with clk_set_rate()
23 * from the platform code.
24 */
25static struct clk extal_clk = {
26 .rate = 16666666,
41}; 27};
42 28
43static unsigned long module_clk_recalc(struct clk *clk) 29static unsigned long pll_recalc(struct clk *clk)
44{ 30{
45 int idx = ((ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK); 31 /* PLL1 has a fixed x72 multiplier. */
46 return clk->parent->rate / pfc_divisors[idx]; 32 return clk->parent->rate * 72;
47} 33}
48 34
49static struct clk_ops shx3_module_clk_ops = { 35static struct clk_ops pll_clk_ops = {
50 .recalc = module_clk_recalc, 36 .recalc = pll_recalc,
51}; 37};
52 38
53static unsigned long bus_clk_recalc(struct clk *clk) 39static struct clk pll_clk = {
54{ 40 .ops = &pll_clk_ops,
55 int idx = ((ctrl_inl(FRQCR) >> BFC_POS) & BFC_MSK); 41 .parent = &extal_clk,
56 return clk->parent->rate / bfc_divisors[idx]; 42 .flags = CLK_ENABLE_ON_INIT,
57} 43};
58 44
59static struct clk_ops shx3_bus_clk_ops = { 45static struct clk *clks[] = {
60 .recalc = bus_clk_recalc, 46 &extal_clk,
47 &pll_clk,
61}; 48};
62 49
63static unsigned long cpu_clk_recalc(struct clk *clk) 50static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
64{ 51 24, 32, 36, 48 };
65 int idx = ((ctrl_inl(FRQCR) >> IFC_POS) & IFC_MSK);
66 return clk->parent->rate / ifc_divisors[idx];
67}
68 52
69static struct clk_ops shx3_cpu_clk_ops = { 53static struct clk_div_mult_table div4_div_mult_table = {
70 .recalc = cpu_clk_recalc, 54 .divisors = div2,
55 .nr_divisors = ARRAY_SIZE(div2),
71}; 56};
72 57
73static struct clk_ops *shx3_clk_ops[] = { 58static struct clk_div4_table div4_table = {
74 &shx3_master_clk_ops, 59 .div_mult_table = &div4_div_mult_table,
75 &shx3_module_clk_ops,
76 &shx3_bus_clk_ops,
77 &shx3_cpu_clk_ops,
78}; 60};
79 61
80void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 62enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
81{
82 if (idx < ARRAY_SIZE(shx3_clk_ops))
83 *ops = shx3_clk_ops[idx];
84}
85 63
86static unsigned long shyway_clk_recalc(struct clk *clk) 64#define DIV4(_bit, _mask, _flags) \
87{ 65 SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
88 int idx = ((ctrl_inl(FRQCR) >> CFC_POS) & CFC_MSK);
89 return clk->parent->rate / cfc_divisors[idx];
90}
91 66
92static struct clk_ops shx3_shyway_clk_ops = { 67struct clk div4_clks[DIV4_NR] = {
93 .recalc = shyway_clk_recalc, 68 [DIV4_P] = DIV4(0, 0x0f80, 0),
69 [DIV4_SHA] = DIV4(4, 0x0ff0, 0),
70 [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
71 [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
72 [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
73 [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
94}; 74};
95 75
96static struct clk shx3_shyway_clk = { 76#define MSTPCR0 0xffc00030
97 .name = "shyway_clk", 77#define MSTPCR1 0xffc00034
98 .flags = CLK_ENABLE_ON_INIT, 78
99 .ops = &shx3_shyway_clk_ops, 79enum { MSTP027, MSTP026, MSTP025, MSTP024,
80 MSTP009, MSTP008, MSTP003, MSTP002,
81 MSTP001, MSTP000, MSTP119, MSTP105,
82 MSTP104, MSTP_NR };
83
84static struct clk mstp_clks[MSTP_NR] = {
85 /* MSTPCR0 */
86 [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
87 [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
88 [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
89 [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
90 [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
91 [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
92 [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
93 [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
94 [MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
95 [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
96
97 /* MSTPCR1 */
98 [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
99 [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
100 [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
100}; 101};
101 102
102/* 103#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
103 * Additional SHx3-specific on-chip clocks that aren't already part of the 104
104 * clock framework 105static struct clk_lookup lookups[] = {
105 */ 106 /* main clocks */
106static struct clk *shx3_onchip_clocks[] = { 107 CLKDEV_CON_ID("extal", &extal_clk),
107 &shx3_shyway_clk, 108 CLKDEV_CON_ID("pll_clk", &pll_clk),
109
110 /* DIV4 clocks */
111 CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
112 CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
113 CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
114 CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
115 CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
116 CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
117
118 /* MSTP32 clocks */
119 {
120 /* SCIF3 */
121 .dev_id = "sh-sci.3",
122 .con_id = "sci_fck",
123 .clk = &mstp_clks[MSTP027],
124 }, {
125 /* SCIF2 */
126 .dev_id = "sh-sci.2",
127 .con_id = "sci_fck",
128 .clk = &mstp_clks[MSTP026],
129 }, {
130 /* SCIF1 */
131 .dev_id = "sh-sci.1",
132 .con_id = "sci_fck",
133 .clk = &mstp_clks[MSTP025],
134 }, {
135 /* SCIF0 */
136 .dev_id = "sh-sci.0",
137 .con_id = "sci_fck",
138 .clk = &mstp_clks[MSTP024],
139 },
140 CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
141 CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
142 CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
143 CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
144 {
145 /* TMU0 */
146 .dev_id = "sh_tmu.0",
147 .con_id = "tmu_fck",
148 .clk = &mstp_clks[MSTP008],
149 }, {
150 /* TMU1 */
151 .dev_id = "sh_tmu.1",
152 .con_id = "tmu_fck",
153 .clk = &mstp_clks[MSTP008],
154 }, {
155 /* TMU2 */
156 .dev_id = "sh_tmu.2",
157 .con_id = "tmu_fck",
158 .clk = &mstp_clks[MSTP008],
159 }, {
160 /* TMU3 */
161 .dev_id = "sh_tmu.3",
162 .con_id = "tmu_fck",
163 .clk = &mstp_clks[MSTP009],
164 }, {
165 /* TMU4 */
166 .dev_id = "sh_tmu.4",
167 .con_id = "tmu_fck",
168 .clk = &mstp_clks[MSTP009],
169 }, {
170 /* TMU5 */
171 .dev_id = "sh_tmu.5",
172 .con_id = "tmu_fck",
173 .clk = &mstp_clks[MSTP009],
174 },
175 CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
176 CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
177 CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
108}; 178};
109 179
110int __init arch_clk_init(void) 180int __init arch_clk_init(void)
111{ 181{
112 struct clk *clk;
113 int i, ret = 0; 182 int i, ret = 0;
114 183
115 cpg_clk_init(); 184 for (i = 0; i < ARRAY_SIZE(clks); i++)
116 185 ret |= clk_register(clks[i]);
117 clk = clk_get(NULL, "master_clk"); 186 for (i = 0; i < ARRAY_SIZE(lookups); i++)
118 for (i = 0; i < ARRAY_SIZE(shx3_onchip_clocks); i++) { 187 clkdev_add(&lookups[i]);
119 struct clk *clkp = shx3_onchip_clocks[i];
120
121 clkp->parent = clk;
122 ret |= clk_register(clkp);
123 }
124 188
125 clk_put(clk); 189 if (!ret)
190 ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
191 &div4_table);
192 if (!ret)
193 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
126 194
127 return ret; 195 return ret;
128} 196}
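
The new root clock carries the comment that its 16.666 MHz default should be overridden with clk_set_rate() from platform code. A sketch of what that looks like on the board side (example_board_clk_init() and the 33.333 MHz value are made up for illustration; the "extal" lookup name comes from the table above):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static void __init example_board_clk_init(void)
{
	struct clk *extal = clk_get(NULL, "extal");

	if (IS_ERR(extal))
		return;

	/* Board feeds a different crystal than the 16.666 MHz default. */
	clk_set_rate(extal, 33333333);
	clk_put(extal);
}

Because pll_clk and the DIV4 clocks hang off extal, every derived rate can then be recalculated from the real input frequency.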
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
new file mode 100644
index 000000000000..a288b5d92341
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
@@ -0,0 +1,106 @@
1/*
2 * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
3 *
4 * SH7722 hardware block support
5 *
6 * Copyright (C) 2009 Magnus Damm
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <asm/suspend.h>
25#include <asm/hwblk.h>
26#include <cpu/sh7722.h>
27
28/* SH7722 registers */
29#define MSTPCR0 0xa4150030
30#define MSTPCR1 0xa4150034
31#define MSTPCR2 0xa4150038
32
33/* SH7722 Power Domains */
34enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
35static struct hwblk_area sh7722_hwblk_area[] = {
36 [CORE_AREA] = HWBLK_AREA(0, 0),
37 [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
38 [SUB_AREA] = HWBLK_AREA(0, 0),
39};
40
41/* Table mapping HWBLK to Module Stop Bit and Power Domain */
42static struct hwblk sh7722_hwblk[HWBLK_NR] = {
43 [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
44 [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
45 [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
46 [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA),
47 [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA),
48 [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
49 [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
50 [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
51 [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
52 [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
53 [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA),
54 [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
55 [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
56 [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
57 [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA),
58 [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA),
59 [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA),
60 [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA),
61 [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
62 [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
63
64 [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
65 [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
66
67 [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
68 [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
69 [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA),
70 [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA),
71 [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
72 [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA),
73 [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA),
74 [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
75 [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
76 [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
77 [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
78 [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
79 [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
80 [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
81 [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
82 [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
83};
84
85static struct hwblk_info sh7722_hwblk_info = {
86 .areas = sh7722_hwblk_area,
87 .nr_areas = ARRAY_SIZE(sh7722_hwblk_area),
88 .hwblks = sh7722_hwblk,
89 .nr_hwblks = ARRAY_SIZE(sh7722_hwblk),
90};
91
92int arch_hwblk_sleep_mode(void)
93{
94 if (!sh7722_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
95 return SUSP_SH_STANDBY | SUSP_SH_SF;
96
97 if (!sh7722_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
98 return SUSP_SH_SLEEP | SUSP_SH_SF;
99
100 return SUSP_SH_SLEEP;
101}
102
103int __init arch_hwblk_init(void)
104{
105 return hwblk_register(&sh7722_hwblk_info);
106}
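
arch_hwblk_sleep_mode() above encodes a simple policy: pick the deepest sleep state whose power area has no hardware blocks in use, where CORE_AREA_BM is the bus-master child area of CORE_AREA. A standalone sketch of the same decision with hypothetical plain types (pick_sleep_mode() and the enum are made up; the kernel code returns SUSP_SH_* flag combinations instead):

#include <stdio.h>

enum sleep_mode { MODE_SLEEP, MODE_SLEEP_SF, MODE_STANDBY_SF };

/* Mirror of the usage-count checks in arch_hwblk_sleep_mode(). */
static enum sleep_mode pick_sleep_mode(unsigned int core_usage,
				       unsigned int core_bm_usage)
{
	if (!core_usage)
		return MODE_STANDBY_SF;	/* whole core area idle */
	if (!core_bm_usage)
		return MODE_SLEEP_SF;	/* only the bus-master sub-area idle */
	return MODE_SLEEP;		/* something is still active */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_sleep_mode(0, 0),	/* standby + self-refresh */
	       pick_sleep_mode(2, 0),	/* sleep + self-refresh */
	       pick_sleep_mode(2, 1));	/* plain sleep */
	return 0;
}

The SH7723 and SH7724 variants that follow use the same three-way split and differ only in how their HWBLK tables map module stop bits onto the power areas.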
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
new file mode 100644
index 000000000000..a7f4684d2032
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
@@ -0,0 +1,117 @@
1/*
2 * arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
3 *
4 * SH7723 hardware block support
5 *
6 * Copyright (C) 2009 Magnus Damm
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <asm/suspend.h>
25#include <asm/hwblk.h>
26#include <cpu/sh7723.h>
27
28/* SH7723 registers */
29#define MSTPCR0 0xa4150030
30#define MSTPCR1 0xa4150034
31#define MSTPCR2 0xa4150038
32
33/* SH7723 Power Domains */
34enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
35static struct hwblk_area sh7723_hwblk_area[] = {
36 [CORE_AREA] = HWBLK_AREA(0, 0),
37 [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
38 [SUB_AREA] = HWBLK_AREA(0, 0),
39};
40
41/* Table mapping HWBLK to Module Stop Bit and Power Domain */
42static struct hwblk sh7723_hwblk[HWBLK_NR] = {
43 [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
44 [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
45 [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
46 [HWBLK_L2C] = HWBLK(MSTPCR0, 28, CORE_AREA),
47 [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
48 [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
49 [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
50 [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
51 [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
52 [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
53 [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
54 [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
55 [HWBLK_SUBC] = HWBLK(MSTPCR0, 16, CORE_AREA),
56 [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
57 [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
58 [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
59 [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
60 [HWBLK_TMU1] = HWBLK(MSTPCR0, 11, CORE_AREA),
61 [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
62 [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
63 [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
64 [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
65 [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
66 [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
67 [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
68 [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
69 [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
70 [HWBLK_MERAM] = HWBLK(MSTPCR0, 0, CORE_AREA),
71
72 [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
73 [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
74
75 [HWBLK_ATAPI] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
76 [HWBLK_ADC] = HWBLK(MSTPCR2, 27, CORE_AREA),
77 [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
78 [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
79 [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
80 [HWBLK_ICB] = HWBLK(MSTPCR2, 21, CORE_AREA_BM),
81 [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
82 [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
83 [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
84 [HWBLK_USB] = HWBLK(MSTPCR2, 11, CORE_AREA),
85 [HWBLK_2DG] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
86 [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
87 [HWBLK_VEU2H1] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
88 [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
89 [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
90 [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
91 [HWBLK_VEU2H0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
92 [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
93 [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
94};
95
96static struct hwblk_info sh7723_hwblk_info = {
97 .areas = sh7723_hwblk_area,
98 .nr_areas = ARRAY_SIZE(sh7723_hwblk_area),
99 .hwblks = sh7723_hwblk,
100 .nr_hwblks = ARRAY_SIZE(sh7723_hwblk),
101};
102
103int arch_hwblk_sleep_mode(void)
104{
105 if (!sh7723_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
106 return SUSP_SH_STANDBY | SUSP_SH_SF;
107
108 if (!sh7723_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
109 return SUSP_SH_SLEEP | SUSP_SH_SF;
110
111 return SUSP_SH_SLEEP;
112}
113
114int __init arch_hwblk_init(void)
115{
116 return hwblk_register(&sh7723_hwblk_info);
117}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
new file mode 100644
index 000000000000..1613ad6013c3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
@@ -0,0 +1,121 @@
1/*
2 * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
3 *
4 * SH7724 hardware block support
5 *
6 * Copyright (C) 2009 Magnus Damm
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <asm/suspend.h>
25#include <asm/hwblk.h>
26#include <cpu/sh7724.h>
27
28/* SH7724 registers */
29#define MSTPCR0 0xa4150030
30#define MSTPCR1 0xa4150034
31#define MSTPCR2 0xa4150038
32
33/* SH7724 Power Domains */
34enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
35static struct hwblk_area sh7724_hwblk_area[] = {
36 [CORE_AREA] = HWBLK_AREA(0, 0),
37 [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
38 [SUB_AREA] = HWBLK_AREA(0, 0),
39};
40
41/* Table mapping HWBLK to Module Stop Bit and Power Domain */
42static struct hwblk sh7724_hwblk[HWBLK_NR] = {
43 [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
44 [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
45 [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
46 [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA),
47 [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
48 [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA),
49 [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
50 [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
51 [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
52 [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
53 [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
54 [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
55 [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
56 [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
57 [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
58 [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
59 [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
60 [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA),
61 [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
62 [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
63 [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
64 [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
65 [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
66 [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
67 [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
68 [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
69
70 [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA),
71 [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA),
72 [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA),
73 [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA),
74
75 [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA),
76 [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
77 [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM),
78 [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
79 [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
80 [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
81 [HWBLK_USB1] = HWBLK(MSTPCR2, 21, CORE_AREA),
82 [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA),
83 [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM),
84 [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
85 [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
86 [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM),
87 [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM),
88 [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM),
89 [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
90 [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
91 [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
92 [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
93 [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
94 [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
95 [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
96 [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
97 [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
98};
99
100static struct hwblk_info sh7724_hwblk_info = {
101 .areas = sh7724_hwblk_area,
102 .nr_areas = ARRAY_SIZE(sh7724_hwblk_area),
103 .hwblks = sh7724_hwblk,
104 .nr_hwblks = ARRAY_SIZE(sh7724_hwblk),
105};
106
107int arch_hwblk_sleep_mode(void)
108{
109 if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
110 return SUSP_SH_STANDBY | SUSP_SH_SF;
111
112 if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
113 return SUSP_SH_SLEEP | SUSP_SH_SF;
114
115 return SUSP_SH_SLEEP;
116}
117
118int __init arch_hwblk_init(void)
119{
120 return hwblk_register(&sh7724_hwblk_info);
121}
diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
new file mode 100644
index 000000000000..78c971486b4e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
@@ -0,0 +1,34 @@
1/*
2 * Shared support for SH-X3 interrupt controllers.
3 *
4 * Copyright (C) 2009 - 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/irq.h>
11#include <linux/io.h>
12#include <linux/init.h>
13
14#define INTACK 0xfe4100b8
15#define INTACKCLR 0xfe4100bc
16#define INTC_USERIMASK 0xfe411000
17
18#ifdef CONFIG_INTC_BALANCING
19unsigned int irq_lookup(unsigned int irq)
20{
21 return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
22}
23
24void irq_finish(unsigned int irq)
25{
26 __raw_writel(irq2evt(irq), INTACKCLR);
27}
28#endif
29
30static int __init shx3_irq_setup(void)
31{
32 return register_intc_userimask(INTC_USERIMASK);
33}
34arch_initcall(shx3_irq_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
new file mode 100644
index 000000000000..17e6bebfede0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -0,0 +1,287 @@
1/*
2 * Performance events support for SH-4A performance counters
3 *
4 * Copyright (C) 2009, 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/io.h>
13#include <linux/irq.h>
14#include <linux/perf_event.h>
15#include <asm/processor.h>
16
17#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
18#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
19
20#define CCBR_CIT_MASK (0x7ff << 6)
21#define CCBR_DUC (1 << 3)
22#define CCBR_CMDS (1 << 1)
23#define CCBR_PPCE (1 << 0)
24
25#ifdef CONFIG_CPU_SHX3
26/*
27 * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
 28 * and PMCTR locations remain tentatively constant. This change remains
29 * wholly undocumented, and was simply found through trial and error.
30 *
31 * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
32 * it's unclear when this ceased to be the case. For now we always use
33 * the new location (if future parts keep up with this trend then
34 * scanning for them at runtime also remains a viable option.)
35 *
36 * The gap in the register space also suggests that there are other
37 * undocumented counters, so this will need to be revisited at a later
38 * point in time.
39 */
40#define PPC_PMCAT 0xfc100240
41#else
42#define PPC_PMCAT 0xfc100080
43#endif
44
45#define PMCAT_OVF3 (1 << 27)
46#define PMCAT_CNN3 (1 << 26)
47#define PMCAT_CLR3 (1 << 25)
48#define PMCAT_OVF2 (1 << 19)
49#define PMCAT_CLR2 (1 << 17)
50#define PMCAT_OVF1 (1 << 11)
51#define PMCAT_CNN1 (1 << 10)
52#define PMCAT_CLR1 (1 << 9)
53#define PMCAT_OVF0 (1 << 3)
54#define PMCAT_CLR0 (1 << 1)
55
56static struct sh_pmu sh4a_pmu;
57
58/*
59 * Supported raw event codes:
60 *
61 * Event Code Description
62 * ---------- -----------
63 *
64 * 0x0000 number of elapsed cycles
65 * 0x0200 number of elapsed cycles in privileged mode
66 * 0x0280 number of elapsed cycles while SR.BL is asserted
67 * 0x0202 instruction execution
68 * 0x0203 instruction execution in parallel
69 * 0x0204 number of unconditional branches
70 * 0x0208 number of exceptions
71 * 0x0209 number of interrupts
72 * 0x0220 UTLB miss caused by instruction fetch
73 * 0x0222 UTLB miss caused by operand access
74 * 0x02a0 number of ITLB misses
75 * 0x0028 number of accesses to instruction memories
76 * 0x0029 number of accesses to instruction cache
77 * 0x002a instruction cache miss
 78 * 0x022e number of accesses to instruction X/Y memory
79 * 0x0030 number of reads to operand memories
80 * 0x0038 number of writes to operand memories
81 * 0x0031 number of operand cache read accesses
82 * 0x0039 number of operand cache write accesses
83 * 0x0032 operand cache read miss
84 * 0x003a operand cache write miss
85 * 0x0236 number of reads to operand X/Y memory
86 * 0x023e number of writes to operand X/Y memory
87 * 0x0237 number of reads to operand U memory
88 * 0x023f number of writes to operand U memory
89 * 0x0337 number of U memory read buffer misses
90 * 0x02b4 number of wait cycles due to operand read access
91 * 0x02bc number of wait cycles due to operand write access
92 * 0x0033 number of wait cycles due to operand cache read miss
93 * 0x003b number of wait cycles due to operand cache write miss
94 */
95
96/*
97 * Special reserved bits used by hardware emulators, read values will
98 * vary, but writes must always be 0.
99 */
100#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
101
102static const int sh4a_general_events[] = {
103 [PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
104 [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
105 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
106 [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
107 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
108 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
109 [PERF_COUNT_HW_BUS_CYCLES] = -1,
110};
111
112#define C(x) PERF_COUNT_HW_CACHE_##x
113
114static const int sh4a_cache_events
115 [PERF_COUNT_HW_CACHE_MAX]
116 [PERF_COUNT_HW_CACHE_OP_MAX]
117 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
118{
119 [ C(L1D) ] = {
120 [ C(OP_READ) ] = {
121 [ C(RESULT_ACCESS) ] = 0x0031,
122 [ C(RESULT_MISS) ] = 0x0032,
123 },
124 [ C(OP_WRITE) ] = {
125 [ C(RESULT_ACCESS) ] = 0x0039,
126 [ C(RESULT_MISS) ] = 0x003a,
127 },
128 [ C(OP_PREFETCH) ] = {
129 [ C(RESULT_ACCESS) ] = 0,
130 [ C(RESULT_MISS) ] = 0,
131 },
132 },
133
134 [ C(L1I) ] = {
135 [ C(OP_READ) ] = {
136 [ C(RESULT_ACCESS) ] = 0x0029,
137 [ C(RESULT_MISS) ] = 0x002a,
138 },
139 [ C(OP_WRITE) ] = {
140 [ C(RESULT_ACCESS) ] = -1,
141 [ C(RESULT_MISS) ] = -1,
142 },
143 [ C(OP_PREFETCH) ] = {
144 [ C(RESULT_ACCESS) ] = 0,
145 [ C(RESULT_MISS) ] = 0,
146 },
147 },
148
149 [ C(LL) ] = {
150 [ C(OP_READ) ] = {
151 [ C(RESULT_ACCESS) ] = 0x0030,
152 [ C(RESULT_MISS) ] = 0,
153 },
154 [ C(OP_WRITE) ] = {
155 [ C(RESULT_ACCESS) ] = 0x0038,
156 [ C(RESULT_MISS) ] = 0,
157 },
158 [ C(OP_PREFETCH) ] = {
159 [ C(RESULT_ACCESS) ] = 0,
160 [ C(RESULT_MISS) ] = 0,
161 },
162 },
163
164 [ C(DTLB) ] = {
165 [ C(OP_READ) ] = {
166 [ C(RESULT_ACCESS) ] = 0x0222,
167 [ C(RESULT_MISS) ] = 0x0220,
168 },
169 [ C(OP_WRITE) ] = {
170 [ C(RESULT_ACCESS) ] = 0,
171 [ C(RESULT_MISS) ] = 0,
172 },
173 [ C(OP_PREFETCH) ] = {
174 [ C(RESULT_ACCESS) ] = 0,
175 [ C(RESULT_MISS) ] = 0,
176 },
177 },
178
179 [ C(ITLB) ] = {
180 [ C(OP_READ) ] = {
181 [ C(RESULT_ACCESS) ] = 0,
182 [ C(RESULT_MISS) ] = 0x02a0,
183 },
184 [ C(OP_WRITE) ] = {
185 [ C(RESULT_ACCESS) ] = -1,
186 [ C(RESULT_MISS) ] = -1,
187 },
188 [ C(OP_PREFETCH) ] = {
189 [ C(RESULT_ACCESS) ] = -1,
190 [ C(RESULT_MISS) ] = -1,
191 },
192 },
193
194 [ C(BPU) ] = {
195 [ C(OP_READ) ] = {
196 [ C(RESULT_ACCESS) ] = -1,
197 [ C(RESULT_MISS) ] = -1,
198 },
199 [ C(OP_WRITE) ] = {
200 [ C(RESULT_ACCESS) ] = -1,
201 [ C(RESULT_MISS) ] = -1,
202 },
203 [ C(OP_PREFETCH) ] = {
204 [ C(RESULT_ACCESS) ] = -1,
205 [ C(RESULT_MISS) ] = -1,
206 },
207 },
208};
209
210static int sh4a_event_map(int event)
211{
212 return sh4a_general_events[event];
213}
214
215static u64 sh4a_pmu_read(int idx)
216{
217 return __raw_readl(PPC_PMCTR(idx));
218}
219
220static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
221{
222 unsigned int tmp;
223
224 tmp = __raw_readl(PPC_CCBR(idx));
225 tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
226 __raw_writel(tmp, PPC_CCBR(idx));
227}
228
229static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
230{
231 unsigned int tmp;
232
233 tmp = __raw_readl(PPC_PMCAT);
234 tmp &= ~PMCAT_EMU_CLR_MASK;
235 tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
236 __raw_writel(tmp, PPC_PMCAT);
237
238 tmp = __raw_readl(PPC_CCBR(idx));
239 tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
240 __raw_writel(tmp, PPC_CCBR(idx));
241
242 __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
243}
244
245static void sh4a_pmu_disable_all(void)
246{
247 int i;
248
249 for (i = 0; i < sh4a_pmu.num_events; i++)
250 __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
251}
252
253static void sh4a_pmu_enable_all(void)
254{
255 int i;
256
257 for (i = 0; i < sh4a_pmu.num_events; i++)
258 __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
259}
260
261static struct sh_pmu sh4a_pmu = {
262 .name = "sh4a",
263 .num_events = 2,
264 .event_map = sh4a_event_map,
265 .max_events = ARRAY_SIZE(sh4a_general_events),
266 .raw_event_mask = 0x3ff,
267 .cache_events = &sh4a_cache_events,
268 .read = sh4a_pmu_read,
269 .disable = sh4a_pmu_disable,
270 .enable = sh4a_pmu_enable,
271 .disable_all = sh4a_pmu_disable_all,
272 .enable_all = sh4a_pmu_enable_all,
273};
274
275static int __init sh4a_pmu_init(void)
276{
277 /*
278 * Make sure this CPU actually has perf counters.
279 */
280 if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
281 pr_notice("HW perf events unsupported, software events only.\n");
282 return -ENODEV;
283 }
284
285 return register_sh_pmu(&sh4a_pmu);
286}
287early_initcall(sh4a_pmu_init);
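
The raw event codes listed in the header comment map straight onto the raw event path: the generic sh_pmu layer masks them with raw_event_mask and sh4a_pmu_enable() programs the result into CCBR. A userspace sketch of counting one of them (a standalone program, not part of the patch; it assumes a kernel with this PMU support and uses 0x0202, "instruction execution", from the table above):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* bypass event_map(), use the raw code */
	attr.config = 0x0202;		/* instruction execution */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}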
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
index cb9d07bd59f8..0688a7502f86 100644
--- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -278,6 +278,7 @@ enum {
278 HIZA8_LCDC, HIZA8_HIZ, 278 HIZA8_LCDC, HIZA8_HIZ,
279 HIZA7_LCDC, HIZA7_HIZ, 279 HIZA7_LCDC, HIZA7_HIZ,
280 HIZA6_LCDC, HIZA6_HIZ, 280 HIZA6_LCDC, HIZA6_HIZ,
281 HIZB4_SIUA, HIZB4_HIZ,
281 HIZB1_VIO, HIZB1_HIZ, 282 HIZB1_VIO, HIZB1_HIZ,
282 HIZB0_VIO, HIZB0_HIZ, 283 HIZB0_VIO, HIZB0_HIZ,
283 HIZC15_IRQ7, HIZC15_HIZ, 284 HIZC15_IRQ7, HIZC15_HIZ,
@@ -546,7 +547,7 @@ static pinmux_enum_t pinmux_data[] = {
546 PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, 547 PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
547 HIZB0_VIO, FOE_VIO_VD2), 548 HIZB0_VIO, FOE_VIO_VD2),
548 PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, 549 PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
549 HIZB1_VIO, HIZB1_VIO, FCE_VIO_HD2), 550 HIZB1_VIO, FCE_VIO_HD2),
550 PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, 551 PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
551 HIZB1_VIO, FRB_VIO_CLK2), 552 HIZB1_VIO, FRB_VIO_CLK2),
552 553
@@ -658,14 +659,14 @@ static pinmux_enum_t pinmux_data[] = {
658 PINMUX_DATA(SDHICLK_MARK, SDHICLK), 659 PINMUX_DATA(SDHICLK_MARK, SDHICLK),
659 660
660 /* SIU - Port A */ 661 /* SIU - Port A */
661 PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, SIUAOLR_SIOF1_SYNC), 662 PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
662 PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, SIUAOBT_SIOF1_SCK), 663 PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
663 PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, SIUAISLD_SIOF1_RXD), 664 PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
664 PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, SIUAILR_SIOF1_SS2), 665 PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
665 PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, SIUAIBT_SIOF1_SS1), 666 PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
666 PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, SIUAOSLD_SIOF1_TXD), 667 PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
667 PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIUMCKA, PTK0), 668 PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
668 PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, PTK0), 669 PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
669 670
670 /* SIU - Port B */ 671 /* SIU - Port B */
671 PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), 672 PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
@@ -1612,7 +1613,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
1612 0, 0, 1613 0, 0,
1613 0, 0, 1614 0, 0,
1614 0, 0, 1615 0, 0,
1615 0, 0, 1616 HIZB4_SIUA, HIZB4_HIZ,
1616 0, 0, 1617 0, 0,
1617 0, 0, 1618 0, 0,
1618 HIZB1_VIO, HIZB1_HIZ, 1619 HIZB1_VIO, HIZB1_HIZ,
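
With HIZB4_SIUA added to the SIU Port A entries, selecting any of those pin functions now also takes the HIZB4 field out of high impedance as part of the same pinmux_data entry. On the board side these functions are normally claimed through the pinmux GPIO layer; a sketch (example_board_setup_siua_pins() is hypothetical, and the GPIO_FN_* names are assumed to follow the usual <cpu/sh7722.h> naming for the marks above):

#include <linux/gpio.h>
#include <linux/init.h>
#include <cpu/sh7722.h>

static void __init example_board_setup_siua_pins(void)
{
	/* Each request now also drives HIZB4 out of high impedance. */
	gpio_request(GPIO_FN_SIUAOLR, NULL);
	gpio_request(GPIO_FN_SIUAOBT, NULL);
	gpio_request(GPIO_FN_SIUAOSLD, NULL);
	gpio_request(GPIO_FN_SIUAISLD, NULL);
}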
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
new file mode 100644
index 000000000000..4c74bd04bba4
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
@@ -0,0 +1,2287 @@
1/*
2 * SH7757 (B0 step) Pinmux
3 *
4 * Copyright (C) 2009-2010 Renesas Solutions Corp.
5 *
6 * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
7 *
8 * Based on SH7723 Pinmux
9 * Copyright (C) 2008 Magnus Damm
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/gpio.h>
19#include <cpu/sh7757.h>
20
21enum {
22 PINMUX_RESERVED = 0,
23
24 PINMUX_DATA_BEGIN,
25 PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
26 PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
27 PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
28 PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
29 PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
30 PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
31 PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
32 PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
33 PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
34 PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
35 PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
36 PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
37 PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
38 PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
39 PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
40 PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
41 PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
42 PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
43 PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
44 PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
45 PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
46 PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
47 PTL6_DATA, PTL5_DATA, PTL4_DATA,
48 PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
49 PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
50 PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
51 PTN6_DATA, PTN5_DATA, PTN4_DATA,
52 PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
53 PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
54 PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
55 PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
56 PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
57 PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
58 PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
59 PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
60 PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
61 PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
62 PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
63 PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
64 PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
65 PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
66 PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
67 PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
68 PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
69 PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
70 PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
71 PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
72 PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
73 PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
74 PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
75 PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
76 PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
77 PINMUX_DATA_END,
78
79 PINMUX_INPUT_BEGIN,
80 PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
81 PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
82 PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
83 PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
84 PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
85 PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
86 PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
87 PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
88 PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
89 PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
90 PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
91 PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
92 PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN,
93 PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
94 PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
95 PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
96 PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
97 PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
98 PTJ6_IN, PTJ5_IN, PTJ4_IN,
99 PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
100 PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
101 PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
102 PTL6_IN, PTL5_IN, PTL4_IN,
103 PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
104 PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
105 PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
106 PTN6_IN, PTN5_IN, PTN4_IN,
107 PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
108 PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
109 PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
110 PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN,
111 PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
112 PTQ6_IN, PTQ5_IN, PTQ4_IN,
113 PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
114 PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
115 PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
116 PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
117 PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
118 PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
119 PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
120 PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
121 PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
122 PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
123 PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
124 PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
125 PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
126 PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
127 PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
128 PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
129 PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
130 PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
131 PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
132 PINMUX_INPUT_END,
133
134 PINMUX_INPUT_PULLUP_BEGIN,
135 PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
136 PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
137 PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
138 PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
139 PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
140 PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
141 PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
142 PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
143 PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
144 PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
145 PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
146 PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
147 PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
148 PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
149 PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
150 PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
151 PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
152 PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
153 PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
154 PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
155 PTN4_IN_PU,
156 PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
157 PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
158 PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
159 PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
160 PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
161 PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
162 PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
163 PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
164 PTV3_IN_PU, PTV2_IN_PU,
165 PTW1_IN_PU, PTW0_IN_PU,
166 PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
167 PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
168 PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
169 PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
170 PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
171 PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
172 PINMUX_INPUT_PULLUP_END,
173
174 PINMUX_OUTPUT_BEGIN,
175 PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
176 PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
177 PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
178 PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
179 PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
180 PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
181 PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
182 PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
183 PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
184 PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
185 PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
186 PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
187 PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT,
188 PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
189 PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
190 PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
191 PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
192 PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
193 PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
194 PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
195 PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
196 PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
197 PTL6_OUT, PTL5_OUT, PTL4_OUT,
198 PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
199 PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
200 PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
201 PTN6_OUT, PTN5_OUT, PTN4_OUT,
202 PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
203 PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
204 PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
205 PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT,
206 PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
207 PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
208 PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
209 PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
210 PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
211 PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
212 PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
213 PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
214 PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
215 PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
216 PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
217 PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
218 PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
219 PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
220 PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
221 PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
222 PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
223 PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
224 PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
225 PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
226 PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
227 PINMUX_OUTPUT_END,
228
229 PINMUX_FUNCTION_BEGIN,
230 PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
231 PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
232 PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
233 PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
234 PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
235 PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
236 PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
237 PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
238 PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
239 PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
240 PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
241 PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
242 PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN,
243 PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
244 PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
245 PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
246 PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
247 PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
248 PTJ6_FN, PTJ5_FN, PTJ4_FN,
249 PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
250 PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
251 PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
252 PTL6_FN, PTL5_FN, PTL4_FN,
253 PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
254 PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
255 PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
256 PTN6_FN, PTN5_FN, PTN4_FN,
257 PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
258 PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
259 PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
260 PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN,
261 PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
262 PTQ6_FN, PTQ5_FN, PTQ4_FN,
263 PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
264 PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
265 PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
266 PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
267 PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
268 PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
269 PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
270 PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
271 PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
272 PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
273 PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
274 PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
275 PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
276 PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
277 PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
278 PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
279 PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
280 PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
281 PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
282
283 PS0_15_FN1, PS0_15_FN2,
284 PS0_14_FN1, PS0_14_FN2,
285 PS0_13_FN1, PS0_13_FN2,
286 PS0_12_FN1, PS0_12_FN2,
287 PS0_11_FN1, PS0_11_FN2,
288 PS0_10_FN1, PS0_10_FN2,
289 PS0_9_FN1, PS0_9_FN2,
290 PS0_8_FN1, PS0_8_FN2,
291 PS0_7_FN1, PS0_7_FN2,
292 PS0_6_FN1, PS0_6_FN2,
293 PS0_5_FN1, PS0_5_FN2,
294 PS0_4_FN1, PS0_4_FN2,
295 PS0_3_FN1, PS0_3_FN2,
296 PS0_2_FN1, PS0_2_FN2,
297
298 PS1_10_FN1, PS1_10_FN2,
299 PS1_9_FN1, PS1_9_FN2,
300 PS1_8_FN1, PS1_8_FN2,
301 PS1_2_FN1, PS1_2_FN2,
302
303 PS2_13_FN1, PS2_13_FN2,
304 PS2_12_FN1, PS2_12_FN2,
305 PS2_7_FN1, PS2_7_FN2,
306 PS2_6_FN1, PS2_6_FN2,
307 PS2_5_FN1, PS2_5_FN2,
308 PS2_4_FN1, PS2_4_FN2,
309 PS2_2_FN1, PS2_2_FN2,
310
311 PS3_15_FN1, PS3_15_FN2,
312 PS3_14_FN1, PS3_14_FN2,
313 PS3_13_FN1, PS3_13_FN2,
314 PS3_12_FN1, PS3_12_FN2,
315 PS3_11_FN1, PS3_11_FN2,
316 PS3_10_FN1, PS3_10_FN2,
317 PS3_9_FN1, PS3_9_FN2,
318 PS3_8_FN1, PS3_8_FN2,
319 PS3_7_FN1, PS3_7_FN2,
320 PS3_2_FN1, PS3_2_FN2,
321 PS3_1_FN1, PS3_1_FN2,
322
323 PS4_14_FN1, PS4_14_FN2,
324 PS4_13_FN1, PS4_13_FN2,
325 PS4_12_FN1, PS4_12_FN2,
326 PS4_10_FN1, PS4_10_FN2,
327 PS4_9_FN1, PS4_9_FN2,
328 PS4_8_FN1, PS4_8_FN2,
329 PS4_4_FN1, PS4_4_FN2,
330 PS4_3_FN1, PS4_3_FN2,
331 PS4_2_FN1, PS4_2_FN2,
332 PS4_1_FN1, PS4_1_FN2,
333 PS4_0_FN1, PS4_0_FN2,
334
335 PS5_11_FN1, PS5_11_FN2,
336 PS5_10_FN1, PS5_10_FN2,
337 PS5_9_FN1, PS5_9_FN2,
338 PS5_8_FN1, PS5_8_FN2,
339 PS5_7_FN1, PS5_7_FN2,
340 PS5_6_FN1, PS5_6_FN2,
341 PS5_5_FN1, PS5_5_FN2,
342 PS5_4_FN1, PS5_4_FN2,
343 PS5_3_FN1, PS5_3_FN2,
344 PS5_2_FN1, PS5_2_FN2,
345
346 PS6_15_FN1, PS6_15_FN2,
347 PS6_14_FN1, PS6_14_FN2,
348 PS6_13_FN1, PS6_13_FN2,
349 PS6_12_FN1, PS6_12_FN2,
350 PS6_11_FN1, PS6_11_FN2,
351 PS6_10_FN1, PS6_10_FN2,
352 PS6_9_FN1, PS6_9_FN2,
353 PS6_8_FN1, PS6_8_FN2,
354 PS6_7_FN1, PS6_7_FN2,
355 PS6_6_FN1, PS6_6_FN2,
356 PS6_5_FN1, PS6_5_FN2,
357 PS6_4_FN1, PS6_4_FN2,
358 PS6_3_FN1, PS6_3_FN2,
359 PS6_2_FN1, PS6_2_FN2,
360 PS6_1_FN1, PS6_1_FN2,
361 PS6_0_FN1, PS6_0_FN2,
362
363 PS7_15_FN1, PS7_15_FN2,
364 PS7_14_FN1, PS7_14_FN2,
365 PS7_13_FN1, PS7_13_FN2,
366 PS7_12_FN1, PS7_12_FN2,
367 PS7_11_FN1, PS7_11_FN2,
368 PS7_10_FN1, PS7_10_FN2,
369 PS7_9_FN1, PS7_9_FN2,
370 PS7_8_FN1, PS7_8_FN2,
371 PS7_7_FN1, PS7_7_FN2,
372 PS7_6_FN1, PS7_6_FN2,
373 PS7_5_FN1, PS7_5_FN2,
374 PS7_4_FN1, PS7_4_FN2,
375
376 PS8_15_FN1, PS8_15_FN2,
377 PS8_14_FN1, PS8_14_FN2,
378 PS8_13_FN1, PS8_13_FN2,
379 PS8_12_FN1, PS8_12_FN2,
380 PS8_11_FN1, PS8_11_FN2,
381 PS8_10_FN1, PS8_10_FN2,
382 PS8_9_FN1, PS8_9_FN2,
383 PS8_8_FN1, PS8_8_FN2,
384 PINMUX_FUNCTION_END,
385
386 PINMUX_MARK_BEGIN,
 387 /* PTA (module: LBSC, RGMII) */
388 BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
389 ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
390
 391 /* PTB (module: INTC, ONFI, TMU) */
392 IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
393 IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
394 ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK,
395 ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK,
396
 397 /* PTC (module: IRQ, PWMU) */
398 IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
399 IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
400 PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK,
401 PWMU4_MARK, PWMU5_MARK,
402
 403 /* PTD (module: SPI0, DMAC) */
404 SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK,
405 SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK,
406 DREQ0_MARK, DACK0_MARK, TEND0_MARK,
407
 408 /* PTE (module: RMII) */
409 RMII0_CRS_DV_MARK, RMII0_TXD1_MARK,
410 RMII0_TXD0_MARK, RMII0_TXEN_MARK,
411 RMII0_REFCLK_MARK, RMII0_RXD1_MARK,
412 RMII0_RXD0_MARK, RMII0_RX_ER_MARK,
413
 414 /* PTF (module: RMII, SerMux) */
415 RMII1_CRS_DV_MARK, RMII1_TXD1_MARK,
416 RMII1_TXD0_MARK, RMII1_TXEN_MARK,
417 RMII1_REFCLK_MARK, RMII1_RXD1_MARK,
418 RMII1_RXD0_MARK, RMII1_RX_ER_MARK,
419 RAC_RI_MARK,
420
 421 /* PTG (module: system, LBSC, LPC, WDT, LPC, eMMC) */
422 BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK,
423 SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK,
424 MMCCLK_MARK, MMCCMD_MARK,
425
 426 /* PTH (module: SPI1, LPC, DMAC, ADC) */
427 SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
428 SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK,
429 TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK,
430 ADTRG0_MARK,
431
 432 /* PTI (module: LBSC, SDHI) */
433 D15_MARK, D14_MARK, D13_MARK, D12_MARK,
434 D11_MARK, D10_MARK, D9_MARK, D8_MARK,
435 SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
436 SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
437
 438 /* PTJ (module: SCIF234) */
439 RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK,
440 RTS4_MARK, RXD4_MARK, TXD4_MARK,
441
 442 /* PTK (module: SERMUX, LBSC, SCIF) */
443 COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
444 COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK,
445 SCK2_MARK, SCK4_MARK, SCK3_MARK,
446
 447 /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
448 RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK,
449 RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK,
450 CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK,
451 TXD2_MARK,
452
 453 /* PTM (module: LBSC, IIC) */
454 CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK,
455 SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
456
 457 /* PTN (module: USB, JMC, SGPIO, WDT) */
458 VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK,
459 JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK,
460 SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK,
461 SGPIO1_DO_MARK, SUB_CLKIN_MARK,
462
 463 /* PTO (module: SGPIO, SerMux) */
464 SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK,
465 SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK,
466 SGPIO2_DI_MARK, SGPIO2_DO_MARK,
467 COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
468
469	/* PTQ (module: LPC) */
470 LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
471 LFRAME_MARK, LRESET_MARK, LCLK_MARK,
472
473	/* PTR (module: GRA, IIC) */
474 DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK,
475 SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
476 SDA8_MARK, SCL8_MARK,
477
478	/* PTS (module: GRA, IIC) */
479 DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK,
480 SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
481 SDA9_MARK, SCL9_MARK,
482
483	/* PTT (module: PWMX, AUD) */
484 PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK,
485 PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK,
486 AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
487 STATUS1_MARK, STATUS0_MARK,
488
489	/* PTU (module: LPC, APM) */
490 LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
491 LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
492 APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK,
493 APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK,
494 APMS3N_MARK,
495
496	/* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
497 A23_MARK, A22_MARK, A21_MARK, A20_MARK,
498 A19_MARK, A18_MARK, A17_MARK, A16_MARK,
499 COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK,
500 R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK,
501 EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK,
502 VBIOS_CLK_MARK, VBIOS_CS_MARK,
503
504	/* PTW (module: LBSC, EVC, SCIF) */
505 A15_MARK, A14_MARK, A13_MARK, A12_MARK,
506 A11_MARK, A10_MARK, A9_MARK, A8_MARK,
507 EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK,
508 EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK,
509
510	/* PTX (module: LBSC, SCIF, SIM) */
511 A7_MARK, A6_MARK, A5_MARK, A4_MARK,
512 A3_MARK, A2_MARK, A1_MARK, A0_MARK,
513 RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
514
515	/* PTY (module: LBSC) */
516 D7_MARK, D6_MARK, D5_MARK, D4_MARK,
517 D3_MARK, D2_MARK, D1_MARK, D0_MARK,
518
519	/* PTZ (module: eMMC, ONFI) */
520 MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK,
521 MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK,
522 ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK,
523 ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK,
524
525 PINMUX_MARK_END,
526};
527
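/*
 * pinmux_data[] ties each pin state or function mark to the enum values
 * that select it: plain GPIO entries pair a pin's data value with its
 * input and output states, while function entries pair a *_MARK with the
 * port-select bit (PSn_x_FNy) that chooses between the two alternate
 * functions (where the pin is multiplexed) and the PTxn_FN port setting.
 */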
528static pinmux_enum_t pinmux_data[] = {
529 /* PTA GPIO */
530 PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
531 PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
532 PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
533 PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
534 PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
535 PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
536 PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
537 PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
538
539 /* PTB GPIO */
540 PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
541 PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
542 PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
543 PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
544 PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
545 PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
546 PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
547 PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
548
549 /* PTC GPIO */
550 PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
551 PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
552 PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
553 PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
554 PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
555 PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
556 PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
557 PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
558
559 /* PTD GPIO */
560 PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
561 PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
562 PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
563 PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
564 PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
565 PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
566 PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
567 PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
568
569 /* PTE GPIO */
570 PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
571 PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
572 PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
573 PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
574 PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
575 PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
576 PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
577 PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
578
579 /* PTF GPIO */
580 PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
581 PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
582 PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
583 PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
584 PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
585 PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
586 PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
587 PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
588
589 /* PTG GPIO */
590 PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
591 PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
592 PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
593 PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
594 PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
595 PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
596 PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
597 PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
598
599 /* PTH GPIO */
600 PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
601 PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
602 PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
603 PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
604 PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
605 PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
606 PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
607 PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
608
609 /* PTI GPIO */
610 PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
611 PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
612 PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
613 PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
614 PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
615 PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
616 PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
617 PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
618
619 /* PTJ GPIO */
620 PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
621 PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
622 PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
623 PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
624 PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
625 PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
626 PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
627
628 /* PTK GPIO */
629 PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
630 PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
631 PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
632 PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
633 PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
634 PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
635 PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
636 PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
637
638 /* PTL GPIO */
639 PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
640 PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
641 PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
642 PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
643 PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
644 PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
645 PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
646
647 /* PTM GPIO */
648 PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
649 PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
650 PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
651 PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
652 PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
653 PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
654 PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
655
656 /* PTN GPIO */
657 PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
658 PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
659 PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
660 PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
661 PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
662 PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
663 PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
664
665 /* PTO GPIO */
666 PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
667 PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
668 PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
669 PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
670 PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
671 PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
672 PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
673 PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),
674
675 /* PTQ GPIO */
676 PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
677 PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
678 PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
679 PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
680 PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
681 PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
682 PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
683
684 /* PTR GPIO */
685 PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
686 PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
687 PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
688 PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
689 PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
690 PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
691 PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
692 PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
693
694 /* PTS GPIO */
695 PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
696 PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
697 PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
698 PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
699 PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
700 PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
701 PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
702 PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
703
704 /* PTT GPIO */
705 PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
706 PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
707 PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
708 PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
709 PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
710 PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
711 PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
712 PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
713
714 /* PTU GPIO */
715 PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
716 PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
717 PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
718 PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
719 PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
720 PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
721 PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
722 PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
723
724 /* PTV GPIO */
725 PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
726 PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
727 PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
728 PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
729 PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
730 PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
731 PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
732 PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
733
734 /* PTW GPIO */
735 PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
736 PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
737 PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
738 PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
739 PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
740 PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
741 PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
742 PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
743
744 /* PTX GPIO */
745 PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
746 PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
747 PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
748 PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
749 PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
750 PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
751 PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
752 PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
753
754 /* PTY GPIO */
755 PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
756 PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
757 PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
758 PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
759 PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
760 PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
761 PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
762 PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
763
764 /* PTZ GPIO */
765 PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
766 PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
767 PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
768 PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
769 PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
770 PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
771 PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
772 PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
773
774 /* PTA FN */
775 PINMUX_DATA(BS_MARK, PTA7_FN),
776 PINMUX_DATA(RDWR_MARK, PTA6_FN),
777 PINMUX_DATA(WE1_MARK, PTA5_FN),
778 PINMUX_DATA(RDY_MARK, PTA4_FN),
779 PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
780 PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
781 PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
782 PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
783
784 /* PTB FN */
785 PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
786 PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
787 PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
788 PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
789 PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
790 PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
791 PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
792 PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
793 PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
794 PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
795 PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
796 PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
797 PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
798 PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
799 PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
800 PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
801
802 /* PTC FN */
803 PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
804 PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
805 PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
806 PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
807 PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
808 PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
809	PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
810 PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
811 PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
812 PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
813 PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
814 PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
815 PINMUX_DATA(IRQ1_MARK, PTC1_FN),
816 PINMUX_DATA(IRQ0_MARK, PTC0_FN),
817
818 /* PTD FN */
819 PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
820 PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
821 PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
822 PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
823 PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
824 PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
825 PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
826 PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
827 PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
828 PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
829 PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
830
831 /* PTE FN */
832 PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
833 PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
834 PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
835 PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
836 PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
837 PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
838 PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
839 PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
840
841 /* PTF FN */
842 PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
843 PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
844 PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
845 PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
846 PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
847 PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
848 PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
849 PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
850 PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
851
852 /* PTG FN */
853 PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
854 PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
855 PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
856 PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
857 PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
858 PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
859 PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
860 PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
861 PINMUX_DATA(LPCPD_MARK, PTG1_FN),
862 PINMUX_DATA(LDRQ_MARK, PTG0_FN),
863
864 /* PTH FN */
865 PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
866 PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
867 PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
868 PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
869 PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
870 PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
871 PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
872 PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
873 PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
874 PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
875 PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
876 PINMUX_DATA(WP_MARK, PTH1_FN),
877 PINMUX_DATA(FMS0_MARK, PTH0_FN),
878
879 /* PTI FN */
880 PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
881 PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
882 PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
883 PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
884 PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
885 PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
886 PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
887 PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
888 PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
889 PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
890 PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
891 PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
892 PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
893 PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
894 PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
895 PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
896
897 /* PTJ FN */
898 PINMUX_DATA(RTS3_MARK, PTJ6_FN),
899 PINMUX_DATA(CTS3_MARK, PTJ5_FN),
900 PINMUX_DATA(TXD3_MARK, PTJ4_FN),
901 PINMUX_DATA(RXD3_MARK, PTJ3_FN),
902 PINMUX_DATA(RTS4_MARK, PTJ2_FN),
903 PINMUX_DATA(RXD4_MARK, PTJ1_FN),
904 PINMUX_DATA(TXD4_MARK, PTJ0_FN),
905
906 /* PTK FN */
907 PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
908 PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
909 PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
910 PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
911 PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
912 PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
913 PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
914 PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
915 PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
916 PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
917 PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
918
919 /* PTL FN */
920 PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
921 PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
922 PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
923 PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
924 PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
925 PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
926 PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
927 PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
928 PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
929 PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
930 PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
931 PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
932	PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
933
934 /* PTM FN */
935 PINMUX_DATA(CS4_MARK, PTM7_FN),
936 PINMUX_DATA(RD_MARK, PTM6_FN),
937	PINMUX_DATA(WE0_MARK, PTM5_FN),
938 PINMUX_DATA(CS0_MARK, PTM4_FN),
939 PINMUX_DATA(SDA6_MARK, PTM3_FN),
940 PINMUX_DATA(SCL6_MARK, PTM2_FN),
941 PINMUX_DATA(SDA7_MARK, PTM1_FN),
942 PINMUX_DATA(SCL7_MARK, PTM0_FN),
943
944 /* PTN FN */
945 PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
946 PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
947 PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
948 PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
949	PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
950	PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
951 PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
952 PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
953 PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
954 PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
955 PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
956 PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
957
958 /* PTO FN */
959 PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
960 PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
961 PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
962 PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
963 PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
964 PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
965 PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
966 PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
967 PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
968 PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
969 PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
970 PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
971
972 /* PTP FN */
973
974 /* PTQ FN */
975 PINMUX_DATA(LAD3_MARK, PTQ6_FN),
976 PINMUX_DATA(LAD2_MARK, PTQ5_FN),
977 PINMUX_DATA(LAD1_MARK, PTQ4_FN),
978 PINMUX_DATA(LAD0_MARK, PTQ3_FN),
979 PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
980 PINMUX_DATA(LRESET_MARK, PTQ1_FN),
981 PINMUX_DATA(LCLK_MARK, PTQ0_FN),
982
983 /* PTR FN */
984 PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
985 PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */
986 PINMUX_DATA(SDA2_MARK, PTR5_FN),
987 PINMUX_DATA(SCL2_MARK, PTR4_FN),
988 PINMUX_DATA(SDA1_MARK, PTR3_FN),
989 PINMUX_DATA(SCL1_MARK, PTR2_FN),
990 PINMUX_DATA(SDA0_MARK, PTR1_FN),
991 PINMUX_DATA(SCL0_MARK, PTR0_FN),
992
993 /* PTS FN */
994 PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */
995 PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? */
996 PINMUX_DATA(SDA5_MARK, PTS5_FN),
997 PINMUX_DATA(SCL5_MARK, PTS4_FN),
998 PINMUX_DATA(SDA4_MARK, PTS3_FN),
999 PINMUX_DATA(SCL4_MARK, PTS2_FN),
1000 PINMUX_DATA(SDA3_MARK, PTS1_FN),
1001 PINMUX_DATA(SCL3_MARK, PTS0_FN),
1002
1003 /* PTT FN */
1004 PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
1005 PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
1006 PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
1007 PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
1008 PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
1009 PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
1010 PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
1011 PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
1012 PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
1013 PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
1014 PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
1015 PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
1016 PINMUX_DATA(PWMX1_MARK, PTT1_FN),
1017 PINMUX_DATA(PWMX0_MARK, PTT0_FN),
1018
1019 /* PTU FN */
1020 PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
1021 PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
1022 PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
1023 PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
1024 PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
1025 PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
1026 PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
1027 PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
1028 PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
1029 PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
1030 PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
1031 PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
1032 PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
1033 PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
1034 PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
1035 PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
1036
1037 /* PTV FN */
1038 PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
1039 PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
1040 PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
1041 PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
1042 PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
1043 PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
1044 PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
1045 PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
1046 PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
1047 PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
1048 PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
1049 PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
1050 PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
1051 PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
1052 PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
1053	PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
1054
1055 /* PTW FN */
1056 PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
1057 PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
1058 PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
1059 PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
1060 PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
1061 PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
1062 PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
1063 PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
1064 PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
1065 PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
1066 PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
1067 PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
1068 PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
1069 PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
1070 PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
1071 PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
1072
1073 /* PTX FN */
1074 PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
1075 PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
1076 PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
1077 PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
1078 PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
1079 PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
1080 PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
1081 PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
1082 PINMUX_DATA(A3_MARK, PTX3_FN),
1083 PINMUX_DATA(A2_MARK, PTX2_FN),
1084 PINMUX_DATA(A1_MARK, PTX1_FN),
1085 PINMUX_DATA(A0_MARK, PTX0_FN),
1086
1087 /* PTY FN */
1088 PINMUX_DATA(D7_MARK, PTY7_FN),
1089 PINMUX_DATA(D6_MARK, PTY6_FN),
1090 PINMUX_DATA(D5_MARK, PTY5_FN),
1091 PINMUX_DATA(D4_MARK, PTY4_FN),
1092 PINMUX_DATA(D3_MARK, PTY3_FN),
1093 PINMUX_DATA(D2_MARK, PTY2_FN),
1094 PINMUX_DATA(D1_MARK, PTY1_FN),
1095 PINMUX_DATA(D0_MARK, PTY0_FN),
1096
1097 /* PTZ FN */
1098 PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
1099 PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
1100 PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
1101 PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN),
1102 PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN),
1103 PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN),
1104 PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN),
1105 PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN),
1106 PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN),
1107 PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN),
1108 PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN),
1109 PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN),
1110 PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN),
1111 PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN),
1112 PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN),
1113 PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
1114};
1115
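/*
 * pinmux_gpios[] maps the virtual GPIO numbers to their backing enums:
 * GPIO_PTxn entries resolve to the pin's data value, while GPIO_FN_*
 * entries resolve to the matching function mark defined above.
 */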
1116static struct pinmux_gpio pinmux_gpios[] = {
1117 /* PTA */
1118 PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
1119 PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
1120 PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
1121 PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
1122 PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
1123 PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
1124 PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
1125 PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
1126
1127 /* PTB */
1128 PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
1129 PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
1130 PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
1131 PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
1132 PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
1133 PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
1134 PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
1135 PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
1136
1137 /* PTC */
1138 PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
1139 PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
1140 PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
1141 PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
1142 PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
1143 PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
1144 PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
1145 PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
1146
1147 /* PTD */
1148 PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
1149 PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
1150 PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
1151 PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
1152 PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
1153 PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
1154 PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
1155 PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
1156
1157 /* PTE */
1158 PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
1159 PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
1160 PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
1161 PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
1162 PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
1163 PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
1164 PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
1165 PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
1166
1167 /* PTF */
1168 PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
1169 PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
1170 PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
1171 PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
1172 PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
1173 PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
1174 PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
1175 PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
1176
1177 /* PTG */
1178 PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
1179 PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
1180 PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
1181 PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
1182 PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
1183 PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
1184 PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
1185 PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
1186
1187 /* PTH */
1188 PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
1189 PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
1190 PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
1191 PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
1192 PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
1193 PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
1194 PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
1195 PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
1196
1197 /* PTI */
1198 PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
1199 PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
1200 PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
1201 PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
1202 PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
1203 PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
1204 PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
1205 PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
1206
1207 /* PTJ */
1208 PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
1209 PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
1210 PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
1211 PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
1212 PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
1213 PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
1214 PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
1215
1216 /* PTK */
1217 PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
1218 PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
1219 PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
1220 PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
1221 PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
1222 PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
1223 PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
1224 PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
1225
1226 /* PTL */
1227 PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
1228 PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
1229 PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
1230 PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
1231 PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
1232 PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
1233 PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
1234
1235 /* PTM */
1236 PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
1237 PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
1238 PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
1239 PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
1240 PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
1241 PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
1242 PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
1243 PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
1244
1245 /* PTN */
1246 PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
1247 PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
1248 PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
1249 PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
1250 PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
1251 PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
1252 PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
1253
1254 /* PTO */
1255 PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
1256 PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
1257 PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
1258 PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
1259 PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
1260 PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
1261 PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
1262 PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
1263
1264 /* PTP */
1265 PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
1266 PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
1267 PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
1268 PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
1269 PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
1270 PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
1271 PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
1272 PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
1273
1274 /* PTQ */
1275 PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
1276 PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
1277 PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
1278 PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
1279 PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
1280 PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
1281 PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
1282
1283 /* PTR */
1284 PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
1285 PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
1286 PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
1287 PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
1288 PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
1289 PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
1290 PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
1291 PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
1292
1293 /* PTS */
1294 PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
1295 PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
1296 PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
1297 PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
1298 PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
1299 PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
1300 PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
1301 PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
1302
1303 /* PTT */
1304 PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
1305 PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
1306 PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
1307 PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
1308 PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
1309 PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
1310 PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
1311 PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
1312
1313 /* PTU */
1314 PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
1315 PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
1316 PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
1317 PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
1318 PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
1319 PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
1320 PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
1321 PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
1322
1323 /* PTV */
1324 PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
1325 PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
1326 PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
1327 PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
1328 PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
1329 PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
1330 PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
1331 PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
1332
1333 /* PTW */
1334 PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
1335 PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
1336 PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
1337 PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
1338 PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
1339 PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
1340 PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
1341 PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
1342
1343 /* PTX */
1344 PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
1345 PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
1346 PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
1347 PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
1348 PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
1349 PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
1350 PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
1351 PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
1352
1353 /* PTY */
1354 PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
1355 PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
1356 PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
1357 PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
1358 PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
1359 PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
1360 PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
1361 PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
1362
1363 /* PTZ */
1364 PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
1365 PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
1366 PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
1367 PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
1368 PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
1369 PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
1370 PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
1371 PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
1372
1373	/* PTA (module: LBSC, RGMII) */
1374 PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
1375 PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
1376 PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
1377 PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
1378 PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
1379	PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
1380 PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
1381	PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
1382
1383	/* PTB (module: INTC, ONFI, TMU) */
1384 PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
1385 PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
1386 PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
1387 PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
1388 PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
1389 PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
1390 PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
1391 PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
1392 PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
1393 PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
1394 PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
1395 PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
1396 PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
1397 PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
1398 PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
1399 PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
1400
1401	/* PTC (module: IRQ, PWMU) */
1402 PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
1403 PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
1404 PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
1405 PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
1406 PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
1407 PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
1408 PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
1409 PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
1410 PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
1411 PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
1412 PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
1413 PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
1414 PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
1415 PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
1416
1417	/* PTD (module: SPI0, DMAC) */
1418 PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
1419 PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
1420 PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
1421 PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
1422 PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
1423 PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
1424 PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
1425 PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
1426 PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
1427 PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
1428 PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
1429
1430	/* PTE (module: RMII) */
1431 PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
1432 PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
1433 PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
1434 PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
1435 PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
1436 PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
1437 PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
1438 PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
1439
1440	/* PTF (module: RMII, SerMux) */
1441 PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
1442 PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
1443 PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
1444 PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
1445 PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
1446 PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
1447 PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
1448 PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
1449 PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
1450
1451	/* PTG (module: system, LBSC, LPC, WDT, eMMC) */
1452 PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
1453 PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
1454 PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
1455 PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
1456 PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
1457 PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
1458 PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
1459 PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
1460 PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
1461 PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
1462
1463	/* PTH (module: SPI1, LPC, DMAC, ADC) */
1464 PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
1465 PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
1466 PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
1467 PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
1468 PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
1469 PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
1470 PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
1471 PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
1472 PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
1473 PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
1474 PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
1475 PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
1476 PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
1477
1478	/* PTI (module: LBSC, SDHI) */
1479 PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
1480 PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
1481 PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
1482 PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
1483 PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
1484 PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
1485 PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
1486 PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
1487 PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
1488 PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
1489 PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
1490 PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
1491 PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
1492 PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
1493 PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
1494 PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
1495
1496	/* PTJ (module: SCIF234, SERMUX) */
1497 PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
1498 PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
1499 PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
1500 PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
1501 PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
1502 PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
1503 PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
1504
1505	/* PTK (module: SERMUX, LBSC, SCIF) */
1506 PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
1507 PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
1508 PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
1509 PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
1510 PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
1511 PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
1512 PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
1513 PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
1514 PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
1515 PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
1516 PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
1517
1518	/* PTL (module: SERMUX, SCIF, LBSC, AUD) */
1519 PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
1520 PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
1521 PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
1522 PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
1523 PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
1524 PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
1525 PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
1526 PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
1527 PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
1528 PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
1529 PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
1530 PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
1531 PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
1532
1533	/* PTM (module: LBSC, IIC) */
1534 PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
1535 PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
1536 PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
1537 PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
1538 PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
1539 PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
1540 PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
1541 PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
1542
1543	/* PTN (module: USB, JMC, SGPIO, WDT) */
1544 PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
1545 PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
1546 PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
1547 PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
1548 PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
1549 PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
1550 PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
1551 PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
1552 PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
1553 PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
1554 PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
1555 PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
1556
1557	/* PTO (module: SGPIO, SerMux) */
1558 PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
1559 PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
1560 PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
1561 PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
1562 PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
1563 PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
1564 PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
1565 PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
1566 PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
1567 PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
1568 PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
1569 PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
1570
1571	/* PTP (module: EVC, ADC) */
1572
1573	/* PTQ (module: LPC) */
1574 PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
1575 PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
1576 PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
1577 PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
1578 PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
1579 PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
1580 PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
1581
1582	/* PTR (module: GRA, IIC) */
1583 PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
1584 PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
1585 PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
1586 PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
1587 PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
1588 PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
1589 PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
1590 PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
1591 PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
1592 PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
1593
1594	/* PTS (module: GRA, IIC) */
1595 PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
1596 PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
1597 PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
1598 PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
1599 PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
1600 PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
1601 PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
1602 PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
1603 PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
1604 PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
1605
1606	/* PTT (module: PWMX, AUD) */
1607 PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
1608 PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
1609 PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
1610 PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
1611 PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
1612 PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
1613 PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
1614 PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
1615 PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
1616 PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
1617 PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
1618 PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
1619 PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
1620 PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
1621
1622	/* PTU (module: LPC, APM) */
1623 PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
1624 PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
1625 PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
1626 PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
1627 PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
1628 PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
1629 PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
1630 PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
1631 PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
1632 PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
1633 PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
1634 PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
1635 PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
1636 PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
1637 PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
1638 PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
1639
1640	/* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
1641 PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
1642 PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
1643 PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
1644 PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
1645 PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
1646 PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
1647 PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
1648 PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
1649 PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
1650 PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
1651 PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
1652 PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
1653 PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
1654 PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
1655 PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
1656 PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
1657 PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
1658 PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
1659 PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
1660 PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
1661
1662	/* PTW (module: LBSC, EVC, SCIF) */
1664 PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
1665 PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
1666 PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
1667 PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
1668 PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
1669 PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
1670 PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
1671 PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
1672 PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
1673 PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
1674 PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
1675 PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
1676 PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
1677 PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
1678 PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
1679 PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
1680
1681	/* PTX (module: LBSC, SCIF, SIM) */
1682 PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
1683 PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
1684 PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
1685 PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
1686 PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
1687 PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
1688 PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
1689 PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
1690 PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
1691 PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
1692 PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
1693 PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
1694
1695	/* PTY (module: LBSC) */
1696 PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
1697 PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
1698 PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
1699 PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
1700 PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
1701 PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
1702 PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
1703 PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
1704
1705	/* PTZ (module: eMMC, ONFI) */
1706 PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
1707 PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
1708 PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
1709 PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
1710 PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
1711 PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
1712 PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
1713 PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
1714 PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
1715 PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
1716 PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
1717 PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
1718 PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
1719 PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
1720 PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
1721 PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
1722};
1723
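/*
 * Port control registers: each PINMUX_CFG_REG() below describes a 16-bit
 * register made up of 2-bit fields, one per pin, listed from pin 7 down
 * to pin 0.  The four values given for a field are its possible settings
 * (function, output, input, input with pull-up; 0 marks a reserved or
 * unavailable setting).  The PSEL registers that follow use 1-bit fields
 * to choose between the FN1 and FN2 alternate functions of a pin.
 */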
1724static struct pinmux_cfg_reg pinmux_config_regs[] = {
1725 { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
1726 PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
1727 PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
1728 PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
1729 PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
1730 PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
1731 PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
1732 PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
1733 PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
1734 },
1735 { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
1736 PTB7_FN, PTB7_OUT, PTB7_IN, 0,
1737 PTB6_FN, PTB6_OUT, PTB6_IN, 0,
1738 PTB5_FN, PTB5_OUT, PTB5_IN, 0,
1739 PTB4_FN, PTB4_OUT, PTB4_IN, 0,
1740 PTB3_FN, PTB3_OUT, PTB3_IN, 0,
1741 PTB2_FN, PTB2_OUT, PTB2_IN, 0,
1742 PTB1_FN, PTB1_OUT, PTB1_IN, 0,
1743 PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
1744 },
1745 { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
1746 PTC7_FN, PTC7_OUT, PTC7_IN, 0,
1747 PTC6_FN, PTC6_OUT, PTC6_IN, 0,
1748 PTC5_FN, PTC5_OUT, PTC5_IN, 0,
1749 PTC4_FN, PTC4_OUT, PTC4_IN, 0,
1750 PTC3_FN, PTC3_OUT, PTC3_IN, 0,
1751 PTC2_FN, PTC2_OUT, PTC2_IN, 0,
1752 PTC1_FN, PTC1_OUT, PTC1_IN, 0,
1753 PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
1754 },
1755 { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
1756 PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
1757 PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
1758 PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
1759 PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
1760 PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
1761 PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
1762 PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
1763 PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
1764 },
1765 { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
1766 PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
1767 PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
1768 PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
1769 PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
1770 PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
1771 PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
1772 PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
1773 PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
1774 },
1775 { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
1776 PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
1777 PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
1778 PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
1779 PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
1780 PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
1781 PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
1782 PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
1783 PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
1784 },
1785 { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
1786		PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU,
1787		PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU,
1788		PTG5_FN, PTG5_OUT, PTG5_IN, 0,
1789		PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU,
1790 PTG3_FN, PTG3_OUT, PTG3_IN, 0,
1791 PTG2_FN, PTG2_OUT, PTG2_IN, 0,
1792 PTG1_FN, PTG1_OUT, PTG1_IN, 0,
1793 PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
1794 },
1795 { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
1796 PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
1797 PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
1798 PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
1799 PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
1800 PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
1801 PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
1802 PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
1803 PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
1804 },
1805 { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
1806 PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
1807 PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
1808 PTI5_FN, PTI5_OUT, PTI5_IN, 0,
1809 PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
1810 PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
1811 PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
1812 PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
1813 PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
1814 },
1815 { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
1816 0, 0, 0, 0, /* reserved: always set 1 */
1817 PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
1818 PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
1819 PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
1820 PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
1821 PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
1822 PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
1823 PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
1824 },
1825 { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
1826 PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
1827 PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
1828 PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
1829 PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
1830 PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
1831 PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
1832 PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
1833 PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
1834 },
1835 { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
1836 0, 0, 0, 0, /* reserved: always set 1 */
1837 PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
1838 PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
1839 PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
1840 PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
1841 PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
1842 PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
1843 PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
1844 },
1845 { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
1846 PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
1847 PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
1848 PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
1849 PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
1850 PTM3_FN, PTM3_OUT, PTM3_IN, 0,
1851 PTM2_FN, PTM2_OUT, PTM2_IN, 0,
1852 PTM1_FN, PTM1_OUT, PTM1_IN, 0,
1853 PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
1854 },
1855 { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
1856 0, 0, 0, 0, /* reserved: always set 1 */
1857 PTN6_FN, PTN6_OUT, PTN6_IN, 0,
1858 PTN5_FN, PTN5_OUT, PTN5_IN, 0,
1859 PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
1860 PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
1861 PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
1862 PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
1863 PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
1864 },
1865 { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
1866 PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
1867 PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
1868 PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
1869 PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
1870 PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
1871 PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
1872 PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
1873 PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
1874 },
1875#if 0 /* FIXME: Remove it? */
1876 { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
1877 0, 0, 0, 0, /* reserved: always set 1 */
1878 PTP6_FN, PTP6_OUT, PTP6_IN, 0,
1879 PTP5_FN, PTP5_OUT, PTP5_IN, 0,
1880 PTP4_FN, PTP4_OUT, PTP4_IN, 0,
1881 PTP3_FN, PTP3_OUT, PTP3_IN, 0,
1882 PTP2_FN, PTP2_OUT, PTP2_IN, 0,
1883 PTP1_FN, PTP1_OUT, PTP1_IN, 0,
1884 PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
1885 },
1886#endif
1887 { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
1888 0, 0, 0, 0, /* reserved: always set 1 */
1889 PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
1890 PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
1891 PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0,
1892 PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
1893 PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
1894 PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
1895 PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
1896 },
1897 { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
1898 PTR7_FN, PTR7_OUT, PTR7_IN, 0,
1899 PTR6_FN, PTR6_OUT, PTR6_IN, 0,
1900 PTR5_FN, PTR5_OUT, PTR5_IN, 0,
1901 PTR4_FN, PTR4_OUT, PTR4_IN, 0,
1902 PTR3_FN, PTR3_OUT, PTR3_IN, 0,
1903 PTR2_FN, PTR2_OUT, PTR2_IN, 0,
1904 PTR1_FN, PTR1_OUT, PTR1_IN, 0,
1905 PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
1906 },
1907 { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
1908 PTS7_FN, PTS7_OUT, PTS7_IN, 0,
1909 PTS6_FN, PTS6_OUT, PTS6_IN, 0,
1910 PTS5_FN, PTS5_OUT, PTS5_IN, 0,
1911 PTS4_FN, PTS4_OUT, PTS4_IN, 0,
1912 PTS3_FN, PTS3_OUT, PTS3_IN, 0,
1913 PTS2_FN, PTS2_OUT, PTS2_IN, 0,
1914 PTS1_FN, PTS1_OUT, PTS1_IN, 0,
1915 PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
1916 },
1917 { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
1918 PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU,
1919 PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU,
1920 PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU,
1921 PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU,
1922 PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU,
1923 PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU,
1924 PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU,
1925 PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU }
1926 },
1927 { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
1928 PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
1929 PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
1930 PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
1931 PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
1932 PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
1933 PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
1934 PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
1935 PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
1936 },
1937 { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
1938 PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
1939 PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
1940 PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
1941 PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
1942 PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
1943 PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
1944 PTV1_FN, PTV1_OUT, PTV1_IN, 0,
1945 PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
1946 },
1947 { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
1948 PTW7_FN, PTW7_OUT, PTW7_IN, 0,
1949 PTW6_FN, PTW6_OUT, PTW6_IN, 0,
1950 PTW5_FN, PTW5_OUT, PTW5_IN, 0,
1951 PTW4_FN, PTW4_OUT, PTW4_IN, 0,
1952 PTW3_FN, PTW3_OUT, PTW3_IN, 0,
1953 PTW2_FN, PTW2_OUT, PTW2_IN, 0,
1954 PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
1955 PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
1956 },
1957 { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
1958 PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
1959 PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
1960 PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
1961 PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
1962 PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
1963 PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
1964 PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
1965 PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
1966 },
1967 { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
1968 PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
1969 PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
1970 PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
1971 PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
1972 PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
1973 PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
1974 PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
1975 PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
1976 },
1977 { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
1978 PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
1979 PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
1980 PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
1981 PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0,
1982 PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
1983 PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
1984 PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
1985 PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
1986 },
1987
1988 { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
1989 PS0_15_FN1, PS0_15_FN2,
1990 PS0_14_FN1, PS0_14_FN2,
1991 PS0_13_FN1, PS0_13_FN2,
1992 PS0_12_FN1, PS0_12_FN2,
1993 PS0_11_FN1, PS0_11_FN2,
1994 PS0_10_FN1, PS0_10_FN2,
1995 PS0_9_FN1, PS0_9_FN2,
1996 PS0_8_FN1, PS0_8_FN2,
1997 PS0_7_FN1, PS0_7_FN2,
1998 PS0_6_FN1, PS0_6_FN2,
1999 PS0_5_FN1, PS0_5_FN2,
2000 PS0_4_FN1, PS0_4_FN2,
2001 PS0_3_FN1, PS0_3_FN2,
2002 PS0_2_FN1, PS0_2_FN2,
2003 0, 0,
2004 0, 0, }
2005 },
2006 { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
2007 0, 0,
2008 0, 0,
2009 0, 0,
2010 0, 0,
2011 0, 0,
2012 PS1_10_FN1, PS1_10_FN2,
2013 PS1_9_FN1, PS1_9_FN2,
2014 PS1_8_FN1, PS1_8_FN2,
2015 0, 0,
2016 0, 0,
2017 0, 0,
2018 0, 0,
2019 0, 0,
2020 PS1_2_FN1, PS1_2_FN2,
2021 0, 0,
2022 0, 0, }
2023 },
2024 { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
2025 0, 0,
2026 0, 0,
2027 PS2_13_FN1, PS2_13_FN2,
2028 PS2_12_FN1, PS2_12_FN2,
2029 0, 0,
2030 0, 0,
2031 0, 0,
2032 0, 0,
2033 PS2_7_FN1, PS2_7_FN2,
2034 PS2_6_FN1, PS2_6_FN2,
2035 PS2_5_FN1, PS2_5_FN2,
2036 PS2_4_FN1, PS2_4_FN2,
2037 0, 0,
2038 PS2_2_FN1, PS2_2_FN2,
2039 0, 0,
2040 0, 0, }
2041 },
2042 { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
2043 PS3_15_FN1, PS3_15_FN2,
2044 PS3_14_FN1, PS3_14_FN2,
2045 PS3_13_FN1, PS3_13_FN2,
2046 PS3_12_FN1, PS3_12_FN2,
2047 PS3_11_FN1, PS3_11_FN2,
2048 PS3_10_FN1, PS3_10_FN2,
2049 PS3_9_FN1, PS3_9_FN2,
2050 PS3_8_FN1, PS3_8_FN2,
2051 PS3_7_FN1, PS3_7_FN2,
2052 0, 0,
2053 0, 0,
2054 0, 0,
2055 0, 0,
2056 PS3_2_FN1, PS3_2_FN2,
2057 PS3_1_FN1, PS3_1_FN2,
2058 0, 0, }
2059 },
2060
2061 { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
2062 0, 0,
2063 PS4_14_FN1, PS4_14_FN2,
2064 PS4_13_FN1, PS4_13_FN2,
2065 PS4_12_FN1, PS4_12_FN2,
2066 0, 0,
2067 PS4_10_FN1, PS4_10_FN2,
2068 PS4_9_FN1, PS4_9_FN2,
2069 PS4_8_FN1, PS4_8_FN2,
2070 0, 0,
2071 0, 0,
2072 0, 0,
2073 PS4_4_FN1, PS4_4_FN2,
2074 PS4_3_FN1, PS4_3_FN2,
2075 PS4_2_FN1, PS4_2_FN2,
2076 PS4_1_FN1, PS4_1_FN2,
2077 PS4_0_FN1, PS4_0_FN2, }
2078 },
2079 { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
2080 0, 0,
2081 0, 0,
2082 0, 0,
2083 0, 0,
2084 PS5_11_FN1, PS5_11_FN2,
2085 PS5_10_FN1, PS5_10_FN2,
2086 PS5_9_FN1, PS5_9_FN2,
2087 PS5_8_FN1, PS5_8_FN2,
2088 PS5_7_FN1, PS5_7_FN2,
2089 PS5_6_FN1, PS5_6_FN2,
2090 PS5_5_FN1, PS5_5_FN2,
2091 PS5_4_FN1, PS5_4_FN2,
2092 PS5_3_FN1, PS5_3_FN2,
2093 PS5_2_FN1, PS5_2_FN2,
2094 0, 0,
2095 0, 0, }
2096 },
2097 { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
2098 PS6_15_FN1, PS6_15_FN2,
2099 PS6_14_FN1, PS6_14_FN2,
2100 PS6_13_FN1, PS6_13_FN2,
2101 PS6_12_FN1, PS6_12_FN2,
2102 PS6_11_FN1, PS6_11_FN2,
2103 PS6_10_FN1, PS6_10_FN2,
2104 PS6_9_FN1, PS6_9_FN2,
2105 PS6_8_FN1, PS6_8_FN2,
2106 PS6_7_FN1, PS6_7_FN2,
2107 PS6_6_FN1, PS6_6_FN2,
2108 PS6_5_FN1, PS6_5_FN2,
2109 PS6_4_FN1, PS6_4_FN2,
2110 PS6_3_FN1, PS6_3_FN2,
2111 PS6_2_FN1, PS6_2_FN2,
2112 PS6_1_FN1, PS6_1_FN2,
2113 PS6_0_FN1, PS6_0_FN2, }
2114 },
2115 { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
2116 PS7_15_FN1, PS7_15_FN2,
2117 PS7_14_FN1, PS7_14_FN2,
2118 PS7_13_FN1, PS7_13_FN2,
2119 PS7_12_FN1, PS7_12_FN2,
2120 PS7_11_FN1, PS7_11_FN2,
2121 PS7_10_FN1, PS7_10_FN2,
2122 PS7_9_FN1, PS7_9_FN2,
2123 PS7_8_FN1, PS7_8_FN2,
2124 PS7_7_FN1, PS7_7_FN2,
2125 PS7_6_FN1, PS7_6_FN2,
2126 PS7_5_FN1, PS7_5_FN2,
2127 0, 0,
2128 0, 0,
2129 0, 0,
2130 0, 0,
2131 0, 0, }
2132 },
2133 { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
2134 PS8_15_FN1, PS8_15_FN2,
2135 PS8_14_FN1, PS8_14_FN2,
2136 PS8_13_FN1, PS8_13_FN2,
2137 PS8_12_FN1, PS8_12_FN2,
2138 PS8_11_FN1, PS8_11_FN2,
2139 PS8_10_FN1, PS8_10_FN2,
2140 PS8_9_FN1, PS8_9_FN2,
2141 PS8_8_FN1, PS8_8_FN2,
2142 0, 0,
2143 0, 0,
2144 0, 0,
2145 0, 0,
2146 0, 0,
2147 0, 0,
2148 0, 0,
2149 0, 0, }
2150 },
2151 {}
2152};
2153
2154static struct pinmux_data_reg pinmux_data_regs[] = {
2155 { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
2156 PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
2157 PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
2158 },
2159 { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
2160 PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
2161 PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
2162 },
2163 { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
2164 PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
2165 PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
2166 },
2167 { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
2168 PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
2169 PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
2170 },
2171 { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
2172 PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
2173 PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
2174 },
2175 { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
2176 PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
2177 PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
2178 },
2179 { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
2180 PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
2181 PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
2182 },
2183 { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
2184 PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
2185 PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
2186 },
2187 { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
2188 PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
2189 PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
2190 },
2191 { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
2192 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
2193 PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
2194 },
2195 { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
2196 PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
2197 PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
2198 },
2199 { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
2200 0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
2201 PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
2202 },
2203 { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
2204 PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
2205 PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
2206 },
2207 { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
2208 0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
2209 PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
2210 },
2211 { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
2212 PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
2213 PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
2214 },
2215 { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
2216 PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
2217 PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
2218 },
2219 { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
2220 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
2221 PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
2222 },
2223 { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
2224 PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
2225 PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
2226 },
2227 { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
2228 PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
2229 PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
2230 },
2231 { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
2232 PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
2233 PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
2234 },
2235 { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
2236 PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
2237 PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
2238 },
2239 { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
2240 PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
2241 PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
2242 },
2243 { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
2244 PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
2245 PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
2246 },
2247 { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
2248 PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
2249 PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
2250 },
2251 { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
2252 PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
2253 PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
2254 },
2255 { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
2256 PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
2257 PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
2258 },
2259 { },
2260};
2261
2262static struct pinmux_info sh7757_pinmux_info = {
2263 .name = "sh7757_pfc",
2264 .reserved_id = PINMUX_RESERVED,
2265 .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
2266 .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
2267 .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
2268 .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
2269 .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
2270 .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
2271
2272 .first_gpio = GPIO_PTA0,
2273 .last_gpio = GPIO_FN_ON_DQ0,
2274
2275 .gpios = pinmux_gpios,
2276 .cfg_regs = pinmux_config_regs,
2277 .data_regs = pinmux_data_regs,
2278
2279 .gpio_data = pinmux_data,
2280 .gpio_data_size = ARRAY_SIZE(pinmux_data),
2281};
2282
2283static int __init plat_pinmux_setup(void)
2284{
2285 return register_pinmux(&sh7757_pinmux_info);
2286}
2287arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
new file mode 100644
index 000000000000..aaa5338abbff
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
@@ -0,0 +1,587 @@
1/*
2 * SH-X3 prototype CPU pinmux
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/gpio.h>
13#include <cpu/shx3.h>
14
15enum {
16 PINMUX_RESERVED = 0,
17
18 PINMUX_DATA_BEGIN,
19 PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
20 PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
21 PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
22 PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
23 PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
24 PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
25 PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
26 PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
27 PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
28 PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
29 PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
30 PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
31 PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
32 PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
33
34 PH5_DATA, PH4_DATA,
35 PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
36 PINMUX_DATA_END,
37
38 PINMUX_INPUT_BEGIN,
39 PA7_IN, PA6_IN, PA5_IN, PA4_IN,
40 PA3_IN, PA2_IN, PA1_IN, PA0_IN,
41 PB7_IN, PB6_IN, PB5_IN, PB4_IN,
42 PB3_IN, PB2_IN, PB1_IN, PB0_IN,
43 PC7_IN, PC6_IN, PC5_IN, PC4_IN,
44 PC3_IN, PC2_IN, PC1_IN, PC0_IN,
45 PD7_IN, PD6_IN, PD5_IN, PD4_IN,
46 PD3_IN, PD2_IN, PD1_IN, PD0_IN,
47 PE7_IN, PE6_IN, PE5_IN, PE4_IN,
48 PE3_IN, PE2_IN, PE1_IN, PE0_IN,
49 PF7_IN, PF6_IN, PF5_IN, PF4_IN,
50 PF3_IN, PF2_IN, PF1_IN, PF0_IN,
51 PG7_IN, PG6_IN, PG5_IN, PG4_IN,
52 PG3_IN, PG2_IN, PG1_IN, PG0_IN,
53
54 PH5_IN, PH4_IN,
55 PH3_IN, PH2_IN, PH1_IN, PH0_IN,
56 PINMUX_INPUT_END,
57
58 PINMUX_INPUT_PULLUP_BEGIN,
59 PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
60 PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
61 PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
62 PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
63 PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
64 PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
65 PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
66 PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
67 PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
68 PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
69 PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
70 PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
71 PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
72 PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
73
74 PH5_IN_PU, PH4_IN_PU,
75 PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
76 PINMUX_INPUT_PULLUP_END,
77
78 PINMUX_OUTPUT_BEGIN,
79 PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
80 PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
81 PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
82 PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
83 PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
84 PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
85 PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
86 PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
87 PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
88 PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
89 PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
90 PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
91 PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
92 PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
93
94 PH5_OUT, PH4_OUT,
95 PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
96 PINMUX_OUTPUT_END,
97
98 PINMUX_FUNCTION_BEGIN,
99 PA7_FN, PA6_FN, PA5_FN, PA4_FN,
100 PA3_FN, PA2_FN, PA1_FN, PA0_FN,
101 PB7_FN, PB6_FN, PB5_FN, PB4_FN,
102 PB3_FN, PB2_FN, PB1_FN, PB0_FN,
103 PC7_FN, PC6_FN, PC5_FN, PC4_FN,
104 PC3_FN, PC2_FN, PC1_FN, PC0_FN,
105 PD7_FN, PD6_FN, PD5_FN, PD4_FN,
106 PD3_FN, PD2_FN, PD1_FN, PD0_FN,
107 PE7_FN, PE6_FN, PE5_FN, PE4_FN,
108 PE3_FN, PE2_FN, PE1_FN, PE0_FN,
109 PF7_FN, PF6_FN, PF5_FN, PF4_FN,
110 PF3_FN, PF2_FN, PF1_FN, PF0_FN,
111 PG7_FN, PG6_FN, PG5_FN, PG4_FN,
112 PG3_FN, PG2_FN, PG1_FN, PG0_FN,
113
114 PH5_FN, PH4_FN,
115 PH3_FN, PH2_FN, PH1_FN, PH0_FN,
116 PINMUX_FUNCTION_END,
117
118 PINMUX_MARK_BEGIN,
119
120 D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK,
121 D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK,
122 D19_MARK, D18_MARK, D17_MARK, D16_MARK,
123
124 BACK_MARK, BREQ_MARK,
125 WE3_MARK, WE2_MARK,
126 CS6_MARK, CS5_MARK, CS4_MARK,
127 CLKOUTENB_MARK,
128
129 DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK,
130 DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK,
131
132 IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
133
134 DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK,
135
136 SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK,
137 IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK,
138 TXD3_MARK, TXD2_MARK, TXD1_MARK, TXD0_MARK,
139 RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK,
140
141 CE2B_MARK, CE2A_MARK, IOIS16_MARK,
142 STATUS1_MARK, STATUS0_MARK,
143
144 IRQOUT_MARK,
145
146 PINMUX_MARK_END,
147};
148
149static pinmux_enum_t shx3_pinmux_data[] = {
150
151 /* PA GPIO */
152 PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
153 PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
154 PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
155 PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
156 PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
157 PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
158 PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
159 PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
160
161 /* PB GPIO */
162 PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
163 PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
164 PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
165 PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
166 PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
167 PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
168 PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
169 PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
170
171 /* PC GPIO */
172 PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
173 PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
174 PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
175 PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
176 PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
177 PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
178 PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
179 PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
180
181 /* PD GPIO */
182 PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
183 PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
184 PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
185 PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
186 PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
187 PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
188 PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
189 PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
190
191 /* PE GPIO */
192 PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
193 PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
194 PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
195 PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
196 PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
197 PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
198 PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
199 PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
200
201 /* PF GPIO */
202 PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
203 PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
204 PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
205 PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
206 PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
207 PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
208 PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
209 PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
210
211 /* PG GPIO */
212 PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
213 PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
214 PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
215 PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
216 PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
217 PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
218 PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
219 PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
220
221 /* PH GPIO */
222 PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
223 PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
224 PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
225 PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
226 PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
227 PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
228
229 /* PA FN */
230 PINMUX_DATA(D31_MARK, PA7_FN),
231 PINMUX_DATA(D30_MARK, PA6_FN),
232 PINMUX_DATA(D29_MARK, PA5_FN),
233 PINMUX_DATA(D28_MARK, PA4_FN),
234 PINMUX_DATA(D27_MARK, PA3_FN),
235 PINMUX_DATA(D26_MARK, PA2_FN),
236 PINMUX_DATA(D25_MARK, PA1_FN),
237 PINMUX_DATA(D24_MARK, PA0_FN),
238
239 /* PB FN */
240 PINMUX_DATA(D23_MARK, PB7_FN),
241 PINMUX_DATA(D22_MARK, PB6_FN),
242 PINMUX_DATA(D21_MARK, PB5_FN),
243 PINMUX_DATA(D20_MARK, PB4_FN),
244 PINMUX_DATA(D19_MARK, PB3_FN),
245 PINMUX_DATA(D18_MARK, PB2_FN),
246 PINMUX_DATA(D17_MARK, PB1_FN),
247 PINMUX_DATA(D16_MARK, PB0_FN),
248
249 /* PC FN */
250 PINMUX_DATA(BACK_MARK, PC7_FN),
251 PINMUX_DATA(BREQ_MARK, PC6_FN),
252 PINMUX_DATA(WE3_MARK, PC5_FN),
253 PINMUX_DATA(WE2_MARK, PC4_FN),
254 PINMUX_DATA(CS6_MARK, PC3_FN),
255 PINMUX_DATA(CS5_MARK, PC2_FN),
256 PINMUX_DATA(CS4_MARK, PC1_FN),
257 PINMUX_DATA(CLKOUTENB_MARK, PC0_FN),
258
259 /* PD FN */
260 PINMUX_DATA(DACK3_MARK, PD7_FN),
261 PINMUX_DATA(DACK2_MARK, PD6_FN),
262 PINMUX_DATA(DACK1_MARK, PD5_FN),
263 PINMUX_DATA(DACK0_MARK, PD4_FN),
264 PINMUX_DATA(DREQ3_MARK, PD3_FN),
265 PINMUX_DATA(DREQ2_MARK, PD2_FN),
266 PINMUX_DATA(DREQ1_MARK, PD1_FN),
267 PINMUX_DATA(DREQ0_MARK, PD0_FN),
268
269 /* PE FN */
270 PINMUX_DATA(IRQ3_MARK, PE7_FN),
271 PINMUX_DATA(IRQ2_MARK, PE6_FN),
272 PINMUX_DATA(IRQ1_MARK, PE5_FN),
273 PINMUX_DATA(IRQ0_MARK, PE4_FN),
274 PINMUX_DATA(DRAK3_MARK, PE3_FN),
275 PINMUX_DATA(DRAK2_MARK, PE2_FN),
276 PINMUX_DATA(DRAK1_MARK, PE1_FN),
277 PINMUX_DATA(DRAK0_MARK, PE0_FN),
278
279 /* PF FN */
280 PINMUX_DATA(SCK3_MARK, PF7_FN),
281 PINMUX_DATA(SCK2_MARK, PF6_FN),
282 PINMUX_DATA(SCK1_MARK, PF5_FN),
283 PINMUX_DATA(SCK0_MARK, PF4_FN),
284 PINMUX_DATA(IRL3_MARK, PF3_FN),
285 PINMUX_DATA(IRL2_MARK, PF2_FN),
286 PINMUX_DATA(IRL1_MARK, PF1_FN),
287 PINMUX_DATA(IRL0_MARK, PF0_FN),
288
289 /* PG FN */
290 PINMUX_DATA(TXD3_MARK, PG7_FN),
291 PINMUX_DATA(TXD2_MARK, PG6_FN),
292 PINMUX_DATA(TXD1_MARK, PG5_FN),
293 PINMUX_DATA(TXD0_MARK, PG4_FN),
294 PINMUX_DATA(RXD3_MARK, PG3_FN),
295 PINMUX_DATA(RXD2_MARK, PG2_FN),
296 PINMUX_DATA(RXD1_MARK, PG1_FN),
297 PINMUX_DATA(RXD0_MARK, PG0_FN),
298
299 /* PH FN */
300 PINMUX_DATA(CE2B_MARK, PH5_FN),
301 PINMUX_DATA(CE2A_MARK, PH4_FN),
302 PINMUX_DATA(IOIS16_MARK, PH3_FN),
303 PINMUX_DATA(STATUS1_MARK, PH2_FN),
304 PINMUX_DATA(STATUS0_MARK, PH1_FN),
305 PINMUX_DATA(IRQOUT_MARK, PH0_FN),
306};
307
308static struct pinmux_gpio shx3_pinmux_gpios[] = {
309 /* PA */
310 PINMUX_GPIO(GPIO_PA7, PA7_DATA),
311 PINMUX_GPIO(GPIO_PA6, PA6_DATA),
312 PINMUX_GPIO(GPIO_PA5, PA5_DATA),
313 PINMUX_GPIO(GPIO_PA4, PA4_DATA),
314 PINMUX_GPIO(GPIO_PA3, PA3_DATA),
315 PINMUX_GPIO(GPIO_PA2, PA2_DATA),
316 PINMUX_GPIO(GPIO_PA1, PA1_DATA),
317 PINMUX_GPIO(GPIO_PA0, PA0_DATA),
318
319 /* PB */
320 PINMUX_GPIO(GPIO_PB7, PB7_DATA),
321 PINMUX_GPIO(GPIO_PB6, PB6_DATA),
322 PINMUX_GPIO(GPIO_PB5, PB5_DATA),
323 PINMUX_GPIO(GPIO_PB4, PB4_DATA),
324 PINMUX_GPIO(GPIO_PB3, PB3_DATA),
325 PINMUX_GPIO(GPIO_PB2, PB2_DATA),
326 PINMUX_GPIO(GPIO_PB1, PB1_DATA),
327 PINMUX_GPIO(GPIO_PB0, PB0_DATA),
328
329 /* PC */
330 PINMUX_GPIO(GPIO_PC7, PC7_DATA),
331 PINMUX_GPIO(GPIO_PC6, PC6_DATA),
332 PINMUX_GPIO(GPIO_PC5, PC5_DATA),
333 PINMUX_GPIO(GPIO_PC4, PC4_DATA),
334 PINMUX_GPIO(GPIO_PC3, PC3_DATA),
335 PINMUX_GPIO(GPIO_PC2, PC2_DATA),
336 PINMUX_GPIO(GPIO_PC1, PC1_DATA),
337 PINMUX_GPIO(GPIO_PC0, PC0_DATA),
338
339 /* PD */
340 PINMUX_GPIO(GPIO_PD7, PD7_DATA),
341 PINMUX_GPIO(GPIO_PD6, PD6_DATA),
342 PINMUX_GPIO(GPIO_PD5, PD5_DATA),
343 PINMUX_GPIO(GPIO_PD4, PD4_DATA),
344 PINMUX_GPIO(GPIO_PD3, PD3_DATA),
345 PINMUX_GPIO(GPIO_PD2, PD2_DATA),
346 PINMUX_GPIO(GPIO_PD1, PD1_DATA),
347 PINMUX_GPIO(GPIO_PD0, PD0_DATA),
348
349 /* PE */
350 PINMUX_GPIO(GPIO_PE7, PE7_DATA),
351 PINMUX_GPIO(GPIO_PE6, PE6_DATA),
352 PINMUX_GPIO(GPIO_PE5, PE5_DATA),
353 PINMUX_GPIO(GPIO_PE4, PE4_DATA),
354 PINMUX_GPIO(GPIO_PE3, PE3_DATA),
355 PINMUX_GPIO(GPIO_PE2, PE2_DATA),
356 PINMUX_GPIO(GPIO_PE1, PE1_DATA),
357 PINMUX_GPIO(GPIO_PE0, PE0_DATA),
358
359 /* PF */
360 PINMUX_GPIO(GPIO_PF7, PF7_DATA),
361 PINMUX_GPIO(GPIO_PF6, PF6_DATA),
362 PINMUX_GPIO(GPIO_PF5, PF5_DATA),
363 PINMUX_GPIO(GPIO_PF4, PF4_DATA),
364 PINMUX_GPIO(GPIO_PF3, PF3_DATA),
365 PINMUX_GPIO(GPIO_PF2, PF2_DATA),
366 PINMUX_GPIO(GPIO_PF1, PF1_DATA),
367 PINMUX_GPIO(GPIO_PF0, PF0_DATA),
368
369 /* PG */
370 PINMUX_GPIO(GPIO_PG7, PG7_DATA),
371 PINMUX_GPIO(GPIO_PG6, PG6_DATA),
372 PINMUX_GPIO(GPIO_PG5, PG5_DATA),
373 PINMUX_GPIO(GPIO_PG4, PG4_DATA),
374 PINMUX_GPIO(GPIO_PG3, PG3_DATA),
375 PINMUX_GPIO(GPIO_PG2, PG2_DATA),
376 PINMUX_GPIO(GPIO_PG1, PG1_DATA),
377 PINMUX_GPIO(GPIO_PG0, PG0_DATA),
378
379 /* PH */
380 PINMUX_GPIO(GPIO_PH5, PH5_DATA),
381 PINMUX_GPIO(GPIO_PH4, PH4_DATA),
382 PINMUX_GPIO(GPIO_PH3, PH3_DATA),
383 PINMUX_GPIO(GPIO_PH2, PH2_DATA),
384 PINMUX_GPIO(GPIO_PH1, PH1_DATA),
385 PINMUX_GPIO(GPIO_PH0, PH0_DATA),
386
387 /* FN */
388 PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
389 PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
390 PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
391 PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
392 PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
393 PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
394 PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
395 PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
396 PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
397 PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
398 PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
399 PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
400 PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
401 PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
402 PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
403 PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
404 PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
405 PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
406 PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
407 PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
408 PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
409 PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
410 PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
411 PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
412 PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
413 PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
414 PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
415 PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
416 PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
417 PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
418 PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
419 PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
420 PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
421 PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
422 PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
423 PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
424 PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
425 PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
426 PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
427 PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
428 PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
429 PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
430 PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
431 PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
432 PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
433 PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
434 PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
435 PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
436 PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
437 PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
438 PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
439 PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
440 PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
441 PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
442 PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
443 PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
444 PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
445 PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
446 PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
447 PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
448 PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
449 PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
450};
451
452static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
453 { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
454 PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
455 PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
456 PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
457 PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
458 PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
459 PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
460 PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
461 PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
462 PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
463 PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
464 PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
465 PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
466 PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
467 PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
468 PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
469 PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
470 },
471 { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
472 PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
473 PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
474 PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
475 PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
476 PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
477 PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
478 PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
479 PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
480 PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
481 PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
482 PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
483 PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
484 PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
485 PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
486 PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
487 PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
488 },
489 { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
490 PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
491 PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
492 PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
493 PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
494 PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
495 PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
496 PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
497 PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
498 PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
499 PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
500 PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
501 PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
502 PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
503 PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
504 PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
505 PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
506 },
507 { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
508 PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
509 PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
510 PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
511 PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
512 PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
513 PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
514 PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
515 PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
516 0, 0, 0, 0,
517 0, 0, 0, 0,
518 PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
519 PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
520 PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
521 PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
522 PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
523 PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
524 },
525 { },
526};
527
528static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
529 { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
530 0, 0, 0, 0, 0, 0, 0, 0,
531 PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
532 PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
533 0, 0, 0, 0, 0, 0, 0, 0,
534 PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
535 PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
536 },
537 { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
538 0, 0, 0, 0, 0, 0, 0, 0,
539 PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
540 PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
541 0, 0, 0, 0, 0, 0, 0, 0,
542 PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
543 PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
544 },
545 { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
546 0, 0, 0, 0, 0, 0, 0, 0,
547 PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
548 PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
549 0, 0, 0, 0, 0, 0, 0, 0,
550 PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
551 PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
552 },
553 { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
554 0, 0, 0, 0, 0, 0, 0, 0,
555 PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
556 PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
557 0, 0, 0, 0, 0, 0, 0, 0,
558 0, 0, PH5_DATA, PH4_DATA,
559 PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
560 },
561 { },
562};
563
564static struct pinmux_info shx3_pinmux_info = {
565 .name = "shx3_pfc",
566 .reserved_id = PINMUX_RESERVED,
567 .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
568 .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
569 .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
570 PINMUX_INPUT_PULLUP_END },
571 .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
572 .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
573 .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
574 .first_gpio = GPIO_PA7,
575 .last_gpio = GPIO_FN_IRQOUT,
576 .gpios = shx3_pinmux_gpios,
577 .gpio_data = shx3_pinmux_data,
578 .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
579 .cfg_regs = shx3_pinmux_config_regs,
580 .data_regs = shx3_pinmux_data_regs,
581};
582
583static int __init shx3_pinmux_setup(void)
584{
585 return register_pinmux(&shx3_pinmux_info);
586}
587arch_initcall(shx3_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index fbae06b1c98d..0d1be5d1fb86 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -15,6 +15,75 @@
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <asm/clock.h> 16#include <asm/clock.h>
17 17
18/* Serial */
19static struct plat_sci_port scif0_platform_data = {
20 .mapbase = 0xffe00000,
21 .flags = UPF_BOOT_AUTOCONF,
22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
23 .scbrr_algo_id = SCBRR_ALGO_2,
24 .type = PORT_SCIF,
25 .irqs = { 80, 80, 80, 80 },
26};
27
28static struct platform_device scif0_device = {
29 .name = "sh-sci",
30 .id = 0,
31 .dev = {
32 .platform_data = &scif0_platform_data,
33 },
34};
35
36static struct plat_sci_port scif1_platform_data = {
37 .mapbase = 0xffe10000,
38 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
40 .scbrr_algo_id = SCBRR_ALGO_2,
41 .type = PORT_SCIF,
42 .irqs = { 81, 81, 81, 81 },
43};
44
45static struct platform_device scif1_device = {
46 .name = "sh-sci",
47 .id = 1,
48 .dev = {
49 .platform_data = &scif1_platform_data,
50 },
51};
52
53static struct plat_sci_port scif2_platform_data = {
54 .mapbase = 0xffe20000,
55 .flags = UPF_BOOT_AUTOCONF,
56 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
57 .scbrr_algo_id = SCBRR_ALGO_2,
58 .type = PORT_SCIF,
59 .irqs = { 82, 82, 82, 82 },
60};
61
62static struct platform_device scif2_device = {
63 .name = "sh-sci",
64 .id = 2,
65 .dev = {
66 .platform_data = &scif2_platform_data,
67 },
68};
69
70static struct plat_sci_port scif3_platform_data = {
71 .mapbase = 0xffe30000,
72 .flags = UPF_BOOT_AUTOCONF,
73 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
74 .scbrr_algo_id = SCBRR_ALGO_2,
75 .type = PORT_SCIF,
76 .irqs = { 83, 83, 83, 83 },
77};
78
79static struct platform_device scif3_device = {
80 .name = "sh-sci",
81 .id = 3,
82 .dev = {
83 .platform_data = &scif3_platform_data,
84 },
85};
86
18static struct resource iic0_resources[] = { 87static struct resource iic0_resources[] = {
19 [0] = { 88 [0] = {
20 .name = "IIC0", 89 .name = "IIC0",
@@ -142,17 +211,14 @@ static struct platform_device jpu_device = {
142}; 211};
143 212
144static struct sh_timer_config cmt_platform_data = { 213static struct sh_timer_config cmt_platform_data = {
145 .name = "CMT",
146 .channel_offset = 0x60, 214 .channel_offset = 0x60,
147 .timer_bit = 5, 215 .timer_bit = 5,
148 .clk = "cmt0",
149 .clockevent_rating = 125, 216 .clockevent_rating = 125,
150 .clocksource_rating = 200, 217 .clocksource_rating = 200,
151}; 218};
152 219
153static struct resource cmt_resources[] = { 220static struct resource cmt_resources[] = {
154 [0] = { 221 [0] = {
155 .name = "CMT",
156 .start = 0x044a0060, 222 .start = 0x044a0060,
157 .end = 0x044a006b, 223 .end = 0x044a006b,
158 .flags = IORESOURCE_MEM, 224 .flags = IORESOURCE_MEM,
@@ -174,16 +240,13 @@ static struct platform_device cmt_device = {
174}; 240};
175 241
176static struct sh_timer_config tmu0_platform_data = { 242static struct sh_timer_config tmu0_platform_data = {
177 .name = "TMU0",
178 .channel_offset = 0x04, 243 .channel_offset = 0x04,
179 .timer_bit = 0, 244 .timer_bit = 0,
180 .clk = "tmu0",
181 .clockevent_rating = 200, 245 .clockevent_rating = 200,
182}; 246};
183 247
184static struct resource tmu0_resources[] = { 248static struct resource tmu0_resources[] = {
185 [0] = { 249 [0] = {
186 .name = "TMU0",
187 .start = 0xffd80008, 250 .start = 0xffd80008,
188 .end = 0xffd80013, 251 .end = 0xffd80013,
189 .flags = IORESOURCE_MEM, 252 .flags = IORESOURCE_MEM,
@@ -205,16 +268,13 @@ static struct platform_device tmu0_device = {
205}; 268};
206 269
207static struct sh_timer_config tmu1_platform_data = { 270static struct sh_timer_config tmu1_platform_data = {
208 .name = "TMU1",
209 .channel_offset = 0x10, 271 .channel_offset = 0x10,
210 .timer_bit = 1, 272 .timer_bit = 1,
211 .clk = "tmu0",
212 .clocksource_rating = 200, 273 .clocksource_rating = 200,
213}; 274};
214 275
215static struct resource tmu1_resources[] = { 276static struct resource tmu1_resources[] = {
216 [0] = { 277 [0] = {
217 .name = "TMU1",
218 .start = 0xffd80014, 278 .start = 0xffd80014,
219 .end = 0xffd8001f, 279 .end = 0xffd8001f,
220 .flags = IORESOURCE_MEM, 280 .flags = IORESOURCE_MEM,
@@ -236,15 +296,12 @@ static struct platform_device tmu1_device = {
236}; 296};
237 297
238static struct sh_timer_config tmu2_platform_data = { 298static struct sh_timer_config tmu2_platform_data = {
239 .name = "TMU2",
240 .channel_offset = 0x1c, 299 .channel_offset = 0x1c,
241 .timer_bit = 2, 300 .timer_bit = 2,
242 .clk = "tmu0",
243}; 301};
244 302
245static struct resource tmu2_resources[] = { 303static struct resource tmu2_resources[] = {
246 [0] = { 304 [0] = {
247 .name = "TMU2",
248 .start = 0xffd80020, 305 .start = 0xffd80020,
249 .end = 0xffd8002b, 306 .end = 0xffd8002b,
250 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
@@ -265,60 +322,17 @@ static struct platform_device tmu2_device = {
265 .num_resources = ARRAY_SIZE(tmu2_resources), 322 .num_resources = ARRAY_SIZE(tmu2_resources),
266}; 323};
267 324
268static struct plat_sci_port sci_platform_data[] = {
269 {
270 .mapbase = 0xffe00000,
271 .flags = UPF_BOOT_AUTOCONF,
272 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
273 .scbrr_algo_id = SCBRR_ALGO_2,
274 .type = PORT_SCIF,
275 .irqs = { 80, 80, 80, 80 },
276 .clk = "scif0",
277 }, {
278 .mapbase = 0xffe10000,
279 .flags = UPF_BOOT_AUTOCONF,
280 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
281 .scbrr_algo_id = SCBRR_ALGO_2,
282 .type = PORT_SCIF,
283 .irqs = { 81, 81, 81, 81 },
284 .clk = "scif1",
285 }, {
286 .mapbase = 0xffe20000,
287 .flags = UPF_BOOT_AUTOCONF,
288 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
289 .scbrr_algo_id = SCBRR_ALGO_2,
290 .type = PORT_SCIF,
291 .irqs = { 82, 82, 82, 82 },
292 .clk = "scif2",
293 }, {
294 .mapbase = 0xffe30000,
295 .flags = UPF_BOOT_AUTOCONF,
296 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
297 .scbrr_algo_id = SCBRR_ALGO_2,
298 .type = PORT_SCIF,
299 .irqs = { 83, 83, 83, 83 },
300 .clk = "scif3",
301 }, {
302 .flags = 0,
303 }
304};
305
306static struct platform_device sci_device = {
307 .name = "sh-sci",
308 .id = -1,
309 .dev = {
310 .platform_data = sci_platform_data,
311 },
312};
313
314static struct platform_device *sh7343_devices[] __initdata = { 325static struct platform_device *sh7343_devices[] __initdata = {
326 &scif0_device,
327 &scif1_device,
328 &scif2_device,
329 &scif3_device,
315 &cmt_device, 330 &cmt_device,
316 &tmu0_device, 331 &tmu0_device,
317 &tmu1_device, 332 &tmu1_device,
318 &tmu2_device, 333 &tmu2_device,
319 &iic0_device, 334 &iic0_device,
320 &iic1_device, 335 &iic1_device,
321 &sci_device,
322 &vpu_device, 336 &vpu_device,
323 &veu_device, 337 &veu_device,
324 &jpu_device, 338 &jpu_device,
@@ -333,9 +347,13 @@ static int __init sh7343_devices_setup(void)
333 return platform_add_devices(sh7343_devices, 347 return platform_add_devices(sh7343_devices,
334 ARRAY_SIZE(sh7343_devices)); 348 ARRAY_SIZE(sh7343_devices));
335} 349}
336__initcall(sh7343_devices_setup); 350arch_initcall(sh7343_devices_setup);
337 351
338static struct platform_device *sh7343_early_devices[] __initdata = { 352static struct platform_device *sh7343_early_devices[] __initdata = {
353 &scif0_device,
354 &scif1_device,
355 &scif2_device,
356 &scif3_device,
339 &cmt_device, 357 &cmt_device,
340 &tmu0_device, 358 &tmu0_device,
341 &tmu1_device, 359 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index d4ee429032b1..f96b7eeb21eb 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -18,6 +18,23 @@
18#include <linux/usb/r8a66597.h> 18#include <linux/usb/r8a66597.h>
19#include <asm/clock.h> 19#include <asm/clock.h>
20 20
21static struct plat_sci_port scif0_platform_data = {
22 .mapbase = 0xffe00000,
23 .flags = UPF_BOOT_AUTOCONF,
24 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
25 .scbrr_algo_id = SCBRR_ALGO_2,
26 .type = PORT_SCIF,
27 .irqs = { 80, 80, 80, 80 },
28};
29
30static struct platform_device scif0_device = {
31 .name = "sh-sci",
32 .id = 0,
33 .dev = {
34 .platform_data = &scif0_platform_data,
35 },
36};
37
21static struct resource iic_resources[] = { 38static struct resource iic_resources[] = {
22 [0] = { 39 [0] = {
23 .name = "IIC", 40 .name = "IIC",
@@ -40,7 +57,7 @@ static struct platform_device iic_device = {
40}; 57};
41 58
42static struct r8a66597_platdata r8a66597_data = { 59static struct r8a66597_platdata r8a66597_data = {
43	/* This sets all members to zero */	60	.on_chip = 1,
44}; 61};
45 62
46static struct resource usb_host_resources[] = { 63static struct resource usb_host_resources[] = {
@@ -153,17 +170,14 @@ static struct platform_device veu1_device = {
153}; 170};
154 171
155static struct sh_timer_config cmt_platform_data = { 172static struct sh_timer_config cmt_platform_data = {
156 .name = "CMT",
157 .channel_offset = 0x60, 173 .channel_offset = 0x60,
158 .timer_bit = 5, 174 .timer_bit = 5,
159 .clk = "cmt0",
160 .clockevent_rating = 125, 175 .clockevent_rating = 125,
161 .clocksource_rating = 200, 176 .clocksource_rating = 200,
162}; 177};
163 178
164static struct resource cmt_resources[] = { 179static struct resource cmt_resources[] = {
165 [0] = { 180 [0] = {
166 .name = "CMT",
167 .start = 0x044a0060, 181 .start = 0x044a0060,
168 .end = 0x044a006b, 182 .end = 0x044a006b,
169 .flags = IORESOURCE_MEM, 183 .flags = IORESOURCE_MEM,
@@ -185,16 +199,13 @@ static struct platform_device cmt_device = {
185}; 199};
186 200
187static struct sh_timer_config tmu0_platform_data = { 201static struct sh_timer_config tmu0_platform_data = {
188 .name = "TMU0",
189 .channel_offset = 0x04, 202 .channel_offset = 0x04,
190 .timer_bit = 0, 203 .timer_bit = 0,
191 .clk = "tmu0",
192 .clockevent_rating = 200, 204 .clockevent_rating = 200,
193}; 205};
194 206
195static struct resource tmu0_resources[] = { 207static struct resource tmu0_resources[] = {
196 [0] = { 208 [0] = {
197 .name = "TMU0",
198 .start = 0xffd80008, 209 .start = 0xffd80008,
199 .end = 0xffd80013, 210 .end = 0xffd80013,
200 .flags = IORESOURCE_MEM, 211 .flags = IORESOURCE_MEM,
@@ -216,16 +227,13 @@ static struct platform_device tmu0_device = {
216}; 227};
217 228
218static struct sh_timer_config tmu1_platform_data = { 229static struct sh_timer_config tmu1_platform_data = {
219 .name = "TMU1",
220 .channel_offset = 0x10, 230 .channel_offset = 0x10,
221 .timer_bit = 1, 231 .timer_bit = 1,
222 .clk = "tmu0",
223 .clocksource_rating = 200, 232 .clocksource_rating = 200,
224}; 233};
225 234
226static struct resource tmu1_resources[] = { 235static struct resource tmu1_resources[] = {
227 [0] = { 236 [0] = {
228 .name = "TMU1",
229 .start = 0xffd80014, 237 .start = 0xffd80014,
230 .end = 0xffd8001f, 238 .end = 0xffd8001f,
231 .flags = IORESOURCE_MEM, 239 .flags = IORESOURCE_MEM,
@@ -247,15 +255,12 @@ static struct platform_device tmu1_device = {
247}; 255};
248 256
249static struct sh_timer_config tmu2_platform_data = { 257static struct sh_timer_config tmu2_platform_data = {
250 .name = "TMU2",
251 .channel_offset = 0x1c, 258 .channel_offset = 0x1c,
252 .timer_bit = 2, 259 .timer_bit = 2,
253 .clk = "tmu0",
254}; 260};
255 261
256static struct resource tmu2_resources[] = { 262static struct resource tmu2_resources[] = {
257 [0] = { 263 [0] = {
258 .name = "TMU2",
259 .start = 0xffd80020, 264 .start = 0xffd80020,
260 .end = 0xffd8002b, 265 .end = 0xffd8002b,
261 .flags = IORESOURCE_MEM, 266 .flags = IORESOURCE_MEM,
@@ -276,35 +281,13 @@ static struct platform_device tmu2_device = {
276 .num_resources = ARRAY_SIZE(tmu2_resources), 281 .num_resources = ARRAY_SIZE(tmu2_resources),
277}; 282};
278 283
279static struct plat_sci_port sci_platform_data[] = {
280 {
281 .mapbase = 0xffe00000,
282 .flags = UPF_BOOT_AUTOCONF,
283 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
284 .scbrr_algo_id = SCBRR_ALGO_2,
285 .type = PORT_SCIF,
286 .irqs = { 80, 80, 80, 80 },
287 .clk = "scif0",
288 }, {
289 .flags = 0,
290 }
291};
292
293static struct platform_device sci_device = {
294 .name = "sh-sci",
295 .id = -1,
296 .dev = {
297 .platform_data = sci_platform_data,
298 },
299};
300
301static struct platform_device *sh7366_devices[] __initdata = { 284static struct platform_device *sh7366_devices[] __initdata = {
285 &scif0_device,
302 &cmt_device, 286 &cmt_device,
303 &tmu0_device, 287 &tmu0_device,
304 &tmu1_device, 288 &tmu1_device,
305 &tmu2_device, 289 &tmu2_device,
306 &iic_device, 290 &iic_device,
307 &sci_device,
308 &usb_host_device, 291 &usb_host_device,
309 &vpu_device, 292 &vpu_device,
310 &veu0_device, 293 &veu0_device,
@@ -320,9 +303,10 @@ static int __init sh7366_devices_setup(void)
320 return platform_add_devices(sh7366_devices, 303 return platform_add_devices(sh7366_devices,
321 ARRAY_SIZE(sh7366_devices)); 304 ARRAY_SIZE(sh7366_devices));
322} 305}
323__initcall(sh7366_devices_setup); 306arch_initcall(sh7366_devices_setup);
324 307
325static struct platform_device *sh7366_early_devices[] __initdata = { 308static struct platform_device *sh7366_early_devices[] __initdata = {
309 &scif0_device,
326 &cmt_device, 310 &cmt_device,
327 &tmu0_device, 311 &tmu0_device,
328 &tmu1_device, 312 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index f7b0551bf104..73737d00e2e7 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -7,15 +7,227 @@
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10#include <linux/platform_device.h>
11#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/platform_device.h>
12#include <linux/serial.h> 13#include <linux/serial.h>
13#include <linux/serial_sci.h> 14#include <linux/serial_sci.h>
14#include <linux/mm.h>
15#include <linux/uio_driver.h>
16#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <linux/uio_driver.h>
17#include <linux/usb/m66592.h>
18
17#include <asm/clock.h> 19#include <asm/clock.h>
18#include <asm/mmzone.h> 20#include <asm/mmzone.h>
21#include <asm/siu.h>
22
23#include <cpu/dma-register.h>
24#include <cpu/sh7722.h>
25
26static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
27 {
28 .slave_id = SHDMA_SLAVE_SCIF0_TX,
29 .addr = 0xffe0000c,
30 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
31 .mid_rid = 0x21,
32 }, {
33 .slave_id = SHDMA_SLAVE_SCIF0_RX,
34 .addr = 0xffe00014,
35 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
36 .mid_rid = 0x22,
37 }, {
38 .slave_id = SHDMA_SLAVE_SCIF1_TX,
39 .addr = 0xffe1000c,
40 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
41 .mid_rid = 0x25,
42 }, {
43 .slave_id = SHDMA_SLAVE_SCIF1_RX,
44 .addr = 0xffe10014,
45 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
46 .mid_rid = 0x26,
47 }, {
48 .slave_id = SHDMA_SLAVE_SCIF2_TX,
49 .addr = 0xffe2000c,
50 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
51 .mid_rid = 0x29,
52 }, {
53 .slave_id = SHDMA_SLAVE_SCIF2_RX,
54 .addr = 0xffe20014,
55 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
56 .mid_rid = 0x2a,
57 }, {
58 .slave_id = SHDMA_SLAVE_SIUA_TX,
59 .addr = 0xa454c098,
60 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
61 .mid_rid = 0xb1,
62 }, {
63 .slave_id = SHDMA_SLAVE_SIUA_RX,
64 .addr = 0xa454c090,
65 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
66 .mid_rid = 0xb2,
67 }, {
68 .slave_id = SHDMA_SLAVE_SIUB_TX,
69 .addr = 0xa454c09c,
70 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
71 .mid_rid = 0xb5,
72 }, {
73 .slave_id = SHDMA_SLAVE_SIUB_RX,
74 .addr = 0xa454c094,
75 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
76 .mid_rid = 0xb6,
77 }, {
78 .slave_id = SHDMA_SLAVE_SDHI0_TX,
79 .addr = 0x04ce0030,
80 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
81 .mid_rid = 0xc1,
82 }, {
83 .slave_id = SHDMA_SLAVE_SDHI0_RX,
84 .addr = 0x04ce0030,
85 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
86 .mid_rid = 0xc2,
87 },
88};
89
90static const struct sh_dmae_channel sh7722_dmae_channels[] = {
91 {
92 .offset = 0,
93 .dmars = 0,
94 .dmars_bit = 0,
95 }, {
96 .offset = 0x10,
97 .dmars = 0,
98 .dmars_bit = 8,
99 }, {
100 .offset = 0x20,
101 .dmars = 4,
102 .dmars_bit = 0,
103 }, {
104 .offset = 0x30,
105 .dmars = 4,
106 .dmars_bit = 8,
107 }, {
108 .offset = 0x50,
109 .dmars = 8,
110 .dmars_bit = 0,
111 }, {
112 .offset = 0x60,
113 .dmars = 8,
114 .dmars_bit = 8,
115 }
116};
117
118static const unsigned int ts_shift[] = TS_SHIFT;
119
120static struct sh_dmae_pdata dma_platform_data = {
121 .slave = sh7722_dmae_slaves,
122 .slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
123 .channel = sh7722_dmae_channels,
124 .channel_num = ARRAY_SIZE(sh7722_dmae_channels),
125 .ts_low_shift = CHCR_TS_LOW_SHIFT,
126 .ts_low_mask = CHCR_TS_LOW_MASK,
127 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
128 .ts_high_mask = CHCR_TS_HIGH_MASK,
129 .ts_shift = ts_shift,
130 .ts_shift_num = ARRAY_SIZE(ts_shift),
131 .dmaor_init = DMAOR_INIT,
132};
133
134static struct resource sh7722_dmae_resources[] = {
135 [0] = {
136 /* Channel registers and DMAOR */
137 .start = 0xfe008020,
138 .end = 0xfe00808f,
139 .flags = IORESOURCE_MEM,
140 },
141 [1] = {
142 /* DMARSx */
143 .start = 0xfe009000,
144 .end = 0xfe00900b,
145 .flags = IORESOURCE_MEM,
146 },
147 {
148 /* DMA error IRQ */
149 .start = 78,
150 .end = 78,
151 .flags = IORESOURCE_IRQ,
152 },
153 {
154 /* IRQ for channels 0-3 */
155 .start = 48,
156 .end = 51,
157 .flags = IORESOURCE_IRQ,
158 },
159 {
160 /* IRQ for channels 4-5 */
161 .start = 76,
162 .end = 77,
163 .flags = IORESOURCE_IRQ,
164 },
165};
166
167struct platform_device dma_device = {
168 .name = "sh-dma-engine",
169 .id = -1,
170 .resource = sh7722_dmae_resources,
171 .num_resources = ARRAY_SIZE(sh7722_dmae_resources),
172 .dev = {
173 .platform_data = &dma_platform_data,
174 },
175 .archdata = {
176 .hwblk_id = HWBLK_DMAC,
177 },
178};
179
180/* Serial */
181static struct plat_sci_port scif0_platform_data = {
182 .mapbase = 0xffe00000,
183 .flags = UPF_BOOT_AUTOCONF,
184 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
185 .scbrr_algo_id = SCBRR_ALGO_2,
186 .type = PORT_SCIF,
187 .irqs = { 80, 80, 80, 80 },
188};
189
190static struct platform_device scif0_device = {
191 .name = "sh-sci",
192 .id = 0,
193 .dev = {
194 .platform_data = &scif0_platform_data,
195 },
196};
197
198static struct plat_sci_port scif1_platform_data = {
199 .mapbase = 0xffe10000,
200 .flags = UPF_BOOT_AUTOCONF,
201 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
202 .scbrr_algo_id = SCBRR_ALGO_2,
203 .type = PORT_SCIF,
204 .irqs = { 81, 81, 81, 81 },
205};
206
207static struct platform_device scif1_device = {
208 .name = "sh-sci",
209 .id = 1,
210 .dev = {
211 .platform_data = &scif1_platform_data,
212 },
213};
214
215static struct plat_sci_port scif2_platform_data = {
216 .mapbase = 0xffe20000,
217 .flags = UPF_BOOT_AUTOCONF,
218 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
219 .scbrr_algo_id = SCBRR_ALGO_2,
220 .type = PORT_SCIF,
221 .irqs = { 82, 82, 82, 82 },
222};
223
224static struct platform_device scif2_device = {
225 .name = "sh-sci",
226 .id = 2,
227 .dev = {
228 .platform_data = &scif2_platform_data,
229 },
230};
19 231
20static struct resource rtc_resources[] = { 232static struct resource rtc_resources[] = {
21 [0] = { 233 [0] = {
@@ -45,11 +257,18 @@ static struct platform_device rtc_device = {
45 .id = -1, 257 .id = -1,
46 .num_resources = ARRAY_SIZE(rtc_resources), 258 .num_resources = ARRAY_SIZE(rtc_resources),
47 .resource = rtc_resources, 259 .resource = rtc_resources,
260 .archdata = {
261 .hwblk_id = HWBLK_RTC,
262 },
263};
264
265static struct m66592_platdata usbf_platdata = {
266 .on_chip = 1,
48}; 267};
49 268
50static struct resource usbf_resources[] = { 269static struct resource usbf_resources[] = {
51 [0] = { 270 [0] = {
52 .name = "m66592_udc", 271 .name = "USBF",
53 .start = 0x04480000, 272 .start = 0x04480000,
54 .end = 0x044800FF, 273 .end = 0x044800FF,
55 .flags = IORESOURCE_MEM, 274 .flags = IORESOURCE_MEM,
@@ -67,9 +286,13 @@ static struct platform_device usbf_device = {
67 .dev = { 286 .dev = {
68 .dma_mask = NULL, 287 .dma_mask = NULL,
69 .coherent_dma_mask = 0xffffffff, 288 .coherent_dma_mask = 0xffffffff,
289 .platform_data = &usbf_platdata,
70 }, 290 },
71 .num_resources = ARRAY_SIZE(usbf_resources), 291 .num_resources = ARRAY_SIZE(usbf_resources),
72 .resource = usbf_resources, 292 .resource = usbf_resources,
293 .archdata = {
294 .hwblk_id = HWBLK_USBF,
295 },
73}; 296};
74 297
75static struct resource iic_resources[] = { 298static struct resource iic_resources[] = {
@@ -91,6 +314,9 @@ static struct platform_device iic_device = {
91 .id = 0, /* "i2c0" clock */ 314 .id = 0, /* "i2c0" clock */
92 .num_resources = ARRAY_SIZE(iic_resources), 315 .num_resources = ARRAY_SIZE(iic_resources),
93 .resource = iic_resources, 316 .resource = iic_resources,
317 .archdata = {
318 .hwblk_id = HWBLK_IIC,
319 },
94}; 320};
95 321
96static struct uio_info vpu_platform_data = { 322static struct uio_info vpu_platform_data = {
@@ -119,6 +345,9 @@ static struct platform_device vpu_device = {
119 }, 345 },
120 .resource = vpu_resources, 346 .resource = vpu_resources,
121 .num_resources = ARRAY_SIZE(vpu_resources), 347 .num_resources = ARRAY_SIZE(vpu_resources),
348 .archdata = {
349 .hwblk_id = HWBLK_VPU,
350 },
122}; 351};
123 352
124static struct uio_info veu_platform_data = { 353static struct uio_info veu_platform_data = {
@@ -147,6 +376,9 @@ static struct platform_device veu_device = {
147 }, 376 },
148 .resource = veu_resources, 377 .resource = veu_resources,
149 .num_resources = ARRAY_SIZE(veu_resources), 378 .num_resources = ARRAY_SIZE(veu_resources),
379 .archdata = {
380 .hwblk_id = HWBLK_VEU,
381 },
150}; 382};
151 383
152static struct uio_info jpu_platform_data = { 384static struct uio_info jpu_platform_data = {
@@ -175,20 +407,20 @@ static struct platform_device jpu_device = {
175 }, 407 },
176 .resource = jpu_resources, 408 .resource = jpu_resources,
177 .num_resources = ARRAY_SIZE(jpu_resources), 409 .num_resources = ARRAY_SIZE(jpu_resources),
410 .archdata = {
411 .hwblk_id = HWBLK_JPU,
412 },
178}; 413};
179 414
180static struct sh_timer_config cmt_platform_data = { 415static struct sh_timer_config cmt_platform_data = {
181 .name = "CMT",
182 .channel_offset = 0x60, 416 .channel_offset = 0x60,
183 .timer_bit = 5, 417 .timer_bit = 5,
184 .clk = "cmt0",
185 .clockevent_rating = 125, 418 .clockevent_rating = 125,
186 .clocksource_rating = 125, 419 .clocksource_rating = 125,
187}; 420};
188 421
189static struct resource cmt_resources[] = { 422static struct resource cmt_resources[] = {
190 [0] = { 423 [0] = {
191 .name = "CMT",
192 .start = 0x044a0060, 424 .start = 0x044a0060,
193 .end = 0x044a006b, 425 .end = 0x044a006b,
194 .flags = IORESOURCE_MEM, 426 .flags = IORESOURCE_MEM,
@@ -207,19 +439,19 @@ static struct platform_device cmt_device = {
207 }, 439 },
208 .resource = cmt_resources, 440 .resource = cmt_resources,
209 .num_resources = ARRAY_SIZE(cmt_resources), 441 .num_resources = ARRAY_SIZE(cmt_resources),
442 .archdata = {
443 .hwblk_id = HWBLK_CMT,
444 },
210}; 445};
211 446
212static struct sh_timer_config tmu0_platform_data = { 447static struct sh_timer_config tmu0_platform_data = {
213 .name = "TMU0",
214 .channel_offset = 0x04, 448 .channel_offset = 0x04,
215 .timer_bit = 0, 449 .timer_bit = 0,
216 .clk = "tmu0",
217 .clockevent_rating = 200, 450 .clockevent_rating = 200,
218}; 451};
219 452
220static struct resource tmu0_resources[] = { 453static struct resource tmu0_resources[] = {
221 [0] = { 454 [0] = {
222 .name = "TMU0",
223 .start = 0xffd80008, 455 .start = 0xffd80008,
224 .end = 0xffd80013, 456 .end = 0xffd80013,
225 .flags = IORESOURCE_MEM, 457 .flags = IORESOURCE_MEM,
@@ -238,19 +470,19 @@ static struct platform_device tmu0_device = {
238 }, 470 },
239 .resource = tmu0_resources, 471 .resource = tmu0_resources,
240 .num_resources = ARRAY_SIZE(tmu0_resources), 472 .num_resources = ARRAY_SIZE(tmu0_resources),
473 .archdata = {
474 .hwblk_id = HWBLK_TMU,
475 },
241}; 476};
242 477
243static struct sh_timer_config tmu1_platform_data = { 478static struct sh_timer_config tmu1_platform_data = {
244 .name = "TMU1",
245 .channel_offset = 0x10, 479 .channel_offset = 0x10,
246 .timer_bit = 1, 480 .timer_bit = 1,
247 .clk = "tmu0",
248 .clocksource_rating = 200, 481 .clocksource_rating = 200,
249}; 482};
250 483
251static struct resource tmu1_resources[] = { 484static struct resource tmu1_resources[] = {
252 [0] = { 485 [0] = {
253 .name = "TMU1",
254 .start = 0xffd80014, 486 .start = 0xffd80014,
255 .end = 0xffd8001f, 487 .end = 0xffd8001f,
256 .flags = IORESOURCE_MEM, 488 .flags = IORESOURCE_MEM,
@@ -269,18 +501,18 @@ static struct platform_device tmu1_device = {
269 }, 501 },
270 .resource = tmu1_resources, 502 .resource = tmu1_resources,
271 .num_resources = ARRAY_SIZE(tmu1_resources), 503 .num_resources = ARRAY_SIZE(tmu1_resources),
504 .archdata = {
505 .hwblk_id = HWBLK_TMU,
506 },
272}; 507};
273 508
274static struct sh_timer_config tmu2_platform_data = { 509static struct sh_timer_config tmu2_platform_data = {
275 .name = "TMU2",
276 .channel_offset = 0x1c, 510 .channel_offset = 0x1c,
277 .timer_bit = 2, 511 .timer_bit = 2,
278 .clk = "tmu0",
279}; 512};
280 513
281static struct resource tmu2_resources[] = { 514static struct resource tmu2_resources[] = {
282 [0] = { 515 [0] = {
283 .name = "TMU2",
284 .start = 0xffd80020, 516 .start = 0xffd80020,
285 .end = 0xffd8002b, 517 .end = 0xffd8002b,
286 .flags = IORESOURCE_MEM, 518 .flags = IORESOURCE_MEM,
@@ -299,47 +531,48 @@ static struct platform_device tmu2_device = {
299 }, 531 },
300 .resource = tmu2_resources, 532 .resource = tmu2_resources,
301 .num_resources = ARRAY_SIZE(tmu2_resources), 533 .num_resources = ARRAY_SIZE(tmu2_resources),
534 .archdata = {
535 .hwblk_id = HWBLK_TMU,
536 },
302}; 537};
303 538
304static struct plat_sci_port sci_platform_data[] = { 539static struct siu_platform siu_platform_data = {
305 { 540 .dma_dev = &dma_device.dev,
306 .mapbase = 0xffe00000, 541 .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
307 .flags = UPF_BOOT_AUTOCONF, 542 .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
308 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 543 .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
309 .scbrr_algo_id = SCBRR_ALGO_2, 544 .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
310 .type = PORT_SCIF,
311 .irqs = { 80, 80, 80, 80 },
312 .clk = "scif0",
313 }, {
314 .mapbase = 0xffe10000,
315 .flags = UPF_BOOT_AUTOCONF,
316 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
317 .scbrr_algo_id = SCBRR_ALGO_2,
318 .type = PORT_SCIF,
319 .irqs = { 81, 81, 81, 81 },
320 .clk = "scif1",
321 }, {
322 .mapbase = 0xffe20000,
323 .flags = UPF_BOOT_AUTOCONF,
324 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
325 .scbrr_algo_id = SCBRR_ALGO_2,
326 .type = PORT_SCIF,
327 .irqs = { 82, 82, 82, 82 },
328 .clk = "scif2",
329 }, {
330 .flags = 0,
331 }
332}; 545};
333 546
334static struct platform_device sci_device = { 547static struct resource siu_resources[] = {
335 .name = "sh-sci", 548 [0] = {
549 .start = 0xa4540000,
550 .end = 0xa454c10f,
551 .flags = IORESOURCE_MEM,
552 },
553 [1] = {
554 .start = 108,
555 .flags = IORESOURCE_IRQ,
556 },
557};
558
559static struct platform_device siu_device = {
560 .name = "siu-pcm-audio",
336 .id = -1, 561 .id = -1,
337 .dev = { 562 .dev = {
338 .platform_data = sci_platform_data, 563 .platform_data = &siu_platform_data,
564 },
565 .resource = siu_resources,
566 .num_resources = ARRAY_SIZE(siu_resources),
567 .archdata = {
568 .hwblk_id = HWBLK_SIU,
339 }, 569 },
340}; 570};
341 571
342static struct platform_device *sh7722_devices[] __initdata = { 572static struct platform_device *sh7722_devices[] __initdata = {
573 &scif0_device,
574 &scif1_device,
575 &scif2_device,
343 &cmt_device, 576 &cmt_device,
344 &tmu0_device, 577 &tmu0_device,
345 &tmu1_device, 578 &tmu1_device,
@@ -347,10 +580,11 @@ static struct platform_device *sh7722_devices[] __initdata = {
347 &rtc_device, 580 &rtc_device,
348 &usbf_device, 581 &usbf_device,
349 &iic_device, 582 &iic_device,
350 &sci_device,
351 &vpu_device, 583 &vpu_device,
352 &veu_device, 584 &veu_device,
353 &jpu_device, 585 &jpu_device,
586 &siu_device,
587 &dma_device,
354}; 588};
355 589
356static int __init sh7722_devices_setup(void) 590static int __init sh7722_devices_setup(void)
@@ -362,9 +596,12 @@ static int __init sh7722_devices_setup(void)
362 return platform_add_devices(sh7722_devices, 596 return platform_add_devices(sh7722_devices,
363 ARRAY_SIZE(sh7722_devices)); 597 ARRAY_SIZE(sh7722_devices));
364} 598}
365__initcall(sh7722_devices_setup); 599arch_initcall(sh7722_devices_setup);
366 600
367static struct platform_device *sh7722_early_devices[] __initdata = { 601static struct platform_device *sh7722_early_devices[] __initdata = {
602 &scif0_device,
603 &scif1_device,
604 &scif2_device,
368 &cmt_device, 605 &cmt_device,
369 &tmu0_device, 606 &tmu0_device,
370 &tmu1_device, 607 &tmu1_device,
@@ -379,6 +616,8 @@ void __init plat_early_device_setup(void)
379 616
380enum { 617enum {
381 UNUSED=0, 618 UNUSED=0,
619 ENABLED,
620 DISABLED,
382 621
383 /* interrupt sources */ 622 /* interrupt sources */
384 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, 623 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -394,7 +633,6 @@ enum {
394 SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO, 633 SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
395 FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 634 FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
396 I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, 635 I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
397 SDHI0, SDHI1, SDHI2, SDHI3,
398 CMT, TSIF, SIU, TWODG, 636 CMT, TSIF, SIU, TWODG,
399 TMU0, TMU1, TMU2, 637 TMU0, TMU1, TMU2,
400 IRDA, JPU, LCDC, 638 IRDA, JPU, LCDC,
@@ -427,8 +665,8 @@ static struct intc_vect vectors[] __initdata = {
427 INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), 665 INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
428 INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), 666 INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
429 INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), 667 INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
430 INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0), 668 INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
431 INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0), 669 INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
432 INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), 670 INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
433 INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0), 671 INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
434 INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), 672 INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -446,7 +684,6 @@ static struct intc_group groups[] __initdata = {
446 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, 684 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
447 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 685 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
448 INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), 686 INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
449 INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
450}; 687};
451 688
452static struct intc_mask_reg mask_registers[] __initdata = { 689static struct intc_mask_reg mask_registers[] __initdata = {
@@ -468,7 +705,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
468 { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, 705 { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
469 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, 706 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
470 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ 707 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
471 { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } }, 708 { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
472 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ 709 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
473 { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } }, 710 { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
474 { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ 711 { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -506,9 +743,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
506 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, 743 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
507}; 744};
508 745
509static DECLARE_INTC_DESC_ACK(intc_desc, "sh7722", vectors, groups, 746static struct intc_desc intc_desc __initdata = {
510 mask_registers, prio_registers, sense_registers, 747 .name = "sh7722",
511 ack_registers); 748 .force_enable = ENABLED,
749 .force_disable = DISABLED,
750 .hw = INTC_HW_DESC(vectors, groups, mask_registers,
751 prio_registers, sense_registers, ack_registers),
752};
512 753
513void __init plat_irq_setup(void) 754void __init plat_irq_setup(void)
514{ 755{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index bb4837b9dcf4..264983ddc8da 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -18,6 +18,110 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <asm/clock.h> 19#include <asm/clock.h>
20#include <asm/mmzone.h> 20#include <asm/mmzone.h>
21#include <cpu/sh7723.h>
22
23/* Serial */
24static struct plat_sci_port scif0_platform_data = {
25 .mapbase = 0xffe00000,
26 .flags = UPF_BOOT_AUTOCONF,
27 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
28 .scbrr_algo_id = SCBRR_ALGO_2,
29 .type = PORT_SCIF,
30 .irqs = { 80, 80, 80, 80 },
31};
32
33static struct platform_device scif0_device = {
34 .name = "sh-sci",
35 .id = 0,
36 .dev = {
37 .platform_data = &scif0_platform_data,
38 },
39};
40
41static struct plat_sci_port scif1_platform_data = {
42 .mapbase = 0xffe10000,
43 .flags = UPF_BOOT_AUTOCONF,
44 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
45 .scbrr_algo_id = SCBRR_ALGO_2,
46 .type = PORT_SCIF,
47 .irqs = { 81, 81, 81, 81 },
48};
49
50static struct platform_device scif1_device = {
51 .name = "sh-sci",
52 .id = 1,
53 .dev = {
54 .platform_data = &scif1_platform_data,
55 },
56};
57
58static struct plat_sci_port scif2_platform_data = {
59 .mapbase = 0xffe20000,
60 .flags = UPF_BOOT_AUTOCONF,
61 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
62 .scbrr_algo_id = SCBRR_ALGO_2,
63 .type = PORT_SCIF,
64 .irqs = { 82, 82, 82, 82 },
65};
66
67static struct platform_device scif2_device = {
68 .name = "sh-sci",
69 .id = 2,
70 .dev = {
71 .platform_data = &scif2_platform_data,
72 },
73};
74
75static struct plat_sci_port scif3_platform_data = {
76 .mapbase = 0xa4e30000,
77 .flags = UPF_BOOT_AUTOCONF,
78 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
79 .scbrr_algo_id = SCBRR_ALGO_3,
80 .type = PORT_SCIFA,
81 .irqs = { 56, 56, 56, 56 },
82};
83
84static struct platform_device scif3_device = {
85 .name = "sh-sci",
86 .id = 3,
87 .dev = {
88 .platform_data = &scif3_platform_data,
89 },
90};
91
92static struct plat_sci_port scif4_platform_data = {
93 .mapbase = 0xa4e40000,
94 .flags = UPF_BOOT_AUTOCONF,
95 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
96 .scbrr_algo_id = SCBRR_ALGO_3,
97 .type = PORT_SCIFA,
98 .irqs = { 88, 88, 88, 88 },
99};
100
101static struct platform_device scif4_device = {
102 .name = "sh-sci",
103 .id = 4,
104 .dev = {
105 .platform_data = &scif4_platform_data,
106 },
107};
108
109static struct plat_sci_port scif5_platform_data = {
110 .mapbase = 0xa4e50000,
111 .flags = UPF_BOOT_AUTOCONF,
112 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
113 .scbrr_algo_id = SCBRR_ALGO_3,
114 .type = PORT_SCIFA,
115 .irqs = { 109, 109, 109, 109 },
116};
117
118static struct platform_device scif5_device = {
119 .name = "sh-sci",
120 .id = 5,
121 .dev = {
122 .platform_data = &scif5_platform_data,
123 },
124};
21 125
22static struct uio_info vpu_platform_data = { 126static struct uio_info vpu_platform_data = {
23 .name = "VPU5", 127 .name = "VPU5",
@@ -45,6 +149,9 @@ static struct platform_device vpu_device = {
45 }, 149 },
46 .resource = vpu_resources, 150 .resource = vpu_resources,
47 .num_resources = ARRAY_SIZE(vpu_resources), 151 .num_resources = ARRAY_SIZE(vpu_resources),
152 .archdata = {
153 .hwblk_id = HWBLK_VPU,
154 },
48}; 155};
49 156
50static struct uio_info veu0_platform_data = { 157static struct uio_info veu0_platform_data = {
@@ -73,6 +180,9 @@ static struct platform_device veu0_device = {
73 }, 180 },
74 .resource = veu0_resources, 181 .resource = veu0_resources,
75 .num_resources = ARRAY_SIZE(veu0_resources), 182 .num_resources = ARRAY_SIZE(veu0_resources),
183 .archdata = {
184 .hwblk_id = HWBLK_VEU2H0,
185 },
76}; 186};
77 187
78static struct uio_info veu1_platform_data = { 188static struct uio_info veu1_platform_data = {
@@ -101,20 +211,20 @@ static struct platform_device veu1_device = {
101 }, 211 },
102 .resource = veu1_resources, 212 .resource = veu1_resources,
103 .num_resources = ARRAY_SIZE(veu1_resources), 213 .num_resources = ARRAY_SIZE(veu1_resources),
214 .archdata = {
215 .hwblk_id = HWBLK_VEU2H1,
216 },
104}; 217};
105 218
106static struct sh_timer_config cmt_platform_data = { 219static struct sh_timer_config cmt_platform_data = {
107 .name = "CMT",
108 .channel_offset = 0x60, 220 .channel_offset = 0x60,
109 .timer_bit = 5, 221 .timer_bit = 5,
110 .clk = "cmt0",
111 .clockevent_rating = 125, 222 .clockevent_rating = 125,
112 .clocksource_rating = 125, 223 .clocksource_rating = 125,
113}; 224};
114 225
115static struct resource cmt_resources[] = { 226static struct resource cmt_resources[] = {
116 [0] = { 227 [0] = {
117 .name = "CMT",
118 .start = 0x044a0060, 228 .start = 0x044a0060,
119 .end = 0x044a006b, 229 .end = 0x044a006b,
120 .flags = IORESOURCE_MEM, 230 .flags = IORESOURCE_MEM,
@@ -133,19 +243,19 @@ static struct platform_device cmt_device = {
133 }, 243 },
134 .resource = cmt_resources, 244 .resource = cmt_resources,
135 .num_resources = ARRAY_SIZE(cmt_resources), 245 .num_resources = ARRAY_SIZE(cmt_resources),
246 .archdata = {
247 .hwblk_id = HWBLK_CMT,
248 },
136}; 249};
137 250
138static struct sh_timer_config tmu0_platform_data = { 251static struct sh_timer_config tmu0_platform_data = {
139 .name = "TMU0",
140 .channel_offset = 0x04, 252 .channel_offset = 0x04,
141 .timer_bit = 0, 253 .timer_bit = 0,
142 .clk = "tmu0",
143 .clockevent_rating = 200, 254 .clockevent_rating = 200,
144}; 255};
145 256
146static struct resource tmu0_resources[] = { 257static struct resource tmu0_resources[] = {
147 [0] = { 258 [0] = {
148 .name = "TMU0",
149 .start = 0xffd80008, 259 .start = 0xffd80008,
150 .end = 0xffd80013, 260 .end = 0xffd80013,
151 .flags = IORESOURCE_MEM, 261 .flags = IORESOURCE_MEM,
@@ -164,19 +274,19 @@ static struct platform_device tmu0_device = {
164 }, 274 },
165 .resource = tmu0_resources, 275 .resource = tmu0_resources,
166 .num_resources = ARRAY_SIZE(tmu0_resources), 276 .num_resources = ARRAY_SIZE(tmu0_resources),
277 .archdata = {
278 .hwblk_id = HWBLK_TMU0,
279 },
167}; 280};
168 281
169static struct sh_timer_config tmu1_platform_data = { 282static struct sh_timer_config tmu1_platform_data = {
170 .name = "TMU1",
171 .channel_offset = 0x10, 283 .channel_offset = 0x10,
172 .timer_bit = 1, 284 .timer_bit = 1,
173 .clk = "tmu0",
174 .clocksource_rating = 200, 285 .clocksource_rating = 200,
175}; 286};
176 287
177static struct resource tmu1_resources[] = { 288static struct resource tmu1_resources[] = {
178 [0] = { 289 [0] = {
179 .name = "TMU1",
180 .start = 0xffd80014, 290 .start = 0xffd80014,
181 .end = 0xffd8001f, 291 .end = 0xffd8001f,
182 .flags = IORESOURCE_MEM, 292 .flags = IORESOURCE_MEM,
@@ -195,18 +305,18 @@ static struct platform_device tmu1_device = {
195 }, 305 },
196 .resource = tmu1_resources, 306 .resource = tmu1_resources,
197 .num_resources = ARRAY_SIZE(tmu1_resources), 307 .num_resources = ARRAY_SIZE(tmu1_resources),
308 .archdata = {
309 .hwblk_id = HWBLK_TMU0,
310 },
198}; 311};
199 312
200static struct sh_timer_config tmu2_platform_data = { 313static struct sh_timer_config tmu2_platform_data = {
201 .name = "TMU2",
202 .channel_offset = 0x1c, 314 .channel_offset = 0x1c,
203 .timer_bit = 2, 315 .timer_bit = 2,
204 .clk = "tmu0",
205}; 316};
206 317
207static struct resource tmu2_resources[] = { 318static struct resource tmu2_resources[] = {
208 [0] = { 319 [0] = {
209 .name = "TMU2",
210 .start = 0xffd80020, 320 .start = 0xffd80020,
211 .end = 0xffd8002b, 321 .end = 0xffd8002b,
212 .flags = IORESOURCE_MEM, 322 .flags = IORESOURCE_MEM,
@@ -225,18 +335,18 @@ static struct platform_device tmu2_device = {
225 }, 335 },
226 .resource = tmu2_resources, 336 .resource = tmu2_resources,
227 .num_resources = ARRAY_SIZE(tmu2_resources), 337 .num_resources = ARRAY_SIZE(tmu2_resources),
338 .archdata = {
339 .hwblk_id = HWBLK_TMU0,
340 },
228}; 341};
229 342
230static struct sh_timer_config tmu3_platform_data = { 343static struct sh_timer_config tmu3_platform_data = {
231 .name = "TMU3",
232 .channel_offset = 0x04, 344 .channel_offset = 0x04,
233 .timer_bit = 0, 345 .timer_bit = 0,
234 .clk = "tmu1",
235}; 346};
236 347
237static struct resource tmu3_resources[] = { 348static struct resource tmu3_resources[] = {
238 [0] = { 349 [0] = {
239 .name = "TMU3",
240 .start = 0xffd90008, 350 .start = 0xffd90008,
241 .end = 0xffd90013, 351 .end = 0xffd90013,
242 .flags = IORESOURCE_MEM, 352 .flags = IORESOURCE_MEM,
@@ -255,18 +365,18 @@ static struct platform_device tmu3_device = {
255 }, 365 },
256 .resource = tmu3_resources, 366 .resource = tmu3_resources,
257 .num_resources = ARRAY_SIZE(tmu3_resources), 367 .num_resources = ARRAY_SIZE(tmu3_resources),
368 .archdata = {
369 .hwblk_id = HWBLK_TMU1,
370 },
258}; 371};
259 372
260static struct sh_timer_config tmu4_platform_data = { 373static struct sh_timer_config tmu4_platform_data = {
261 .name = "TMU4",
262 .channel_offset = 0x10, 374 .channel_offset = 0x10,
263 .timer_bit = 1, 375 .timer_bit = 1,
264 .clk = "tmu1",
265}; 376};
266 377
267static struct resource tmu4_resources[] = { 378static struct resource tmu4_resources[] = {
268 [0] = { 379 [0] = {
269 .name = "TMU4",
270 .start = 0xffd90014, 380 .start = 0xffd90014,
271 .end = 0xffd9001f, 381 .end = 0xffd9001f,
272 .flags = IORESOURCE_MEM, 382 .flags = IORESOURCE_MEM,
@@ -285,18 +395,18 @@ static struct platform_device tmu4_device = {
285 }, 395 },
286 .resource = tmu4_resources, 396 .resource = tmu4_resources,
287 .num_resources = ARRAY_SIZE(tmu4_resources), 397 .num_resources = ARRAY_SIZE(tmu4_resources),
398 .archdata = {
399 .hwblk_id = HWBLK_TMU1,
400 },
288}; 401};
289 402
290static struct sh_timer_config tmu5_platform_data = { 403static struct sh_timer_config tmu5_platform_data = {
291 .name = "TMU5",
292 .channel_offset = 0x1c, 404 .channel_offset = 0x1c,
293 .timer_bit = 2, 405 .timer_bit = 2,
294 .clk = "tmu1",
295}; 406};
296 407
297static struct resource tmu5_resources[] = { 408static struct resource tmu5_resources[] = {
298 [0] = { 409 [0] = {
299 .name = "TMU5",
300 .start = 0xffd90020, 410 .start = 0xffd90020,
301 .end = 0xffd9002b, 411 .end = 0xffd9002b,
302 .flags = IORESOURCE_MEM, 412 .flags = IORESOURCE_MEM,
@@ -315,67 +425,8 @@ static struct platform_device tmu5_device = {
315 }, 425 },
316 .resource = tmu5_resources, 426 .resource = tmu5_resources,
317 .num_resources = ARRAY_SIZE(tmu5_resources), 427 .num_resources = ARRAY_SIZE(tmu5_resources),
318}; 428 .archdata = {
319 429 .hwblk_id = HWBLK_TMU1,
320static struct plat_sci_port sci_platform_data[] = {
321 {
322 .mapbase = 0xffe00000,
323 .flags = UPF_BOOT_AUTOCONF,
324 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
325 .scbrr_algo_id = SCBRR_ALGO_2,
326 .type = PORT_SCIF,
327 .irqs = { 80, 80, 80, 80 },
328 .clk = "scif0",
329 },{
330 .mapbase = 0xffe10000,
331 .flags = UPF_BOOT_AUTOCONF,
332 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
333 .scbrr_algo_id = SCBRR_ALGO_2,
334 .type = PORT_SCIF,
335 .irqs = { 81, 81, 81, 81 },
336 .clk = "scif1",
337 },{
338 .mapbase = 0xffe20000,
339 .flags = UPF_BOOT_AUTOCONF,
340 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
341 .scbrr_algo_id = SCBRR_ALGO_2,
342 .type = PORT_SCIF,
343 .irqs = { 82, 82, 82, 82 },
344 .clk = "scif2",
345 },{
346 .mapbase = 0xa4e30000,
347 .flags = UPF_BOOT_AUTOCONF,
348 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
349 .scbrr_algo_id = SCBRR_ALGO_3,
350 .type = PORT_SCIFA,
351 .irqs = { 56, 56, 56, 56 },
352 .clk = "scif3",
353 },{
354 .mapbase = 0xa4e40000,
355 .flags = UPF_BOOT_AUTOCONF,
356 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
357 .scbrr_algo_id = SCBRR_ALGO_3,
358 .type = PORT_SCIFA,
359 .irqs = { 88, 88, 88, 88 },
360 .clk = "scif4",
361 },{
362 .mapbase = 0xa4e50000,
363 .flags = UPF_BOOT_AUTOCONF,
364 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
365 .scbrr_algo_id = SCBRR_ALGO_3,
366 .type = PORT_SCIFA,
367 .irqs = { 109, 109, 109, 109 },
368 .clk = "scif5",
369 }, {
370 .flags = 0,
371 }
372};
373
374static struct platform_device sci_device = {
375 .name = "sh-sci",
376 .id = -1,
377 .dev = {
378 .platform_data = sci_platform_data,
379 }, 430 },
380}; 431};
381 432
@@ -407,10 +458,13 @@ static struct platform_device rtc_device = {
407 .id = -1, 458 .id = -1,
408 .num_resources = ARRAY_SIZE(rtc_resources), 459 .num_resources = ARRAY_SIZE(rtc_resources),
409 .resource = rtc_resources, 460 .resource = rtc_resources,
461 .archdata = {
462 .hwblk_id = HWBLK_RTC,
463 },
410}; 464};
411 465
412static struct r8a66597_platdata r8a66597_data = { 466static struct r8a66597_platdata r8a66597_data = {
413 /* This set zero to all members */ 467 .on_chip = 1,
414}; 468};
415 469
416static struct resource sh7723_usb_host_resources[] = { 470static struct resource sh7723_usb_host_resources[] = {
@@ -436,6 +490,9 @@ static struct platform_device sh7723_usb_host_device = {
436 }, 490 },
437 .num_resources = ARRAY_SIZE(sh7723_usb_host_resources), 491 .num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
438 .resource = sh7723_usb_host_resources, 492 .resource = sh7723_usb_host_resources,
493 .archdata = {
494 .hwblk_id = HWBLK_USB,
495 },
439}; 496};
440 497
441static struct resource iic_resources[] = { 498static struct resource iic_resources[] = {
@@ -457,9 +514,18 @@ static struct platform_device iic_device = {
457 .id = 0, /* "i2c0" clock */ 514 .id = 0, /* "i2c0" clock */
458 .num_resources = ARRAY_SIZE(iic_resources), 515 .num_resources = ARRAY_SIZE(iic_resources),
459 .resource = iic_resources, 516 .resource = iic_resources,
517 .archdata = {
518 .hwblk_id = HWBLK_IIC,
519 },
460}; 520};
461 521
462static struct platform_device *sh7723_devices[] __initdata = { 522static struct platform_device *sh7723_devices[] __initdata = {
523 &scif0_device,
524 &scif1_device,
525 &scif2_device,
526 &scif3_device,
527 &scif4_device,
528 &scif5_device,
463 &cmt_device, 529 &cmt_device,
464 &tmu0_device, 530 &tmu0_device,
465 &tmu1_device, 531 &tmu1_device,
@@ -467,7 +533,6 @@ static struct platform_device *sh7723_devices[] __initdata = {
467 &tmu3_device, 533 &tmu3_device,
468 &tmu4_device, 534 &tmu4_device,
469 &tmu5_device, 535 &tmu5_device,
470 &sci_device,
471 &rtc_device, 536 &rtc_device,
472 &iic_device, 537 &iic_device,
473 &sh7723_usb_host_device, 538 &sh7723_usb_host_device,
@@ -485,9 +550,15 @@ static int __init sh7723_devices_setup(void)
485 return platform_add_devices(sh7723_devices, 550 return platform_add_devices(sh7723_devices,
486 ARRAY_SIZE(sh7723_devices)); 551 ARRAY_SIZE(sh7723_devices));
487} 552}
488__initcall(sh7723_devices_setup); 553arch_initcall(sh7723_devices_setup);
489 554
490static struct platform_device *sh7723_early_devices[] __initdata = { 555static struct platform_device *sh7723_early_devices[] __initdata = {
556 &scif0_device,
557 &scif1_device,
558 &scif2_device,
559 &scif3_device,
560 &scif4_device,
561 &scif5_device,
491 &cmt_device, 562 &cmt_device,
492 &tmu0_device, 563 &tmu0_device,
493 &tmu1_device, 564 &tmu1_device,
@@ -506,14 +577,17 @@ void __init plat_early_device_setup(void)
506#define RAMCR_CACHE_L2FC 0x0002 577#define RAMCR_CACHE_L2FC 0x0002
507#define RAMCR_CACHE_L2E 0x0001 578#define RAMCR_CACHE_L2E 0x0001
508#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) 579#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
509void __uses_jump_to_uncached l2_cache_init(void) 580
581void l2_cache_init(void)
510{ 582{
511 /* Enable L2 cache */ 583 /* Enable L2 cache */
512 ctrl_outl(L2_CACHE_ENABLE, RAMCR); 584 __raw_writel(L2_CACHE_ENABLE, RAMCR);
513} 585}
514 586
515enum { 587enum {
516 UNUSED=0, 588 UNUSED=0,
589 ENABLED,
590 DISABLED,
517 591
518 /* interrupt sources */ 592 /* interrupt sources */
519 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, 593 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -536,7 +610,6 @@ enum {
536 SCIFA_SCIFA1, 610 SCIFA_SCIFA1,
537 FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I, 611 FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
538 I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI, 612 I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
539 SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2,
540 CMT_CMTI, 613 CMT_CMTI,
541 TSIF_TSIFI, 614 TSIF_TSIFI,
542 SIU_SIUI, 615 SIU_SIUI,
@@ -544,7 +617,6 @@ enum {
544 TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, 617 TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
545 IRDA_IRDAI, 618 IRDA_IRDAI,
546 ATAPI_ATAPII, 619 ATAPI_ATAPII,
547 SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2,
548 VEU2H1_VEU2HI, 620 VEU2H1_VEU2HI,
549 LCDC_LCDCI, 621 LCDC_LCDCI,
550 TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2, 622 TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
@@ -615,9 +687,9 @@ static struct intc_vect vectors[] __initdata = {
615 INTC_VECT(I2C_WAITI,0xE40), 687 INTC_VECT(I2C_WAITI,0xE40),
616 INTC_VECT(I2C_DTEI,0xE60), 688 INTC_VECT(I2C_DTEI,0xE60),
617 689
618 INTC_VECT(SDHI0_SDHII0,0xE80), 690 INTC_VECT(SDHI0, 0xE80),
619 INTC_VECT(SDHI0_SDHII1,0xEA0), 691 INTC_VECT(SDHI0, 0xEA0),
620 INTC_VECT(SDHI0_SDHII2,0xEC0), 692 INTC_VECT(SDHI0, 0xEC0),
621 693
622 INTC_VECT(CMT_CMTI,0xF00), 694 INTC_VECT(CMT_CMTI,0xF00),
623 INTC_VECT(TSIF_TSIFI,0xF20), 695 INTC_VECT(TSIF_TSIFI,0xF20),
@@ -631,9 +703,9 @@ static struct intc_vect vectors[] __initdata = {
631 INTC_VECT(IRDA_IRDAI,0x480), 703 INTC_VECT(IRDA_IRDAI,0x480),
632 INTC_VECT(ATAPI_ATAPII,0x4A0), 704 INTC_VECT(ATAPI_ATAPII,0x4A0),
633 705
634 INTC_VECT(SDHI1_SDHII0,0x4E0), 706 INTC_VECT(SDHI1, 0x4E0),
635 INTC_VECT(SDHI1_SDHII1,0x500), 707 INTC_VECT(SDHI1, 0x500),
636 INTC_VECT(SDHI1_SDHII2,0x520), 708 INTC_VECT(SDHI1, 0x520),
637 709
638 INTC_VECT(VEU2H1_VEU2HI,0x560), 710 INTC_VECT(VEU2H1_VEU2HI,0x560),
639 INTC_VECT(LCDC_LCDCI,0x580), 711 INTC_VECT(LCDC_LCDCI,0x580),
@@ -652,15 +724,14 @@ static struct intc_group groups[] __initdata = {
652 INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I), 724 INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
653 INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI), 725 INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
654 INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI), 726 INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
655 INTC_GROUP(SDHI1, SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2),
656 INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI), 727 INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
657 INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR), 728 INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
658 INTC_GROUP(SDHI0,SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2),
659}; 729};
660 730
661static struct intc_mask_reg mask_registers[] __initdata = { 731static struct intc_mask_reg mask_registers[] __initdata = {
662 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ 732 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
663 { 0, TMU1_TUNI2,TMU1_TUNI1,TMU1_TUNI0,0,SDHI1_SDHII2,SDHI1_SDHII1,SDHI1_SDHII0} }, 733 { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
734 0, DISABLED, ENABLED, ENABLED } },
664 { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ 735 { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
665 { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } }, 736 { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
666 { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ 737 { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -677,7 +748,8 @@ static struct intc_mask_reg mask_registers[] __initdata = {
677 { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, 748 { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
678 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 749 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
679 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ 750 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
680 { 0,SDHI0_SDHII2,SDHI0_SDHII1,SDHI0_SDHII0,0,0,SCIFA_SCIFA2,SIU_SIUI } }, 751 { 0, DISABLED, ENABLED, ENABLED,
752 0, 0, SCIFA_SCIFA2, SIU_SIUI } },
681 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ 753 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
682 { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } }, 754 { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
683 { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ 755 { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -717,9 +789,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
717 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, 789 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
718}; 790};
719 791
720static DECLARE_INTC_DESC_ACK(intc_desc, "sh7723", vectors, groups, 792static struct intc_desc intc_desc __initdata = {
721 mask_registers, prio_registers, sense_registers, 793 .name = "sh7723",
722 ack_registers); 794 .force_enable = ENABLED,
795 .force_disable = DISABLED,
796 .hw = INTC_HW_DESC(vectors, groups, mask_registers,
797 prio_registers, sense_registers, ack_registers),
798};
723 799
724void __init plat_irq_setup(void) 800void __init plat_irq_setup(void)
725{ 801{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index c934b78e5658..4a8b751c5f17 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -18,71 +18,341 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/serial_sci.h> 19#include <linux/serial_sci.h>
20#include <linux/uio_driver.h> 20#include <linux/uio_driver.h>
21#include <linux/sh_dma.h>
21#include <linux/sh_timer.h> 22#include <linux/sh_timer.h>
22#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/notifier.h>
25
26#include <asm/suspend.h>
23#include <asm/clock.h> 27#include <asm/clock.h>
24#include <asm/mmzone.h> 28#include <asm/mmzone.h>
25 29
26/* Serial */ 30#include <cpu/dma-register.h>
27static struct plat_sci_port sci_platform_data[] = { 31#include <cpu/sh7724.h>
32
33/* DMA */
34static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
28 { 35 {
29 .mapbase = 0xffe00000, 36 .slave_id = SHDMA_SLAVE_SCIF0_TX,
30 .flags = UPF_BOOT_AUTOCONF, 37 .addr = 0xffe0000c,
31 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 38 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
32 .scbrr_algo_id = SCBRR_ALGO_2, 39 .mid_rid = 0x21,
33 .type = PORT_SCIF, 40 }, {
34 .irqs = { 80, 80, 80, 80 }, 41 .slave_id = SHDMA_SLAVE_SCIF0_RX,
35 .clk = "scif0", 42 .addr = 0xffe00014,
43 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
44 .mid_rid = 0x22,
45 }, {
46 .slave_id = SHDMA_SLAVE_SCIF1_TX,
47 .addr = 0xffe1000c,
48 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
49 .mid_rid = 0x25,
50 }, {
51 .slave_id = SHDMA_SLAVE_SCIF1_RX,
52 .addr = 0xffe10014,
53 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
54 .mid_rid = 0x26,
55 }, {
56 .slave_id = SHDMA_SLAVE_SCIF2_TX,
57 .addr = 0xffe2000c,
58 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
59 .mid_rid = 0x29,
36 }, { 60 }, {
37 .mapbase = 0xffe10000, 61 .slave_id = SHDMA_SLAVE_SCIF2_RX,
38 .flags = UPF_BOOT_AUTOCONF, 62 .addr = 0xffe20014,
39 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 63 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
40 .scbrr_algo_id = SCBRR_ALGO_2, 64 .mid_rid = 0x2a,
41 .type = PORT_SCIF,
42 .irqs = { 81, 81, 81, 81 },
43 .clk = "scif1",
44 }, { 65 }, {
45 .mapbase = 0xffe20000, 66 .slave_id = SHDMA_SLAVE_SCIF3_TX,
46 .flags = UPF_BOOT_AUTOCONF, 67 .addr = 0xa4e30020,
47 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 68 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
48 .scbrr_algo_id = SCBRR_ALGO_2, 69 .mid_rid = 0x2d,
49 .type = PORT_SCIF,
50 .irqs = { 82, 82, 82, 82 },
51 .clk = "scif2",
52 }, { 70 }, {
53 .mapbase = 0xa4e30000, 71 .slave_id = SHDMA_SLAVE_SCIF3_RX,
54 .flags = UPF_BOOT_AUTOCONF, 72 .addr = 0xa4e30024,
55 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 73 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
56 .scbrr_algo_id = SCBRR_ALGO_3, 74 .mid_rid = 0x2e,
57 .type = PORT_SCIFA,
58 .irqs = { 56, 56, 56, 56 },
59 .clk = "scif3",
60 }, { 75 }, {
61 .mapbase = 0xa4e40000, 76 .slave_id = SHDMA_SLAVE_SCIF4_TX,
62 .flags = UPF_BOOT_AUTOCONF, 77 .addr = 0xa4e40020,
63 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 78 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
64 .scbrr_algo_id = SCBRR_ALGO_3, 79 .mid_rid = 0x31,
65 .type = PORT_SCIFA,
66 .irqs = { 88, 88, 88, 88 },
67 .clk = "scif4",
68 }, { 80 }, {
69 .mapbase = 0xa4e50000, 81 .slave_id = SHDMA_SLAVE_SCIF4_RX,
70 .flags = UPF_BOOT_AUTOCONF, 82 .addr = 0xa4e40024,
71 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 83 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
72 .scbrr_algo_id = SCBRR_ALGO_3, 84 .mid_rid = 0x32,
73 .type = PORT_SCIFA,
74 .irqs = { 109, 109, 109, 109 },
75 .clk = "scif5",
76 }, { 85 }, {
77 .flags = 0, 86 .slave_id = SHDMA_SLAVE_SCIF5_TX,
87 .addr = 0xa4e50020,
88 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
89 .mid_rid = 0x35,
90 }, {
91 .slave_id = SHDMA_SLAVE_SCIF5_RX,
92 .addr = 0xa4e50024,
93 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
94 .mid_rid = 0x36,
95 }, {
96 .slave_id = SHDMA_SLAVE_SDHI0_TX,
97 .addr = 0x04ce0030,
98 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
99 .mid_rid = 0xc1,
100 }, {
101 .slave_id = SHDMA_SLAVE_SDHI0_RX,
102 .addr = 0x04ce0030,
103 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
104 .mid_rid = 0xc2,
105 }, {
106 .slave_id = SHDMA_SLAVE_SDHI1_TX,
107 .addr = 0x04cf0030,
108 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
109 .mid_rid = 0xc9,
110 }, {
111 .slave_id = SHDMA_SLAVE_SDHI1_RX,
112 .addr = 0x04cf0030,
113 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
114 .mid_rid = 0xca,
115 },
116};
117
118static const struct sh_dmae_channel sh7724_dmae_channels[] = {
119 {
120 .offset = 0,
121 .dmars = 0,
122 .dmars_bit = 0,
123 }, {
124 .offset = 0x10,
125 .dmars = 0,
126 .dmars_bit = 8,
127 }, {
128 .offset = 0x20,
129 .dmars = 4,
130 .dmars_bit = 0,
131 }, {
132 .offset = 0x30,
133 .dmars = 4,
134 .dmars_bit = 8,
135 }, {
136 .offset = 0x50,
137 .dmars = 8,
138 .dmars_bit = 0,
139 }, {
140 .offset = 0x60,
141 .dmars = 8,
142 .dmars_bit = 8,
78 } 143 }
79}; 144};
80 145
81static struct platform_device sci_device = { 146static const unsigned int ts_shift[] = TS_SHIFT;
147
148static struct sh_dmae_pdata dma_platform_data = {
149 .slave = sh7724_dmae_slaves,
150 .slave_num = ARRAY_SIZE(sh7724_dmae_slaves),
151 .channel = sh7724_dmae_channels,
152 .channel_num = ARRAY_SIZE(sh7724_dmae_channels),
153 .ts_low_shift = CHCR_TS_LOW_SHIFT,
154 .ts_low_mask = CHCR_TS_LOW_MASK,
155 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
156 .ts_high_mask = CHCR_TS_HIGH_MASK,
157 .ts_shift = ts_shift,
158 .ts_shift_num = ARRAY_SIZE(ts_shift),
159 .dmaor_init = DMAOR_INIT,
160};
161
162/* Resource order important! */
163static struct resource sh7724_dmae0_resources[] = {
164 {
165 /* Channel registers and DMAOR */
166 .start = 0xfe008020,
167 .end = 0xfe00808f,
168 .flags = IORESOURCE_MEM,
169 },
170 {
171 /* DMARSx */
172 .start = 0xfe009000,
173 .end = 0xfe00900b,
174 .flags = IORESOURCE_MEM,
175 },
176 {
177 /* DMA error IRQ */
178 .start = 78,
179 .end = 78,
180 .flags = IORESOURCE_IRQ,
181 },
182 {
183 /* IRQ for channels 0-3 */
184 .start = 48,
185 .end = 51,
186 .flags = IORESOURCE_IRQ,
187 },
188 {
189 /* IRQ for channels 4-5 */
190 .start = 76,
191 .end = 77,
192 .flags = IORESOURCE_IRQ,
193 },
194};
195
196/* Resource order important! */
197static struct resource sh7724_dmae1_resources[] = {
198 {
199 /* Channel registers and DMAOR */
200 .start = 0xfdc08020,
201 .end = 0xfdc0808f,
202 .flags = IORESOURCE_MEM,
203 },
204 {
205 /* DMARSx */
206 .start = 0xfdc09000,
207 .end = 0xfdc0900b,
208 .flags = IORESOURCE_MEM,
209 },
210 {
211 /* DMA error IRQ */
212 .start = 74,
213 .end = 74,
214 .flags = IORESOURCE_IRQ,
215 },
216 {
217 /* IRQ for channels 0-3 */
218 .start = 40,
219 .end = 43,
220 .flags = IORESOURCE_IRQ,
221 },
222 {
223 /* IRQ for channels 4-5 */
224 .start = 72,
225 .end = 73,
226 .flags = IORESOURCE_IRQ,
227 },
228};
229
230static struct platform_device dma0_device = {
231 .name = "sh-dma-engine",
232 .id = 0,
233 .resource = sh7724_dmae0_resources,
234 .num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
235 .dev = {
236 .platform_data = &dma_platform_data,
237 },
238 .archdata = {
239 .hwblk_id = HWBLK_DMAC0,
240 },
241};
242
243static struct platform_device dma1_device = {
244 .name = "sh-dma-engine",
245 .id = 1,
246 .resource = sh7724_dmae1_resources,
247 .num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
248 .dev = {
249 .platform_data = &dma_platform_data,
250 },
251 .archdata = {
252 .hwblk_id = HWBLK_DMAC1,
253 },
254};
255
256/* Serial */
257static struct plat_sci_port scif0_platform_data = {
258 .mapbase = 0xffe00000,
259 .flags = UPF_BOOT_AUTOCONF,
260 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
261 .scbrr_algo_id = SCBRR_ALGO_2,
262 .type = PORT_SCIF,
263 .irqs = { 80, 80, 80, 80 },
264};
265
266static struct platform_device scif0_device = {
82 .name = "sh-sci", 267 .name = "sh-sci",
83 .id = -1, 268 .id = 0,
269 .dev = {
270 .platform_data = &scif0_platform_data,
271 },
272};
273
274static struct plat_sci_port scif1_platform_data = {
275 .mapbase = 0xffe10000,
276 .flags = UPF_BOOT_AUTOCONF,
277 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
278 .scbrr_algo_id = SCBRR_ALGO_2,
279 .type = PORT_SCIF,
280 .irqs = { 81, 81, 81, 81 },
281};
282
283static struct platform_device scif1_device = {
284 .name = "sh-sci",
285 .id = 1,
286 .dev = {
287 .platform_data = &scif1_platform_data,
288 },
289};
290
291static struct plat_sci_port scif2_platform_data = {
292 .mapbase = 0xffe20000,
293 .flags = UPF_BOOT_AUTOCONF,
294 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
295 .scbrr_algo_id = SCBRR_ALGO_2,
296 .type = PORT_SCIF,
297 .irqs = { 82, 82, 82, 82 },
298};
299
300static struct platform_device scif2_device = {
301 .name = "sh-sci",
302 .id = 2,
84 .dev = { 303 .dev = {
85 .platform_data = sci_platform_data, 304 .platform_data = &scif2_platform_data,
305 },
306};
307
308static struct plat_sci_port scif3_platform_data = {
309 .mapbase = 0xa4e30000,
310 .flags = UPF_BOOT_AUTOCONF,
311 .scscr = SCSCR_RE | SCSCR_TE,
312 .scbrr_algo_id = SCBRR_ALGO_3,
313 .type = PORT_SCIFA,
314 .irqs = { 56, 56, 56, 56 },
315};
316
317static struct platform_device scif3_device = {
318 .name = "sh-sci",
319 .id = 3,
320 .dev = {
321 .platform_data = &scif3_platform_data,
322 },
323};
324
325static struct plat_sci_port scif4_platform_data = {
326 .mapbase = 0xa4e40000,
327 .flags = UPF_BOOT_AUTOCONF,
328 .scscr = SCSCR_RE | SCSCR_TE,
329 .scbrr_algo_id = SCBRR_ALGO_3,
330 .type = PORT_SCIFA,
331 .irqs = { 88, 88, 88, 88 },
332};
333
334static struct platform_device scif4_device = {
335 .name = "sh-sci",
336 .id = 4,
337 .dev = {
338 .platform_data = &scif4_platform_data,
339 },
340};
341
342static struct plat_sci_port scif5_platform_data = {
343 .mapbase = 0xa4e50000,
344 .flags = UPF_BOOT_AUTOCONF,
345 .scscr = SCSCR_RE | SCSCR_TE,
346 .scbrr_algo_id = SCBRR_ALGO_3,
347 .type = PORT_SCIFA,
348 .irqs = { 109, 109, 109, 109 },
349};
350
351static struct platform_device scif5_device = {
352 .name = "sh-sci",
353 .id = 5,
354 .dev = {
355 .platform_data = &scif5_platform_data,
86 }, 356 },
87}; 357};
88 358
@@ -115,6 +385,9 @@ static struct platform_device rtc_device = {
115 .id = -1, 385 .id = -1,
116 .num_resources = ARRAY_SIZE(rtc_resources), 386 .num_resources = ARRAY_SIZE(rtc_resources),
117 .resource = rtc_resources, 387 .resource = rtc_resources,
388 .archdata = {
389 .hwblk_id = HWBLK_RTC,
390 },
118}; 391};
119 392
120/* I2C0 */ 393/* I2C0 */
@@ -137,6 +410,9 @@ static struct platform_device iic0_device = {
137 .id = 0, /* "i2c0" clock */ 410 .id = 0, /* "i2c0" clock */
138 .num_resources = ARRAY_SIZE(iic0_resources), 411 .num_resources = ARRAY_SIZE(iic0_resources),
139 .resource = iic0_resources, 412 .resource = iic0_resources,
413 .archdata = {
414 .hwblk_id = HWBLK_IIC0,
415 },
140}; 416};
141 417
142/* I2C1 */ 418/* I2C1 */
@@ -159,6 +435,9 @@ static struct platform_device iic1_device = {
159 .id = 1, /* "i2c1" clock */ 435 .id = 1, /* "i2c1" clock */
160 .num_resources = ARRAY_SIZE(iic1_resources), 436 .num_resources = ARRAY_SIZE(iic1_resources),
161 .resource = iic1_resources, 437 .resource = iic1_resources,
438 .archdata = {
439 .hwblk_id = HWBLK_IIC1,
440 },
162}; 441};
163 442
164/* VPU */ 443/* VPU */
@@ -188,6 +467,9 @@ static struct platform_device vpu_device = {
188 }, 467 },
189 .resource = vpu_resources, 468 .resource = vpu_resources,
190 .num_resources = ARRAY_SIZE(vpu_resources), 469 .num_resources = ARRAY_SIZE(vpu_resources),
470 .archdata = {
471 .hwblk_id = HWBLK_VPU,
472 },
191}; 473};
192 474
193/* VEU0 */ 475/* VEU0 */
@@ -201,7 +483,7 @@ static struct resource veu0_resources[] = {
201 [0] = { 483 [0] = {
202 .name = "VEU3F0", 484 .name = "VEU3F0",
203 .start = 0xfe920000, 485 .start = 0xfe920000,
204 .end = 0xfe9200cb - 1, 486 .end = 0xfe9200cb,
205 .flags = IORESOURCE_MEM, 487 .flags = IORESOURCE_MEM,
206 }, 488 },
207 [1] = { 489 [1] = {
@@ -217,6 +499,9 @@ static struct platform_device veu0_device = {
217 }, 499 },
218 .resource = veu0_resources, 500 .resource = veu0_resources,
219 .num_resources = ARRAY_SIZE(veu0_resources), 501 .num_resources = ARRAY_SIZE(veu0_resources),
502 .archdata = {
503 .hwblk_id = HWBLK_VEU0,
504 },
220}; 505};
221 506
222/* VEU1 */ 507/* VEU1 */
@@ -230,7 +515,7 @@ static struct resource veu1_resources[] = {
230 [0] = { 515 [0] = {
231 .name = "VEU3F1", 516 .name = "VEU3F1",
232 .start = 0xfe924000, 517 .start = 0xfe924000,
233 .end = 0xfe9240cb - 1, 518 .end = 0xfe9240cb,
234 .flags = IORESOURCE_MEM, 519 .flags = IORESOURCE_MEM,
235 }, 520 },
236 [1] = { 521 [1] = {
@@ -246,20 +531,84 @@ static struct platform_device veu1_device = {
246 }, 531 },
247 .resource = veu1_resources, 532 .resource = veu1_resources,
248 .num_resources = ARRAY_SIZE(veu1_resources), 533 .num_resources = ARRAY_SIZE(veu1_resources),
534 .archdata = {
535 .hwblk_id = HWBLK_VEU1,
536 },
537};
538
539/* BEU0 */
540static struct uio_info beu0_platform_data = {
541 .name = "BEU0",
542 .version = "0",
543 .irq = evt2irq(0x8A0),
544};
545
546static struct resource beu0_resources[] = {
547 [0] = {
548 .name = "BEU0",
549 .start = 0xfe930000,
550 .end = 0xfe933400,
551 .flags = IORESOURCE_MEM,
552 },
553 [1] = {
554 /* place holder for contiguous memory */
555 },
556};
557
558static struct platform_device beu0_device = {
559 .name = "uio_pdrv_genirq",
560 .id = 6,
561 .dev = {
562 .platform_data = &beu0_platform_data,
563 },
564 .resource = beu0_resources,
565 .num_resources = ARRAY_SIZE(beu0_resources),
566 .archdata = {
567 .hwblk_id = HWBLK_BEU0,
568 },
569};
570
571/* BEU1 */
572static struct uio_info beu1_platform_data = {
573 .name = "BEU1",
574 .version = "0",
575 .irq = evt2irq(0xA00),
576};
577
578static struct resource beu1_resources[] = {
579 [0] = {
580 .name = "BEU1",
581 .start = 0xfe940000,
582 .end = 0xfe943400,
583 .flags = IORESOURCE_MEM,
584 },
585 [1] = {
586 /* place holder for contiguous memory */
587 },
588};
589
590static struct platform_device beu1_device = {
591 .name = "uio_pdrv_genirq",
592 .id = 7,
593 .dev = {
594 .platform_data = &beu1_platform_data,
595 },
596 .resource = beu1_resources,
597 .num_resources = ARRAY_SIZE(beu1_resources),
598 .archdata = {
599 .hwblk_id = HWBLK_BEU1,
600 },
249}; 601};
250 602
251static struct sh_timer_config cmt_platform_data = { 603static struct sh_timer_config cmt_platform_data = {
252 .name = "CMT",
253 .channel_offset = 0x60, 604 .channel_offset = 0x60,
254 .timer_bit = 5, 605 .timer_bit = 5,
255 .clk = "cmt0",
256 .clockevent_rating = 125, 606 .clockevent_rating = 125,
257 .clocksource_rating = 200, 607 .clocksource_rating = 200,
258}; 608};
259 609
260static struct resource cmt_resources[] = { 610static struct resource cmt_resources[] = {
261 [0] = { 611 [0] = {
262 .name = "CMT",
263 .start = 0x044a0060, 612 .start = 0x044a0060,
264 .end = 0x044a006b, 613 .end = 0x044a006b,
265 .flags = IORESOURCE_MEM, 614 .flags = IORESOURCE_MEM,
@@ -278,19 +627,19 @@ static struct platform_device cmt_device = {
278 }, 627 },
279 .resource = cmt_resources, 628 .resource = cmt_resources,
280 .num_resources = ARRAY_SIZE(cmt_resources), 629 .num_resources = ARRAY_SIZE(cmt_resources),
630 .archdata = {
631 .hwblk_id = HWBLK_CMT,
632 },
281}; 633};
282 634
283static struct sh_timer_config tmu0_platform_data = { 635static struct sh_timer_config tmu0_platform_data = {
284 .name = "TMU0",
285 .channel_offset = 0x04, 636 .channel_offset = 0x04,
286 .timer_bit = 0, 637 .timer_bit = 0,
287 .clk = "tmu0",
288 .clockevent_rating = 200, 638 .clockevent_rating = 200,
289}; 639};
290 640
291static struct resource tmu0_resources[] = { 641static struct resource tmu0_resources[] = {
292 [0] = { 642 [0] = {
293 .name = "TMU0",
294 .start = 0xffd80008, 643 .start = 0xffd80008,
295 .end = 0xffd80013, 644 .end = 0xffd80013,
296 .flags = IORESOURCE_MEM, 645 .flags = IORESOURCE_MEM,
@@ -309,19 +658,19 @@ static struct platform_device tmu0_device = {
309 }, 658 },
310 .resource = tmu0_resources, 659 .resource = tmu0_resources,
311 .num_resources = ARRAY_SIZE(tmu0_resources), 660 .num_resources = ARRAY_SIZE(tmu0_resources),
661 .archdata = {
662 .hwblk_id = HWBLK_TMU0,
663 },
312}; 664};
313 665
314static struct sh_timer_config tmu1_platform_data = { 666static struct sh_timer_config tmu1_platform_data = {
315 .name = "TMU1",
316 .channel_offset = 0x10, 667 .channel_offset = 0x10,
317 .timer_bit = 1, 668 .timer_bit = 1,
318 .clk = "tmu0",
319 .clocksource_rating = 200, 669 .clocksource_rating = 200,
320}; 670};
321 671
322static struct resource tmu1_resources[] = { 672static struct resource tmu1_resources[] = {
323 [0] = { 673 [0] = {
324 .name = "TMU1",
325 .start = 0xffd80014, 674 .start = 0xffd80014,
326 .end = 0xffd8001f, 675 .end = 0xffd8001f,
327 .flags = IORESOURCE_MEM, 676 .flags = IORESOURCE_MEM,
@@ -340,18 +689,18 @@ static struct platform_device tmu1_device = {
340 }, 689 },
341 .resource = tmu1_resources, 690 .resource = tmu1_resources,
342 .num_resources = ARRAY_SIZE(tmu1_resources), 691 .num_resources = ARRAY_SIZE(tmu1_resources),
692 .archdata = {
693 .hwblk_id = HWBLK_TMU0,
694 },
343}; 695};
344 696
345static struct sh_timer_config tmu2_platform_data = { 697static struct sh_timer_config tmu2_platform_data = {
346 .name = "TMU2",
347 .channel_offset = 0x1c, 698 .channel_offset = 0x1c,
348 .timer_bit = 2, 699 .timer_bit = 2,
349 .clk = "tmu0",
350}; 700};
351 701
352static struct resource tmu2_resources[] = { 702static struct resource tmu2_resources[] = {
353 [0] = { 703 [0] = {
354 .name = "TMU2",
355 .start = 0xffd80020, 704 .start = 0xffd80020,
356 .end = 0xffd8002b, 705 .end = 0xffd8002b,
357 .flags = IORESOURCE_MEM, 706 .flags = IORESOURCE_MEM,
@@ -370,19 +719,19 @@ static struct platform_device tmu2_device = {
370 }, 719 },
371 .resource = tmu2_resources, 720 .resource = tmu2_resources,
372 .num_resources = ARRAY_SIZE(tmu2_resources), 721 .num_resources = ARRAY_SIZE(tmu2_resources),
722 .archdata = {
723 .hwblk_id = HWBLK_TMU0,
724 },
373}; 725};
374 726
375 727
376static struct sh_timer_config tmu3_platform_data = { 728static struct sh_timer_config tmu3_platform_data = {
377 .name = "TMU3",
378 .channel_offset = 0x04, 729 .channel_offset = 0x04,
379 .timer_bit = 0, 730 .timer_bit = 0,
380 .clk = "tmu1",
381}; 731};
382 732
383static struct resource tmu3_resources[] = { 733static struct resource tmu3_resources[] = {
384 [0] = { 734 [0] = {
385 .name = "TMU3",
386 .start = 0xffd90008, 735 .start = 0xffd90008,
387 .end = 0xffd90013, 736 .end = 0xffd90013,
388 .flags = IORESOURCE_MEM, 737 .flags = IORESOURCE_MEM,
@@ -401,18 +750,18 @@ static struct platform_device tmu3_device = {
401 }, 750 },
402 .resource = tmu3_resources, 751 .resource = tmu3_resources,
403 .num_resources = ARRAY_SIZE(tmu3_resources), 752 .num_resources = ARRAY_SIZE(tmu3_resources),
753 .archdata = {
754 .hwblk_id = HWBLK_TMU1,
755 },
404}; 756};
405 757
406static struct sh_timer_config tmu4_platform_data = { 758static struct sh_timer_config tmu4_platform_data = {
407 .name = "TMU4",
408 .channel_offset = 0x10, 759 .channel_offset = 0x10,
409 .timer_bit = 1, 760 .timer_bit = 1,
410 .clk = "tmu1",
411}; 761};
412 762
413static struct resource tmu4_resources[] = { 763static struct resource tmu4_resources[] = {
414 [0] = { 764 [0] = {
415 .name = "TMU4",
416 .start = 0xffd90014, 765 .start = 0xffd90014,
417 .end = 0xffd9001f, 766 .end = 0xffd9001f,
418 .flags = IORESOURCE_MEM, 767 .flags = IORESOURCE_MEM,
@@ -431,18 +780,18 @@ static struct platform_device tmu4_device = {
431 }, 780 },
432 .resource = tmu4_resources, 781 .resource = tmu4_resources,
433 .num_resources = ARRAY_SIZE(tmu4_resources), 782 .num_resources = ARRAY_SIZE(tmu4_resources),
783 .archdata = {
784 .hwblk_id = HWBLK_TMU1,
785 },
434}; 786};
435 787
436static struct sh_timer_config tmu5_platform_data = { 788static struct sh_timer_config tmu5_platform_data = {
437 .name = "TMU5",
438 .channel_offset = 0x1c, 789 .channel_offset = 0x1c,
439 .timer_bit = 2, 790 .timer_bit = 2,
440 .clk = "tmu1",
441}; 791};
442 792
443static struct resource tmu5_resources[] = { 793static struct resource tmu5_resources[] = {
444 [0] = { 794 [0] = {
445 .name = "TMU5",
446 .start = 0xffd90020, 795 .start = 0xffd90020,
447 .end = 0xffd9002b, 796 .end = 0xffd9002b,
448 .flags = IORESOURCE_MEM, 797 .flags = IORESOURCE_MEM,
@@ -461,6 +810,9 @@ static struct platform_device tmu5_device = {
461 }, 810 },
462 .resource = tmu5_resources, 811 .resource = tmu5_resources,
463 .num_resources = ARRAY_SIZE(tmu5_resources), 812 .num_resources = ARRAY_SIZE(tmu5_resources),
813 .archdata = {
814 .hwblk_id = HWBLK_TMU1,
815 },
464}; 816};
465 817
466/* JPU */ 818/* JPU */
@@ -490,9 +842,82 @@ static struct platform_device jpu_device = {
490 }, 842 },
491 .resource = jpu_resources, 843 .resource = jpu_resources,
492 .num_resources = ARRAY_SIZE(jpu_resources), 844 .num_resources = ARRAY_SIZE(jpu_resources),
845 .archdata = {
846 .hwblk_id = HWBLK_JPU,
847 },
848};
849
850/* SPU2DSP0 */
851static struct uio_info spu0_platform_data = {
852 .name = "SPU2DSP0",
853 .version = "0",
854 .irq = 86,
855};
856
857static struct resource spu0_resources[] = {
858 [0] = {
859 .name = "SPU2DSP0",
860 .start = 0xFE200000,
861 .end = 0xFE2FFFFF,
862 .flags = IORESOURCE_MEM,
863 },
864 [1] = {
865 /* place holder for contiguous memory */
866 },
867};
868
869static struct platform_device spu0_device = {
870 .name = "uio_pdrv_genirq",
871 .id = 4,
872 .dev = {
873 .platform_data = &spu0_platform_data,
874 },
875 .resource = spu0_resources,
876 .num_resources = ARRAY_SIZE(spu0_resources),
877 .archdata = {
878 .hwblk_id = HWBLK_SPU,
879 },
880};
881
882/* SPU2DSP1 */
883static struct uio_info spu1_platform_data = {
884 .name = "SPU2DSP1",
885 .version = "0",
886 .irq = 87,
887};
888
889static struct resource spu1_resources[] = {
890 [0] = {
891 .name = "SPU2DSP1",
892 .start = 0xFE300000,
893 .end = 0xFE3FFFFF,
894 .flags = IORESOURCE_MEM,
895 },
896 [1] = {
897 /* place holder for contiguous memory */
898 },
899};
900
901static struct platform_device spu1_device = {
902 .name = "uio_pdrv_genirq",
903 .id = 5,
904 .dev = {
905 .platform_data = &spu1_platform_data,
906 },
907 .resource = spu1_resources,
908 .num_resources = ARRAY_SIZE(spu1_resources),
909 .archdata = {
910 .hwblk_id = HWBLK_SPU,
911 },
493}; 912};
494 913
495static struct platform_device *sh7724_devices[] __initdata = { 914static struct platform_device *sh7724_devices[] __initdata = {
915 &scif0_device,
916 &scif1_device,
917 &scif2_device,
918 &scif3_device,
919 &scif4_device,
920 &scif5_device,
496 &cmt_device, 921 &cmt_device,
497 &tmu0_device, 922 &tmu0_device,
498 &tmu1_device, 923 &tmu1_device,
@@ -500,14 +925,19 @@ static struct platform_device *sh7724_devices[] __initdata = {
500 &tmu3_device, 925 &tmu3_device,
501 &tmu4_device, 926 &tmu4_device,
502 &tmu5_device, 927 &tmu5_device,
503 &sci_device, 928 &dma0_device,
929 &dma1_device,
504 &rtc_device, 930 &rtc_device,
505 &iic0_device, 931 &iic0_device,
506 &iic1_device, 932 &iic1_device,
507 &vpu_device, 933 &vpu_device,
508 &veu0_device, 934 &veu0_device,
509 &veu1_device, 935 &veu1_device,
936 &beu0_device,
937 &beu1_device,
510 &jpu_device, 938 &jpu_device,
939 &spu0_device,
940 &spu1_device,
511}; 941};
512 942
513static int __init sh7724_devices_setup(void) 943static int __init sh7724_devices_setup(void)
@@ -516,13 +946,21 @@ static int __init sh7724_devices_setup(void)
516 platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); 946 platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
517 platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); 947 platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
518 platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); 948 platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
949 platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
950 platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
519 951
520 return platform_add_devices(sh7724_devices, 952 return platform_add_devices(sh7724_devices,
521 ARRAY_SIZE(sh7724_devices)); 953 ARRAY_SIZE(sh7724_devices));
522} 954}
523device_initcall(sh7724_devices_setup); 955arch_initcall(sh7724_devices_setup);
524 956
525static struct platform_device *sh7724_early_devices[] __initdata = { 957static struct platform_device *sh7724_early_devices[] __initdata = {
958 &scif0_device,
959 &scif1_device,
960 &scif2_device,
961 &scif3_device,
962 &scif4_device,
963 &scif5_device,
526 &cmt_device, 964 &cmt_device,
527 &tmu0_device, 965 &tmu0_device,
528 &tmu1_device, 966 &tmu1_device,
@@ -541,14 +979,17 @@ void __init plat_early_device_setup(void)
541#define RAMCR_CACHE_L2FC 0x0002 979#define RAMCR_CACHE_L2FC 0x0002
542#define RAMCR_CACHE_L2E 0x0001 980#define RAMCR_CACHE_L2E 0x0001
543#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) 981#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
544void __uses_jump_to_uncached l2_cache_init(void) 982
983void l2_cache_init(void)
545{ 984{
546 /* Enable L2 cache */ 985 /* Enable L2 cache */
547 ctrl_outl(L2_CACHE_ENABLE, RAMCR); 986 __raw_writel(L2_CACHE_ENABLE, RAMCR);
548} 987}
549 988
550enum { 989enum {
551 UNUSED = 0, 990 UNUSED = 0,
991 ENABLED,
992 DISABLED,
552 993
553 /* interrupt sources */ 994 /* interrupt sources */
554 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, 995 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -577,14 +1018,12 @@ enum {
577 ETHI, 1018 ETHI,
578 I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, 1019 I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
579 I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, 1020 I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
580 SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3,
581 CMT, 1021 CMT,
582 TSIF, 1022 TSIF,
583 FSI, 1023 FSI,
584 SCIFA5, 1024 SCIFA5,
585 TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, 1025 TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
586 IRDA, 1026 IRDA,
587 SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2,
588 JPU, 1027 JPU,
589 _2DDMAC, 1028 _2DDMAC,
590 MMC_MMC2I, MMC_MMC3I, 1029 MMC_MMC2I, MMC_MMC3I,
@@ -666,10 +1105,10 @@ static struct intc_vect vectors[] __initdata = {
666 INTC_VECT(I2C0_WAITI, 0xE40), 1105 INTC_VECT(I2C0_WAITI, 0xE40),
667 INTC_VECT(I2C0_DTEI, 0xE60), 1106 INTC_VECT(I2C0_DTEI, 0xE60),
668 1107
669 INTC_VECT(SDHI0_SDHII0, 0xE80), 1108 INTC_VECT(SDHI0, 0xE80),
670 INTC_VECT(SDHI0_SDHII1, 0xEA0), 1109 INTC_VECT(SDHI0, 0xEA0),
671 INTC_VECT(SDHI0_SDHII2, 0xEC0), 1110 INTC_VECT(SDHI0, 0xEC0),
672 INTC_VECT(SDHI0_SDHII3, 0xEE0), 1111 INTC_VECT(SDHI0, 0xEE0),
673 1112
674 INTC_VECT(CMT, 0xF00), 1113 INTC_VECT(CMT, 0xF00),
675 INTC_VECT(TSIF, 0xF20), 1114 INTC_VECT(TSIF, 0xF20),
@@ -682,9 +1121,9 @@ static struct intc_vect vectors[] __initdata = {
682 1121
683 INTC_VECT(IRDA, 0x480), 1122 INTC_VECT(IRDA, 0x480),
684 1123
685 INTC_VECT(SDHI1_SDHII0, 0x4E0), 1124 INTC_VECT(SDHI1, 0x4E0),
686 INTC_VECT(SDHI1_SDHII1, 0x500), 1125 INTC_VECT(SDHI1, 0x500),
687 INTC_VECT(SDHI1_SDHII2, 0x520), 1126 INTC_VECT(SDHI1, 0x520),
688 1127
689 INTC_VECT(JPU, 0x560), 1128 INTC_VECT(JPU, 0x560),
690 INTC_VECT(_2DDMAC, 0x4A0), 1129 INTC_VECT(_2DDMAC, 0x4A0),
@@ -710,8 +1149,6 @@ static struct intc_group groups[] __initdata = {
710 INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR), 1149 INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
711 INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), 1150 INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
712 INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), 1151 INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
713 INTC_GROUP(SDHI0, SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3),
714 INTC_GROUP(SDHI1, SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2),
715 INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1), 1152 INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
716 INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I), 1153 INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
717}; 1154};
@@ -719,7 +1156,7 @@ static struct intc_group groups[] __initdata = {
719static struct intc_mask_reg mask_registers[] __initdata = { 1156static struct intc_mask_reg mask_registers[] __initdata = {
720 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ 1157 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
721 { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, 1158 { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
722 0, SDHI1_SDHII2, SDHI1_SDHII1, SDHI1_SDHII0 } }, 1159 0, DISABLED, ENABLED, ENABLED } },
723 { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ 1160 { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
724 { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, 1161 { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
725 DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, 1162 DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -741,7 +1178,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
741 { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, 1178 { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
742 I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, 1179 I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
743 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ 1180 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
744 { SDHI0_SDHII3, SDHI0_SDHII2, SDHI0_SDHII1, SDHI0_SDHII0, 1181 { DISABLED, DISABLED, ENABLED, ENABLED,
745 0, 0, SCIFA5, FSI } }, 1182 0, 0, SCIFA5, FSI } },
746 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ 1183 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
747 { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, 1184 { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
@@ -788,11 +1225,205 @@ static struct intc_mask_reg ack_registers[] __initdata = {
788 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, 1225 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
789}; 1226};
790 1227
791static DECLARE_INTC_DESC_ACK(intc_desc, "sh7724", vectors, groups, 1228static struct intc_desc intc_desc __initdata = {
792 mask_registers, prio_registers, sense_registers, 1229 .name = "sh7724",
793 ack_registers); 1230 .force_enable = ENABLED,
1231 .force_disable = DISABLED,
1232 .hw = INTC_HW_DESC(vectors, groups, mask_registers,
1233 prio_registers, sense_registers, ack_registers),
1234};
794 1235
795void __init plat_irq_setup(void) 1236void __init plat_irq_setup(void)
796{ 1237{
797 register_intc_controller(&intc_desc); 1238 register_intc_controller(&intc_desc);
798} 1239}
1240
1241static struct {
1242 /* BSC */
1243 unsigned long mmselr;
1244 unsigned long cs0bcr;
1245 unsigned long cs4bcr;
1246 unsigned long cs5abcr;
1247 unsigned long cs5bbcr;
1248 unsigned long cs6abcr;
1249 unsigned long cs6bbcr;
1250 unsigned long cs4wcr;
1251 unsigned long cs5awcr;
1252 unsigned long cs5bwcr;
1253 unsigned long cs6awcr;
1254 unsigned long cs6bwcr;
1255 /* INTC */
1256 unsigned short ipra;
1257 unsigned short iprb;
1258 unsigned short iprc;
1259 unsigned short iprd;
1260 unsigned short ipre;
1261 unsigned short iprf;
1262 unsigned short iprg;
1263 unsigned short iprh;
1264 unsigned short ipri;
1265 unsigned short iprj;
1266 unsigned short iprk;
1267 unsigned short iprl;
1268 unsigned char imr0;
1269 unsigned char imr1;
1270 unsigned char imr2;
1271 unsigned char imr3;
1272 unsigned char imr4;
1273 unsigned char imr5;
1274 unsigned char imr6;
1275 unsigned char imr7;
1276 unsigned char imr8;
1277 unsigned char imr9;
1278 unsigned char imr10;
1279 unsigned char imr11;
1280 unsigned char imr12;
1281 /* RWDT */
1282 unsigned short rwtcnt;
1283 unsigned short rwtcsr;
1284 /* CPG */
1285 unsigned long irdaclk;
1286 unsigned long spuclk;
1287} sh7724_rstandby_state;
1288
1289static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
1290 unsigned long flags, void *unused)
1291{
1292 if (!(flags & SUSP_SH_RSTANDBY))
1293 return NOTIFY_DONE;
1294
1295 /* BCR */
1296 sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
1297 sh7724_rstandby_state.mmselr |= 0xa5a50000;
1298 sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
1299 sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
1300 sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
1301 sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
1302 sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
1303 sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
1304 sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
1305 sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
1306 sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
1307 sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
1308 sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
1309
1310 /* INTC */
1311 sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
1312 sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
1313 sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
1314 sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
1315 sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
1316 sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
1317 sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
1318 sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
1319 sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
1320 sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
1321 sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
1322 sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
1323 sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
1324 sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
1325 sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
1326 sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
1327 sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
1328 sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
1329 sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
1330 sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
1331 sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
1332 sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
1333 sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
1334 sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
1335 sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
1336
1337 /* RWDT */
1338 sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
1339 sh7724_rstandby_state.rwtcnt |= 0x5a00;
1340 sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
1341 sh7724_rstandby_state.rwtcsr |= 0xa500;
1342 __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
1343
1344 /* CPG */
1345 sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
1346 sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
1347
1348 return NOTIFY_DONE;
1349}
1350
1351static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
1352 unsigned long flags, void *unused)
1353{
1354 if (!(flags & SUSP_SH_RSTANDBY))
1355 return NOTIFY_DONE;
1356
1357 /* BCR */
1358 __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
1359 __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
1360 __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
1361 __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
1362 __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
1363 __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
1364 __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
1365 __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
1366 __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
1367 __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
1368 __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
1369 __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
1370
1371 /* INTC */
1372 __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
1373 __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
1374 __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
1375 __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
1376 __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
1377 __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
1378 __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
1379 __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
1380 __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
1381 __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
1382 __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
1383 __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
1384 __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
1385 __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
1386 __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
1387 __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
1388 __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
1389 __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
1390 __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
1391 __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
1392 __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
1393 __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
1394 __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
1395 __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
1396 __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
1397
1398 /* RWDT */
1399 __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
1400 __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
1401
1402 /* CPG */
1403 __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
1404 __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
1405
1406 return NOTIFY_DONE;
1407}
1408
1409static struct notifier_block sh7724_pre_sleep_notifier = {
1410 .notifier_call = sh7724_pre_sleep_notifier_call,
1411 .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
1412};
1413
1414static struct notifier_block sh7724_post_sleep_notifier = {
1415 .notifier_call = sh7724_post_sleep_notifier_call,
1416 .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
1417};
1418
1419static int __init sh7724_sleep_setup(void)
1420{
1421 atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
1422 &sh7724_pre_sleep_notifier);
1423
1424 atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
1425 &sh7724_post_sleep_notifier);
1426 return 0;
1427}
1428arch_initcall(sh7724_sleep_setup);
1429
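The sh7724 hunk above drops the DECLARE_INTC_DESC_ACK() one-liner in favour of an open-coded struct intc_desc purely so that .force_enable and .force_disable can point at the new ENABLED/DISABLED markers; the convenience macro leaves those fields zeroed. For reference, the macro is assumed to expand to roughly the following (a sketch of the include/linux/sh_intc.h definition of this period, not a verbatim copy):

/*
 * Sketch only: approximate expansion of DECLARE_INTC_DESC_ACK().
 * Note that .force_enable/.force_disable stay 0, which is why the
 * sh7724 code now spells the initializer out by hand.
 */
#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups,	\
	mask_regs, prio_regs, sense_regs, ack_regs)			\
struct intc_desc symbol __initdata = {					\
	.name = chipname,						\
	.hw = INTC_HW_DESC(vectors, groups, mask_regs,			\
			   prio_regs, sense_regs, ack_regs),		\
}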
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
new file mode 100644
index 000000000000..9c1de2633ac3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -0,0 +1,623 @@
1/*
2 * SH7757 Setup
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 *
6 * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/platform_device.h>
13#include <linux/init.h>
14#include <linux/serial.h>
15#include <linux/serial_sci.h>
16#include <linux/io.h>
17#include <linux/mm.h>
18#include <linux/sh_timer.h>
19
20static struct plat_sci_port scif2_platform_data = {
21 .mapbase = 0xfe4b0000, /* SCIF2 */
22 .flags = UPF_BOOT_AUTOCONF,
23 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
24 .scbrr_algo_id = SCBRR_ALGO_2,
25 .type = PORT_SCIF,
26 .irqs = { 40, 40, 40, 40 },
27};
28
29static struct platform_device scif2_device = {
30 .name = "sh-sci",
31 .id = 0,
32 .dev = {
33 .platform_data = &scif2_platform_data,
34 },
35};
36
37static struct plat_sci_port scif3_platform_data = {
38 .mapbase = 0xfe4c0000, /* SCIF3 */
39 .flags = UPF_BOOT_AUTOCONF,
40 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
41 .scbrr_algo_id = SCBRR_ALGO_2,
42 .type = PORT_SCIF,
43 .irqs = { 76, 76, 76, 76 },
44};
45
46static struct platform_device scif3_device = {
47 .name = "sh-sci",
48 .id = 1,
49 .dev = {
50 .platform_data = &scif3_platform_data,
51 },
52};
53
54static struct plat_sci_port scif4_platform_data = {
55 .mapbase = 0xfe4d0000, /* SCIF4 */
56 .flags = UPF_BOOT_AUTOCONF,
57 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
58 .scbrr_algo_id = SCBRR_ALGO_2,
59 .type = PORT_SCIF,
60 .irqs = { 104, 104, 104, 104 },
61};
62
63static struct platform_device scif4_device = {
64 .name = "sh-sci",
65 .id = 2,
66 .dev = {
67 .platform_data = &scif4_platform_data,
68 },
69};
70
71static struct sh_timer_config tmu0_platform_data = {
72 .channel_offset = 0x04,
73 .timer_bit = 0,
74 .clockevent_rating = 200,
75};
76
77static struct resource tmu0_resources[] = {
78 [0] = {
79 .start = 0xfe430008,
80 .end = 0xfe430013,
81 .flags = IORESOURCE_MEM,
82 },
83 [1] = {
84 .start = 28,
85 .flags = IORESOURCE_IRQ,
86 },
87};
88
89static struct platform_device tmu0_device = {
90 .name = "sh_tmu",
91 .id = 0,
92 .dev = {
93 .platform_data = &tmu0_platform_data,
94 },
95 .resource = tmu0_resources,
96 .num_resources = ARRAY_SIZE(tmu0_resources),
97};
98
99static struct sh_timer_config tmu1_platform_data = {
100 .channel_offset = 0x10,
101 .timer_bit = 1,
102 .clocksource_rating = 200,
103};
104
105static struct resource tmu1_resources[] = {
106 [0] = {
107 .start = 0xfe430014,
108 .end = 0xfe43001f,
109 .flags = IORESOURCE_MEM,
110 },
111 [1] = {
112 .start = 29,
113 .flags = IORESOURCE_IRQ,
114 },
115};
116
117static struct platform_device tmu1_device = {
118 .name = "sh_tmu",
119 .id = 1,
120 .dev = {
121 .platform_data = &tmu1_platform_data,
122 },
123 .resource = tmu1_resources,
124 .num_resources = ARRAY_SIZE(tmu1_resources),
125};
126
127static struct platform_device *sh7757_devices[] __initdata = {
128 &scif2_device,
129 &scif3_device,
130 &scif4_device,
131 &tmu0_device,
132 &tmu1_device,
133};
134
135static int __init sh7757_devices_setup(void)
136{
137 return platform_add_devices(sh7757_devices,
138 ARRAY_SIZE(sh7757_devices));
139}
140arch_initcall(sh7757_devices_setup);
141
142static struct platform_device *sh7757_early_devices[] __initdata = {
143 &scif2_device,
144 &scif3_device,
145 &scif4_device,
146 &tmu0_device,
147 &tmu1_device,
148};
149
150void __init plat_early_device_setup(void)
151{
152 early_platform_add_devices(sh7757_early_devices,
153 ARRAY_SIZE(sh7757_early_devices));
154}
155
156enum {
157 UNUSED = 0,
158
159 /* interrupt sources */
160
161 IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
162 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
163 IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
164 IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
165
166 IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
167 IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
168 IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
169 IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
170 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
171
172 SDHI, DVC,
173 IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15,
174 TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
175 HUDI,
176 ARC4,
177 DMAC0_5, DMAC6_7, DMAC8_11,
178 SCIF0, SCIF1, SCIF2, SCIF3, SCIF4,
179 USB0, USB1,
180 JMC,
181 SPI0, SPI1,
182 TMR01, TMR23, TMR45,
183 FRT,
184 LPC, LPC5, LPC6, LPC7, LPC8,
185 PECI0, PECI1, PECI2, PECI3, PECI4, PECI5,
186 ETHERC,
187 ADC0, ADC1,
188 SIM,
189 IIC0_0, IIC0_1, IIC0_2, IIC0_3,
190 IIC1_0, IIC1_1, IIC1_2, IIC1_3,
191 IIC2_0, IIC2_1, IIC2_2, IIC2_3,
192 IIC3_0, IIC3_1, IIC3_2, IIC3_3,
193 IIC4_0, IIC4_1, IIC4_2, IIC4_3,
194 IIC5_0, IIC5_1, IIC5_2, IIC5_3,
195 IIC6_0, IIC6_1, IIC6_2, IIC6_3,
196 IIC7_0, IIC7_1, IIC7_2, IIC7_3,
197 IIC8_0, IIC8_1, IIC8_2, IIC8_3,
198 IIC9_0, IIC9_1, IIC9_2, IIC9_3,
199 ONFICTL,
200 MMC1, MMC2,
201 ECCU,
202 PCIC,
203 G200,
204 RSPI,
205 SGPIO,
206 DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19,
207 DMINT20, DMINT21, DMINT22, DMINT23,
208 DDRECC,
209 TSIP,
210 PCIE_BRIDGE,
211 WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B,
212 GETHER0, GETHER1, GETHER2,
213 PBIA, PBIB, PBIC,
214 DMAE2, DMAE3,
215 SERMUX2, SERMUX3,
216
217 /* interrupt groups */
218
219 TMU012, TMU345,
220};
221
222static struct intc_vect vectors[] __initdata = {
223 INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x4a0),
224 INTC_VECT(SDHI, 0x4c0),
225 INTC_VECT(DVC, 0x4e0),
226 INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
227 INTC_VECT(IRQ10, 0x540),
228 INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
229 INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
230 INTC_VECT(HUDI, 0x600),
231 INTC_VECT(ARC4, 0x620),
232 INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660),
233 INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0),
234 INTC_VECT(DMAC0_5, 0x6c0),
235 INTC_VECT(IRQ11, 0x6e0),
236 INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
237 INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
238 INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0),
239 INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0),
240 INTC_VECT(USB0, 0x840),
241 INTC_VECT(IRQ12, 0x880),
242 INTC_VECT(JMC, 0x8a0),
243 INTC_VECT(SPI1, 0x8c0),
244 INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900),
245 INTC_VECT(USB1, 0x920),
246 INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
247 INTC_VECT(TMR45, 0xa40),
248 INTC_VECT(FRT, 0xa80),
249 INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
250 INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
251 INTC_VECT(LPC, 0xb20),
252 INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
253 INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
254 INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
255 INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20),
256 INTC_VECT(PECI2, 0xc40),
257 INTC_VECT(IRQ15, 0xc60),
258 INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
259 INTC_VECT(SPI0, 0xcc0),
260 INTC_VECT(ADC1, 0xce0),
261 INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20),
262 INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60),
263 INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
264 INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
265 INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
266 INTC_VECT(TMU5, 0xe40),
267 INTC_VECT(ADC0, 0xe60),
268 INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20),
269 INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60),
270 INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420),
271 INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460),
272 INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0),
273 INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520),
274 INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560),
275 INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600),
276 INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640),
277 INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700),
278 INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800),
279 INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840),
280 INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
281 INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
282 INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
283 INTC_VECT(IIC6_2, 0x1920),
284 INTC_VECT(ONFICTL, 0x1960),
285 INTC_VECT(IIC6_3, 0x1980),
286 INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
287 INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
288 INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
289 INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
290 INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
291 INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
292 INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80),
293 INTC_VECT(ECCU, 0x1cc0),
294 INTC_VECT(PCIC, 0x1ce0),
295 INTC_VECT(G200, 0x1d00),
296 INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0),
297 INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0),
298 INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0),
299 INTC_VECT(PECI5, 0x1f00),
300 INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0),
301 INTC_VECT(SGPIO, 0x1fc0),
302 INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420),
303 INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460),
304 INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0),
305 INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520),
306 INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560),
307 INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600),
308 INTC_VECT(DDRECC, 0x2620),
309 INTC_VECT(TSIP, 0x2640),
310 INTC_VECT(PCIE_BRIDGE, 0x27c0),
311 INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820),
312 INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860),
313 INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0),
314 INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0),
315 INTC_VECT(WDT8B, 0x2900),
316 INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980),
317 INTC_VECT(GETHER2, 0x29a0),
318 INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20),
319 INTC_VECT(PBIC, 0x2a40),
320 INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80),
321 INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40),
322 INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80),
323 INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20),
324};
325
326static struct intc_group groups[] __initdata = {
327 INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
328 INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
329};
330
331static struct intc_mask_reg mask_registers[] __initdata = {
332 { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
333 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
334
335 { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
336 { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
337 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
338 IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
339 IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
340 IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
341 IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
342 IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
343 IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
344
345 { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
346 { 0, 0, 0, 0, 0, 0, 0, 0,
347 0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45,
348 TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5,
349 HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012
350 } },
351
352 { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
353 { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
354 IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
355 ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1,
356 ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
357 } },
358
359 { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
360 { IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 0,
361 0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
362 IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
363 IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2
364 } },
365
366 { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
367 { MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2,
368 IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
369 PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3,
370 IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
371 } },
372
373 { 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */
374 { WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0,
375 0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC,
376 PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP,
377 DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22
378 } },
379
380 { 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */
381 { 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0,
382 DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0,
383 0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8,
384 DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17
385 } },
386};
387
388#define INTPRI 0xffd00010
389#define INT2PRI0 0xffd40000
390#define INT2PRI1 0xffd40004
391#define INT2PRI2 0xffd40008
392#define INT2PRI3 0xffd4000c
393#define INT2PRI4 0xffd40010
394#define INT2PRI5 0xffd40014
395#define INT2PRI6 0xffd40018
396#define INT2PRI7 0xffd4001c
397#define INT2PRI8 0xffd400a0
398#define INT2PRI9 0xffd400a4
399#define INT2PRI10 0xffd400a8
400#define INT2PRI11 0xffd400ac
401#define INT2PRI12 0xffd400b0
402#define INT2PRI13 0xffd400b4
403#define INT2PRI14 0xffd400b8
404#define INT2PRI15 0xffd400bc
405#define INT2PRI16 0xffd10000
406#define INT2PRI17 0xffd10004
407#define INT2PRI18 0xffd10008
408#define INT2PRI19 0xffd1000c
409#define INT2PRI20 0xffd10010
410#define INT2PRI21 0xffd10014
411#define INT2PRI22 0xffd10018
412#define INT2PRI23 0xffd1001c
413#define INT2PRI24 0xffd100a0
414#define INT2PRI25 0xffd100a4
415#define INT2PRI26 0xffd100a8
416#define INT2PRI27 0xffd100ac
417#define INT2PRI28 0xffd100b0
418#define INT2PRI29 0xffd100b4
419#define INT2PRI30 0xffd100b8
420#define INT2PRI31 0xffd100bc
421#define INT2PRI32 0xffd20000
422#define INT2PRI33 0xffd20004
423#define INT2PRI34 0xffd20008
424#define INT2PRI35 0xffd2000c
425#define INT2PRI36 0xffd20010
426#define INT2PRI37 0xffd20014
427#define INT2PRI38 0xffd20018
428#define INT2PRI39 0xffd2001c
429#define INT2PRI40 0xffd200a0
430#define INT2PRI41 0xffd200a4
431#define INT2PRI42 0xffd200a8
432#define INT2PRI43 0xffd200ac
433#define INT2PRI44 0xffd200b0
434#define INT2PRI45 0xffd200b4
435#define INT2PRI46 0xffd200b8
436#define INT2PRI47 0xffd200bc
437
438static struct intc_prio_reg prio_registers[] __initdata = {
439 { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
440 IRQ4, IRQ5, IRQ6, IRQ7 } },
441
442 { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
443 { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
444 { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } },
445 { INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } },
446 { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
447 { INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } },
448 { INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } },
449 { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
450 { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
451 { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
452 { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } },
453 { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } },
454 { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
455 { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
456
457 { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
458 { INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } },
459 { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
460 { INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
461 { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
462 { INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
463 { INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } },
464 { INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } },
465 { INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } },
466 { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
467 { INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } },
468 { INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } },
469 { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } },
470 { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
471 { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } },
472 { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
473 { INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } },
474 { INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } },
475 { INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } },
476 { INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } },
477 { INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } },
478 { INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } },
479 { INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } },
480 { INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } },
481 { INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } },
482 { INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } },
483 { INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } },
484 { INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } },
485 { INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } },
486 { INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } },
487 { INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } },
488 { INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } },
489};
490
491static struct intc_sense_reg sense_registers_irq8to15[] __initdata = {
492 { 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12,
493 IRQ11, IRQ10, IRQ9, IRQ8 } },
494};
495
496static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
497 mask_registers, prio_registers,
498 sense_registers_irq8to15);
499
500/* Support for external interrupt pins in IRQ mode */
501static struct intc_vect vectors_irq0123[] __initdata = {
502 INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
503 INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
504};
505
506static struct intc_vect vectors_irq4567[] __initdata = {
507 INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
508 INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
509};
510
511static struct intc_sense_reg sense_registers[] __initdata = {
512 { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
513 IRQ4, IRQ5, IRQ6, IRQ7 } },
514};
515
516static struct intc_mask_reg ack_registers[] __initdata = {
517 { 0xffd00024, 0, 32, /* INTREQ */
518 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
519};
520
521static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123",
522 vectors_irq0123, NULL, mask_registers,
523 prio_registers, sense_registers, ack_registers);
524
525static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567",
526 vectors_irq4567, NULL, mask_registers,
527 prio_registers, sense_registers, ack_registers);
528
529/* External interrupt pins in IRL mode */
530static struct intc_vect vectors_irl0123[] __initdata = {
531 INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
532 INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
533 INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
534 INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
535 INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
536 INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
537 INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
538 INTC_VECT(IRL0_HHHL, 0x3c0),
539};
540
541static struct intc_vect vectors_irl4567[] __initdata = {
542 INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
543 INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
544 INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
545 INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
546 INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
547 INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
548 INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
549 INTC_VECT(IRL4_HHHL, 0xcc0),
550};
551
552static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
553 NULL, mask_registers, NULL, NULL);
554
555static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
556 NULL, mask_registers, NULL, NULL);
557
558#define INTC_ICR0 0xffd00000
559#define INTC_INTMSK0 0xffd00044
560#define INTC_INTMSK1 0xffd00048
561#define INTC_INTMSK2 0xffd40080
562#define INTC_INTMSKCLR1 0xffd00068
563#define INTC_INTMSKCLR2 0xffd40084
564
565void __init plat_irq_setup(void)
566{
567 /* disable IRQ3-0 + IRQ7-4 */
568 __raw_writel(0xff000000, INTC_INTMSK0);
569
570 /* disable IRL3-0 + IRL7-4 */
571 __raw_writel(0xc0000000, INTC_INTMSK1);
572 __raw_writel(0xfffefffe, INTC_INTMSK2);
573
574 /* select IRL mode for IRL3-0 + IRL7-4 */
575 __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
576
577 /* disable the holding function, i.e. enable "SH-4 Mode" */
578 __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
579
580 register_intc_controller(&intc_desc);
581}
582
583void __init plat_irq_setup_pins(int mode)
584{
585 switch (mode) {
586 case IRQ_MODE_IRQ7654:
587 /* select IRQ mode for IRL7-4 */
588 __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
589 register_intc_controller(&intc_desc_irq4567);
590 break;
591 case IRQ_MODE_IRQ3210:
592 /* select IRQ mode for IRL3-0 */
593 __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
594 register_intc_controller(&intc_desc_irq0123);
595 break;
596 case IRQ_MODE_IRL7654:
597 /* enable IRL7-4 but don't provide any masking */
598 __raw_writel(0x40000000, INTC_INTMSKCLR1);
599 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
600 break;
601 case IRQ_MODE_IRL3210:
602 /* enable IRL0-3 but don't provide any masking */
603 __raw_writel(0x80000000, INTC_INTMSKCLR1);
604 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
605 break;
606 case IRQ_MODE_IRL7654_MASK:
607 /* enable IRL7-4 and mask using cpu intc controller */
608 __raw_writel(0x40000000, INTC_INTMSKCLR1);
609 register_intc_controller(&intc_desc_irl4567);
610 break;
611 case IRQ_MODE_IRL3210_MASK:
612 /* enable IRL0-3 and mask using cpu intc controller */
613 __raw_writel(0x80000000, INTC_INTMSKCLR1);
614 register_intc_controller(&intc_desc_irl0123);
615 break;
616 default:
617 BUG();
618 }
619}
620
621void __init plat_mem_setup(void)
622{
623}
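setup-sh7757.c above only registers the INTC descriptors; choosing between IRQ and IRL modes for the external pins is left to the board, which is expected to call plat_irq_setup_pins() from its machine-vector init_irq hook. A minimal, hypothetical sketch of such a caller (the board function name is made up; the prototype and the IRQ_MODE_* constants are assumed to come from arch/sh/include/asm/irq.h):

/* Hypothetical board hook: route IRL3-0 as four individual IRQ pins. */
#include <linux/init.h>
#include <asm/irq.h>

static void __init example_board_init_irq(void)
{
	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}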
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index ab02771ee888..593eca6509b5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -16,6 +16,57 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/serial_sci.h> 17#include <linux/serial_sci.h>
18 18
19static struct plat_sci_port scif0_platform_data = {
20 .mapbase = 0xffe00000,
21 .flags = UPF_BOOT_AUTOCONF,
22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
23 .scbrr_algo_id = SCBRR_ALGO_2,
24 .type = PORT_SCIF,
25 .irqs = { 40, 40, 40, 40 },
26};
27
28static struct platform_device scif0_device = {
29 .name = "sh-sci",
30 .id = 0,
31 .dev = {
32 .platform_data = &scif0_platform_data,
33 },
34};
35
36static struct plat_sci_port scif1_platform_data = {
37 .mapbase = 0xffe08000,
38 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
40 .scbrr_algo_id = SCBRR_ALGO_2,
41 .type = PORT_SCIF,
42 .irqs = { 76, 76, 76, 76 },
43};
44
45static struct platform_device scif1_device = {
46 .name = "sh-sci",
47 .id = 1,
48 .dev = {
49 .platform_data = &scif1_platform_data,
50 },
51};
52
53static struct plat_sci_port scif2_platform_data = {
54 .mapbase = 0xffe10000,
55 .flags = UPF_BOOT_AUTOCONF,
56 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
57 .scbrr_algo_id = SCBRR_ALGO_2,
58 .type = PORT_SCIF,
59 .irqs = { 104, 104, 104, 104 },
60};
61
62static struct platform_device scif2_device = {
63 .name = "sh-sci",
64 .id = 2,
65 .dev = {
66 .platform_data = &scif2_platform_data,
67 },
68};
69
19static struct resource rtc_resources[] = { 70static struct resource rtc_resources[] = {
20 [0] = { 71 [0] = {
21 .start = 0xffe80000, 72 .start = 0xffe80000,
@@ -36,41 +87,6 @@ static struct platform_device rtc_device = {
36 .resource = rtc_resources, 87 .resource = rtc_resources,
37}; 88};
38 89
39static struct plat_sci_port sci_platform_data[] = {
40 {
41 .mapbase = 0xffe00000,
42 .flags = UPF_BOOT_AUTOCONF,
43 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
44 .scbrr_algo_id = SCBRR_ALGO_2,
45 .type = PORT_SCIF,
46 .irqs = { 40, 40, 40, 40 },
47 }, {
48 .mapbase = 0xffe08000,
49 .flags = UPF_BOOT_AUTOCONF,
50 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
51 .scbrr_algo_id = SCBRR_ALGO_2,
52 .type = PORT_SCIF,
53 .irqs = { 76, 76, 76, 76 },
54 }, {
55 .mapbase = 0xffe10000,
56 .flags = UPF_BOOT_AUTOCONF,
57 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
58 .scbrr_algo_id = SCBRR_ALGO_2,
59 .type = PORT_SCIF,
60 .irqs = { 104, 104, 104, 104 },
61 }, {
62 .flags = 0,
63 }
64};
65
66static struct platform_device sci_device = {
67 .name = "sh-sci",
68 .id = -1,
69 .dev = {
70 .platform_data = sci_platform_data,
71 },
72};
73
74static struct resource usb_ohci_resources[] = { 90static struct resource usb_ohci_resources[] = {
75 [0] = { 91 [0] = {
76 .start = 0xffec8000, 92 .start = 0xffec8000,
@@ -121,16 +137,13 @@ static struct platform_device usbf_device = {
121}; 137};
122 138
123static struct sh_timer_config tmu0_platform_data = { 139static struct sh_timer_config tmu0_platform_data = {
124 .name = "TMU0",
125 .channel_offset = 0x04, 140 .channel_offset = 0x04,
126 .timer_bit = 0, 141 .timer_bit = 0,
127 .clk = "peripheral_clk",
128 .clockevent_rating = 200, 142 .clockevent_rating = 200,
129}; 143};
130 144
131static struct resource tmu0_resources[] = { 145static struct resource tmu0_resources[] = {
132 [0] = { 146 [0] = {
133 .name = "TMU0",
134 .start = 0xffd80008, 147 .start = 0xffd80008,
135 .end = 0xffd80013, 148 .end = 0xffd80013,
136 .flags = IORESOURCE_MEM, 149 .flags = IORESOURCE_MEM,
@@ -152,16 +165,13 @@ static struct platform_device tmu0_device = {
152}; 165};
153 166
154static struct sh_timer_config tmu1_platform_data = { 167static struct sh_timer_config tmu1_platform_data = {
155 .name = "TMU1",
156 .channel_offset = 0x10, 168 .channel_offset = 0x10,
157 .timer_bit = 1, 169 .timer_bit = 1,
158 .clk = "peripheral_clk",
159 .clocksource_rating = 200, 170 .clocksource_rating = 200,
160}; 171};
161 172
162static struct resource tmu1_resources[] = { 173static struct resource tmu1_resources[] = {
163 [0] = { 174 [0] = {
164 .name = "TMU1",
165 .start = 0xffd80014, 175 .start = 0xffd80014,
166 .end = 0xffd8001f, 176 .end = 0xffd8001f,
167 .flags = IORESOURCE_MEM, 177 .flags = IORESOURCE_MEM,
@@ -183,15 +193,12 @@ static struct platform_device tmu1_device = {
183}; 193};
184 194
185static struct sh_timer_config tmu2_platform_data = { 195static struct sh_timer_config tmu2_platform_data = {
186 .name = "TMU2",
187 .channel_offset = 0x1c, 196 .channel_offset = 0x1c,
188 .timer_bit = 2, 197 .timer_bit = 2,
189 .clk = "peripheral_clk",
190}; 198};
191 199
192static struct resource tmu2_resources[] = { 200static struct resource tmu2_resources[] = {
193 [0] = { 201 [0] = {
194 .name = "TMU2",
195 .start = 0xffd80020, 202 .start = 0xffd80020,
196 .end = 0xffd8002f, 203 .end = 0xffd8002f,
197 .flags = IORESOURCE_MEM, 204 .flags = IORESOURCE_MEM,
@@ -213,15 +220,12 @@ static struct platform_device tmu2_device = {
213}; 220};
214 221
215static struct sh_timer_config tmu3_platform_data = { 222static struct sh_timer_config tmu3_platform_data = {
216 .name = "TMU3",
217 .channel_offset = 0x04, 223 .channel_offset = 0x04,
218 .timer_bit = 0, 224 .timer_bit = 0,
219 .clk = "peripheral_clk",
220}; 225};
221 226
222static struct resource tmu3_resources[] = { 227static struct resource tmu3_resources[] = {
223 [0] = { 228 [0] = {
224 .name = "TMU3",
225 .start = 0xffd88008, 229 .start = 0xffd88008,
226 .end = 0xffd88013, 230 .end = 0xffd88013,
227 .flags = IORESOURCE_MEM, 231 .flags = IORESOURCE_MEM,
@@ -243,15 +247,12 @@ static struct platform_device tmu3_device = {
243}; 247};
244 248
245static struct sh_timer_config tmu4_platform_data = { 249static struct sh_timer_config tmu4_platform_data = {
246 .name = "TMU4",
247 .channel_offset = 0x10, 250 .channel_offset = 0x10,
248 .timer_bit = 1, 251 .timer_bit = 1,
249 .clk = "peripheral_clk",
250}; 252};
251 253
252static struct resource tmu4_resources[] = { 254static struct resource tmu4_resources[] = {
253 [0] = { 255 [0] = {
254 .name = "TMU4",
255 .start = 0xffd88014, 256 .start = 0xffd88014,
256 .end = 0xffd8801f, 257 .end = 0xffd8801f,
257 .flags = IORESOURCE_MEM, 258 .flags = IORESOURCE_MEM,
@@ -273,15 +274,12 @@ static struct platform_device tmu4_device = {
273}; 274};
274 275
275static struct sh_timer_config tmu5_platform_data = { 276static struct sh_timer_config tmu5_platform_data = {
276 .name = "TMU5",
277 .channel_offset = 0x1c, 277 .channel_offset = 0x1c,
278 .timer_bit = 2, 278 .timer_bit = 2,
279 .clk = "peripheral_clk",
280}; 279};
281 280
282static struct resource tmu5_resources[] = { 281static struct resource tmu5_resources[] = {
283 [0] = { 282 [0] = {
284 .name = "TMU5",
285 .start = 0xffd88020, 283 .start = 0xffd88020,
286 .end = 0xffd8802b, 284 .end = 0xffd8802b,
287 .flags = IORESOURCE_MEM, 285 .flags = IORESOURCE_MEM,
@@ -303,6 +301,9 @@ static struct platform_device tmu5_device = {
303}; 301};
304 302
305static struct platform_device *sh7763_devices[] __initdata = { 303static struct platform_device *sh7763_devices[] __initdata = {
304 &scif0_device,
305 &scif1_device,
306 &scif2_device,
306 &tmu0_device, 307 &tmu0_device,
307 &tmu1_device, 308 &tmu1_device,
308 &tmu2_device, 309 &tmu2_device,
@@ -310,7 +311,6 @@ static struct platform_device *sh7763_devices[] __initdata = {
310 &tmu4_device, 311 &tmu4_device,
311 &tmu5_device, 312 &tmu5_device,
312 &rtc_device, 313 &rtc_device,
313 &sci_device,
314 &usb_ohci_device, 314 &usb_ohci_device,
315 &usbf_device, 315 &usbf_device,
316}; 316};
@@ -320,9 +320,12 @@ static int __init sh7763_devices_setup(void)
320 return platform_add_devices(sh7763_devices, 320 return platform_add_devices(sh7763_devices,
321 ARRAY_SIZE(sh7763_devices)); 321 ARRAY_SIZE(sh7763_devices));
322} 322}
323__initcall(sh7763_devices_setup); 323arch_initcall(sh7763_devices_setup);
324 324
325static struct platform_device *sh7763_early_devices[] __initdata = { 325static struct platform_device *sh7763_early_devices[] __initdata = {
326 &scif0_device,
327 &scif1_device,
328 &scif2_device,
326 &tmu0_device, 329 &tmu0_device,
327 &tmu1_device, 330 &tmu1_device,
328 &tmu2_device, 331 &tmu2_device,
@@ -523,11 +526,11 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
523void __init plat_irq_setup(void) 526void __init plat_irq_setup(void)
524{ 527{
525 /* disable IRQ7-0 */ 528 /* disable IRQ7-0 */
526 ctrl_outl(0xff000000, INTC_INTMSK0); 529 __raw_writel(0xff000000, INTC_INTMSK0);
527 530
528 /* disable IRL3-0 + IRL7-4 */ 531 /* disable IRL3-0 + IRL7-4 */
529 ctrl_outl(0xc0000000, INTC_INTMSK1); 532 __raw_writel(0xc0000000, INTC_INTMSK1);
530 ctrl_outl(0xfffefffe, INTC_INTMSK2); 533 __raw_writel(0xfffefffe, INTC_INTMSK2);
531 534
532 register_intc_controller(&intc_desc); 535 register_intc_controller(&intc_desc);
533} 536}
@@ -537,27 +540,27 @@ void __init plat_irq_setup_pins(int mode)
537 switch (mode) { 540 switch (mode) {
538 case IRQ_MODE_IRQ: 541 case IRQ_MODE_IRQ:
539 /* select IRQ mode for IRL3-0 + IRL7-4 */ 542 /* select IRQ mode for IRL3-0 + IRL7-4 */
540 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); 543 __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
541 register_intc_controller(&intc_irq_desc); 544 register_intc_controller(&intc_irq_desc);
542 break; 545 break;
543 case IRQ_MODE_IRL7654: 546 case IRQ_MODE_IRL7654:
544 /* enable IRL7-4 but don't provide any masking */ 547 /* enable IRL7-4 but don't provide any masking */
545 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 548 __raw_writel(0x40000000, INTC_INTMSKCLR1);
546 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); 549 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
547 break; 550 break;
548 case IRQ_MODE_IRL3210: 551 case IRQ_MODE_IRL3210:
549 /* enable IRL0-3 but don't provide any masking */ 552 /* enable IRL0-3 but don't provide any masking */
550 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 553 __raw_writel(0x80000000, INTC_INTMSKCLR1);
551 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); 554 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
552 break; 555 break;
553 case IRQ_MODE_IRL7654_MASK: 556 case IRQ_MODE_IRL7654_MASK:
554 /* enable IRL7-4 and mask using cpu intc controller */ 557 /* enable IRL7-4 and mask using cpu intc controller */
555 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 558 __raw_writel(0x40000000, INTC_INTMSKCLR1);
556 register_intc_controller(&intc_irl7654_desc); 559 register_intc_controller(&intc_irl7654_desc);
557 break; 560 break;
558 case IRQ_MODE_IRL3210_MASK: 561 case IRQ_MODE_IRL3210_MASK:
559 /* enable IRL0-3 and mask using cpu intc controller */ 562 /* enable IRL0-3 and mask using cpu intc controller */
560 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 563 __raw_writel(0x80000000, INTC_INTMSKCLR1);
561 register_intc_controller(&intc_irl3210_desc); 564 register_intc_controller(&intc_irl3210_desc);
562 break; 565 break;
563 default: 566 default:
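The plat_irq_setup()/plat_irq_setup_pins() hunks above, like the sh7770 hunks that follow, are a mechanical accessor conversion: register addresses and values are unchanged, only the legacy ctrl_*() helpers are swapped for the raw MMIO accessors. The assumed one-to-one mapping (a sketch for readers diffing against older trees, not the actual <asm/io.h> definitions):

/* Assumed equivalence; on sh the legacy ctrl_*() helpers were thin
 * wrappers around the __raw_* accessors with identical semantics. */
#define ctrl_inl(addr)		__raw_readl(addr)
#define ctrl_outl(v, addr)	__raw_writel(v, addr)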
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 746f4fb9ccf0..2c6aa22cf5f6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -14,101 +14,184 @@
14#include <linux/sh_timer.h> 14#include <linux/sh_timer.h>
15#include <linux/io.h> 15#include <linux/io.h>
16 16
17static struct plat_sci_port sci_platform_data[] = { 17static struct plat_sci_port scif0_platform_data = {
18 { 18 .mapbase = 0xff923000,
19 .mapbase = 0xff923000, 19 .flags = UPF_BOOT_AUTOCONF,
20 .flags = UPF_BOOT_AUTOCONF, 20 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
21 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 21 .scbrr_algo_id = SCBRR_ALGO_2,
22 .scbrr_algo_id = SCBRR_ALGO_2, 22 .type = PORT_SCIF,
23 .type = PORT_SCIF, 23 .irqs = { 61, 61, 61, 61 },
24 .irqs = { 61, 61, 61, 61 }, 24};
25 }, { 25
26 .mapbase = 0xff924000, 26static struct platform_device scif0_device = {
27 .flags = UPF_BOOT_AUTOCONF, 27 .name = "sh-sci",
28 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 28 .id = 0,
29 .scbrr_algo_id = SCBRR_ALGO_2, 29 .dev = {
30 .type = PORT_SCIF, 30 .platform_data = &scif0_platform_data,
31 .irqs = { 62, 62, 62, 62 }, 31 },
32 }, { 32};
33 .mapbase = 0xff925000, 33
34 .flags = UPF_BOOT_AUTOCONF, 34static struct plat_sci_port scif1_platform_data = {
35 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 35 .mapbase = 0xff924000,
36 .scbrr_algo_id = SCBRR_ALGO_2, 36 .flags = UPF_BOOT_AUTOCONF,
37 .type = PORT_SCIF, 37 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
38 .irqs = { 63, 63, 63, 63 }, 38 .scbrr_algo_id = SCBRR_ALGO_2,
39 }, { 39 .type = PORT_SCIF,
40 .mapbase = 0xff926000, 40 .irqs = { 62, 62, 62, 62 },
41 .flags = UPF_BOOT_AUTOCONF, 41};
42 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 42
43 .scbrr_algo_id = SCBRR_ALGO_2, 43static struct platform_device scif1_device = {
44 .type = PORT_SCIF, 44 .name = "sh-sci",
45 .irqs = { 64, 64, 64, 64 }, 45 .id = 1,
46 }, { 46 .dev = {
47 .mapbase = 0xff927000, 47 .platform_data = &scif1_platform_data,
48 .flags = UPF_BOOT_AUTOCONF, 48 },
49 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 49};
50 .scbrr_algo_id = SCBRR_ALGO_2, 50
51 .type = PORT_SCIF, 51static struct plat_sci_port scif2_platform_data = {
52 .irqs = { 65, 65, 65, 65 }, 52 .mapbase = 0xff925000,
53 }, { 53 .flags = UPF_BOOT_AUTOCONF,
54 .mapbase = 0xff928000, 54 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
55 .flags = UPF_BOOT_AUTOCONF, 55 .scbrr_algo_id = SCBRR_ALGO_2,
56 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 56 .type = PORT_SCIF,
57 .scbrr_algo_id = SCBRR_ALGO_2, 57 .irqs = { 63, 63, 63, 63 },
58 .type = PORT_SCIF, 58};
59 .irqs = { 66, 66, 66, 66 }, 59
60 }, { 60static struct platform_device scif2_device = {
61 .mapbase = 0xff929000, 61 .name = "sh-sci",
62 .flags = UPF_BOOT_AUTOCONF, 62 .id = 2,
63 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 63 .dev = {
64 .scbrr_algo_id = SCBRR_ALGO_2, 64 .platform_data = &scif2_platform_data,
65 .type = PORT_SCIF, 65 },
66 .irqs = { 67, 67, 67, 67 }, 66};
67 }, { 67
68 .mapbase = 0xff92a000, 68static struct plat_sci_port scif3_platform_data = {
69 .flags = UPF_BOOT_AUTOCONF, 69 .mapbase = 0xff926000,
70 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 70 .flags = UPF_BOOT_AUTOCONF,
71 .scbrr_algo_id = SCBRR_ALGO_2, 71 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
72 .type = PORT_SCIF, 72 .scbrr_algo_id = SCBRR_ALGO_2,
73 .irqs = { 68, 68, 68, 68 }, 73 .type = PORT_SCIF,
74 }, { 74 .irqs = { 64, 64, 64, 64 },
75 .mapbase = 0xff92b000, 75};
76 .flags = UPF_BOOT_AUTOCONF, 76
77 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 77static struct platform_device scif3_device = {
78 .scbrr_algo_id = SCBRR_ALGO_2, 78 .name = "sh-sci",
79 .type = PORT_SCIF, 79 .id = 3,
80 .irqs = { 69, 69, 69, 69 }, 80 .dev = {
81 }, { 81 .platform_data = &scif3_platform_data,
82 .mapbase = 0xff92c000, 82 },
83 .flags = UPF_BOOT_AUTOCONF, 83};
84 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 84
85 .scbrr_algo_id = SCBRR_ALGO_2, 85static struct plat_sci_port scif4_platform_data = {
86 .type = PORT_SCIF, 86 .mapbase = 0xff927000,
87 .irqs = { 70, 70, 70, 70 }, 87 .flags = UPF_BOOT_AUTOCONF,
88 }, { 88 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
89 .flags = 0, 89 .scbrr_algo_id = SCBRR_ALGO_2,
90 } 90 .type = PORT_SCIF,
91 .irqs = { 65, 65, 65, 65 },
92};
93
94static struct platform_device scif4_device = {
95 .name = "sh-sci",
96 .id = 4,
97 .dev = {
98 .platform_data = &scif4_platform_data,
99 },
100};
101
102static struct plat_sci_port scif5_platform_data = {
103 .mapbase = 0xff928000,
104 .flags = UPF_BOOT_AUTOCONF,
105 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
106 .scbrr_algo_id = SCBRR_ALGO_2,
107 .type = PORT_SCIF,
108 .irqs = { 66, 66, 66, 66 },
109};
110
111static struct platform_device scif5_device = {
112 .name = "sh-sci",
113 .id = 5,
114 .dev = {
115 .platform_data = &scif5_platform_data,
116 },
117};
118
119static struct plat_sci_port scif6_platform_data = {
120 .mapbase = 0xff929000,
121 .flags = UPF_BOOT_AUTOCONF,
122 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
123 .scbrr_algo_id = SCBRR_ALGO_2,
124 .type = PORT_SCIF,
125 .irqs = { 67, 67, 67, 67 },
126};
127
128static struct platform_device scif6_device = {
129 .name = "sh-sci",
130 .id = 6,
131 .dev = {
132 .platform_data = &scif6_platform_data,
133 },
134};
135
136static struct plat_sci_port scif7_platform_data = {
137 .mapbase = 0xff92a000,
138 .flags = UPF_BOOT_AUTOCONF,
139 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
140 .scbrr_algo_id = SCBRR_ALGO_2,
141 .type = PORT_SCIF,
142 .irqs = { 68, 68, 68, 68 },
143};
144
145static struct platform_device scif7_device = {
146 .name = "sh-sci",
147 .id = 7,
148 .dev = {
149 .platform_data = &scif7_platform_data,
150 },
151};
152
153static struct plat_sci_port scif8_platform_data = {
154 .mapbase = 0xff92b000,
155 .flags = UPF_BOOT_AUTOCONF,
156 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
157 .scbrr_algo_id = SCBRR_ALGO_2,
158 .type = PORT_SCIF,
159 .irqs = { 69, 69, 69, 69 },
160};
161
162static struct platform_device scif8_device = {
163 .name = "sh-sci",
164 .id = 8,
165 .dev = {
166 .platform_data = &scif8_platform_data,
167 },
168};
169
170static struct plat_sci_port scif9_platform_data = {
171 .mapbase = 0xff92c000,
172 .flags = UPF_BOOT_AUTOCONF,
173 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
174 .scbrr_algo_id = SCBRR_ALGO_2,
175 .type = PORT_SCIF,
176 .irqs = { 70, 70, 70, 70 },
91}; 177};
92 178
93static struct platform_device sci_device = { 179static struct platform_device scif9_device = {
94 .name = "sh-sci", 180 .name = "sh-sci",
95 .id = -1, 181 .id = 9,
96 .dev = { 182 .dev = {
97 .platform_data = sci_platform_data, 183 .platform_data = &scif9_platform_data,
98 }, 184 },
99}; 185};
100 186
101static struct sh_timer_config tmu0_platform_data = { 187static struct sh_timer_config tmu0_platform_data = {
102 .name = "TMU0",
103 .channel_offset = 0x04, 188 .channel_offset = 0x04,
104 .timer_bit = 0, 189 .timer_bit = 0,
105 .clk = "peripheral_clk",
106 .clockevent_rating = 200, 190 .clockevent_rating = 200,
107}; 191};
108 192
109static struct resource tmu0_resources[] = { 193static struct resource tmu0_resources[] = {
110 [0] = { 194 [0] = {
111 .name = "TMU0",
112 .start = 0xffd80008, 195 .start = 0xffd80008,
113 .end = 0xffd80013, 196 .end = 0xffd80013,
114 .flags = IORESOURCE_MEM, 197 .flags = IORESOURCE_MEM,
@@ -130,16 +213,13 @@ static struct platform_device tmu0_device = {
130}; 213};
131 214
132static struct sh_timer_config tmu1_platform_data = { 215static struct sh_timer_config tmu1_platform_data = {
133 .name = "TMU1",
134 .channel_offset = 0x10, 216 .channel_offset = 0x10,
135 .timer_bit = 1, 217 .timer_bit = 1,
136 .clk = "peripheral_clk",
137 .clocksource_rating = 200, 218 .clocksource_rating = 200,
138}; 219};
139 220
140static struct resource tmu1_resources[] = { 221static struct resource tmu1_resources[] = {
141 [0] = { 222 [0] = {
142 .name = "TMU1",
143 .start = 0xffd80014, 223 .start = 0xffd80014,
144 .end = 0xffd8001f, 224 .end = 0xffd8001f,
145 .flags = IORESOURCE_MEM, 225 .flags = IORESOURCE_MEM,
@@ -161,15 +241,12 @@ static struct platform_device tmu1_device = {
161}; 241};
162 242
163static struct sh_timer_config tmu2_platform_data = { 243static struct sh_timer_config tmu2_platform_data = {
164 .name = "TMU2",
165 .channel_offset = 0x1c, 244 .channel_offset = 0x1c,
166 .timer_bit = 2, 245 .timer_bit = 2,
167 .clk = "peripheral_clk",
168}; 246};
169 247
170static struct resource tmu2_resources[] = { 248static struct resource tmu2_resources[] = {
171 [0] = { 249 [0] = {
172 .name = "TMU2",
173 .start = 0xffd80020, 250 .start = 0xffd80020,
174 .end = 0xffd8002f, 251 .end = 0xffd8002f,
175 .flags = IORESOURCE_MEM, 252 .flags = IORESOURCE_MEM,
@@ -191,15 +268,12 @@ static struct platform_device tmu2_device = {
191}; 268};
192 269
193static struct sh_timer_config tmu3_platform_data = { 270static struct sh_timer_config tmu3_platform_data = {
194 .name = "TMU3",
195 .channel_offset = 0x04, 271 .channel_offset = 0x04,
196 .timer_bit = 0, 272 .timer_bit = 0,
197 .clk = "peripheral_clk",
198}; 273};
199 274
200static struct resource tmu3_resources[] = { 275static struct resource tmu3_resources[] = {
201 [0] = { 276 [0] = {
202 .name = "TMU3",
203 .start = 0xffd81008, 277 .start = 0xffd81008,
204 .end = 0xffd81013, 278 .end = 0xffd81013,
205 .flags = IORESOURCE_MEM, 279 .flags = IORESOURCE_MEM,
@@ -221,15 +295,12 @@ static struct platform_device tmu3_device = {
221}; 295};
222 296
223static struct sh_timer_config tmu4_platform_data = { 297static struct sh_timer_config tmu4_platform_data = {
224 .name = "TMU4",
225 .channel_offset = 0x10, 298 .channel_offset = 0x10,
226 .timer_bit = 1, 299 .timer_bit = 1,
227 .clk = "peripheral_clk",
228}; 300};
229 301
230static struct resource tmu4_resources[] = { 302static struct resource tmu4_resources[] = {
231 [0] = { 303 [0] = {
232 .name = "TMU4",
233 .start = 0xffd81014, 304 .start = 0xffd81014,
234 .end = 0xffd8101f, 305 .end = 0xffd8101f,
235 .flags = IORESOURCE_MEM, 306 .flags = IORESOURCE_MEM,
@@ -251,15 +322,12 @@ static struct platform_device tmu4_device = {
251}; 322};
252 323
253static struct sh_timer_config tmu5_platform_data = { 324static struct sh_timer_config tmu5_platform_data = {
254 .name = "TMU5",
255 .channel_offset = 0x1c, 325 .channel_offset = 0x1c,
256 .timer_bit = 2, 326 .timer_bit = 2,
257 .clk = "peripheral_clk",
258}; 327};
259 328
260static struct resource tmu5_resources[] = { 329static struct resource tmu5_resources[] = {
261 [0] = { 330 [0] = {
262 .name = "TMU5",
263 .start = 0xffd81020, 331 .start = 0xffd81020,
264 .end = 0xffd8102f, 332 .end = 0xffd8102f,
265 .flags = IORESOURCE_MEM, 333 .flags = IORESOURCE_MEM,
@@ -281,15 +349,12 @@ static struct platform_device tmu5_device = {
281}; 349};
282 350
283static struct sh_timer_config tmu6_platform_data = { 351static struct sh_timer_config tmu6_platform_data = {
284 .name = "TMU6",
285 .channel_offset = 0x04, 352 .channel_offset = 0x04,
286 .timer_bit = 0, 353 .timer_bit = 0,
287 .clk = "peripheral_clk",
288}; 354};
289 355
290static struct resource tmu6_resources[] = { 356static struct resource tmu6_resources[] = {
291 [0] = { 357 [0] = {
292 .name = "TMU6",
293 .start = 0xffd82008, 358 .start = 0xffd82008,
294 .end = 0xffd82013, 359 .end = 0xffd82013,
295 .flags = IORESOURCE_MEM, 360 .flags = IORESOURCE_MEM,
@@ -311,15 +376,12 @@ static struct platform_device tmu6_device = {
311}; 376};
312 377
313static struct sh_timer_config tmu7_platform_data = { 378static struct sh_timer_config tmu7_platform_data = {
314 .name = "TMU7",
315 .channel_offset = 0x10, 379 .channel_offset = 0x10,
316 .timer_bit = 1, 380 .timer_bit = 1,
317 .clk = "peripheral_clk",
318}; 381};
319 382
320static struct resource tmu7_resources[] = { 383static struct resource tmu7_resources[] = {
321 [0] = { 384 [0] = {
322 .name = "TMU7",
323 .start = 0xffd82014, 385 .start = 0xffd82014,
324 .end = 0xffd8201f, 386 .end = 0xffd8201f,
325 .flags = IORESOURCE_MEM, 387 .flags = IORESOURCE_MEM,
@@ -341,15 +403,12 @@ static struct platform_device tmu7_device = {
341}; 403};
342 404
343static struct sh_timer_config tmu8_platform_data = { 405static struct sh_timer_config tmu8_platform_data = {
344 .name = "TMU8",
345 .channel_offset = 0x1c, 406 .channel_offset = 0x1c,
346 .timer_bit = 2, 407 .timer_bit = 2,
347 .clk = "peripheral_clk",
348}; 408};
349 409
350static struct resource tmu8_resources[] = { 410static struct resource tmu8_resources[] = {
351 [0] = { 411 [0] = {
352 .name = "TMU8",
353 .start = 0xffd82020, 412 .start = 0xffd82020,
354 .end = 0xffd8202b, 413 .end = 0xffd8202b,
355 .flags = IORESOURCE_MEM, 414 .flags = IORESOURCE_MEM,
@@ -371,6 +430,16 @@ static struct platform_device tmu8_device = {
371}; 430};
372 431
373static struct platform_device *sh7770_devices[] __initdata = { 432static struct platform_device *sh7770_devices[] __initdata = {
433 &scif0_device,
434 &scif1_device,
435 &scif2_device,
436 &scif3_device,
437 &scif4_device,
438 &scif5_device,
439 &scif6_device,
440 &scif7_device,
441 &scif8_device,
442 &scif9_device,
374 &tmu0_device, 443 &tmu0_device,
375 &tmu1_device, 444 &tmu1_device,
376 &tmu2_device, 445 &tmu2_device,
@@ -380,7 +449,6 @@ static struct platform_device *sh7770_devices[] __initdata = {
380 &tmu6_device, 449 &tmu6_device,
381 &tmu7_device, 450 &tmu7_device,
382 &tmu8_device, 451 &tmu8_device,
383 &sci_device,
384}; 452};
385 453
386static int __init sh7770_devices_setup(void) 454static int __init sh7770_devices_setup(void)
@@ -388,9 +456,19 @@ static int __init sh7770_devices_setup(void)
388 return platform_add_devices(sh7770_devices, 456 return platform_add_devices(sh7770_devices,
389 ARRAY_SIZE(sh7770_devices)); 457 ARRAY_SIZE(sh7770_devices));
390} 458}
391__initcall(sh7770_devices_setup); 459arch_initcall(sh7770_devices_setup);
392 460
393static struct platform_device *sh7770_early_devices[] __initdata = { 461static struct platform_device *sh7770_early_devices[] __initdata = {
462 &scif0_device,
463 &scif1_device,
464 &scif2_device,
465 &scif3_device,
466 &scif4_device,
467 &scif5_device,
468 &scif6_device,
469 &scif7_device,
470 &scif8_device,
471 &scif9_device,
394 &tmu0_device, 472 &tmu0_device,
395 &tmu1_device, 473 &tmu1_device,
396 &tmu2_device, 474 &tmu2_device,
@@ -609,17 +687,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
609void __init plat_irq_setup(void) 687void __init plat_irq_setup(void)
610{ 688{
611 /* disable IRQ7-0 */ 689 /* disable IRQ7-0 */
612 ctrl_outl(0xff000000, INTC_INTMSK0); 690 __raw_writel(0xff000000, INTC_INTMSK0);
613 691
614 /* disable IRL3-0 + IRL7-4 */ 692 /* disable IRL3-0 + IRL7-4 */
615 ctrl_outl(0xc0000000, INTC_INTMSK1); 693 __raw_writel(0xc0000000, INTC_INTMSK1);
616 ctrl_outl(0xfffefffe, INTC_INTMSK2); 694 __raw_writel(0xfffefffe, INTC_INTMSK2);
617 695
618 /* select IRL mode for IRL3-0 + IRL7-4 */ 696 /* select IRL mode for IRL3-0 + IRL7-4 */
619 ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); 697 __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
620 698
621 /* disable holding function, ie enable "SH-4 Mode" */ 699 /* disable holding function, ie enable "SH-4 Mode" */
622 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); 700 __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
623 701
624 register_intc_controller(&intc_desc); 702 register_intc_controller(&intc_desc);
625} 703}
@@ -629,27 +707,27 @@ void __init plat_irq_setup_pins(int mode)
629 switch (mode) { 707 switch (mode) {
630 case IRQ_MODE_IRQ: 708 case IRQ_MODE_IRQ:
631 /* select IRQ mode for IRL3-0 + IRL7-4 */ 709 /* select IRQ mode for IRL3-0 + IRL7-4 */
632 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); 710 __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
633 register_intc_controller(&intc_irq_desc); 711 register_intc_controller(&intc_irq_desc);
634 break; 712 break;
635 case IRQ_MODE_IRL7654: 713 case IRQ_MODE_IRL7654:
636 /* enable IRL7-4 but don't provide any masking */ 714 /* enable IRL7-4 but don't provide any masking */
637 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 715 __raw_writel(0x40000000, INTC_INTMSKCLR1);
638 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); 716 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
639 break; 717 break;
640 case IRQ_MODE_IRL3210: 718 case IRQ_MODE_IRL3210:
641 /* enable IRL0-3 but don't provide any masking */ 719 /* enable IRL0-3 but don't provide any masking */
642 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 720 __raw_writel(0x80000000, INTC_INTMSKCLR1);
643 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); 721 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
644 break; 722 break;
645 case IRQ_MODE_IRL7654_MASK: 723 case IRQ_MODE_IRL7654_MASK:
646 /* enable IRL7-4 and mask using cpu intc controller */ 724 /* enable IRL7-4 and mask using cpu intc controller */
647 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 725 __raw_writel(0x40000000, INTC_INTMSKCLR1);
648 register_intc_controller(&intc_irl7654_desc); 726 register_intc_controller(&intc_irl7654_desc);
649 break; 727 break;
650 case IRQ_MODE_IRL3210_MASK: 728 case IRQ_MODE_IRL3210_MASK:
651 /* enable IRL0-3 and mask using cpu intc controller */ 729 /* enable IRL0-3 and mask using cpu intc controller */
652 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 730 __raw_writel(0x80000000, INTC_INTMSKCLR1);
653 register_intc_controller(&intc_irl3210_desc); 731 register_intc_controller(&intc_irl3210_desc);
654 break; 732 break;
655 default: 733 default:
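
The pattern repeated across these setup files is visible above: the single sentinel-terminated sci_platform_data[] array and its lone "sh-sci" device with id = -1 are split into one plat_sci_port / platform_device pair per SCIF port, each with its own id, and the per-port devices are added to both the normal and the early device lists. A minimal sketch of the per-port shape, using the SH7780 SCIF0 values from the next file's hunks; it is illustrative only, not a drop-in replacement for any of these files:

static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xffe00000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
	.scbrr_algo_id	= SCBRR_ALGO_1,
	.type		= PORT_SCIF,
	.irqs		= { 40, 40, 40, 40 },	/* all four SCIF events on IRQ 40 */
};

static struct platform_device scif0_device = {
	.name	= "sh-sci",
	.id	= 0,		/* one device per port instead of id = -1 */
	.dev	= {
		.platform_data = &scif0_platform_data,
	},
};
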
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index bcd411eb9cb0..08add7fa6849 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -12,19 +12,53 @@
12#include <linux/serial.h> 12#include <linux/serial.h>
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/serial_sci.h> 14#include <linux/serial_sci.h>
15#include <linux/sh_dma.h>
15#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
16 17
18#include <cpu/dma-register.h>
19
20static struct plat_sci_port scif0_platform_data = {
21 .mapbase = 0xffe00000,
22 .flags = UPF_BOOT_AUTOCONF,
23 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
24 .scbrr_algo_id = SCBRR_ALGO_1,
25 .type = PORT_SCIF,
26 .irqs = { 40, 40, 40, 40 },
27};
28
29static struct platform_device scif0_device = {
30 .name = "sh-sci",
31 .id = 0,
32 .dev = {
33 .platform_data = &scif0_platform_data,
34 },
35};
36
37static struct plat_sci_port scif1_platform_data = {
38 .mapbase = 0xffe10000,
39 .flags = UPF_BOOT_AUTOCONF,
40 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
41 .scbrr_algo_id = SCBRR_ALGO_1,
42 .type = PORT_SCIF,
43 .irqs = { 76, 76, 76, 76 },
44};
45
46static struct platform_device scif1_device = {
47 .name = "sh-sci",
48 .id = 1,
49 .dev = {
50 .platform_data = &scif1_platform_data,
51 },
52};
53
17static struct sh_timer_config tmu0_platform_data = { 54static struct sh_timer_config tmu0_platform_data = {
18 .name = "TMU0",
19 .channel_offset = 0x04, 55 .channel_offset = 0x04,
20 .timer_bit = 0, 56 .timer_bit = 0,
21 .clk = "peripheral_clk",
22 .clockevent_rating = 200, 57 .clockevent_rating = 200,
23}; 58};
24 59
25static struct resource tmu0_resources[] = { 60static struct resource tmu0_resources[] = {
26 [0] = { 61 [0] = {
27 .name = "TMU0",
28 .start = 0xffd80008, 62 .start = 0xffd80008,
29 .end = 0xffd80013, 63 .end = 0xffd80013,
30 .flags = IORESOURCE_MEM, 64 .flags = IORESOURCE_MEM,
@@ -46,16 +80,13 @@ static struct platform_device tmu0_device = {
46}; 80};
47 81
48static struct sh_timer_config tmu1_platform_data = { 82static struct sh_timer_config tmu1_platform_data = {
49 .name = "TMU1",
50 .channel_offset = 0x10, 83 .channel_offset = 0x10,
51 .timer_bit = 1, 84 .timer_bit = 1,
52 .clk = "peripheral_clk",
53 .clocksource_rating = 200, 85 .clocksource_rating = 200,
54}; 86};
55 87
56static struct resource tmu1_resources[] = { 88static struct resource tmu1_resources[] = {
57 [0] = { 89 [0] = {
58 .name = "TMU1",
59 .start = 0xffd80014, 90 .start = 0xffd80014,
60 .end = 0xffd8001f, 91 .end = 0xffd8001f,
61 .flags = IORESOURCE_MEM, 92 .flags = IORESOURCE_MEM,
@@ -77,15 +108,12 @@ static struct platform_device tmu1_device = {
77}; 108};
78 109
79static struct sh_timer_config tmu2_platform_data = { 110static struct sh_timer_config tmu2_platform_data = {
80 .name = "TMU2",
81 .channel_offset = 0x1c, 111 .channel_offset = 0x1c,
82 .timer_bit = 2, 112 .timer_bit = 2,
83 .clk = "peripheral_clk",
84}; 113};
85 114
86static struct resource tmu2_resources[] = { 115static struct resource tmu2_resources[] = {
87 [0] = { 116 [0] = {
88 .name = "TMU2",
89 .start = 0xffd80020, 117 .start = 0xffd80020,
90 .end = 0xffd8002f, 118 .end = 0xffd8002f,
91 .flags = IORESOURCE_MEM, 119 .flags = IORESOURCE_MEM,
@@ -107,15 +135,12 @@ static struct platform_device tmu2_device = {
107}; 135};
108 136
109static struct sh_timer_config tmu3_platform_data = { 137static struct sh_timer_config tmu3_platform_data = {
110 .name = "TMU3",
111 .channel_offset = 0x04, 138 .channel_offset = 0x04,
112 .timer_bit = 0, 139 .timer_bit = 0,
113 .clk = "peripheral_clk",
114}; 140};
115 141
116static struct resource tmu3_resources[] = { 142static struct resource tmu3_resources[] = {
117 [0] = { 143 [0] = {
118 .name = "TMU3",
119 .start = 0xffdc0008, 144 .start = 0xffdc0008,
120 .end = 0xffdc0013, 145 .end = 0xffdc0013,
121 .flags = IORESOURCE_MEM, 146 .flags = IORESOURCE_MEM,
@@ -137,15 +162,12 @@ static struct platform_device tmu3_device = {
137}; 162};
138 163
139static struct sh_timer_config tmu4_platform_data = { 164static struct sh_timer_config tmu4_platform_data = {
140 .name = "TMU4",
141 .channel_offset = 0x10, 165 .channel_offset = 0x10,
142 .timer_bit = 1, 166 .timer_bit = 1,
143 .clk = "peripheral_clk",
144}; 167};
145 168
146static struct resource tmu4_resources[] = { 169static struct resource tmu4_resources[] = {
147 [0] = { 170 [0] = {
148 .name = "TMU4",
149 .start = 0xffdc0014, 171 .start = 0xffdc0014,
150 .end = 0xffdc001f, 172 .end = 0xffdc001f,
151 .flags = IORESOURCE_MEM, 173 .flags = IORESOURCE_MEM,
@@ -167,15 +189,12 @@ static struct platform_device tmu4_device = {
167}; 189};
168 190
169static struct sh_timer_config tmu5_platform_data = { 191static struct sh_timer_config tmu5_platform_data = {
170 .name = "TMU5",
171 .channel_offset = 0x1c, 192 .channel_offset = 0x1c,
172 .timer_bit = 2, 193 .timer_bit = 2,
173 .clk = "peripheral_clk",
174}; 194};
175 195
176static struct resource tmu5_resources[] = { 196static struct resource tmu5_resources[] = {
177 [0] = { 197 [0] = {
178 .name = "TMU5",
179 .start = 0xffdc0020, 198 .start = 0xffdc0020,
180 .end = 0xffdc002b, 199 .end = 0xffdc002b,
181 .flags = IORESOURCE_MEM, 200 .flags = IORESOURCE_MEM,
@@ -216,35 +235,137 @@ static struct platform_device rtc_device = {
216 .resource = rtc_resources, 235 .resource = rtc_resources,
217}; 236};
218 237
219static struct plat_sci_port sci_platform_data[] = { 238/* DMA */
239static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
220 { 240 {
221 .mapbase = 0xffe00000, 241 .offset = 0,
222 .flags = UPF_BOOT_AUTOCONF, 242 .dmars = 0,
223 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 243 .dmars_bit = 0,
224 .scbrr_algo_id = SCBRR_ALGO_1, 244 }, {
225 .type = PORT_SCIF, 245 .offset = 0x10,
226 .irqs = { 40, 40, 40, 40 }, 246 .dmars = 0,
247 .dmars_bit = 8,
248 }, {
249 .offset = 0x20,
250 .dmars = 4,
251 .dmars_bit = 0,
227 }, { 252 }, {
228 .mapbase = 0xffe10000, 253 .offset = 0x30,
229 .flags = UPF_BOOT_AUTOCONF, 254 .dmars = 4,
230 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 255 .dmars_bit = 8,
231 .scbrr_algo_id = SCBRR_ALGO_1,
232 .type = PORT_SCIF,
233 .irqs = { 76, 76, 76, 76 },
234 }, { 256 }, {
235 .flags = 0, 257 .offset = 0x50,
258 .dmars = 8,
259 .dmars_bit = 0,
260 }, {
261 .offset = 0x60,
262 .dmars = 8,
263 .dmars_bit = 8,
236 } 264 }
237}; 265};
238 266
239static struct platform_device sci_device = { 267static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
240 .name = "sh-sci", 268 {
241 .id = -1, 269 .offset = 0,
270 }, {
271 .offset = 0x10,
272 }, {
273 .offset = 0x20,
274 }, {
275 .offset = 0x30,
276 }, {
277 .offset = 0x50,
278 }, {
279 .offset = 0x60,
280 }
281};
282
283static const unsigned int ts_shift[] = TS_SHIFT;
284
285static struct sh_dmae_pdata dma0_platform_data = {
286 .channel = sh7780_dmae0_channels,
287 .channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
288 .ts_low_shift = CHCR_TS_LOW_SHIFT,
289 .ts_low_mask = CHCR_TS_LOW_MASK,
290 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
291 .ts_high_mask = CHCR_TS_HIGH_MASK,
292 .ts_shift = ts_shift,
293 .ts_shift_num = ARRAY_SIZE(ts_shift),
294 .dmaor_init = DMAOR_INIT,
295};
296
297static struct sh_dmae_pdata dma1_platform_data = {
298 .channel = sh7780_dmae1_channels,
299 .channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
300 .ts_low_shift = CHCR_TS_LOW_SHIFT,
301 .ts_low_mask = CHCR_TS_LOW_MASK,
302 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
303 .ts_high_mask = CHCR_TS_HIGH_MASK,
304 .ts_shift = ts_shift,
305 .ts_shift_num = ARRAY_SIZE(ts_shift),
306 .dmaor_init = DMAOR_INIT,
307};
308
309static struct resource sh7780_dmae0_resources[] = {
310 [0] = {
311 /* Channel registers and DMAOR */
312 .start = 0xfc808020,
313 .end = 0xfc80808f,
314 .flags = IORESOURCE_MEM,
315 },
316 [1] = {
317 /* DMARSx */
318 .start = 0xfc809000,
319 .end = 0xfc80900b,
320 .flags = IORESOURCE_MEM,
321 },
322 {
323 /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */
324 .start = 34,
325 .end = 34,
326 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
327 },
328};
329
330static struct resource sh7780_dmae1_resources[] = {
331 [0] = {
332 /* Channel registers and DMAOR */
333 .start = 0xfc818020,
334 .end = 0xfc81808f,
335 .flags = IORESOURCE_MEM,
336 },
337 /* DMAC1 has no DMARS */
338 {
339 /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */
340 .start = 46,
341 .end = 46,
342 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
343 },
344};
345
346static struct platform_device dma0_device = {
347 .name = "sh-dma-engine",
348 .id = 0,
349 .resource = sh7780_dmae0_resources,
350 .num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
351 .dev = {
352 .platform_data = &dma0_platform_data,
353 },
354};
355
356static struct platform_device dma1_device = {
357 .name = "sh-dma-engine",
358 .id = 1,
359 .resource = sh7780_dmae1_resources,
360 .num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
242 .dev = { 361 .dev = {
243 .platform_data = sci_platform_data, 362 .platform_data = &dma1_platform_data,
244 }, 363 },
245}; 364};
246 365
247static struct platform_device *sh7780_devices[] __initdata = { 366static struct platform_device *sh7780_devices[] __initdata = {
367 &scif0_device,
368 &scif1_device,
248 &tmu0_device, 369 &tmu0_device,
249 &tmu1_device, 370 &tmu1_device,
250 &tmu2_device, 371 &tmu2_device,
@@ -252,7 +373,8 @@ static struct platform_device *sh7780_devices[] __initdata = {
252 &tmu4_device, 373 &tmu4_device,
253 &tmu5_device, 374 &tmu5_device,
254 &rtc_device, 375 &rtc_device,
255 &sci_device, 376 &dma0_device,
377 &dma1_device,
256}; 378};
257 379
258static int __init sh7780_devices_setup(void) 380static int __init sh7780_devices_setup(void)
@@ -260,9 +382,11 @@ static int __init sh7780_devices_setup(void)
260 return platform_add_devices(sh7780_devices, 382 return platform_add_devices(sh7780_devices,
261 ARRAY_SIZE(sh7780_devices)); 383 ARRAY_SIZE(sh7780_devices));
262} 384}
263__initcall(sh7780_devices_setup); 385arch_initcall(sh7780_devices_setup);
264 386
265static struct platform_device *sh7780_early_devices[] __initdata = { 387static struct platform_device *sh7780_early_devices[] __initdata = {
388 &scif0_device,
389 &scif1_device,
266 &tmu0_device, 390 &tmu0_device,
267 &tmu1_device, 391 &tmu1_device,
268 &tmu2_device, 392 &tmu2_device,
@@ -273,6 +397,13 @@ static struct platform_device *sh7780_early_devices[] __initdata = {
273 397
274void __init plat_early_device_setup(void) 398void __init plat_early_device_setup(void)
275{ 399{
400 if (mach_is_sh2007()) {
401 scif0_platform_data.scscr &= ~SCSCR_CKE1;
402 scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
403 scif1_platform_data.scscr &= ~SCSCR_CKE1;
404 scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
405 }
406
276 early_platform_add_devices(sh7780_early_devices, 407 early_platform_add_devices(sh7780_early_devices,
277 ARRAY_SIZE(sh7780_early_devices)); 408 ARRAY_SIZE(sh7780_early_devices));
278} 409}
@@ -443,17 +574,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
443void __init plat_irq_setup(void) 574void __init plat_irq_setup(void)
444{ 575{
445 /* disable IRQ7-0 */ 576 /* disable IRQ7-0 */
446 ctrl_outl(0xff000000, INTC_INTMSK0); 577 __raw_writel(0xff000000, INTC_INTMSK0);
447 578
448 /* disable IRL3-0 + IRL7-4 */ 579 /* disable IRL3-0 + IRL7-4 */
449 ctrl_outl(0xc0000000, INTC_INTMSK1); 580 __raw_writel(0xc0000000, INTC_INTMSK1);
450 ctrl_outl(0xfffefffe, INTC_INTMSK2); 581 __raw_writel(0xfffefffe, INTC_INTMSK2);
451 582
452 /* select IRL mode for IRL3-0 + IRL7-4 */ 583 /* select IRL mode for IRL3-0 + IRL7-4 */
453 ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); 584 __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
454 585
455 /* disable holding function, ie enable "SH-4 Mode" */ 586 /* disable holding function, ie enable "SH-4 Mode" */
456 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); 587 __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
457 588
458 register_intc_controller(&intc_desc); 589 register_intc_controller(&intc_desc);
459} 590}
@@ -463,27 +594,27 @@ void __init plat_irq_setup_pins(int mode)
463 switch (mode) { 594 switch (mode) {
464 case IRQ_MODE_IRQ: 595 case IRQ_MODE_IRQ:
465 /* select IRQ mode for IRL3-0 + IRL7-4 */ 596 /* select IRQ mode for IRL3-0 + IRL7-4 */
466 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); 597 __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
467 register_intc_controller(&intc_irq_desc); 598 register_intc_controller(&intc_irq_desc);
468 break; 599 break;
469 case IRQ_MODE_IRL7654: 600 case IRQ_MODE_IRL7654:
470 /* enable IRL7-4 but don't provide any masking */ 601 /* enable IRL7-4 but don't provide any masking */
471 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 602 __raw_writel(0x40000000, INTC_INTMSKCLR1);
472 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); 603 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
473 break; 604 break;
474 case IRQ_MODE_IRL3210: 605 case IRQ_MODE_IRL3210:
475 /* enable IRL0-3 but don't provide any masking */ 606 /* enable IRL0-3 but don't provide any masking */
476 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 607 __raw_writel(0x80000000, INTC_INTMSKCLR1);
477 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); 608 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
478 break; 609 break;
479 case IRQ_MODE_IRL7654_MASK: 610 case IRQ_MODE_IRL7654_MASK:
480 /* enable IRL7-4 and mask using cpu intc controller */ 611 /* enable IRL7-4 and mask using cpu intc controller */
481 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 612 __raw_writel(0x40000000, INTC_INTMSKCLR1);
482 register_intc_controller(&intc_irl7654_desc); 613 register_intc_controller(&intc_irl7654_desc);
483 break; 614 break;
484 case IRQ_MODE_IRL3210_MASK: 615 case IRQ_MODE_IRL3210_MASK:
485 /* enable IRL0-3 and mask using cpu intc controller */ 616 /* enable IRL0-3 and mask using cpu intc controller */
486 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 617 __raw_writel(0x80000000, INTC_INTMSKCLR1);
487 register_intc_controller(&intc_irl3210_desc); 618 register_intc_controller(&intc_irl3210_desc);
488 break; 619 break;
489 default: 620 default:
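
The plat_irq_setup() and plat_irq_setup_pins() hunks in each of these files make a mechanical substitution: the legacy SuperH ctrl_outl()/ctrl_inl() accessors become the generic __raw_writel()/__raw_readl() MMIO helpers, with the register addresses and bit masks left untouched. The read-modify-write idiom after the conversion, restating the INTC_ICR0 lines from the hunks above rather than introducing new behaviour:

	/* IRL mode: clear ICR0 bits 23:22 */
	__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
	/* IRQ mode: set ICR0 bits 23:22 */
	__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
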
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 3ae2e2071009..18d8fc136fb2 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -13,20 +13,123 @@
13#include <linux/serial_sci.h> 13#include <linux/serial_sci.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/sh_dma.h>
16#include <linux/sh_timer.h> 17#include <linux/sh_timer.h>
18
17#include <asm/mmzone.h> 19#include <asm/mmzone.h>
18 20
21#include <cpu/dma-register.h>
22
23static struct plat_sci_port scif0_platform_data = {
24 .mapbase = 0xffea0000,
25 .flags = UPF_BOOT_AUTOCONF,
26 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
27 .scbrr_algo_id = SCBRR_ALGO_1,
28 .type = PORT_SCIF,
29 .irqs = { 40, 40, 40, 40 },
30};
31
32static struct platform_device scif0_device = {
33 .name = "sh-sci",
34 .id = 0,
35 .dev = {
36 .platform_data = &scif0_platform_data,
37 },
38};
39
40static struct plat_sci_port scif1_platform_data = {
41 .mapbase = 0xffeb0000,
42 .flags = UPF_BOOT_AUTOCONF,
43 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
44 .scbrr_algo_id = SCBRR_ALGO_1,
45 .type = PORT_SCIF,
46 .irqs = { 44, 44, 44, 44 },
47};
48
49static struct platform_device scif1_device = {
50 .name = "sh-sci",
51 .id = 1,
52 .dev = {
53 .platform_data = &scif1_platform_data,
54 },
55};
56
57static struct plat_sci_port scif2_platform_data = {
58 .mapbase = 0xffec0000,
59 .flags = UPF_BOOT_AUTOCONF,
60 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
61 .scbrr_algo_id = SCBRR_ALGO_1,
62 .type = PORT_SCIF,
63 .irqs = { 60, 60, 60, 60 },
64};
65
66static struct platform_device scif2_device = {
67 .name = "sh-sci",
68 .id = 2,
69 .dev = {
70 .platform_data = &scif2_platform_data,
71 },
72};
73
74static struct plat_sci_port scif3_platform_data = {
75 .mapbase = 0xffed0000,
76 .flags = UPF_BOOT_AUTOCONF,
77 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
78 .scbrr_algo_id = SCBRR_ALGO_1,
79 .type = PORT_SCIF,
80 .irqs = { 61, 61, 61, 61 },
81};
82
83static struct platform_device scif3_device = {
84 .name = "sh-sci",
85 .id = 3,
86 .dev = {
87 .platform_data = &scif3_platform_data,
88 },
89};
90
91static struct plat_sci_port scif4_platform_data = {
92 .mapbase = 0xffee0000,
93 .flags = UPF_BOOT_AUTOCONF,
94 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
95 .scbrr_algo_id = SCBRR_ALGO_1,
96 .type = PORT_SCIF,
97 .irqs = { 62, 62, 62, 62 },
98};
99
100static struct platform_device scif4_device = {
101 .name = "sh-sci",
102 .id = 4,
103 .dev = {
104 .platform_data = &scif4_platform_data,
105 },
106};
107
108static struct plat_sci_port scif5_platform_data = {
109 .mapbase = 0xffef0000,
110 .flags = UPF_BOOT_AUTOCONF,
111 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
112 .scbrr_algo_id = SCBRR_ALGO_1,
113 .type = PORT_SCIF,
114 .irqs = { 63, 63, 63, 63 },
115};
116
117static struct platform_device scif5_device = {
118 .name = "sh-sci",
119 .id = 5,
120 .dev = {
121 .platform_data = &scif5_platform_data,
122 },
123};
124
19static struct sh_timer_config tmu0_platform_data = { 125static struct sh_timer_config tmu0_platform_data = {
20 .name = "TMU0",
21 .channel_offset = 0x04, 126 .channel_offset = 0x04,
22 .timer_bit = 0, 127 .timer_bit = 0,
23 .clk = "tmu012_fck",
24 .clockevent_rating = 200, 128 .clockevent_rating = 200,
25}; 129};
26 130
27static struct resource tmu0_resources[] = { 131static struct resource tmu0_resources[] = {
28 [0] = { 132 [0] = {
29 .name = "TMU0",
30 .start = 0xffd80008, 133 .start = 0xffd80008,
31 .end = 0xffd80013, 134 .end = 0xffd80013,
32 .flags = IORESOURCE_MEM, 135 .flags = IORESOURCE_MEM,
@@ -48,16 +151,13 @@ static struct platform_device tmu0_device = {
48}; 151};
49 152
50static struct sh_timer_config tmu1_platform_data = { 153static struct sh_timer_config tmu1_platform_data = {
51 .name = "TMU1",
52 .channel_offset = 0x10, 154 .channel_offset = 0x10,
53 .timer_bit = 1, 155 .timer_bit = 1,
54 .clk = "tmu012_fck",
55 .clocksource_rating = 200, 156 .clocksource_rating = 200,
56}; 157};
57 158
58static struct resource tmu1_resources[] = { 159static struct resource tmu1_resources[] = {
59 [0] = { 160 [0] = {
60 .name = "TMU1",
61 .start = 0xffd80014, 161 .start = 0xffd80014,
62 .end = 0xffd8001f, 162 .end = 0xffd8001f,
63 .flags = IORESOURCE_MEM, 163 .flags = IORESOURCE_MEM,
@@ -79,15 +179,12 @@ static struct platform_device tmu1_device = {
79}; 179};
80 180
81static struct sh_timer_config tmu2_platform_data = { 181static struct sh_timer_config tmu2_platform_data = {
82 .name = "TMU2",
83 .channel_offset = 0x1c, 182 .channel_offset = 0x1c,
84 .timer_bit = 2, 183 .timer_bit = 2,
85 .clk = "tmu012_fck",
86}; 184};
87 185
88static struct resource tmu2_resources[] = { 186static struct resource tmu2_resources[] = {
89 [0] = { 187 [0] = {
90 .name = "TMU2",
91 .start = 0xffd80020, 188 .start = 0xffd80020,
92 .end = 0xffd8002f, 189 .end = 0xffd8002f,
93 .flags = IORESOURCE_MEM, 190 .flags = IORESOURCE_MEM,
@@ -109,15 +206,12 @@ static struct platform_device tmu2_device = {
109}; 206};
110 207
111static struct sh_timer_config tmu3_platform_data = { 208static struct sh_timer_config tmu3_platform_data = {
112 .name = "TMU3",
113 .channel_offset = 0x04, 209 .channel_offset = 0x04,
114 .timer_bit = 0, 210 .timer_bit = 0,
115 .clk = "tmu345_fck",
116}; 211};
117 212
118static struct resource tmu3_resources[] = { 213static struct resource tmu3_resources[] = {
119 [0] = { 214 [0] = {
120 .name = "TMU3",
121 .start = 0xffdc0008, 215 .start = 0xffdc0008,
122 .end = 0xffdc0013, 216 .end = 0xffdc0013,
123 .flags = IORESOURCE_MEM, 217 .flags = IORESOURCE_MEM,
@@ -139,15 +233,12 @@ static struct platform_device tmu3_device = {
139}; 233};
140 234
141static struct sh_timer_config tmu4_platform_data = { 235static struct sh_timer_config tmu4_platform_data = {
142 .name = "TMU4",
143 .channel_offset = 0x10, 236 .channel_offset = 0x10,
144 .timer_bit = 1, 237 .timer_bit = 1,
145 .clk = "tmu345_fck",
146}; 238};
147 239
148static struct resource tmu4_resources[] = { 240static struct resource tmu4_resources[] = {
149 [0] = { 241 [0] = {
150 .name = "TMU4",
151 .start = 0xffdc0014, 242 .start = 0xffdc0014,
152 .end = 0xffdc001f, 243 .end = 0xffdc001f,
153 .flags = IORESOURCE_MEM, 244 .flags = IORESOURCE_MEM,
@@ -169,15 +260,12 @@ static struct platform_device tmu4_device = {
169}; 260};
170 261
171static struct sh_timer_config tmu5_platform_data = { 262static struct sh_timer_config tmu5_platform_data = {
172 .name = "TMU5",
173 .channel_offset = 0x1c, 263 .channel_offset = 0x1c,
174 .timer_bit = 2, 264 .timer_bit = 2,
175 .clk = "tmu345_fck",
176}; 265};
177 266
178static struct resource tmu5_resources[] = { 267static struct resource tmu5_resources[] = {
179 [0] = { 268 [0] = {
180 .name = "TMU5",
181 .start = 0xffdc0020, 269 .start = 0xffdc0020,
182 .end = 0xffdc002b, 270 .end = 0xffdc002b,
183 .flags = IORESOURCE_MEM, 271 .flags = IORESOURCE_MEM,
@@ -198,76 +286,149 @@ static struct platform_device tmu5_device = {
198 .num_resources = ARRAY_SIZE(tmu5_resources), 286 .num_resources = ARRAY_SIZE(tmu5_resources),
199}; 287};
200 288
201static struct plat_sci_port sci_platform_data[] = { 289/* DMA */
290static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
202 { 291 {
203 .mapbase = 0xffea0000, 292 .offset = 0,
204 .flags = UPF_BOOT_AUTOCONF, 293 .dmars = 0,
205 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 294 .dmars_bit = 0,
206 .scbrr_algo_id = SCBRR_ALGO_1,
207 .type = PORT_SCIF,
208 .irqs = { 40, 40, 40, 40 },
209 .clk = "scif_fck",
210 }, { 295 }, {
211 .mapbase = 0xffeb0000, 296 .offset = 0x10,
212 .flags = UPF_BOOT_AUTOCONF, 297 .dmars = 0,
213 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 298 .dmars_bit = 8,
214 .scbrr_algo_id = SCBRR_ALGO_1,
215 .type = PORT_SCIF,
216 .irqs = { 44, 44, 44, 44 },
217 .clk = "scif_fck",
218 }, { 299 }, {
219 .mapbase = 0xffec0000, 300 .offset = 0x20,
220 .flags = UPF_BOOT_AUTOCONF, 301 .dmars = 4,
221 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 302 .dmars_bit = 0,
222 .scbrr_algo_id = SCBRR_ALGO_1,
223 .type = PORT_SCIF,
224 .irqs = { 60, 60, 60, 60 },
225 .clk = "scif_fck",
226 }, { 303 }, {
227 .mapbase = 0xffed0000, 304 .offset = 0x30,
228 .flags = UPF_BOOT_AUTOCONF, 305 .dmars = 4,
229 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 306 .dmars_bit = 8,
230 .scbrr_algo_id = SCBRR_ALGO_1,
231 .type = PORT_SCIF,
232 .irqs = { 61, 61, 61, 61 },
233 .clk = "scif_fck",
234 }, { 307 }, {
235 .mapbase = 0xffee0000, 308 .offset = 0x50,
236 .flags = UPF_BOOT_AUTOCONF, 309 .dmars = 8,
237 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 310 .dmars_bit = 0,
238 .scbrr_algo_id = SCBRR_ALGO_1,
239 .type = PORT_SCIF,
240 .irqs = { 62, 62, 62, 62 },
241 .clk = "scif_fck",
242 }, { 311 }, {
243 .mapbase = 0xffef0000, 312 .offset = 0x60,
244 .flags = UPF_BOOT_AUTOCONF, 313 .dmars = 8,
245 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 314 .dmars_bit = 8,
246 .scbrr_algo_id = SCBRR_ALGO_1, 315 }
247 .type = PORT_SCIF, 316};
248 .irqs = { 63, 63, 63, 63 }, 317
249 .clk = "scif_fck", 318static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
319 {
320 .offset = 0,
250 }, { 321 }, {
251 .flags = 0, 322 .offset = 0x10,
323 }, {
324 .offset = 0x20,
325 }, {
326 .offset = 0x30,
327 }, {
328 .offset = 0x50,
329 }, {
330 .offset = 0x60,
252 } 331 }
253}; 332};
254 333
255static struct platform_device sci_device = { 334static const unsigned int ts_shift[] = TS_SHIFT;
256 .name = "sh-sci", 335
257 .id = -1, 336static struct sh_dmae_pdata dma0_platform_data = {
337 .channel = sh7785_dmae0_channels,
338 .channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
339 .ts_low_shift = CHCR_TS_LOW_SHIFT,
340 .ts_low_mask = CHCR_TS_LOW_MASK,
341 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
342 .ts_high_mask = CHCR_TS_HIGH_MASK,
343 .ts_shift = ts_shift,
344 .ts_shift_num = ARRAY_SIZE(ts_shift),
345 .dmaor_init = DMAOR_INIT,
346};
347
348static struct sh_dmae_pdata dma1_platform_data = {
349 .channel = sh7785_dmae1_channels,
350 .channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
351 .ts_low_shift = CHCR_TS_LOW_SHIFT,
352 .ts_low_mask = CHCR_TS_LOW_MASK,
353 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
354 .ts_high_mask = CHCR_TS_HIGH_MASK,
355 .ts_shift = ts_shift,
356 .ts_shift_num = ARRAY_SIZE(ts_shift),
357 .dmaor_init = DMAOR_INIT,
358};
359
360static struct resource sh7785_dmae0_resources[] = {
361 [0] = {
362 /* Channel registers and DMAOR */
363 .start = 0xfc808020,
364 .end = 0xfc80808f,
365 .flags = IORESOURCE_MEM,
366 },
367 [1] = {
368 /* DMARSx */
369 .start = 0xfc809000,
370 .end = 0xfc80900b,
371 .flags = IORESOURCE_MEM,
372 },
373 {
374 /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */
375 .start = 33,
376 .end = 33,
377 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
378 },
379};
380
381static struct resource sh7785_dmae1_resources[] = {
382 [0] = {
383 /* Channel registers and DMAOR */
384 .start = 0xfcc08020,
385 .end = 0xfcc0808f,
386 .flags = IORESOURCE_MEM,
387 },
388 /* DMAC1 has no DMARS */
389 {
390 /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */
391 .start = 52,
392 .end = 52,
393 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
394 },
395};
396
397static struct platform_device dma0_device = {
398 .name = "sh-dma-engine",
399 .id = 0,
400 .resource = sh7785_dmae0_resources,
401 .num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
402 .dev = {
403 .platform_data = &dma0_platform_data,
404 },
405};
406
407static struct platform_device dma1_device = {
408 .name = "sh-dma-engine",
409 .id = 1,
410 .resource = sh7785_dmae1_resources,
411 .num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
258 .dev = { 412 .dev = {
259 .platform_data = sci_platform_data, 413 .platform_data = &dma1_platform_data,
260 }, 414 },
261}; 415};
262 416
263static struct platform_device *sh7785_devices[] __initdata = { 417static struct platform_device *sh7785_devices[] __initdata = {
418 &scif0_device,
419 &scif1_device,
420 &scif2_device,
421 &scif3_device,
422 &scif4_device,
423 &scif5_device,
264 &tmu0_device, 424 &tmu0_device,
265 &tmu1_device, 425 &tmu1_device,
266 &tmu2_device, 426 &tmu2_device,
267 &tmu3_device, 427 &tmu3_device,
268 &tmu4_device, 428 &tmu4_device,
269 &tmu5_device, 429 &tmu5_device,
270 &sci_device, 430 &dma0_device,
431 &dma1_device,
271}; 432};
272 433
273static int __init sh7785_devices_setup(void) 434static int __init sh7785_devices_setup(void)
@@ -275,9 +436,15 @@ static int __init sh7785_devices_setup(void)
275 return platform_add_devices(sh7785_devices, 436 return platform_add_devices(sh7785_devices,
276 ARRAY_SIZE(sh7785_devices)); 437 ARRAY_SIZE(sh7785_devices));
277} 438}
278__initcall(sh7785_devices_setup); 439arch_initcall(sh7785_devices_setup);
279 440
280static struct platform_device *sh7785_early_devices[] __initdata = { 441static struct platform_device *sh7785_early_devices[] __initdata = {
442 &scif0_device,
443 &scif1_device,
444 &scif2_device,
445 &scif3_device,
446 &scif4_device,
447 &scif5_device,
281 &tmu0_device, 448 &tmu0_device,
282 &tmu1_device, 449 &tmu1_device,
283 &tmu2_device, 450 &tmu2_device,
@@ -482,17 +649,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
482void __init plat_irq_setup(void) 649void __init plat_irq_setup(void)
483{ 650{
484 /* disable IRQ3-0 + IRQ7-4 */ 651 /* disable IRQ3-0 + IRQ7-4 */
485 ctrl_outl(0xff000000, INTC_INTMSK0); 652 __raw_writel(0xff000000, INTC_INTMSK0);
486 653
487 /* disable IRL3-0 + IRL7-4 */ 654 /* disable IRL3-0 + IRL7-4 */
488 ctrl_outl(0xc0000000, INTC_INTMSK1); 655 __raw_writel(0xc0000000, INTC_INTMSK1);
489 ctrl_outl(0xfffefffe, INTC_INTMSK2); 656 __raw_writel(0xfffefffe, INTC_INTMSK2);
490 657
491 /* select IRL mode for IRL3-0 + IRL7-4 */ 658 /* select IRL mode for IRL3-0 + IRL7-4 */
492 ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); 659 __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
493 660
494 /* disable holding function, ie enable "SH-4 Mode" */ 661 /* disable holding function, ie enable "SH-4 Mode" */
495 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); 662 __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
496 663
497 register_intc_controller(&intc_desc); 664 register_intc_controller(&intc_desc);
498} 665}
@@ -502,32 +669,32 @@ void __init plat_irq_setup_pins(int mode)
502 switch (mode) { 669 switch (mode) {
503 case IRQ_MODE_IRQ7654: 670 case IRQ_MODE_IRQ7654:
504 /* select IRQ mode for IRL7-4 */ 671 /* select IRQ mode for IRL7-4 */
505 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); 672 __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
506 register_intc_controller(&intc_desc_irq4567); 673 register_intc_controller(&intc_desc_irq4567);
507 break; 674 break;
508 case IRQ_MODE_IRQ3210: 675 case IRQ_MODE_IRQ3210:
509 /* select IRQ mode for IRL3-0 */ 676 /* select IRQ mode for IRL3-0 */
510 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); 677 __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
511 register_intc_controller(&intc_desc_irq0123); 678 register_intc_controller(&intc_desc_irq0123);
512 break; 679 break;
513 case IRQ_MODE_IRL7654: 680 case IRQ_MODE_IRL7654:
514 /* enable IRL7-4 but don't provide any masking */ 681 /* enable IRL7-4 but don't provide any masking */
515 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 682 __raw_writel(0x40000000, INTC_INTMSKCLR1);
516 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); 683 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
517 break; 684 break;
518 case IRQ_MODE_IRL3210: 685 case IRQ_MODE_IRL3210:
519 /* enable IRL0-3 but don't provide any masking */ 686 /* enable IRL0-3 but don't provide any masking */
520 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 687 __raw_writel(0x80000000, INTC_INTMSKCLR1);
521 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); 688 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
522 break; 689 break;
523 case IRQ_MODE_IRL7654_MASK: 690 case IRQ_MODE_IRL7654_MASK:
524 /* enable IRL7-4 and mask using cpu intc controller */ 691 /* enable IRL7-4 and mask using cpu intc controller */
525 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 692 __raw_writel(0x40000000, INTC_INTMSKCLR1);
526 register_intc_controller(&intc_desc_irl4567); 693 register_intc_controller(&intc_desc_irl4567);
527 break; 694 break;
528 case IRQ_MODE_IRL3210_MASK: 695 case IRQ_MODE_IRL3210_MASK:
529 /* enable IRL0-3 and mask using cpu intc controller */ 696 /* enable IRL0-3 and mask using cpu intc controller */
530 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 697 __raw_writel(0x80000000, INTC_INTMSKCLR1);
531 register_intc_controller(&intc_desc_irl0123); 698 register_intc_controller(&intc_desc_irl0123);
532 break; 699 break;
533 default: 700 default:
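
Two related registration changes also recur here: each SoC's devices_setup() moves from __initcall() (i.e. device_initcall()) to arch_initcall(), so the platform devices are registered earlier in boot, and the new SCIF devices are duplicated into the *_early_devices[] array that plat_early_device_setup() hands to early_platform_add_devices() for the early serial console. A sketch of the resulting skeleton with the sh7785 names used above; the sh7785 plat_early_device_setup() body is assumed to match the sh7780 one shown earlier:

static int __init sh7785_devices_setup(void)
{
	return platform_add_devices(sh7785_devices,
				    ARRAY_SIZE(sh7785_devices));
}
arch_initcall(sh7785_devices_setup);	/* was __initcall() */

void __init plat_early_device_setup(void)
{
	early_platform_add_devices(sh7785_early_devices,
				   ARRAY_SIZE(sh7785_early_devices));
}
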
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 8b7ea4bd965d..1656b8c91faf 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SH7786 Setup 2 * SH7786 Setup
3 * 3 *
4 * Copyright (C) 2009 Renesas Solutions Corp. 4 * Copyright (C) 2009 - 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com> 5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 * Paul Mundt <paul.mundt@renesas.com> 6 * Paul Mundt <paul.mundt@renesas.com>
7 * 7 *
@@ -21,79 +21,124 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/sh_timer.h> 23#include <linux/sh_timer.h>
24#include <linux/sh_dma.h>
25#include <linux/sh_intc.h>
26#include <cpu/dma-register.h>
24#include <asm/mmzone.h> 27#include <asm/mmzone.h>
25 28
26static struct plat_sci_port sci_platform_data[] = { 29static struct plat_sci_port scif0_platform_data = {
27 { 30 .mapbase = 0xffea0000,
28 .mapbase = 0xffea0000, 31 .flags = UPF_BOOT_AUTOCONF,
29 .flags = UPF_BOOT_AUTOCONF, 32 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
30 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 33 .scbrr_algo_id = SCBRR_ALGO_1,
31 .scbrr_algo_id = SCBRR_ALGO_1, 34 .type = PORT_SCIF,
32 .type = PORT_SCIF, 35 .irqs = { 40, 41, 43, 42 },
33 .irqs = { 40, 41, 43, 42 }, 36};
37
38static struct platform_device scif0_device = {
39 .name = "sh-sci",
40 .id = 0,
41 .dev = {
42 .platform_data = &scif0_platform_data,
34 }, 43 },
35 /*
36 * The rest of these all have multiplexed IRQs
37 */
38 {
39 .mapbase = 0xffeb0000,
40 .flags = UPF_BOOT_AUTOCONF,
41 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
42 .scbrr_algo_id = SCBRR_ALGO_1,
43 .type = PORT_SCIF,
44 .irqs = { 44, 44, 44, 44 },
45 }, {
46 .mapbase = 0xffec0000,
47 .flags = UPF_BOOT_AUTOCONF,
48 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
49 .scbrr_algo_id = SCBRR_ALGO_1,
50 .type = PORT_SCIF,
51 .irqs = { 50, 50, 50, 50 },
52 }, {
53 .mapbase = 0xffed0000,
54 .flags = UPF_BOOT_AUTOCONF,
55 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
56 .scbrr_algo_id = SCBRR_ALGO_1,
57 .type = PORT_SCIF,
58 .irqs = { 51, 51, 51, 51 },
59 }, {
60 .mapbase = 0xffee0000,
61 .flags = UPF_BOOT_AUTOCONF,
62 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
63 .scbrr_algo_id = SCBRR_ALGO_1,
64 .type = PORT_SCIF,
65 .irqs = { 52, 52, 52, 52 },
66 }, {
67 .mapbase = 0xffef0000,
68 .flags = UPF_BOOT_AUTOCONF,
69 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
70 .scbrr_algo_id = SCBRR_ALGO_1,
71 .type = PORT_SCIF,
72 .irqs = { 53, 53, 53, 53 },
73 }, {
74 .flags = 0,
75 }
76}; 44};
77 45
78static struct platform_device sci_device = { 46/*
47 * The rest of these all have multiplexed IRQs
48 */
49static struct plat_sci_port scif1_platform_data = {
50 .mapbase = 0xffeb0000,
51 .flags = UPF_BOOT_AUTOCONF,
52 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
53 .scbrr_algo_id = SCBRR_ALGO_1,
54 .type = PORT_SCIF,
55 .irqs = { 44, 44, 44, 44 },
56};
57
58static struct platform_device scif1_device = {
79 .name = "sh-sci", 59 .name = "sh-sci",
80 .id = -1, 60 .id = 1,
61 .dev = {
62 .platform_data = &scif1_platform_data,
63 },
64};
65
66static struct plat_sci_port scif2_platform_data = {
67 .mapbase = 0xffec0000,
68 .flags = UPF_BOOT_AUTOCONF,
69 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
70 .scbrr_algo_id = SCBRR_ALGO_1,
71 .type = PORT_SCIF,
72 .irqs = { 50, 50, 50, 50 },
73};
74
75static struct platform_device scif2_device = {
76 .name = "sh-sci",
77 .id = 2,
81 .dev = { 78 .dev = {
82 .platform_data = sci_platform_data, 79 .platform_data = &scif2_platform_data,
80 },
81};
82
83static struct plat_sci_port scif3_platform_data = {
84 .mapbase = 0xffed0000,
85 .flags = UPF_BOOT_AUTOCONF,
86 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
87 .scbrr_algo_id = SCBRR_ALGO_1,
88 .type = PORT_SCIF,
89 .irqs = { 51, 51, 51, 51 },
90};
91
92static struct platform_device scif3_device = {
93 .name = "sh-sci",
94 .id = 3,
95 .dev = {
96 .platform_data = &scif3_platform_data,
97 },
98};
99
100static struct plat_sci_port scif4_platform_data = {
101 .mapbase = 0xffee0000,
102 .flags = UPF_BOOT_AUTOCONF,
103 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
104 .scbrr_algo_id = SCBRR_ALGO_1,
105 .type = PORT_SCIF,
106 .irqs = { 52, 52, 52, 52 },
107};
108
109static struct platform_device scif4_device = {
110 .name = "sh-sci",
111 .id = 4,
112 .dev = {
113 .platform_data = &scif4_platform_data,
114 },
115};
116
117static struct plat_sci_port scif5_platform_data = {
118 .mapbase = 0xffef0000,
119 .flags = UPF_BOOT_AUTOCONF,
120 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
121 .scbrr_algo_id = SCBRR_ALGO_1,
122 .type = PORT_SCIF,
123 .irqs = { 53, 53, 53, 53 },
124};
125
126static struct platform_device scif5_device = {
127 .name = "sh-sci",
128 .id = 5,
129 .dev = {
130 .platform_data = &scif5_platform_data,
83 }, 131 },
84}; 132};
85 133
86static struct sh_timer_config tmu0_platform_data = { 134static struct sh_timer_config tmu0_platform_data = {
87 .name = "TMU0",
88 .channel_offset = 0x04, 135 .channel_offset = 0x04,
89 .timer_bit = 0, 136 .timer_bit = 0,
90 .clk = "peripheral_clk",
91 .clockevent_rating = 200, 137 .clockevent_rating = 200,
92}; 138};
93 139
94static struct resource tmu0_resources[] = { 140static struct resource tmu0_resources[] = {
95 [0] = { 141 [0] = {
96 .name = "TMU0",
97 .start = 0xffd80008, 142 .start = 0xffd80008,
98 .end = 0xffd80013, 143 .end = 0xffd80013,
99 .flags = IORESOURCE_MEM, 144 .flags = IORESOURCE_MEM,
@@ -115,16 +160,13 @@ static struct platform_device tmu0_device = {
115}; 160};
116 161
117static struct sh_timer_config tmu1_platform_data = { 162static struct sh_timer_config tmu1_platform_data = {
118 .name = "TMU1",
119 .channel_offset = 0x10, 163 .channel_offset = 0x10,
120 .timer_bit = 1, 164 .timer_bit = 1,
121 .clk = "peripheral_clk",
122 .clocksource_rating = 200, 165 .clocksource_rating = 200,
123}; 166};
124 167
125static struct resource tmu1_resources[] = { 168static struct resource tmu1_resources[] = {
126 [0] = { 169 [0] = {
127 .name = "TMU1",
128 .start = 0xffd80014, 170 .start = 0xffd80014,
129 .end = 0xffd8001f, 171 .end = 0xffd8001f,
130 .flags = IORESOURCE_MEM, 172 .flags = IORESOURCE_MEM,
@@ -146,15 +188,12 @@ static struct platform_device tmu1_device = {
146}; 188};
147 189
148static struct sh_timer_config tmu2_platform_data = { 190static struct sh_timer_config tmu2_platform_data = {
149 .name = "TMU2",
150 .channel_offset = 0x1c, 191 .channel_offset = 0x1c,
151 .timer_bit = 2, 192 .timer_bit = 2,
152 .clk = "peripheral_clk",
153}; 193};
154 194
155static struct resource tmu2_resources[] = { 195static struct resource tmu2_resources[] = {
156 [0] = { 196 [0] = {
157 .name = "TMU2",
158 .start = 0xffd80020, 197 .start = 0xffd80020,
159 .end = 0xffd8002f, 198 .end = 0xffd8002f,
160 .flags = IORESOURCE_MEM, 199 .flags = IORESOURCE_MEM,
@@ -176,15 +215,12 @@ static struct platform_device tmu2_device = {
176}; 215};
177 216
178static struct sh_timer_config tmu3_platform_data = { 217static struct sh_timer_config tmu3_platform_data = {
179 .name = "TMU3",
180 .channel_offset = 0x04, 218 .channel_offset = 0x04,
181 .timer_bit = 0, 219 .timer_bit = 0,
182 .clk = "peripheral_clk",
183}; 220};
184 221
185static struct resource tmu3_resources[] = { 222static struct resource tmu3_resources[] = {
186 [0] = { 223 [0] = {
187 .name = "TMU3",
188 .start = 0xffda0008, 224 .start = 0xffda0008,
189 .end = 0xffda0013, 225 .end = 0xffda0013,
190 .flags = IORESOURCE_MEM, 226 .flags = IORESOURCE_MEM,
@@ -206,15 +242,12 @@ static struct platform_device tmu3_device = {
206}; 242};
207 243
208static struct sh_timer_config tmu4_platform_data = { 244static struct sh_timer_config tmu4_platform_data = {
209 .name = "TMU4",
210 .channel_offset = 0x10, 245 .channel_offset = 0x10,
211 .timer_bit = 1, 246 .timer_bit = 1,
212 .clk = "peripheral_clk",
213}; 247};
214 248
215static struct resource tmu4_resources[] = { 249static struct resource tmu4_resources[] = {
216 [0] = { 250 [0] = {
217 .name = "TMU4",
218 .start = 0xffda0014, 251 .start = 0xffda0014,
219 .end = 0xffda001f, 252 .end = 0xffda001f,
220 .flags = IORESOURCE_MEM, 253 .flags = IORESOURCE_MEM,
@@ -236,15 +269,12 @@ static struct platform_device tmu4_device = {
236}; 269};
237 270
238static struct sh_timer_config tmu5_platform_data = { 271static struct sh_timer_config tmu5_platform_data = {
239 .name = "TMU5",
240 .channel_offset = 0x1c, 272 .channel_offset = 0x1c,
241 .timer_bit = 2, 273 .timer_bit = 2,
242 .clk = "peripheral_clk",
243}; 274};
244 275
245static struct resource tmu5_resources[] = { 276static struct resource tmu5_resources[] = {
246 [0] = { 277 [0] = {
247 .name = "TMU5",
248 .start = 0xffda0020, 278 .start = 0xffda0020,
249 .end = 0xffda002b, 279 .end = 0xffda002b,
250 .flags = IORESOURCE_MEM, 280 .flags = IORESOURCE_MEM,
@@ -266,15 +296,12 @@ static struct platform_device tmu5_device = {
266}; 296};
267 297
268static struct sh_timer_config tmu6_platform_data = { 298static struct sh_timer_config tmu6_platform_data = {
269 .name = "TMU6",
270 .channel_offset = 0x04, 299 .channel_offset = 0x04,
271 .timer_bit = 0, 300 .timer_bit = 0,
272 .clk = "peripheral_clk",
273}; 301};
274 302
275static struct resource tmu6_resources[] = { 303static struct resource tmu6_resources[] = {
276 [0] = { 304 [0] = {
277 .name = "TMU6",
278 .start = 0xffdc0008, 305 .start = 0xffdc0008,
279 .end = 0xffdc0013, 306 .end = 0xffdc0013,
280 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
@@ -296,15 +323,12 @@ static struct platform_device tmu6_device = {
296}; 323};
297 324
298static struct sh_timer_config tmu7_platform_data = { 325static struct sh_timer_config tmu7_platform_data = {
299 .name = "TMU7",
300 .channel_offset = 0x10, 326 .channel_offset = 0x10,
301 .timer_bit = 1, 327 .timer_bit = 1,
302 .clk = "peripheral_clk",
303}; 328};
304 329
305static struct resource tmu7_resources[] = { 330static struct resource tmu7_resources[] = {
306 [0] = { 331 [0] = {
307 .name = "TMU7",
308 .start = 0xffdc0014, 332 .start = 0xffdc0014,
309 .end = 0xffdc001f, 333 .end = 0xffdc001f,
310 .flags = IORESOURCE_MEM, 334 .flags = IORESOURCE_MEM,
@@ -326,15 +350,12 @@ static struct platform_device tmu7_device = {
326}; 350};
327 351
328static struct sh_timer_config tmu8_platform_data = { 352static struct sh_timer_config tmu8_platform_data = {
329 .name = "TMU8",
330 .channel_offset = 0x1c, 353 .channel_offset = 0x1c,
331 .timer_bit = 2, 354 .timer_bit = 2,
332 .clk = "peripheral_clk",
333}; 355};
334 356
335static struct resource tmu8_resources[] = { 357static struct resource tmu8_resources[] = {
336 [0] = { 358 [0] = {
337 .name = "TMU8",
338 .start = 0xffdc0020, 359 .start = 0xffdc0020,
339 .end = 0xffdc002b, 360 .end = 0xffdc002b,
340 .flags = IORESOURCE_MEM, 361 .flags = IORESOURCE_MEM,
@@ -356,15 +377,12 @@ static struct platform_device tmu8_device = {
356}; 377};
357 378
358static struct sh_timer_config tmu9_platform_data = { 379static struct sh_timer_config tmu9_platform_data = {
359 .name = "TMU9",
360 .channel_offset = 0x04, 380 .channel_offset = 0x04,
361 .timer_bit = 0, 381 .timer_bit = 0,
362 .clk = "peripheral_clk",
363}; 382};
364 383
365static struct resource tmu9_resources[] = { 384static struct resource tmu9_resources[] = {
366 [0] = { 385 [0] = {
367 .name = "TMU9",
368 .start = 0xffde0008, 386 .start = 0xffde0008,
369 .end = 0xffde0013, 387 .end = 0xffde0013,
370 .flags = IORESOURCE_MEM, 388 .flags = IORESOURCE_MEM,
@@ -386,15 +404,12 @@ static struct platform_device tmu9_device = {
386}; 404};
387 405
388static struct sh_timer_config tmu10_platform_data = { 406static struct sh_timer_config tmu10_platform_data = {
389 .name = "TMU10",
390 .channel_offset = 0x10, 407 .channel_offset = 0x10,
391 .timer_bit = 1, 408 .timer_bit = 1,
392 .clk = "peripheral_clk",
393}; 409};
394 410
395static struct resource tmu10_resources[] = { 411static struct resource tmu10_resources[] = {
396 [0] = { 412 [0] = {
397 .name = "TMU10",
398 .start = 0xffde0014, 413 .start = 0xffde0014,
399 .end = 0xffde001f, 414 .end = 0xffde001f,
400 .flags = IORESOURCE_MEM, 415 .flags = IORESOURCE_MEM,
@@ -416,15 +431,12 @@ static struct platform_device tmu10_device = {
416}; 431};
417 432
418static struct sh_timer_config tmu11_platform_data = { 433static struct sh_timer_config tmu11_platform_data = {
419 .name = "TMU11",
420 .channel_offset = 0x1c, 434 .channel_offset = 0x1c,
421 .timer_bit = 2, 435 .timer_bit = 2,
422 .clk = "peripheral_clk",
423}; 436};
424 437
425static struct resource tmu11_resources[] = { 438static struct resource tmu11_resources[] = {
426 [0] = { 439 [0] = {
427 .name = "TMU11",
428 .start = 0xffde0020, 440 .start = 0xffde0020,
429 .end = 0xffde002b, 441 .end = 0xffde002b,
430 .flags = IORESOURCE_MEM, 442 .flags = IORESOURCE_MEM,
@@ -445,10 +457,114 @@ static struct platform_device tmu11_device = {
445 .num_resources = ARRAY_SIZE(tmu11_resources), 457 .num_resources = ARRAY_SIZE(tmu11_resources),
446}; 458};
447 459
460static const struct sh_dmae_channel dmac0_channels[] = {
461 {
462 .offset = 0,
463 .dmars = 0,
464 .dmars_bit = 0,
465 }, {
466 .offset = 0x10,
467 .dmars = 0,
468 .dmars_bit = 8,
469 }, {
470 .offset = 0x20,
471 .dmars = 4,
472 .dmars_bit = 0,
473 }, {
474 .offset = 0x30,
475 .dmars = 4,
476 .dmars_bit = 8,
477 }, {
478 .offset = 0x50,
479 .dmars = 8,
480 .dmars_bit = 0,
481 }, {
482 .offset = 0x60,
483 .dmars = 8,
484 .dmars_bit = 8,
485 }
486};
487
488static const unsigned int ts_shift[] = TS_SHIFT;
489
490static struct sh_dmae_pdata dma0_platform_data = {
491 .channel = dmac0_channels,
492 .channel_num = ARRAY_SIZE(dmac0_channels),
493 .ts_low_shift = CHCR_TS_LOW_SHIFT,
494 .ts_low_mask = CHCR_TS_LOW_MASK,
495 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
496 .ts_high_mask = CHCR_TS_HIGH_MASK,
497 .ts_shift = ts_shift,
498 .ts_shift_num = ARRAY_SIZE(ts_shift),
499 .dmaor_init = DMAOR_INIT,
500};
501
502/* Resource order important! */
503static struct resource dmac0_resources[] = {
504 {
505 /* Channel registers and DMAOR */
506 .start = 0xfe008020,
507 .end = 0xfe00808f,
508 .flags = IORESOURCE_MEM,
509 }, {
510 /* DMARSx */
511 .start = 0xfe009000,
512 .end = 0xfe00900b,
513 .flags = IORESOURCE_MEM,
514 }, {
515 /* DMA error IRQ */
516 .start = evt2irq(0x5c0),
517 .end = evt2irq(0x5c0),
518 .flags = IORESOURCE_IRQ,
519 }, {
520 /* IRQ for channels 0-5 */
521 .start = evt2irq(0x500),
522 .end = evt2irq(0x5a0),
523 .flags = IORESOURCE_IRQ,
524 },
525};
526
527static struct platform_device dma0_device = {
528 .name = "sh-dma-engine",
529 .id = 0,
530 .resource = dmac0_resources,
531 .num_resources = ARRAY_SIZE(dmac0_resources),
532 .dev = {
533 .platform_data = &dma0_platform_data,
534 },
535};
536
537#define USB_EHCI_START 0xffe70000
538#define USB_OHCI_START 0xffe70400
539
540static struct resource usb_ehci_resources[] = {
541 [0] = {
542 .start = USB_EHCI_START,
543 .end = USB_EHCI_START + 0x3ff,
544 .flags = IORESOURCE_MEM,
545 },
546 [1] = {
547 .start = 77,
548 .end = 77,
549 .flags = IORESOURCE_IRQ,
550 },
551};
552
553static struct platform_device usb_ehci_device = {
554 .name = "sh_ehci",
555 .id = -1,
556 .dev = {
557 .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
558 .coherent_dma_mask = DMA_BIT_MASK(32),
559 },
560 .num_resources = ARRAY_SIZE(usb_ehci_resources),
561 .resource = usb_ehci_resources,
562};
563
448static struct resource usb_ohci_resources[] = { 564static struct resource usb_ohci_resources[] = {
449 [0] = { 565 [0] = {
450 .start = 0xffe70400, 566 .start = USB_OHCI_START,
451 .end = 0xffe704ff, 567 .end = USB_OHCI_START + 0x3ff,
452 .flags = IORESOURCE_MEM, 568 .flags = IORESOURCE_MEM,
453 }, 569 },
454 [1] = { 570 [1] = {
@@ -458,12 +574,11 @@ static struct resource usb_ohci_resources[] = {
458 }, 574 },
459}; 575};
460 576
461static u64 usb_ohci_dma_mask = DMA_BIT_MASK(32);
462static struct platform_device usb_ohci_device = { 577static struct platform_device usb_ohci_device = {
463 .name = "sh_ohci", 578 .name = "sh_ohci",
464 .id = -1, 579 .id = -1,
465 .dev = { 580 .dev = {
466 .dma_mask = &usb_ohci_dma_mask, 581 .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
467 .coherent_dma_mask = DMA_BIT_MASK(32), 582 .coherent_dma_mask = DMA_BIT_MASK(32),
468 }, 583 },
469 .num_resources = ARRAY_SIZE(usb_ohci_resources), 584 .num_resources = ARRAY_SIZE(usb_ohci_resources),
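
The SH7786 USB hunks drop the separate usb_ohci_dma_mask variable: dev.dma_mask now points at the device's own coherent_dma_mask, and the newly added EHCI device uses the same idiom. This is valid C because the address of a field of a static object is an address constant, so the initializer may refer to the object being defined. Restating the OHCI declaration from the hunk above for clarity:

static struct platform_device usb_ohci_device = {
	.name	= "sh_ohci",
	.id	= -1,
	.dev	= {
		/* self-reference instead of a standalone u64 mask */
		.dma_mask		= &usb_ohci_device.dev.coherent_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
	.resource	= usb_ohci_resources,
};
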
@@ -471,6 +586,12 @@ static struct platform_device usb_ohci_device = {
471}; 586};
472 587
473static struct platform_device *sh7786_early_devices[] __initdata = { 588static struct platform_device *sh7786_early_devices[] __initdata = {
589 &scif0_device,
590 &scif1_device,
591 &scif2_device,
592 &scif3_device,
593 &scif4_device,
594 &scif5_device,
474 &tmu0_device, 595 &tmu0_device,
475 &tmu1_device, 596 &tmu1_device,
476 &tmu2_device, 597 &tmu2_device,
@@ -486,11 +607,11 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
486}; 607};
487 608
488static struct platform_device *sh7786_devices[] __initdata = { 609static struct platform_device *sh7786_devices[] __initdata = {
489 &sci_device, 610 &dma0_device,
611 &usb_ehci_device,
490 &usb_ohci_device, 612 &usb_ohci_device,
491}; 613};
492 614
493
494/* 615/*
495 * Please call this function if your platform board 616 * Please call this function if your platform board
496 * use external clock for USB 617 * use external clock for USB
@@ -498,6 +619,7 @@ static struct platform_device *sh7786_devices[] __initdata = {
498#define USBCTL0 0xffe70858 619#define USBCTL0 0xffe70858
499#define CLOCK_MODE_MASK 0xffffff7f 620#define CLOCK_MODE_MASK 0xffffff7f
500#define EXT_CLOCK_MODE 0x00000080 621#define EXT_CLOCK_MODE 0x00000080
622
501void __init sh7786_usb_use_exclock(void) 623void __init sh7786_usb_use_exclock(void)
502{ 624{
503 u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK; 625 u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
@@ -515,6 +637,7 @@ void __init sh7786_usb_use_exclock(void)
515#define PLL_ENB 0x00000002 637#define PLL_ENB 0x00000002
516#define PHY_RST 0x00000004 638#define PHY_RST 0x00000004
517#define ACT_PLL_STATUS 0xc0000000 639#define ACT_PLL_STATUS 0xc0000000
640
518static void __init sh7786_usb_setup(void) 641static void __init sh7786_usb_setup(void)
519{ 642{
520 int i = 1000000; 643 int i = 1000000;
@@ -545,33 +668,10 @@ static void __init sh7786_usb_setup(void)
545 } 668 }
546} 669}
547 670
548static int __init sh7786_devices_setup(void)
549{
550 int ret;
551
552 sh7786_usb_setup();
553
554 ret = platform_add_devices(sh7786_early_devices,
555 ARRAY_SIZE(sh7786_early_devices));
556 if (unlikely(ret != 0))
557 return ret;
558
559 return platform_add_devices(sh7786_devices,
560 ARRAY_SIZE(sh7786_devices));
561}
562device_initcall(sh7786_devices_setup);
563
564void __init plat_early_device_setup(void)
565{
566 early_platform_add_devices(sh7786_early_devices,
567 ARRAY_SIZE(sh7786_early_devices));
568}
569
570enum { 671enum {
571 UNUSED = 0, 672 UNUSED = 0,
572 673
573 /* interrupt sources */ 674 /* interrupt sources */
574
575 IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, 675 IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
576 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, 676 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
577 IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, 677 IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
@@ -609,9 +709,12 @@ enum {
609 Thermal, 709 Thermal,
610 INTICI0, INTICI1, INTICI2, INTICI3, 710 INTICI0, INTICI1, INTICI2, INTICI3,
611 INTICI4, INTICI5, INTICI6, INTICI7, 711 INTICI4, INTICI5, INTICI6, INTICI7,
712
713 /* Muxed sub-events */
714 TXI1, BRI1, RXI1, ERI1,
612}; 715};
613 716
614static struct intc_vect vectors[] __initdata = { 717static struct intc_vect sh7786_vectors[] __initdata = {
615 INTC_VECT(WDT, 0x3e0), 718 INTC_VECT(WDT, 0x3e0),
616 INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420), 719 INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
617 INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460), 720 INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
@@ -670,9 +773,17 @@ static struct intc_vect vectors[] __initdata = {
670#define INTMSK2 0xfe410068 773#define INTMSK2 0xfe410068
671#define INTMSKCLR2 0xfe41006c 774#define INTMSKCLR2 0xfe41006c
672 775
673static struct intc_mask_reg mask_registers[] __initdata = { 776#define INTDISTCR0 0xfe4100b0
777#define INTDISTCR1 0xfe4100b4
778#define INT2DISTCR0 0xfe410900
779#define INT2DISTCR1 0xfe410904
780#define INT2DISTCR2 0xfe410908
781#define INT2DISTCR3 0xfe41090c
782
783static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
674 { CnINTMSK0, CnINTMSKCLR0, 32, 784 { CnINTMSK0, CnINTMSKCLR0, 32,
675 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, 785 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
786 INTC_SMP_BALANCING(INTDISTCR0) },
676 { INTMSK2, INTMSKCLR2, 32, 787 { INTMSK2, INTMSKCLR2, 32,
677 { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, 788 { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
678 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, 789 IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
@@ -684,7 +795,8 @@ static struct intc_mask_reg mask_registers[] __initdata = {
684 IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, 795 IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
685 { CnINT2MSKR0, CnINT2MSKCR0 , 32, 796 { CnINT2MSKR0, CnINT2MSKCR0 , 32,
686 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 797 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
687 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT } }, 798 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
799 INTC_SMP_BALANCING(INT2DISTCR0) },
688 { CnINT2MSKR1, CnINT2MSKCR1, 32, 800 { CnINT2MSKR1, CnINT2MSKCR1, 32,
689 { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0, 801 { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
690 DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6, 802 DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
@@ -693,14 +805,14 @@ static struct intc_mask_reg mask_registers[] __initdata = {
693 HPB_0, HPB_1, HPB_2, 805 HPB_0, HPB_1, HPB_2,
694 SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3, 806 SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
695 SCIF1, 807 SCIF1,
696 TMU2, TMU3, 0, } }, 808 TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
697 { CnINT2MSKR2, CnINT2MSKCR2, 32, 809 { CnINT2MSKR2, CnINT2MSKCR2, 32,
698 { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5, 810 { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
699 Eth_0, Eth_1, 811 Eth_0, Eth_1,
700 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 812 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
701 PCIeC0_0, PCIeC0_1, PCIeC0_2, 813 PCIeC0_0, PCIeC0_1, PCIeC0_2,
702 PCIeC1_0, PCIeC1_1, PCIeC1_2, 814 PCIeC1_0, PCIeC1_1, PCIeC1_2,
703 USB, 0, 0 } }, 815 USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
704 { CnINT2MSKR3, CnINT2MSKCR3, 32, 816 { CnINT2MSKR3, CnINT2MSKCR3, 32,
705 { 0, 0, 0, 0, 0, 0, 817 { 0, 0, 0, 0, 0, 0,
706 I2C0, I2C1, 818 I2C0, I2C1,
@@ -709,10 +821,10 @@ static struct intc_mask_reg mask_registers[] __initdata = {
709 HAC0, HAC1, 821 HAC0, HAC1,
710 FLCTL, 0, 822 FLCTL, 0,
711 HSPI, GPIO0, GPIO1, Thermal, 823 HSPI, GPIO0, GPIO1, Thermal,
712 0, 0, 0, 0, 0, 0, 0, 0 } }, 824 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
713}; 825};
714 826
715static struct intc_prio_reg prio_registers[] __initdata = { 827static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
716 { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, 828 { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
717 IRQ4, IRQ5, IRQ6, IRQ7 } }, 829 IRQ4, IRQ5, IRQ6, IRQ7 } },
718 { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } }, 830 { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
@@ -756,11 +868,27 @@ static struct intc_prio_reg prio_registers[] __initdata = {
756 INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) }, 868 INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
757}; 869};
758 870
759static DECLARE_INTC_DESC(intc_desc, "sh7786", vectors, NULL, 871static struct intc_subgroup sh7786_subgroups[] __initdata = {
760 mask_registers, prio_registers, NULL); 872 { 0xfe410c20, 32, SCIF1,
873 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
874 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
875};
761 876
762/* Support for external interrupt pins in IRQ mode */ 877static struct intc_desc sh7786_intc_desc __initdata = {
878 .name = "sh7786",
879 .hw = {
880 .vectors = sh7786_vectors,
881 .nr_vectors = ARRAY_SIZE(sh7786_vectors),
882 .mask_regs = sh7786_mask_registers,
883 .nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
884 .subgroups = sh7786_subgroups,
885 .nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
886 .prio_regs = sh7786_prio_registers,
887 .nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
888 },
889};
763 890
891/* Support for external interrupt pins in IRQ mode */
764static struct intc_vect vectors_irq0123[] __initdata = { 892static struct intc_vect vectors_irq0123[] __initdata = {
765 INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240), 893 INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
766 INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0), 894 INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
@@ -771,23 +899,25 @@ static struct intc_vect vectors_irq4567[] __initdata = {
771 INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0), 899 INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
772}; 900};
773 901
774static struct intc_sense_reg sense_registers[] __initdata = { 902static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
775 { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, 903 { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
776 IRQ4, IRQ5, IRQ6, IRQ7 } }, 904 IRQ4, IRQ5, IRQ6, IRQ7 } },
777}; 905};
778 906
779static struct intc_mask_reg ack_registers[] __initdata = { 907static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
780 { 0xfe410024, 0, 32, /* INTREQ */ 908 { 0xfe410024, 0, 32, /* INTREQ */
781 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, 909 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
782}; 910};
783 911
784static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123", 912static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
785 vectors_irq0123, NULL, mask_registers, 913 vectors_irq0123, NULL, sh7786_mask_registers,
786 prio_registers, sense_registers, ack_registers); 914 sh7786_prio_registers, sh7786_sense_registers,
915 sh7786_ack_registers);
787 916
788static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567", 917static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
789 vectors_irq4567, NULL, mask_registers, 918 vectors_irq4567, NULL, sh7786_mask_registers,
790 prio_registers, sense_registers, ack_registers); 919 sh7786_prio_registers, sh7786_sense_registers,
920 sh7786_ack_registers);
791 921
792/* External interrupt pins in IRL mode */ 922/* External interrupt pins in IRL mode */
793 923
@@ -814,10 +944,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
814}; 944};
815 945
816static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123, 946static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
817 NULL, mask_registers, NULL, NULL); 947 NULL, sh7786_mask_registers, NULL, NULL);
818 948
819static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567, 949static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
820 NULL, mask_registers, NULL, NULL); 950 NULL, sh7786_mask_registers, NULL, NULL);
821 951
822#define INTC_ICR0 0xfe410000 952#define INTC_ICR0 0xfe410000
823#define INTC_INTMSK0 CnINTMSK0 953#define INTC_INTMSK0 CnINTMSK0
@@ -829,16 +959,16 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
829void __init plat_irq_setup(void) 959void __init plat_irq_setup(void)
830{ 960{
831 /* disable IRQ3-0 + IRQ7-4 */ 961 /* disable IRQ3-0 + IRQ7-4 */
832 ctrl_outl(0xff000000, INTC_INTMSK0); 962 __raw_writel(0xff000000, INTC_INTMSK0);
833 963
834 /* disable IRL3-0 + IRL7-4 */ 964 /* disable IRL3-0 + IRL7-4 */
835 ctrl_outl(0xc0000000, INTC_INTMSK1); 965 __raw_writel(0xc0000000, INTC_INTMSK1);
836 ctrl_outl(0xfffefffe, INTC_INTMSK2); 966 __raw_writel(0xfffefffe, INTC_INTMSK2);
837 967
838 /* select IRL mode for IRL3-0 + IRL7-4 */ 968 /* select IRL mode for IRL3-0 + IRL7-4 */
839 ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); 969 __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
840 970
841 register_intc_controller(&intc_desc); 971 register_intc_controller(&sh7786_intc_desc);
842} 972}
843 973
844void __init plat_irq_setup_pins(int mode) 974void __init plat_irq_setup_pins(int mode)
@@ -846,32 +976,32 @@ void __init plat_irq_setup_pins(int mode)
846 switch (mode) { 976 switch (mode) {
847 case IRQ_MODE_IRQ7654: 977 case IRQ_MODE_IRQ7654:
848 /* select IRQ mode for IRL7-4 */ 978 /* select IRQ mode for IRL7-4 */
849 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); 979 __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
850 register_intc_controller(&intc_desc_irq4567); 980 register_intc_controller(&intc_desc_irq4567);
851 break; 981 break;
852 case IRQ_MODE_IRQ3210: 982 case IRQ_MODE_IRQ3210:
853 /* select IRQ mode for IRL3-0 */ 983 /* select IRQ mode for IRL3-0 */
854 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); 984 __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
855 register_intc_controller(&intc_desc_irq0123); 985 register_intc_controller(&intc_desc_irq0123);
856 break; 986 break;
857 case IRQ_MODE_IRL7654: 987 case IRQ_MODE_IRL7654:
858 /* enable IRL7-4 but don't provide any masking */ 988 /* enable IRL7-4 but don't provide any masking */
859 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 989 __raw_writel(0x40000000, INTC_INTMSKCLR1);
860 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); 990 __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
861 break; 991 break;
862 case IRQ_MODE_IRL3210: 992 case IRQ_MODE_IRL3210:
863 /* enable IRL0-3 but don't provide any masking */ 993 /* enable IRL0-3 but don't provide any masking */
864 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 994 __raw_writel(0x80000000, INTC_INTMSKCLR1);
865 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); 995 __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
866 break; 996 break;
867 case IRQ_MODE_IRL7654_MASK: 997 case IRQ_MODE_IRL7654_MASK:
868 /* enable IRL7-4 and mask using cpu intc controller */ 998 /* enable IRL7-4 and mask using cpu intc controller */
869 ctrl_outl(0x40000000, INTC_INTMSKCLR1); 999 __raw_writel(0x40000000, INTC_INTMSKCLR1);
870 register_intc_controller(&intc_desc_irl4567); 1000 register_intc_controller(&intc_desc_irl4567);
871 break; 1001 break;
872 case IRQ_MODE_IRL3210_MASK: 1002 case IRQ_MODE_IRL3210_MASK:
873 /* enable IRL0-3 and mask using cpu intc controller */ 1003 /* enable IRL0-3 and mask using cpu intc controller */
874 ctrl_outl(0x80000000, INTC_INTMSKCLR1); 1004 __raw_writel(0x80000000, INTC_INTMSKCLR1);
875 register_intc_controller(&intc_desc_irl0123); 1005 register_intc_controller(&intc_desc_irl0123);
876 break; 1006 break;
877 default: 1007 default:
@@ -882,3 +1012,39 @@ void __init plat_irq_setup_pins(int mode)
882void __init plat_mem_setup(void) 1012void __init plat_mem_setup(void)
883{ 1013{
884} 1014}
1015
1016static int __init sh7786_devices_setup(void)
1017{
1018 int ret, irq;
1019
1020 sh7786_usb_setup();
1021
1022 /*
1023 * De-mux SCIF1 IRQs if possible
1024 */
1025 irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
1026 if (irq > 0) {
1027 scif1_platform_data.irqs[SCIx_TXI_IRQ] = irq;
1028 scif1_platform_data.irqs[SCIx_ERI_IRQ] =
1029 intc_irq_lookup(sh7786_intc_desc.name, ERI1);
1030 scif1_platform_data.irqs[SCIx_BRI_IRQ] =
1031 intc_irq_lookup(sh7786_intc_desc.name, BRI1);
1032 scif1_platform_data.irqs[SCIx_RXI_IRQ] =
1033 intc_irq_lookup(sh7786_intc_desc.name, RXI1);
1034 }
1035
1036 ret = platform_add_devices(sh7786_early_devices,
1037 ARRAY_SIZE(sh7786_early_devices));
1038 if (unlikely(ret != 0))
1039 return ret;
1040
1041 return platform_add_devices(sh7786_devices,
1042 ARRAY_SIZE(sh7786_devices));
1043}
1044arch_initcall(sh7786_devices_setup);
1045
1046void __init plat_early_device_setup(void)
1047{
1048 early_platform_add_devices(sh7786_early_devices,
1049 ARRAY_SIZE(sh7786_early_devices));
1050}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 4a26cc304139..bb208806dc1a 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SH-X3 Prototype Setup 2 * SH-X3 Prototype Setup
3 * 3 *
4 * Copyright (C) 2007 - 2009 Paul Mundt 4 * Copyright (C) 2007 - 2010 Paul Mundt
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
@@ -12,62 +12,79 @@
12#include <linux/serial.h> 12#include <linux/serial.h>
13#include <linux/serial_sci.h> 13#include <linux/serial_sci.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/gpio.h>
15#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
17#include <cpu/shx3.h>
16#include <asm/mmzone.h> 18#include <asm/mmzone.h>
17 19
18static struct plat_sci_port sci_platform_data[] = { 20/*
19 { 21 * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
20 .mapbase = 0xffc30000, 22 * INTEVT values overlap with the FPU EXPEVT ones, requiring special
21 .flags = UPF_BOOT_AUTOCONF, 23 * demuxing in the exception dispatch path.
22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 24 *
23 .scbrr_algo_id = SCBRR_ALGO_2, 25 * As this overlap is something that never should have made it in to
24 .type = PORT_SCIF, 26 * silicon in the first place, we just refuse to deal with the port at
25 .irqs = { 40, 41, 43, 42 }, 27 * all rather than adding infrastructure to hack around it.
26 }, { 28 */
27 .mapbase = 0xffc40000, 29static struct plat_sci_port scif0_platform_data = {
28 .flags = UPF_BOOT_AUTOCONF, 30 .mapbase = 0xffc30000,
29 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 31 .flags = UPF_BOOT_AUTOCONF,
30 .scbrr_algo_id = SCBRR_ALGO_2, 32 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
31 .type = PORT_SCIF, 33 .scbrr_algo_id = SCBRR_ALGO_2,
32 .irqs = { 44, 45, 47, 46 }, 34 .type = PORT_SCIF,
33 }, { 35 .irqs = { 40, 41, 43, 42 },
34 .mapbase = 0xffc50000, 36};
35 .flags = UPF_BOOT_AUTOCONF, 37
36 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 38static struct platform_device scif0_device = {
37 .scbrr_algo_id = SCBRR_ALGO_2, 39 .name = "sh-sci",
38 .type = PORT_SCIF, 40 .id = 0,
39 .irqs = { 48, 49, 51, 50 }, 41 .dev = {
40 }, { 42 .platform_data = &scif0_platform_data,
41 .mapbase = 0xffc60000, 43 },
42 .flags = UPF_BOOT_AUTOCONF, 44};
43 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 45
44 .scbrr_algo_id = SCBRR_ALGO_2, 46static struct plat_sci_port scif1_platform_data = {
45 .type = PORT_SCIF, 47 .mapbase = 0xffc40000,
46 .irqs = { 52, 53, 55, 54 }, 48 .flags = UPF_BOOT_AUTOCONF,
47 }, { 49 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
48 .flags = 0, 50 .scbrr_algo_id = SCBRR_ALGO_2,
49 } 51 .type = PORT_SCIF,
52 .irqs = { 44, 45, 47, 46 },
53};
54
55static struct platform_device scif1_device = {
56 .name = "sh-sci",
57 .id = 1,
58 .dev = {
59 .platform_data = &scif1_platform_data,
60 },
61};
62
63static struct plat_sci_port scif2_platform_data = {
64 .mapbase = 0xffc60000,
65 .flags = UPF_BOOT_AUTOCONF,
66 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
67 .scbrr_algo_id = SCBRR_ALGO_2,
68 .type = PORT_SCIF,
69 .irqs = { 52, 53, 55, 54 },
50}; 70};
51 71
52static struct platform_device sci_device = { 72static struct platform_device scif2_device = {
53 .name = "sh-sci", 73 .name = "sh-sci",
54 .id = -1, 74 .id = 2,
55 .dev = { 75 .dev = {
56 .platform_data = sci_platform_data, 76 .platform_data = &scif2_platform_data,
57 }, 77 },
58}; 78};
59 79
60static struct sh_timer_config tmu0_platform_data = { 80static struct sh_timer_config tmu0_platform_data = {
61 .name = "TMU0",
62 .channel_offset = 0x04, 81 .channel_offset = 0x04,
63 .timer_bit = 0, 82 .timer_bit = 0,
64 .clk = "peripheral_clk",
65 .clockevent_rating = 200, 83 .clockevent_rating = 200,
66}; 84};
67 85
68static struct resource tmu0_resources[] = { 86static struct resource tmu0_resources[] = {
69 [0] = { 87 [0] = {
70 .name = "TMU0",
71 .start = 0xffc10008, 88 .start = 0xffc10008,
72 .end = 0xffc10013, 89 .end = 0xffc10013,
73 .flags = IORESOURCE_MEM, 90 .flags = IORESOURCE_MEM,
@@ -89,16 +106,13 @@ static struct platform_device tmu0_device = {
89}; 106};
90 107
91static struct sh_timer_config tmu1_platform_data = { 108static struct sh_timer_config tmu1_platform_data = {
92 .name = "TMU1",
93 .channel_offset = 0x10, 109 .channel_offset = 0x10,
94 .timer_bit = 1, 110 .timer_bit = 1,
95 .clk = "peripheral_clk",
96 .clocksource_rating = 200, 111 .clocksource_rating = 200,
97}; 112};
98 113
99static struct resource tmu1_resources[] = { 114static struct resource tmu1_resources[] = {
100 [0] = { 115 [0] = {
101 .name = "TMU1",
102 .start = 0xffc10014, 116 .start = 0xffc10014,
103 .end = 0xffc1001f, 117 .end = 0xffc1001f,
104 .flags = IORESOURCE_MEM, 118 .flags = IORESOURCE_MEM,
@@ -120,15 +134,12 @@ static struct platform_device tmu1_device = {
120}; 134};
121 135
122static struct sh_timer_config tmu2_platform_data = { 136static struct sh_timer_config tmu2_platform_data = {
123 .name = "TMU2",
124 .channel_offset = 0x1c, 137 .channel_offset = 0x1c,
125 .timer_bit = 2, 138 .timer_bit = 2,
126 .clk = "peripheral_clk",
127}; 139};
128 140
129static struct resource tmu2_resources[] = { 141static struct resource tmu2_resources[] = {
130 [0] = { 142 [0] = {
131 .name = "TMU2",
132 .start = 0xffc10020, 143 .start = 0xffc10020,
133 .end = 0xffc1002f, 144 .end = 0xffc1002f,
134 .flags = IORESOURCE_MEM, 145 .flags = IORESOURCE_MEM,
@@ -150,15 +161,12 @@ static struct platform_device tmu2_device = {
150}; 161};
151 162
152static struct sh_timer_config tmu3_platform_data = { 163static struct sh_timer_config tmu3_platform_data = {
153 .name = "TMU3",
154 .channel_offset = 0x04, 164 .channel_offset = 0x04,
155 .timer_bit = 0, 165 .timer_bit = 0,
156 .clk = "peripheral_clk",
157}; 166};
158 167
159static struct resource tmu3_resources[] = { 168static struct resource tmu3_resources[] = {
160 [0] = { 169 [0] = {
161 .name = "TMU3",
162 .start = 0xffc20008, 170 .start = 0xffc20008,
163 .end = 0xffc20013, 171 .end = 0xffc20013,
164 .flags = IORESOURCE_MEM, 172 .flags = IORESOURCE_MEM,
@@ -180,15 +188,12 @@ static struct platform_device tmu3_device = {
180}; 188};
181 189
182static struct sh_timer_config tmu4_platform_data = { 190static struct sh_timer_config tmu4_platform_data = {
183 .name = "TMU4",
184 .channel_offset = 0x10, 191 .channel_offset = 0x10,
185 .timer_bit = 1, 192 .timer_bit = 1,
186 .clk = "peripheral_clk",
187}; 193};
188 194
189static struct resource tmu4_resources[] = { 195static struct resource tmu4_resources[] = {
190 [0] = { 196 [0] = {
191 .name = "TMU4",
192 .start = 0xffc20014, 197 .start = 0xffc20014,
193 .end = 0xffc2001f, 198 .end = 0xffc2001f,
194 .flags = IORESOURCE_MEM, 199 .flags = IORESOURCE_MEM,
@@ -210,15 +215,12 @@ static struct platform_device tmu4_device = {
210}; 215};
211 216
212static struct sh_timer_config tmu5_platform_data = { 217static struct sh_timer_config tmu5_platform_data = {
213 .name = "TMU5",
214 .channel_offset = 0x1c, 218 .channel_offset = 0x1c,
215 .timer_bit = 2, 219 .timer_bit = 2,
216 .clk = "peripheral_clk",
217}; 220};
218 221
219static struct resource tmu5_resources[] = { 222static struct resource tmu5_resources[] = {
220 [0] = { 223 [0] = {
221 .name = "TMU5",
222 .start = 0xffc20020, 224 .start = 0xffc20020,
223 .end = 0xffc2002b, 225 .end = 0xffc2002b,
224 .flags = IORESOURCE_MEM, 226 .flags = IORESOURCE_MEM,
@@ -240,6 +242,9 @@ static struct platform_device tmu5_device = {
240}; 242};
241 243
242static struct platform_device *shx3_early_devices[] __initdata = { 244static struct platform_device *shx3_early_devices[] __initdata = {
245 &scif0_device,
246 &scif1_device,
247 &scif2_device,
243 &tmu0_device, 248 &tmu0_device,
244 &tmu1_device, 249 &tmu1_device,
245 &tmu2_device, 250 &tmu2_device,
@@ -248,23 +253,12 @@ static struct platform_device *shx3_early_devices[] __initdata = {
248 &tmu5_device, 253 &tmu5_device,
249}; 254};
250 255
251static struct platform_device *shx3_devices[] __initdata = {
252 &sci_device,
253};
254
255static int __init shx3_devices_setup(void) 256static int __init shx3_devices_setup(void)
256{ 257{
257 int ret; 258 return platform_add_devices(shx3_early_devices,
258
259 ret = platform_add_devices(shx3_early_devices,
260 ARRAY_SIZE(shx3_early_devices)); 259 ARRAY_SIZE(shx3_early_devices));
261 if (unlikely(ret != 0))
262 return ret;
263
264 return platform_add_devices(shx3_devices,
265 ARRAY_SIZE(shx3_devices));
266} 260}
267__initcall(shx3_devices_setup); 261arch_initcall(shx3_devices_setup);
268 262
269void __init plat_early_device_setup(void) 263void __init plat_early_device_setup(void)
270{ 264{
@@ -295,10 +289,7 @@ enum {
295 DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9, 289 DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
296 DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE, 290 DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
297 IIC, VIN0, VIN1, VCORE0, ATAPI, 291 IIC, VIN0, VIN1, VCORE0, ATAPI,
298 DTU0_TEND, DTU0_AE, DTU0_TMISS, 292 DTU0, DTU1, DTU2, DTU3,
299 DTU1_TEND, DTU1_AE, DTU1_TMISS,
300 DTU2_TEND, DTU2_AE, DTU2_TMISS,
301 DTU3_TEND, DTU3_AE, DTU3_TMISS,
302 FE0, FE1, 293 FE0, FE1,
303 GPIO0, GPIO1, GPIO2, GPIO3, 294 GPIO0, GPIO1, GPIO2, GPIO3,
304 PAM, IRM, 295 PAM, IRM,
@@ -307,7 +298,7 @@ enum {
307 298
308 /* interrupt groups */ 299 /* interrupt groups */
309 IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, 300 IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
310 DMAC0, DMAC1, DTU0, DTU1, DTU2, DTU3, 301 DMAC0, DMAC1,
311}; 302};
312 303
313static struct intc_vect vectors[] __initdata = { 304static struct intc_vect vectors[] __initdata = {
@@ -324,8 +315,6 @@ static struct intc_vect vectors[] __initdata = {
324 INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), 315 INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
325 INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), 316 INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
326 INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), 317 INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
327 INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820),
328 INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860),
329 INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), 318 INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
330 INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), 319 INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
331 INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), 320 INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
@@ -340,14 +329,14 @@ static struct intc_vect vectors[] __initdata = {
340 INTC_VECT(IIC, 0xae0), 329 INTC_VECT(IIC, 0xae0),
341 INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20), 330 INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20),
342 INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60), 331 INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60),
343 INTC_VECT(DTU0_TEND, 0xc00), INTC_VECT(DTU0_AE, 0xc20), 332 INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20),
344 INTC_VECT(DTU0_TMISS, 0xc40), 333 INTC_VECT(DTU0, 0xc40),
345 INTC_VECT(DTU1_TEND, 0xc60), INTC_VECT(DTU1_AE, 0xc80), 334 INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80),
346 INTC_VECT(DTU1_TMISS, 0xca0), 335 INTC_VECT(DTU1, 0xca0),
347 INTC_VECT(DTU2_TEND, 0xcc0), INTC_VECT(DTU2_AE, 0xce0), 336 INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0),
348 INTC_VECT(DTU2_TMISS, 0xd00), 337 INTC_VECT(DTU2, 0xd00),
349 INTC_VECT(DTU3_TEND, 0xd20), INTC_VECT(DTU3_AE, 0xd40), 338 INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40),
350 INTC_VECT(DTU3_TMISS, 0xd60), 339 INTC_VECT(DTU3, 0xd60),
351 INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20), 340 INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20),
352 INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60), 341 INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60),
353 INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0), 342 INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0),
@@ -366,18 +355,17 @@ static struct intc_group groups[] __initdata = {
366 INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), 355 INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
367 INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), 356 INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
368 INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), 357 INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
369 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
370 INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), 358 INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
371 INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, 359 INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
372 DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), 360 DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
373 INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, 361 INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
374 DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11), 362 DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
375 INTC_GROUP(DTU0, DTU0_TEND, DTU0_AE, DTU0_TMISS),
376 INTC_GROUP(DTU1, DTU1_TEND, DTU1_AE, DTU1_TMISS),
377 INTC_GROUP(DTU2, DTU2_TEND, DTU2_AE, DTU2_TMISS),
378 INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
379}; 363};
380 364
365#define INT2DISTCR0 0xfe4108a0
366#define INT2DISTCR1 0xfe4108a4
367#define INT2DISTCR2 0xfe4108a8
368
381static struct intc_mask_reg mask_registers[] __initdata = { 369static struct intc_mask_reg mask_registers[] __initdata = {
382 { 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */ 370 { 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
383 { IRQ0, IRQ1, IRQ2, IRQ3 } }, 371 { IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -387,20 +375,23 @@ static struct intc_mask_reg mask_registers[] __initdata = {
387 { FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC, 375 { FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC,
388 DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0, 376 DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0,
389 0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */ 377 0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */
390 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, } }, 378 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, },
379 INTC_SMP_BALANCING(INT2DISTCR0) },
391 { 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */ 380 { 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */
392 { 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */ 381 { 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */
393 PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2, 382 PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2,
394 PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11, 383 PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11,
395 DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7, 384 DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7,
396 DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4, 385 DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4,
397 DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 } }, 386 DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 },
387 INTC_SMP_BALANCING(INT2DISTCR1) },
398 { 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */ 388 { 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */
399 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 389 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
400 SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI, 390 SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI,
401 SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI, 391 SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI,
402 SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI, 392 SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI,
403 SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI } }, 393 SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI },
394 INTC_SMP_BALANCING(INT2DISTCR2) },
404}; 395};
405 396
406static struct intc_prio_reg prio_registers[] __initdata = { 397static struct intc_prio_reg prio_registers[] __initdata = {
@@ -457,11 +448,33 @@ static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
457 448
458void __init plat_irq_setup_pins(int mode) 449void __init plat_irq_setup_pins(int mode)
459{ 450{
451 int ret = 0;
452
460 switch (mode) { 453 switch (mode) {
461 case IRQ_MODE_IRQ: 454 case IRQ_MODE_IRQ:
455 ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name);
456 ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name);
457 ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name);
458 ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name);
459
460 if (unlikely(ret)) {
461 pr_err("Failed to set IRQ mode\n");
462 return;
463 }
464
462 register_intc_controller(&intc_desc_irq); 465 register_intc_controller(&intc_desc_irq);
463 break; 466 break;
464 case IRQ_MODE_IRL3210: 467 case IRQ_MODE_IRL3210:
468 ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name);
469 ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name);
470 ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name);
471 ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name);
472
473 if (unlikely(ret)) {
474 pr_err("Failed to set IRL mode\n");
475 return;
476 }
477
465 register_intc_controller(&intc_desc_irl); 478 register_intc_controller(&intc_desc_irl);
466 break; 479 break;
467 default: 480 default:
@@ -471,6 +484,9 @@ void __init plat_irq_setup_pins(int mode)
471 484
472void __init plat_irq_setup(void) 485void __init plat_irq_setup(void)
473{ 486{
487 reserve_intc_vectors(vectors_irq, ARRAY_SIZE(vectors_irq));
488 reserve_intc_vectors(vectors_irl, ARRAY_SIZE(vectors_irl));
489
474 register_intc_controller(&intc_desc); 490 register_intc_controller(&intc_desc);
475} 491}
476 492
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 2b6b0d50c576..de865cac02ee 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SH-X3 SMP 2 * SH-X3 SMP
3 * 3 *
4 * Copyright (C) 2007 - 2008 Paul Mundt 4 * Copyright (C) 2007 - 2010 Paul Mundt
5 * Copyright (C) 2007 Magnus Damm 5 * Copyright (C) 2007 Magnus Damm
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,10 +9,23 @@
9 * for more details. 9 * for more details.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h>
12#include <linux/cpumask.h> 13#include <linux/cpumask.h>
13#include <linux/smp.h> 14#include <linux/smp.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/sched.h>
18#include <linux/delay.h>
19#include <linux/cpu.h>
20#include <asm/sections.h>
21
22#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
23#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
24
25#define STBCR_MSTP 0x00000001
26#define STBCR_RESET 0x00000002
27#define STBCR_SLEEP 0x00000004
28#define STBCR_LTSLP 0x80000000
16 29
17static irqreturn_t ipi_interrupt_handler(int irq, void *arg) 30static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
18{ 31{
@@ -21,22 +34,25 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
21 unsigned int offs = 4 * cpu; 34 unsigned int offs = 4 * cpu;
22 unsigned int x; 35 unsigned int x;
23 36
24 x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ 37 x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
25 x &= (1 << (message << 2)); 38 x &= (1 << (message << 2));
26 ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ 39 __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
27 40
28 smp_message_recv(message); 41 smp_message_recv(message);
29 42
30 return IRQ_HANDLED; 43 return IRQ_HANDLED;
31} 44}
32 45
33void __init plat_smp_setup(void) 46static void shx3_smp_setup(void)
34{ 47{
35 unsigned int cpu = 0; 48 unsigned int cpu = 0;
36 int i, num; 49 int i, num;
37 50
38 init_cpu_possible(cpumask_of(cpu)); 51 init_cpu_possible(cpumask_of(cpu));
39 52
53 /* Enable light sleep for the boot CPU */
54 __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
55
40 __cpu_number_map[0] = 0; 56 __cpu_number_map[0] = 0;
41 __cpu_logical_map[0] = 0; 57 __cpu_logical_map[0] = 0;
42 58
@@ -53,50 +69,98 @@ void __init plat_smp_setup(void)
53 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 69 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
54} 70}
55 71
56void __init plat_prepare_cpus(unsigned int max_cpus) 72static void shx3_prepare_cpus(unsigned int max_cpus)
57{ 73{
58 int i; 74 int i;
59 75
76 local_timer_setup(0);
77
60 BUILD_BUG_ON(SMP_MSG_NR >= 8); 78 BUILD_BUG_ON(SMP_MSG_NR >= 8);
61 79
62 for (i = 0; i < SMP_MSG_NR; i++) 80 for (i = 0; i < SMP_MSG_NR; i++)
63 request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED, 81 request_irq(104 + i, ipi_interrupt_handler,
64 "IPI", (void *)(long)i); 82 IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);
65}
66
67#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
68#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
69 83
70#define STBCR_MSTP 0x00000001 84 for (i = 0; i < max_cpus; i++)
71#define STBCR_RESET 0x00000002 85 set_cpu_present(i, true);
72#define STBCR_LTSLP 0x80000000 86}
73
74#define STBCR_AP_VAL (STBCR_RESET | STBCR_LTSLP)
75 87
76void plat_start_cpu(unsigned int cpu, unsigned long entry_point) 88static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
77{ 89{
78 ctrl_outl(entry_point, RESET_REG(cpu)); 90 if (__in_29bit_mode())
91 __raw_writel(entry_point, RESET_REG(cpu));
92 else
93 __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
79 94
80 if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) 95 if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
81 ctrl_outl(STBCR_MSTP, STBCR_REG(cpu)); 96 __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
82 97
83 while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) 98 while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
84 cpu_relax(); 99 cpu_relax();
85 100
86 /* Start up secondary processor by sending a reset */ 101 /* Start up secondary processor by sending a reset */
87 ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu)); 102 __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
88} 103}
89 104
90int plat_smp_processor_id(void) 105static unsigned int shx3_smp_processor_id(void)
91{ 106{
92 return ctrl_inl(0xff000048); /* CPIDR */ 107 return __raw_readl(0xff000048); /* CPIDR */
93} 108}
94 109
95void plat_send_ipi(unsigned int cpu, unsigned int message) 110static void shx3_send_ipi(unsigned int cpu, unsigned int message)
96{ 111{
97 unsigned long addr = 0xfe410070 + (cpu * 4); 112 unsigned long addr = 0xfe410070 + (cpu * 4);
98 113
99 BUG_ON(cpu >= 4); 114 BUG_ON(cpu >= 4);
100 115
101 ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ 116 __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
117}
118
119static void shx3_update_boot_vector(unsigned int cpu)
120{
121 __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
122 while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
123 cpu_relax();
124 __raw_writel(STBCR_RESET, STBCR_REG(cpu));
125}
126
127static int __cpuinit
128shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
129{
130 unsigned int cpu = (unsigned int)hcpu;
131
132 switch (action) {
133 case CPU_UP_PREPARE:
134 shx3_update_boot_vector(cpu);
135 break;
136 case CPU_ONLINE:
137 pr_info("CPU %u is now online\n", cpu);
138 break;
139 case CPU_DEAD:
140 break;
141 }
142
143 return NOTIFY_OK;
144}
145
146static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
147 .notifier_call = shx3_cpu_callback,
148};
149
150static int __cpuinit register_shx3_cpu_notifier(void)
151{
152 register_hotcpu_notifier(&shx3_cpu_notifier);
153 return 0;
102} 154}
155late_initcall(register_shx3_cpu_notifier);
156
157struct plat_smp_ops shx3_smp_ops = {
158 .smp_setup = shx3_smp_setup,
159 .prepare_cpus = shx3_prepare_cpus,
160 .start_cpu = shx3_start_cpu,
161 .smp_processor_id = shx3_smp_processor_id,
162 .send_ipi = shx3_send_ipi,
163 .cpu_die = native_cpu_die,
164 .cpu_disable = native_cpu_disable,
165 .play_dead = native_play_dead,
166};
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 000000000000..efb2745bcb36
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
1/*
2 * arch/sh/kernel/cpu/sh4a/ubc.c
3 *
4 * On-chip UBC support for SH-4A CPUs.
5 *
6 * Copyright (C) 2009 - 2010 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <asm/hw_breakpoint.h>
17
18#define UBC_CBR(idx) (0xff200000 + (0x20 * idx))
19#define UBC_CRR(idx) (0xff200004 + (0x20 * idx))
20#define UBC_CAR(idx) (0xff200008 + (0x20 * idx))
21#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx))
22
23#define UBC_CCMFR 0xff200600
24#define UBC_CBCR 0xff200620
25
26/* CRR */
27#define UBC_CRR_PCB (1 << 1)
28#define UBC_CRR_BIE (1 << 0)
29
30/* CBR */
31#define UBC_CBR_CE (1 << 0)
32
33static struct sh_ubc sh4a_ubc;
34
35static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
36{
37 __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
38 __raw_writel(info->address, UBC_CAR(idx));
39}
40
41static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
42{
43 __raw_writel(0, UBC_CBR(idx));
44 __raw_writel(0, UBC_CAR(idx));
45}
46
47static void sh4a_ubc_enable_all(unsigned long mask)
48{
49 int i;
50
51 for (i = 0; i < sh4a_ubc.num_events; i++)
52 if (mask & (1 << i))
53 __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
54 UBC_CBR(i));
55}
56
57static void sh4a_ubc_disable_all(void)
58{
59 int i;
60
61 for (i = 0; i < sh4a_ubc.num_events; i++)
62 __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
63 UBC_CBR(i));
64}
65
66static unsigned long sh4a_ubc_active_mask(void)
67{
68 unsigned long active = 0;
69 int i;
70
71 for (i = 0; i < sh4a_ubc.num_events; i++)
72 if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
73 active |= (1 << i);
74
75 return active;
76}
77
78static unsigned long sh4a_ubc_triggered_mask(void)
79{
80 return __raw_readl(UBC_CCMFR);
81}
82
83static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
84{
85 __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
86}
87
88static struct sh_ubc sh4a_ubc = {
89 .name = "SH-4A",
90 .num_events = 2,
91 .trap_nr = 0x1e0,
92 .enable = sh4a_ubc_enable,
93 .disable = sh4a_ubc_disable,
94 .enable_all = sh4a_ubc_enable_all,
95 .disable_all = sh4a_ubc_disable_all,
96 .active_mask = sh4a_ubc_active_mask,
97 .triggered_mask = sh4a_ubc_triggered_mask,
98 .clear_triggered_mask = sh4a_ubc_clear_triggered_mask,
99};
100
101static int __init sh4a_ubc_init(void)
102{
103 struct clk *ubc_iclk = clk_get(NULL, "ubc0");
104 int i;
105
106 /*
107 * The UBC MSTP bit is optional, as not all platforms will have
108 * it. Just ignore it if we can't find it.
109 */
110 if (IS_ERR(ubc_iclk))
111 ubc_iclk = NULL;
112
113 clk_enable(ubc_iclk);
114
115 __raw_writel(0, UBC_CBCR);
116
117 for (i = 0; i < sh4a_ubc.num_events; i++) {
118 __raw_writel(0, UBC_CAMR(i));
119 __raw_writel(0, UBC_CBR(i));
120
121 __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
122
123 /* dummy read for write posting */
124 (void)__raw_readl(UBC_CRR(i));
125 }
126
127 clk_disable(ubc_iclk);
128
129 sh4a_ubc.clk = ubc_iclk;
130
131 return register_sh_ubc(&sh4a_ubc);
132}
133arch_initcall(sh4a_ubc_init);
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
index 7f864ebc51d3..9cfc19b8dbe4 100644
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -24,7 +24,7 @@ static unsigned long cprc_base;
24 24
25static void master_clk_init(struct clk *clk) 25static void master_clk_init(struct clk *clk)
26{ 26{
27 int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007; 27 int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
28 clk->rate *= ifc_table[idx]; 28 clk->rate *= ifc_table[idx];
29} 29}
30 30
@@ -34,7 +34,7 @@ static struct clk_ops sh5_master_clk_ops = {
34 34
35static unsigned long module_clk_recalc(struct clk *clk) 35static unsigned long module_clk_recalc(struct clk *clk)
36{ 36{
37 int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007; 37 int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
38 return clk->parent->rate / ifc_table[idx]; 38 return clk->parent->rate / ifc_table[idx];
39} 39}
40 40
@@ -44,7 +44,7 @@ static struct clk_ops sh5_module_clk_ops = {
44 44
45static unsigned long bus_clk_recalc(struct clk *clk) 45static unsigned long bus_clk_recalc(struct clk *clk)
46{ 46{
47 int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007; 47 int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
48 return clk->parent->rate / ifc_table[idx]; 48 return clk->parent->rate / ifc_table[idx];
49} 49}
50 50
@@ -54,7 +54,7 @@ static struct clk_ops sh5_bus_clk_ops = {
54 54
55static unsigned long cpu_clk_recalc(struct clk *clk) 55static unsigned long cpu_clk_recalc(struct clk *clk)
56{ 56{
57 int idx = (ctrl_inw(cprc_base) & 0x0007); 57 int idx = (__raw_readw(cprc_base) & 0x0007);
58 return clk->parent->rate / ifc_table[idx]; 58 return clk->parent->rate / ifc_table[idx];
59} 59}
60 60
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b0aacf675258..6b80295dd7a4 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -187,7 +187,7 @@ trap_jtable:
187 .rept 6 187 .rept 6
188 .long do_exception_error /* 0x880 - 0x920 */ 188 .long do_exception_error /* 0x880 - 0x920 */
189 .endr 189 .endr
190 .long do_software_break_point /* 0x940 */ 190 .long breakpoint_trap_handler /* 0x940 */
191 .long do_exception_error /* 0x960 */ 191 .long do_exception_error /* 0x960 */
192 .long do_single_step /* 0x980 */ 192 .long do_single_step /* 0x980 */
193 193
@@ -933,7 +933,7 @@ ret_with_reschedule:
933 933
934 pta restore_all, tr1 934 pta restore_all, tr1
935 935
936 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8 936 movi _TIF_SIGPENDING, r8
937 and r8, r7, r8 937 and r8, r7, r8
938 pta work_notifysig, tr0 938 pta work_notifysig, tr0
939 bne r8, ZERO, tr0 939 bne r8, ZERO, tr0
@@ -1124,7 +1124,7 @@ fpu_error_or_IRQA:
1124 pta its_IRQ, tr0 1124 pta its_IRQ, tr0
1125 beqi/l r4, EVENT_INTERRUPT, tr0 1125 beqi/l r4, EVENT_INTERRUPT, tr0
1126#ifdef CONFIG_SH_FPU 1126#ifdef CONFIG_SH_FPU
1127 movi do_fpu_state_restore, r6 1127 movi fpu_state_restore_trap_handler, r6
1128#else 1128#else
1129 movi do_exception_error, r6 1129 movi do_exception_error, r6
1130#endif 1130#endif
@@ -1135,7 +1135,7 @@ fpu_error_or_IRQB:
1135 pta its_IRQ, tr0 1135 pta its_IRQ, tr0
1136 beqi/l r4, EVENT_INTERRUPT, tr0 1136 beqi/l r4, EVENT_INTERRUPT, tr0
1137#ifdef CONFIG_SH_FPU 1137#ifdef CONFIG_SH_FPU
1138 movi do_fpu_state_restore, r6 1138 movi fpu_state_restore_trap_handler, r6
1139#else 1139#else
1140 movi do_exception_error, r6 1140 movi do_exception_error, r6
1141#endif 1141#endif
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index dd4f51ffb50e..4b3bb35e99f3 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -15,26 +15,8 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/signal.h> 16#include <linux/signal.h>
17#include <asm/processor.h> 17#include <asm/processor.h>
18#include <asm/user.h>
19#include <asm/io.h>
20#include <asm/fpu.h>
21 18
22/* 19void save_fpu(struct task_struct *tsk)
23 * Initially load the FPU with signalling NANS. This bit pattern
24 * has the property that no matter whether considered as single or as
25 * double precision, it still represents a signalling NAN.
26 */
27#define sNAN64 0xFFFFFFFFFFFFFFFFULL
28#define sNAN32 0xFFFFFFFFUL
29
30static union sh_fpu_union init_fpuregs = {
31 .hard = {
32 .fp_regs = { [0 ... 63] = sNAN32 },
33 .fpscr = FPSCR_INIT
34 }
35};
36
37void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
38{ 20{
39 asm volatile("fst.p %0, (0*8), fp0\n\t" 21 asm volatile("fst.p %0, (0*8), fp0\n\t"
40 "fst.p %0, (1*8), fp2\n\t" 22 "fst.p %0, (1*8), fp2\n\t"
@@ -72,12 +54,11 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
72 "fgetscr fr63\n\t" 54 "fgetscr fr63\n\t"
73 "fst.s %0, (32*8), fr63\n\t" 55 "fst.s %0, (32*8), fr63\n\t"
74 : /* no output */ 56 : /* no output */
75 : "r" (&tsk->thread.fpu.hard) 57 : "r" (&tsk->thread.xstate->hardfpu)
76 : "memory"); 58 : "memory");
77} 59}
78 60
79static inline void 61void restore_fpu(struct task_struct *tsk)
80fpload(struct sh_fpu_hard_struct *fpregs)
81{ 62{
82 asm volatile("fld.p %0, (0*8), fp0\n\t" 63 asm volatile("fld.p %0, (0*8), fp0\n\t"
83 "fld.p %0, (1*8), fp2\n\t" 64 "fld.p %0, (1*8), fp2\n\t"
@@ -116,16 +97,11 @@ fpload(struct sh_fpu_hard_struct *fpregs)
116 97
117 "fld.p %0, (31*8), fp62\n\t" 98 "fld.p %0, (31*8), fp62\n\t"
118 : /* no output */ 99 : /* no output */
119 : "r" (fpregs) ); 100 : "r" (&tsk->thread.xstate->hardfpu)
120} 101 : "memory");
121
122void fpinit(struct sh_fpu_hard_struct *fpregs)
123{
124 *fpregs = init_fpuregs.hard;
125} 102}
126 103
127asmlinkage void 104asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
128do_fpu_error(unsigned long ex, struct pt_regs *regs)
129{ 105{
130 struct task_struct *tsk = current; 106 struct task_struct *tsk = current;
131 107
@@ -133,35 +109,6 @@ do_fpu_error(unsigned long ex, struct pt_regs *regs)
133 109
134 tsk->thread.trap_no = 11; 110 tsk->thread.trap_no = 11;
135 tsk->thread.error_code = 0; 111 tsk->thread.error_code = 0;
136 force_sig(SIGFPE, tsk);
137}
138
139
140asmlinkage void
141do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
142{
143 void die(const char *str, struct pt_regs *regs, long err);
144
145 if (! user_mode(regs))
146 die("FPU used in kernel", regs, ex);
147 112
148 regs->sr &= ~SR_FD; 113 force_sig(SIGFPE, tsk);
149
150 if (last_task_used_math == current)
151 return;
152
153 enable_fpu();
154 if (last_task_used_math != NULL)
155 /* Other processes fpu state, save away */
156 save_fpu(last_task_used_math, regs);
157
158 last_task_used_math = current;
159 if (used_math()) {
160 fpload(&current->thread.fpu.hard);
161 } else {
162 /* First time FPU user. */
163 fpload(&init_fpuregs.hard);
164 set_used_math();
165 }
166 disable_fpu();
167} 114}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 92ad844b5c12..9e882409e4e9 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/tlb.h> 18#include <asm/tlb.h>
19 19
20int __init detect_cpu_and_cache_system(void) 20void __cpuinit cpu_probe(void)
21{ 21{
22 unsigned long long cir; 22 unsigned long long cir;
23 23
@@ -34,6 +34,8 @@ int __init detect_cpu_and_cache_system(void)
34 /* CPU.VCR aliased at CIR address on SH5-101 */ 34 /* CPU.VCR aliased at CIR address on SH5-101 */
35 boot_cpu_data.type = CPU_SH5_101; 35 boot_cpu_data.type = CPU_SH5_101;
36 36
37 boot_cpu_data.family = CPU_FAMILY_SH5;
38
37 /* 39 /*
38 * First, setup some sane values for the I-cache. 40 * First, setup some sane values for the I-cache.
39 */ 41 */
@@ -70,6 +72,4 @@ int __init detect_cpu_and_cache_system(void)
70 72
71 /* Setup some I/D TLB defaults */ 73 /* Setup some I/D TLB defaults */
72 sh64_tlb_init(); 74 sh64_tlb_init();
73
74 return 0;
75} 75}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index 72aa86ec7446..18419f1de963 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -16,24 +16,20 @@
16#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
17#include <asm/addrspace.h> 17#include <asm/addrspace.h>
18 18
19static struct plat_sci_port sci_platform_data[] = { 19static struct plat_sci_port scif0_platform_data = {
20 { 20 .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
21 .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000, 21 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
22 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
23 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 23 .scbrr_algo_id = SCBRR_ALGO_2,
24 .scbrr_algo_id = SCBRR_ALGO_2, 24 .type = PORT_SCIF,
25 .type = PORT_SCIF, 25 .irqs = { 39, 40, 42, 0 },
26 .irqs = { 39, 40, 42, 0 },
27 }, {
28 .flags = 0,
29 }
30}; 26};
31 27
32static struct platform_device sci_device = { 28static struct platform_device scif0_device = {
33 .name = "sh-sci", 29 .name = "sh-sci",
34 .id = -1, 30 .id = 0,
35 .dev = { 31 .dev = {
36 .platform_data = sci_platform_data, 32 .platform_data = &scif0_platform_data,
37 }, 33 },
38}; 34};
39 35
@@ -74,16 +70,13 @@ static struct platform_device rtc_device = {
74#define TMU2_BASE (TMU_BASE + 0x8 + (0xc * 0x2)) 70#define TMU2_BASE (TMU_BASE + 0x8 + (0xc * 0x2))
75 71
76static struct sh_timer_config tmu0_platform_data = { 72static struct sh_timer_config tmu0_platform_data = {
77 .name = "TMU0",
78 .channel_offset = 0x04, 73 .channel_offset = 0x04,
79 .timer_bit = 0, 74 .timer_bit = 0,
80 .clk = "peripheral_clk",
81 .clockevent_rating = 200, 75 .clockevent_rating = 200,
82}; 76};
83 77
84static struct resource tmu0_resources[] = { 78static struct resource tmu0_resources[] = {
85 [0] = { 79 [0] = {
86 .name = "TMU0",
87 .start = TMU0_BASE, 80 .start = TMU0_BASE,
88 .end = TMU0_BASE + 0xc - 1, 81 .end = TMU0_BASE + 0xc - 1,
89 .flags = IORESOURCE_MEM, 82 .flags = IORESOURCE_MEM,
@@ -105,16 +98,13 @@ static struct platform_device tmu0_device = {
105}; 98};
106 99
107static struct sh_timer_config tmu1_platform_data = { 100static struct sh_timer_config tmu1_platform_data = {
108 .name = "TMU1",
109 .channel_offset = 0x10, 101 .channel_offset = 0x10,
110 .timer_bit = 1, 102 .timer_bit = 1,
111 .clk = "peripheral_clk",
112 .clocksource_rating = 200, 103 .clocksource_rating = 200,
113}; 104};
114 105
115static struct resource tmu1_resources[] = { 106static struct resource tmu1_resources[] = {
116 [0] = { 107 [0] = {
117 .name = "TMU1",
118 .start = TMU1_BASE, 108 .start = TMU1_BASE,
119 .end = TMU1_BASE + 0xc - 1, 109 .end = TMU1_BASE + 0xc - 1,
120 .flags = IORESOURCE_MEM, 110 .flags = IORESOURCE_MEM,
@@ -136,15 +126,12 @@ static struct platform_device tmu1_device = {
136}; 126};
137 127
138static struct sh_timer_config tmu2_platform_data = { 128static struct sh_timer_config tmu2_platform_data = {
139 .name = "TMU2",
140 .channel_offset = 0x1c, 129 .channel_offset = 0x1c,
141 .timer_bit = 2, 130 .timer_bit = 2,
142 .clk = "peripheral_clk",
143}; 131};
144 132
145static struct resource tmu2_resources[] = { 133static struct resource tmu2_resources[] = {
146 [0] = { 134 [0] = {
147 .name = "TMU2",
148 .start = TMU2_BASE, 135 .start = TMU2_BASE,
149 .end = TMU2_BASE + 0xc - 1, 136 .end = TMU2_BASE + 0xc - 1,
150 .flags = IORESOURCE_MEM, 137 .flags = IORESOURCE_MEM,
@@ -166,13 +153,13 @@ static struct platform_device tmu2_device = {
166}; 153};
167 154
168static struct platform_device *sh5_early_devices[] __initdata = { 155static struct platform_device *sh5_early_devices[] __initdata = {
156 &scif0_device,
169 &tmu0_device, 157 &tmu0_device,
170 &tmu1_device, 158 &tmu1_device,
171 &tmu2_device, 159 &tmu2_device,
172}; 160};
173 161
174static struct platform_device *sh5_devices[] __initdata = { 162static struct platform_device *sh5_devices[] __initdata = {
175 &sci_device,
176 &rtc_device, 163 &rtc_device,
177}; 164};
178 165
@@ -188,7 +175,7 @@ static int __init sh5_devices_setup(void)
188 return platform_add_devices(sh5_devices, 175 return platform_add_devices(sh5_devices,
189 ARRAY_SIZE(sh5_devices)); 176 ARRAY_SIZE(sh5_devices));
190} 177}
191__initcall(sh5_devices_setup); 178arch_initcall(sh5_devices_setup);
192 179
193void __init plat_early_device_setup(void) 180void __init plat_early_device_setup(void)
194{ 181{
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
index 08bfa7c7db29..a39f88ea1a85 100644
--- a/arch/sh/kernel/cpu/shmobile/Makefile
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -4,3 +4,5 @@
4 4
5# Power Management & Sleep mode 5# Power Management & Sleep mode
6obj-$(CONFIG_PM) += pm.o sleep.o 6obj-$(CONFIG_PM) += pm.o sleep.o
7obj-$(CONFIG_CPU_IDLE) += cpuidle.o
8obj-$(CONFIG_PM_RUNTIME) += pm_runtime.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 000000000000..83972aa319c2
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,119 @@
1/*
2 * arch/sh/kernel/cpu/shmobile/cpuidle.c
3 *
4 * Cpuidle support code for SuperH Mobile
5 *
6 * Copyright (C) 2009 Magnus Damm
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/io.h>
15#include <linux/suspend.h>
16#include <linux/cpuidle.h>
17#include <asm/suspend.h>
18#include <asm/uaccess.h>
19#include <asm/hwblk.h>
20
21static unsigned long cpuidle_mode[] = {
22 SUSP_SH_SLEEP, /* regular sleep mode */
23 SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
24 SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
25};
26
27static int cpuidle_sleep_enter(struct cpuidle_device *dev,
28 struct cpuidle_state *state)
29{
30 unsigned long allowed_mode = arch_hwblk_sleep_mode();
31 ktime_t before, after;
32 int requested_state = state - &dev->states[0];
33 int allowed_state;
34 int k;
35
36 /* convert allowed mode to allowed state */
37 for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
38 if (cpuidle_mode[k] == allowed_mode)
39 break;
40
41 allowed_state = k;
42
43 /* take the following into account for sleep mode selection:
44 * - allowed_state: best mode allowed by hardware (clock deps)
45 * - requested_state: best mode allowed by software (latencies)
46 */
47 k = min_t(int, allowed_state, requested_state);
48
49 dev->last_state = &dev->states[k];
50 before = ktime_get();
51 sh_mobile_call_standby(cpuidle_mode[k]);
52 after = ktime_get();
53 return ktime_to_ns(ktime_sub(after, before)) >> 10;
54}
55
56static struct cpuidle_device cpuidle_dev;
57static struct cpuidle_driver cpuidle_driver = {
58 .name = "sh_idle",
59 .owner = THIS_MODULE,
60};
61
62void sh_mobile_setup_cpuidle(void)
63{
64 struct cpuidle_device *dev = &cpuidle_dev;
65 struct cpuidle_state *state;
66 int i;
67
68 cpuidle_register_driver(&cpuidle_driver);
69
70 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
71 dev->states[i].name[0] = '\0';
72 dev->states[i].desc[0] = '\0';
73 }
74
75 i = CPUIDLE_DRIVER_STATE_START;
76
77 state = &dev->states[i++];
78 snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
79 strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
80 state->exit_latency = 1;
81 state->target_residency = 1 * 2;
82 state->power_usage = 3;
83 state->flags = 0;
84 state->flags |= CPUIDLE_FLAG_SHALLOW;
85 state->flags |= CPUIDLE_FLAG_TIME_VALID;
86 state->enter = cpuidle_sleep_enter;
87
88 dev->safe_state = state;
89
90 if (sh_mobile_sleep_supported & SUSP_SH_SF) {
91 state = &dev->states[i++];
92 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
93 strncpy(state->desc, "SuperH Sleep Mode [SF]",
94 CPUIDLE_DESC_LEN);
95 state->exit_latency = 100;
96 state->target_residency = 1 * 2;
97 state->power_usage = 1;
98 state->flags = 0;
99 state->flags |= CPUIDLE_FLAG_TIME_VALID;
100 state->enter = cpuidle_sleep_enter;
101 }
102
103 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
104 state = &dev->states[i++];
105 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
106 strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
107 CPUIDLE_DESC_LEN);
108 state->exit_latency = 2300;
109 state->target_residency = 1 * 2;
110 state->power_usage = 1;
111 state->flags = 0;
112 state->flags |= CPUIDLE_FLAG_TIME_VALID;
113 state->enter = cpuidle_sleep_enter;
114 }
115
116 dev->state_count = i;
117
118 cpuidle_register_device(dev);
119}
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 8c067adf6830..e55968712706 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c 2 * arch/sh/kernel/cpu/shmobile/pm.c
3 * 3 *
4 * Power management support code for SuperH Mobile 4 * Power management support code for SuperH Mobile
5 * 5 *
@@ -15,6 +15,13 @@
15#include <linux/suspend.h> 15#include <linux/suspend.h>
16#include <asm/suspend.h> 16#include <asm/suspend.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include <asm/cacheflush.h>
19
20/*
21 * Notifier lists for pre/post sleep notification
22 */
23ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
24ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
18 25
19/* 26/*
20 * Sleep modes available on SuperH Mobile: 27 * Sleep modes available on SuperH Mobile:
@@ -26,50 +33,106 @@
26#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) 33#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
27#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) 34#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
28#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) 35#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
36#define SUSP_MODE_RSTANDBY_SF \
37 (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
38 /*
39 * U-standby mode is unsupported since it needs bootloader hacks
40 */
29 41
30/* 42#ifdef CONFIG_CPU_SUBTYPE_SH7724
31 * The following modes are not there yet: 43#define RAM_BASE 0xfd800000 /* RSMEM */
32 * 44#else
33 * R-standby mode is unsupported, but will be added in the future 45#define RAM_BASE 0xe5200000 /* ILRAM */
34 * U-standby mode is low priority since it needs bootloader hacks 46#endif
35 *
36 * All modes should be tied in with cpuidle. But before that can
37 * happen we need to keep track of enabled hardware blocks so we
38 * can avoid entering sleep modes that stop clocks to hardware
39 * blocks that are in use even though the cpu core is idle.
40 */
41
42extern const unsigned char sh_mobile_standby[];
43extern const unsigned int sh_mobile_standby_size;
44 47
45static void sh_mobile_call_standby(unsigned long mode) 48void sh_mobile_call_standby(unsigned long mode)
46{ 49{
47 extern void *vbr_base; 50 void *onchip_mem = (void *)RAM_BASE;
48 void *onchip_mem = (void *)0xe5200000; /* ILRAM */ 51 struct sh_sleep_data *sdp = onchip_mem;
49 void (*standby_onchip_mem)(unsigned long) = onchip_mem; 52 void (*standby_onchip_mem)(unsigned long, unsigned long);
50 53
51 /* Note: Wake up from sleep may generate exceptions! 54 /* code located directly after data structure */
52 * Setup VBR to point to on-chip ram if self-refresh is 55 standby_onchip_mem = (void *)(sdp + 1);
53 * going to be used. 56
54 */ 57 atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
55 if (mode & SUSP_SH_SF) 58 mode, NULL);
56 asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); 59
57 60 /* flush the caches if MMU flag is set */
 58 /* Copy the assembly snippet to the otherwise unused ILRAM */ 61 if (mode & SUSP_SH_MMU)
59 memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); 62 flush_cache_all();
60 wmb();
61 ctrl_barrier();
62 63
63 /* Let assembly snippet in on-chip memory handle the rest */ 64 /* Let assembly snippet in on-chip memory handle the rest */
64 standby_onchip_mem(mode); 65 standby_onchip_mem(mode, RAM_BASE);
66
67 atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
68 mode, NULL);
69}
70
71extern char sh_mobile_sleep_enter_start;
72extern char sh_mobile_sleep_enter_end;
73
74extern char sh_mobile_sleep_resume_start;
75extern char sh_mobile_sleep_resume_end;
76
77unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
78
79void sh_mobile_register_self_refresh(unsigned long flags,
80 void *pre_start, void *pre_end,
81 void *post_start, void *post_end)
82{
83 void *onchip_mem = (void *)RAM_BASE;
84 void *vp;
85 struct sh_sleep_data *sdp;
86 int n;
65 87
66 /* Put VBR back in System RAM again */ 88 /* part 0: data area */
67 if (mode & SUSP_SH_SF) 89 sdp = onchip_mem;
68 asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); 90 sdp->addr.stbcr = 0xa4150020; /* STBCR */
91 sdp->addr.bar = 0xa4150040; /* BAR */
92 sdp->addr.pteh = 0xff000000; /* PTEH */
93 sdp->addr.ptel = 0xff000004; /* PTEL */
94 sdp->addr.ttb = 0xff000008; /* TTB */
95 sdp->addr.tea = 0xff00000c; /* TEA */
96 sdp->addr.mmucr = 0xff000010; /* MMUCR */
97 sdp->addr.ptea = 0xff000034; /* PTEA */
98 sdp->addr.pascr = 0xff000070; /* PASCR */
99 sdp->addr.irmcr = 0xff000078; /* IRMCR */
100 sdp->addr.ccr = 0xff00001c; /* CCR */
101 sdp->addr.ramcr = 0xff000074; /* RAMCR */
102 vp = sdp + 1;
103
104 /* part 1: common code to enter sleep mode */
105 n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
106 memcpy(vp, &sh_mobile_sleep_enter_start, n);
107 vp += roundup(n, 4);
108
109 /* part 2: board specific code to enter self-refresh mode */
110 n = pre_end - pre_start;
111 memcpy(vp, pre_start, n);
112 sdp->sf_pre = (unsigned long)vp;
113 vp += roundup(n, 4);
114
115 /* part 3: board specific code to resume from self-refresh mode */
116 n = post_end - post_start;
117 memcpy(vp, post_start, n);
118 sdp->sf_post = (unsigned long)vp;
119 vp += roundup(n, 4);
120
121 /* part 4: common code to resume from sleep mode */
122 WARN_ON(vp > (onchip_mem + 0x600));
123 vp = onchip_mem + 0x600; /* located at interrupt vector */
124 n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
125 memcpy(vp, &sh_mobile_sleep_resume_start, n);
126 sdp->resume = (unsigned long)vp;
127
128 sh_mobile_sleep_supported |= flags;
69} 129}
70 130
71static int sh_pm_enter(suspend_state_t state) 131static int sh_pm_enter(suspend_state_t state)
72{ 132{
133 if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
134 return -ENXIO;
135
73 local_irq_disable(); 136 local_irq_disable();
74 set_bl_bit(); 137 set_bl_bit();
75 sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); 138 sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
@@ -86,6 +149,7 @@ static struct platform_suspend_ops sh_pm_ops = {
86static int __init sh_pm_init(void) 149static int __init sh_pm_init(void)
87{ 150{
88 suspend_set_ops(&sh_pm_ops); 151 suspend_set_ops(&sh_pm_ops);
152 sh_mobile_setup_cpuidle();
89 return 0; 153 return 0;
90} 154}
91 155
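The new pre/post sleep notifier chains give board and driver code a hook around the standby transition. A hedged sketch of how a client could subscribe; the callback and variable names are hypothetical, the extern declaration of the chain head is assumed to live in asm/suspend.h, and atomic_notifier_chain_register() is the standard kernel API:

	#include <linux/notifier.h>
	#include <asm/suspend.h>

	static int my_pre_sleep(struct notifier_block *nb,
				unsigned long mode, void *unused)
	{
		/* e.g. quiesce an external device before standby is entered */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_pre_sleep_nb = {
		.notifier_call = my_pre_sleep,
	};

	/* from board init code */
	atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
				       &my_pre_sleep_nb);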
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
new file mode 100644
index 000000000000..6dcb8166a64d
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -0,0 +1,308 @@
1/*
2 * arch/sh/kernel/cpu/shmobile/pm_runtime.c
3 *
4 * Runtime PM support code for SuperH Mobile
5 *
6 * Copyright (C) 2009 Magnus Damm
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/io.h>
15#include <linux/pm_runtime.h>
16#include <linux/platform_device.h>
17#include <linux/mutex.h>
18#include <asm/hwblk.h>
19
20static DEFINE_SPINLOCK(hwblk_lock);
21static LIST_HEAD(hwblk_idle_list);
22static struct work_struct hwblk_work;
23
24extern struct hwblk_info *hwblk_info;
25
26static void platform_pm_runtime_not_idle(struct platform_device *pdev)
27{
28 unsigned long flags;
29
30 /* remove device from idle list */
31 spin_lock_irqsave(&hwblk_lock, flags);
32 if (test_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags)) {
33 list_del(&pdev->archdata.entry);
34 __clear_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
35 }
36 spin_unlock_irqrestore(&hwblk_lock, flags);
37}
38
39static int __platform_pm_runtime_resume(struct platform_device *pdev)
40{
41 struct device *d = &pdev->dev;
42 struct pdev_archdata *ad = &pdev->archdata;
43 int hwblk = ad->hwblk_id;
44 int ret = -ENOSYS;
45
46 dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk);
47
48 if (d->driver) {
49 hwblk_enable(hwblk_info, hwblk);
50 ret = 0;
51
52 if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) {
53 if (d->driver->pm && d->driver->pm->runtime_resume)
54 ret = d->driver->pm->runtime_resume(d);
55
56 if (!ret)
57 clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
58 else
59 hwblk_disable(hwblk_info, hwblk);
60 }
61 }
62
63 dev_dbg(d, "__platform_pm_runtime_resume() [%d] - returns %d\n",
64 hwblk, ret);
65
66 return ret;
67}
68
69static int __platform_pm_runtime_suspend(struct platform_device *pdev)
70{
71 struct device *d = &pdev->dev;
72 struct pdev_archdata *ad = &pdev->archdata;
73 int hwblk = ad->hwblk_id;
74 int ret = -ENOSYS;
75
76 dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk);
77
78 if (d->driver) {
79 BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags));
80 ret = 0;
81
82 if (d->driver->pm && d->driver->pm->runtime_suspend) {
83 hwblk_enable(hwblk_info, hwblk);
84 ret = d->driver->pm->runtime_suspend(d);
85 hwblk_disable(hwblk_info, hwblk);
86 }
87
88 if (!ret) {
89 set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
90 platform_pm_runtime_not_idle(pdev);
91 hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
92 }
93 }
94
95 dev_dbg(d, "__platform_pm_runtime_suspend() [%d] - returns %d\n",
96 hwblk, ret);
97
98 return ret;
99}
100
101static void platform_pm_runtime_work(struct work_struct *work)
102{
103 struct platform_device *pdev;
104 unsigned long flags;
105 int ret;
106
107 /* go through the idle list and suspend one device at a time */
108 do {
109 spin_lock_irqsave(&hwblk_lock, flags);
110 if (list_empty(&hwblk_idle_list))
111 pdev = NULL;
112 else
113 pdev = list_first_entry(&hwblk_idle_list,
114 struct platform_device,
115 archdata.entry);
116 spin_unlock_irqrestore(&hwblk_lock, flags);
117
118 if (pdev) {
119 mutex_lock(&pdev->archdata.mutex);
120 ret = __platform_pm_runtime_suspend(pdev);
121
122 /* at this point the platform device may be:
123 * suspended: ret = 0, FLAG_SUSP set, clock stopped
124 * failed: ret < 0, FLAG_IDLE set, clock stopped
125 */
126 mutex_unlock(&pdev->archdata.mutex);
127 } else {
128 ret = -ENODEV;
129 }
130 } while (!ret);
131}
132
133/* this function gets called from cpuidle context when all devices in the
 134 * main power domain are unused but some are counted as idle, i.e. the hwblk
135 * counter values are (HWBLK_CNT_USAGE == 0) && (HWBLK_CNT_IDLE != 0)
136 */
137void platform_pm_runtime_suspend_idle(void)
138{
139 queue_work(pm_wq, &hwblk_work);
140}
141
142int platform_pm_runtime_suspend(struct device *dev)
143{
144 struct platform_device *pdev = to_platform_device(dev);
145 struct pdev_archdata *ad = &pdev->archdata;
146 unsigned long flags;
147 int hwblk = ad->hwblk_id;
148 int ret = 0;
149
150 dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk);
151
152 /* ignore off-chip platform devices */
153 if (!hwblk)
154 goto out;
155
156 /* interrupt context not allowed */
157 might_sleep();
158
159 /* catch misconfigured drivers not starting with resume */
160 if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) {
161 ret = -EINVAL;
162 goto out;
163 }
164
165 /* serialize */
166 mutex_lock(&ad->mutex);
167
168 /* disable clock */
169 hwblk_disable(hwblk_info, hwblk);
170
171 /* put device on idle list */
172 spin_lock_irqsave(&hwblk_lock, flags);
173 list_add_tail(&pdev->archdata.entry, &hwblk_idle_list);
174 __set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
175 spin_unlock_irqrestore(&hwblk_lock, flags);
176
177 /* increase idle count */
178 hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_IDLE);
179
180 /* at this point the platform device is:
181 * idle: ret = 0, FLAG_IDLE set, clock stopped
182 */
183 mutex_unlock(&ad->mutex);
184
185out:
186 dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n",
187 hwblk, ret);
188
189 return ret;
190}
191
192int platform_pm_runtime_resume(struct device *dev)
193{
194 struct platform_device *pdev = to_platform_device(dev);
195 struct pdev_archdata *ad = &pdev->archdata;
196 int hwblk = ad->hwblk_id;
197 int ret = 0;
198
199 dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk);
200
201 /* ignore off-chip platform devices */
202 if (!hwblk)
203 goto out;
204
205 /* interrupt context not allowed */
206 might_sleep();
207
208 /* serialize */
209 mutex_lock(&ad->mutex);
210
211 /* make sure device is removed from idle list */
212 platform_pm_runtime_not_idle(pdev);
213
214 /* decrease idle count */
215 if (!test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags) &&
216 !test_bit(PDEV_ARCHDATA_FLAG_SUSP, &pdev->archdata.flags))
217 hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
218
219 /* resume the device if needed */
220 ret = __platform_pm_runtime_resume(pdev);
221
222 /* the driver has been initialized now, so clear the init flag */
223 clear_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
224
225 /* at this point the platform device may be:
226 * resumed: ret = 0, flags = 0, clock started
227 * failed: ret < 0, FLAG_SUSP set, clock stopped
228 */
229 mutex_unlock(&ad->mutex);
230out:
231 dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n",
232 hwblk, ret);
233
234 return ret;
235}
236
237int platform_pm_runtime_idle(struct device *dev)
238{
239 struct platform_device *pdev = to_platform_device(dev);
240 int hwblk = pdev->archdata.hwblk_id;
241 int ret = 0;
242
243 dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk);
244
245 /* ignore off-chip platform devices */
246 if (!hwblk)
247 goto out;
248
249 /* interrupt context not allowed, use pm_runtime_put()! */
250 might_sleep();
251
252 /* suspend synchronously to disable clocks immediately */
253 ret = pm_runtime_suspend(dev);
254out:
255 dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk);
256 return ret;
257}
258
259static int platform_bus_notify(struct notifier_block *nb,
260 unsigned long action, void *data)
261{
262 struct device *dev = data;
263 struct platform_device *pdev = to_platform_device(dev);
264 int hwblk = pdev->archdata.hwblk_id;
265
266 /* ignore off-chip platform devices */
267 if (!hwblk)
268 return 0;
269
270 switch (action) {
271 case BUS_NOTIFY_ADD_DEVICE:
272 INIT_LIST_HEAD(&pdev->archdata.entry);
273 mutex_init(&pdev->archdata.mutex);
274 /* platform devices without drivers should be disabled */
275 hwblk_enable(hwblk_info, hwblk);
276 hwblk_disable(hwblk_info, hwblk);
277 /* make sure driver re-inits itself once */
278 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
279 break;
280 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
281 case BUS_NOTIFY_BOUND_DRIVER:
282 /* keep track of number of devices in use per hwblk */
283 hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
284 break;
285 case BUS_NOTIFY_UNBOUND_DRIVER:
286 /* keep track of number of devices in use per hwblk */
287 hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
288 /* make sure driver re-inits itself once */
289 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
290 break;
291 case BUS_NOTIFY_DEL_DEVICE:
292 break;
293 }
294 return 0;
295}
296
297static struct notifier_block platform_bus_notifier = {
298 .notifier_call = platform_bus_notify
299};
300
301static int __init sh_pm_runtime_init(void)
302{
303 INIT_WORK(&hwblk_work, platform_pm_runtime_work);
304
305 bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
306 return 0;
307}
308core_initcall(sh_pm_runtime_init);
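With this glue in place, an on-chip platform driver only uses the generic runtime PM calls; the bus notifier marks the device with FLAG_INIT at registration, so the driver has to resume once before it is allowed to suspend, and the hooks above gate the hwblk clock behind the scenes. A rough driver-side sketch (names hypothetical, error handling omitted):

	#include <linux/pm_runtime.h>

	static int my_drv_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);

		/* first resume clears FLAG_INIT and enables the hwblk clock */
		pm_runtime_get_sync(&pdev->dev);

		/* ... program the hardware ... */

		/* allow the clock to be gated again once the device idles */
		pm_runtime_put_sync(&pdev->dev);
		return 0;
	}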
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index 5d888ef53d82..e6aac65f5750 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -16,35 +16,147 @@
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/suspend.h> 17#include <asm/suspend.h>
18 18
19/* manage self-refresh and enter standby mode. 19/*
20 * Kernel mode register usage, see entry.S:
21 * k0 scratch
22 * k1 scratch
23 */
24#define k0 r0
25#define k1 r1
26
27/* manage self-refresh and enter standby mode. must be self-contained.
20 * this code will be copied to on-chip memory and executed from there. 28 * this code will be copied to on-chip memory and executed from there.
21 */ 29 */
30 .balign 4
31ENTRY(sh_mobile_sleep_enter_start)
32
33 /* save mode flags */
34 mov.l r4, @(SH_SLEEP_MODE, r5)
35
36 /* save original vbr */
37 stc vbr, r0
38 mov.l r0, @(SH_SLEEP_VBR, r5)
39
40 /* point vbr to our on-chip memory page */
41 ldc r5, vbr
42
43 /* save return address */
44 sts pr, r0
45 mov.l r0, @(SH_SLEEP_SPC, r5)
46
47 /* save sr */
48 stc sr, r0
49 mov.l r0, @(SH_SLEEP_SR, r5)
50
51 /* save general purpose registers to stack if needed */
52 mov.l @(SH_SLEEP_MODE, r5), r0
53 tst #SUSP_SH_REGS, r0
54 bt skip_regs_save
55
56 sts.l pr, @-r15
57 mov.l r14, @-r15
58 mov.l r13, @-r15
59 mov.l r12, @-r15
60 mov.l r11, @-r15
61 mov.l r10, @-r15
62 mov.l r9, @-r15
63 mov.l r8, @-r15
64
65 /* make sure bank0 is selected, save low registers */
66 mov.l rb_bit, r9
67 not r9, r9
68 bsr set_sr
69 mov #0, r10
70
71 bsr save_low_regs
72 nop
73
74 /* switch to bank 1, save low registers */
75 mov.l rb_bit, r10
76 bsr set_sr
77 mov #-1, r9
78
79 bsr save_low_regs
80 nop
81
82 /* switch back to bank 0 */
83 mov.l rb_bit, r9
84 not r9, r9
85 bsr set_sr
86 mov #0, r10
87
88skip_regs_save:
89
90 /* save sp, also set to internal ram */
91 mov.l r15, @(SH_SLEEP_SP, r5)
92 mov r5, r15
93
94 /* save stbcr */
95 bsr save_register
96 mov #SH_SLEEP_REG_STBCR, r0
97
98 /* save mmu and cache context if needed */
99 mov.l @(SH_SLEEP_MODE, r5), r0
100 tst #SUSP_SH_MMU, r0
101 bt skip_mmu_save_disable
102
103 /* save mmu state */
104 bsr save_register
105 mov #SH_SLEEP_REG_PTEH, r0
106
107 bsr save_register
108 mov #SH_SLEEP_REG_PTEL, r0
109
110 bsr save_register
111 mov #SH_SLEEP_REG_TTB, r0
112
113 bsr save_register
114 mov #SH_SLEEP_REG_TEA, r0
22 115
23 .balign 4096,0,4096 116 bsr save_register
24ENTRY(sh_mobile_standby) 117 mov #SH_SLEEP_REG_MMUCR, r0
25 mov r4, r0
26 118
119 bsr save_register
120 mov #SH_SLEEP_REG_PTEA, r0
121
122 bsr save_register
123 mov #SH_SLEEP_REG_PASCR, r0
124
125 bsr save_register
126 mov #SH_SLEEP_REG_IRMCR, r0
127
128 /* invalidate TLBs and disable the MMU */
129 bsr get_register
130 mov #SH_SLEEP_REG_MMUCR, r0
131 mov #4, r1
132 mov.l r1, @r0
133 icbi @r0
134
135 /* save cache registers and disable caches */
136 bsr save_register
137 mov #SH_SLEEP_REG_CCR, r0
138
139 bsr save_register
140 mov #SH_SLEEP_REG_RAMCR, r0
141
142 bsr get_register
143 mov #SH_SLEEP_REG_CCR, r0
144 mov #0, r1
145 mov.l r1, @r0
146 icbi @r0
147
148skip_mmu_save_disable:
149 /* call self-refresh entering code if needed */
150 mov.l @(SH_SLEEP_MODE, r5), r0
27 tst #SUSP_SH_SF, r0 151 tst #SUSP_SH_SF, r0
28 bt skip_set_sf 152 bt skip_set_sf
29 153
30 /* SDRAM: disable power down and put in self-refresh mode */ 154 mov.l @(SH_SLEEP_SF_PRE, r5), r0
31 mov.l 1f, r4 155 jsr @r0
32 mov.l 2f, r1 156 nop
33 mov.l @r4, r2
34 or r1, r2
35 mov.l 3f, r3
36 and r3, r2
37 mov.l r2, @r4
38 157
39skip_set_sf: 158skip_set_sf:
40 tst #SUSP_SH_SLEEP, r0 159 mov.l @(SH_SLEEP_MODE, r5), r0
41 bt test_standby
42
43 /* set mode to "sleep mode" */
44 bra do_sleep
45 mov #0x00, r1
46
47test_standby:
48 tst #SUSP_SH_STANDBY, r0 160 tst #SUSP_SH_STANDBY, r0
49 bt test_rstandby 161 bt test_rstandby
50 162
@@ -56,70 +168,238 @@ test_rstandby:
56 tst #SUSP_SH_RSTANDBY, r0 168 tst #SUSP_SH_RSTANDBY, r0
57 bt test_ustandby 169 bt test_ustandby
58 170
171 /* setup BAR register */
172 bsr get_register
173 mov #SH_SLEEP_REG_BAR, r0
174 mov.l @(SH_SLEEP_RESUME, r5), r1
175 mov.l r1, @r0
176
59 /* set mode to "r-standby mode" */ 177 /* set mode to "r-standby mode" */
60 bra do_sleep 178 bra do_sleep
61 mov #0x20, r1 179 mov #0x20, r1
62 180
63test_ustandby: 181test_ustandby:
64 tst #SUSP_SH_USTANDBY, r0 182 tst #SUSP_SH_USTANDBY, r0
65 bt done_sleep 183 bt force_sleep
66 184
67 /* set mode to "u-standby mode" */ 185 /* set mode to "u-standby mode" */
68 mov #0x10, r1 186 bra do_sleep
187 mov #0x10, r1
188
189force_sleep:
69 190
70 /* fall-through */ 191 /* set mode to "sleep mode" */
192 mov #0x00, r1
71 193
72do_sleep: 194do_sleep:
73 /* setup and enter selected standby mode */ 195 /* setup and enter selected standby mode */
74 mov.l 5f, r4 196 bsr get_register
75 mov.l r1, @r4 197 mov #SH_SLEEP_REG_STBCR, r0
198 mov.l r1, @r0
199again:
76 sleep 200 sleep
201 bra again
202 nop
77 203
78done_sleep: 204save_register:
79 /* reset standby mode to sleep mode */ 205 add #SH_SLEEP_BASE_ADDR, r0
80 mov.l 5f, r4 206 mov.l @(r0, r5), r1
81 mov #0x00, r1 207 add #-SH_SLEEP_BASE_ADDR, r0
82 mov.l r1, @r4 208 mov.l @r1, r1
209 add #SH_SLEEP_BASE_DATA, r0
210 mov.l r1, @(r0, r5)
211 add #-SH_SLEEP_BASE_DATA, r0
212 rts
213 nop
214
215get_register:
216 add #SH_SLEEP_BASE_ADDR, r0
217 mov.l @(r0, r5), r0
218 rts
219 nop
83 220
221set_sr:
222 stc sr, r8
223 and r9, r8
224 or r10, r8
225 ldc r8, sr
226 rts
227 nop
228
229save_low_regs:
230 mov.l r7, @-r15
231 mov.l r6, @-r15
232 mov.l r5, @-r15
233 mov.l r4, @-r15
234 mov.l r3, @-r15
235 mov.l r2, @-r15
236 mov.l r1, @-r15
237 rts
238 mov.l r0, @-r15
239
240 .balign 4
241rb_bit: .long 0x20000000 ! RB=1
242
243ENTRY(sh_mobile_sleep_enter_end)
244
245 .balign 4
246ENTRY(sh_mobile_sleep_resume_start)
247
248 /* figure out start address */
249 bsr 0f
250 nop
2510:
252 sts pr, k1
253 mov.l 1f, k0
254 and k0, k1
255
256 /* store pointer to data area in VBR */
257 ldc k1, vbr
258
259 /* setup sr with saved sr */
260 mov.l @(SH_SLEEP_SR, k1), k0
261 ldc k0, sr
262
263 /* now: user register set! */
264 stc vbr, r5
265
266 /* setup spc with return address to c code */
267 mov.l @(SH_SLEEP_SPC, r5), r0
268 ldc r0, spc
269
270 /* restore vbr */
271 mov.l @(SH_SLEEP_VBR, r5), r0
272 ldc r0, vbr
273
274 /* setup ssr with saved sr */
275 mov.l @(SH_SLEEP_SR, r5), r0
276 ldc r0, ssr
277
278 /* restore sp */
279 mov.l @(SH_SLEEP_SP, r5), r15
280
281 /* restore sleep mode register */
282 bsr restore_register
283 mov #SH_SLEEP_REG_STBCR, r0
284
285 /* call self-refresh resume code if needed */
286 mov.l @(SH_SLEEP_MODE, r5), r0
84 tst #SUSP_SH_SF, r0 287 tst #SUSP_SH_SF, r0
85 bt skip_restore_sf 288 bt skip_restore_sf
86 289
87 /* SDRAM: set auto-refresh mode */ 290 mov.l @(SH_SLEEP_SF_POST, r5), r0
88 mov.l 1f, r4 291 jsr @r0
89 mov.l @r4, r2 292 nop
90 mov.l 4f, r3 293
91 and r3, r2
92 mov.l r2, @r4
93 mov.l 6f, r4
94 mov.l 7f, r1
95 mov.l 8f, r2
96 mov.l @r4, r3
97 mov #-1, r4
98 add r4, r3
99 or r2, r3
100 mov.l r3, @r1
101skip_restore_sf: 294skip_restore_sf:
102 rts 295 /* restore mmu and cache state if needed */
296 mov.l @(SH_SLEEP_MODE, r5), r0
297 tst #SUSP_SH_MMU, r0
298 bt skip_restore_mmu
299
300 /* restore mmu state */
301 bsr restore_register
302 mov #SH_SLEEP_REG_PTEH, r0
303
304 bsr restore_register
305 mov #SH_SLEEP_REG_PTEL, r0
306
307 bsr restore_register
308 mov #SH_SLEEP_REG_TTB, r0
309
310 bsr restore_register
311 mov #SH_SLEEP_REG_TEA, r0
312
313 bsr restore_register
314 mov #SH_SLEEP_REG_PTEA, r0
315
316 bsr restore_register
317 mov #SH_SLEEP_REG_PASCR, r0
318
319 bsr restore_register
320 mov #SH_SLEEP_REG_IRMCR, r0
321
322 bsr restore_register
323 mov #SH_SLEEP_REG_MMUCR, r0
324 icbi @r0
325
326 /* restore cache settings */
327 bsr restore_register
328 mov #SH_SLEEP_REG_RAMCR, r0
329 icbi @r0
330
331 bsr restore_register
332 mov #SH_SLEEP_REG_CCR, r0
333 icbi @r0
334
335skip_restore_mmu:
336
337 /* restore general purpose registers if needed */
338 mov.l @(SH_SLEEP_MODE, r5), r0
339 tst #SUSP_SH_REGS, r0
340 bt skip_restore_regs
341
342 /* switch to bank 1, restore low registers */
343 mov.l _rb_bit, r10
344 bsr _set_sr
345 mov #-1, r9
346
347 bsr restore_low_regs
103 nop 348 nop
104 349
105 .balign 4 350 /* switch to bank0, restore low registers */
1061: .long 0xfe400008 /* SDCR0 */ 351 mov.l _rb_bit, r9
1072: .long 0x00000400 352 not r9, r9
1083: .long 0xffff7fff 353 bsr _set_sr
1094: .long 0xfffffbff 354 mov #0, r10
1105: .long 0xa4150020 /* STBCR */ 355
1116: .long 0xfe40001c /* RTCOR */ 356 bsr restore_low_regs
1127: .long 0xfe400018 /* RTCNT */ 357 nop
1138: .long 0xa55a0000 358
114 359 /* restore the rest of the registers */
115/* interrupt vector @ 0x600 */ 360 mov.l @r15+, r8
116 .balign 0x400,0,0x400 361 mov.l @r15+, r9
117 .long 0xdeadbeef 362 mov.l @r15+, r10
118 .balign 0x200,0,0x200 363 mov.l @r15+, r11
119 /* sh7722 will end up here in sleep mode */ 364 mov.l @r15+, r12
365 mov.l @r15+, r13
366 mov.l @r15+, r14
367 lds.l @r15+, pr
368
369skip_restore_regs:
120 rte 370 rte
121 nop 371 nop
122sh_mobile_standby_end:
123 372
124ENTRY(sh_mobile_standby_size) 373restore_register:
125 .long sh_mobile_standby_end - sh_mobile_standby 374 add #SH_SLEEP_BASE_DATA, r0
375 mov.l @(r0, r5), r1
376 add #-SH_SLEEP_BASE_DATA, r0
377 add #SH_SLEEP_BASE_ADDR, r0
378 mov.l @(r0, r5), r0
379 mov.l r1, @r0
380 rts
381 nop
382
383_set_sr:
384 stc sr, r8
385 and r9, r8
386 or r10, r8
387 ldc r8, sr
388 rts
389 nop
390
391restore_low_regs:
392 mov.l @r15+, r0
393 mov.l @r15+, r1
394 mov.l @r15+, r2
395 mov.l @r15+, r3
396 mov.l @r15+, r4
397 mov.l @r15+, r5
398 mov.l @r15+, r6
399 rts
400 mov.l @r15+, r7
401
402 .balign 4
403_rb_bit: .long 0x20000000 ! RB=1
4041: .long ~0x7ff
405ENTRY(sh_mobile_sleep_resume_end)
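The self-refresh enter/exit snippets that sleep.S calls through SH_SLEEP_SF_PRE/SF_POST are supplied by CPU- or board-specific code and copied into on-chip RAM by sh_mobile_register_self_refresh() together with the common enter/resume code above. A hedged sketch of the registration call; the snippet symbols are hypothetical placeholders for whatever the board's assembly file exports:

	extern char my_sdram_enter_start, my_sdram_enter_end;
	extern char my_sdram_leave_start, my_sdram_leave_end;

	/* typically called from the CPU/board PM setup code */
	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
					&my_sdram_enter_start, &my_sdram_enter_end,
					&my_sdram_leave_start, &my_sdram_leave_end);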
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
deleted file mode 100644
index 81923079fa12..000000000000
--- a/arch/sh/kernel/cpu/ubc.S
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * arch/sh/kernel/cpu/ubc.S
3 *
4 * Set of management routines for the User Break Controller (UBC)
5 *
6 * Copyright (C) 2002 Paul Mundt
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#include <linux/linkage.h>
14#include <asm/ubc.h>
15
16#define STBCR2 0xffc00010
17
18ENTRY(ubc_sleep)
19 mov #0, r0
20
21 mov.l 1f, r1 ! Zero out UBC_BBRA ..
22 mov.w r0, @r1
23
24 mov.l 2f, r1 ! .. same for BBRB ..
25 mov.w r0, @r1
26
27 mov.l 3f, r1 ! .. and again for BRCR.
28 mov.w r0, @r1
29
30 mov.w @r1, r0 ! Dummy read BRCR
31
32 mov.l 4f, r1 ! Set MSTP5 in STBCR2
33 mov.b @r1, r0
34 or #0x01, r0
35 mov.b r0, @r1
36
37 mov.b @r1, r0 ! Two dummy reads ..
38 mov.b @r1, r0
39
40 rts
41 nop
42
43ENTRY(ubc_wakeup)
44 mov.l 4f, r1 ! Clear MSTP5
45 mov.b @r1, r0
46 and #0xfe, r0
47 mov.b r0, @r1
48
49 mov.b @r1, r0 ! Two more dummy reads ..
50 mov.b @r1, r0
51
52 rts
53 nop
54
551: .long UBC_BBRA
562: .long UBC_BBRB
573: .long UBC_BRCR
584: .long STBCR2
59
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index e0590ffebd73..0fffacea6ed9 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -48,7 +48,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
48 return -ENODEV; 48 return -ENODEV;
49 49
50 cpus_allowed = current->cpus_allowed; 50 cpus_allowed = current->cpus_allowed;
51 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 51 set_cpus_allowed_ptr(current, cpumask_of(cpu));
52 52
53 BUG_ON(smp_processor_id() != cpu); 53 BUG_ON(smp_processor_id() != cpu);
54 54
@@ -66,7 +66,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
66 freqs.flags = 0; 66 freqs.flags = 0;
67 67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
69 set_cpus_allowed(current, cpus_allowed); 69 set_cpus_allowed_ptr(current, &cpus_allowed);
70 clk_set_rate(cpuclk, freq); 70 clk_set_rate(cpuclk, freq);
71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
72 72
@@ -82,7 +82,8 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
82 82
83 cpuclk = clk_get(NULL, "cpu_clk"); 83 cpuclk = clk_get(NULL, "cpu_clk");
84 if (IS_ERR(cpuclk)) { 84 if (IS_ERR(cpuclk)) {
85 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); 85 printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n",
86 policy->cpu);
86 return PTR_ERR(cpuclk); 87 return PTR_ERR(cpuclk);
87 } 88 }
88 89
@@ -95,22 +96,21 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
95 policy->min = policy->cpuinfo.min_freq; 96 policy->min = policy->cpuinfo.min_freq;
96 policy->max = policy->cpuinfo.max_freq; 97 policy->max = policy->cpuinfo.max_freq;
97 98
98
99 /* 99 /*
100 * Catch the cases where the clock framework hasn't been wired up 100 * Catch the cases where the clock framework hasn't been wired up
101 * properly to support scaling. 101 * properly to support scaling.
102 */ 102 */
103 if (unlikely(policy->min == policy->max)) { 103 if (unlikely(policy->min == policy->max)) {
104 printk(KERN_ERR "cpufreq: clock framework rate rounding " 104 printk(KERN_ERR "cpufreq: clock framework rate rounding "
105 "not supported on this CPU.\n"); 105 "not supported on CPU#%d.\n", policy->cpu);
106 106
107 clk_put(cpuclk); 107 clk_put(cpuclk);
108 return -EINVAL; 108 return -EINVAL;
109 } 109 }
110 110
111 printk(KERN_INFO "cpufreq: Frequencies - Minimum %u.%03u MHz, " 111 printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, "
112 "Maximum %u.%03u MHz.\n", 112 "Maximum %u.%03u MHz.\n",
113 policy->min / 1000, policy->min % 1000, 113 policy->cpu, policy->min / 1000, policy->min % 1000,
114 policy->max / 1000, policy->max % 1000); 114 policy->max / 1000, policy->max % 1000);
115 115
116 return 0; 116 return 0;
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 95d216255565..37c97d444576 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -4,7 +4,6 @@
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) 4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * Copyright (C) IBM Corporation, 2004. All rights reserved 5 * Copyright (C) IBM Corporation, 2004. All rights reserved
6 */ 6 */
7
8#include <linux/errno.h> 7#include <linux/errno.h>
9#include <linux/crash_dump.h> 8#include <linux/crash_dump.h>
10#include <linux/io.h> 9#include <linux/io.h>
@@ -13,6 +12,25 @@
13/* Stores the physical address of elf header of crash image. */ 12/* Stores the physical address of elf header of crash image. */
14unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; 13unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
15 14
15/*
16 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
17 * is_kdump_kernel() to determine if we are booting after a panic. Hence
18 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
19 *
20 * elfcorehdr= specifies the location of elf core header
21 * stored by the crashed kernel.
22 */
23static int __init parse_elfcorehdr(char *arg)
24{
25 if (!arg)
26 return -EINVAL;
27
28 elfcorehdr_addr = memparse(arg, &arg);
29
30 return 0;
31}
32early_param("elfcorehdr", parse_elfcorehdr);
33
16/** 34/**
17 * copy_oldmem_page - copy one page from "oldmem" 35 * copy_oldmem_page - copy one page from "oldmem"
18 * @pfn: page frame number to be copied 36 * @pfn: page frame number to be copied
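The early_param() hook added in the crash_dump.c hunk above lets a kdump capture kernel find the ELF core header left behind by the crashed kernel; memparse() accepts the usual K/M/G suffixes. The capture kernel would be booted with something like the following on its command line (the address is purely illustrative):

	elfcorehdr=0x48000000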
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
index 591741383ee6..7a1b46fec0f4 100644
--- a/arch/sh/kernel/debugtraps.S
+++ b/arch/sh/kernel/debugtraps.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14 14
15#if !defined(CONFIG_KGDB) 15#if !defined(CONFIG_KGDB)
16#define breakpoint_trap_handler debug_trap_handler
17#define singlestep_trap_handler debug_trap_handler 16#define singlestep_trap_handler debug_trap_handler
18#endif 17#endif
19 18
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 000000000000..3c55b87f8b63
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,82 @@
1/*
2 * DMA mapping support for platforms lacking IOMMUs.
3 *
4 * Copyright (C) 2009 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/io.h>
12
13static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
14 unsigned long offset, size_t size,
15 enum dma_data_direction dir,
16 struct dma_attrs *attrs)
17{
18 dma_addr_t addr = page_to_phys(page) + offset;
19
20 WARN_ON(size == 0);
21 dma_cache_sync(dev, page_address(page) + offset, size, dir);
22
23 return addr;
24}
25
26static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
27 int nents, enum dma_data_direction dir,
28 struct dma_attrs *attrs)
29{
30 struct scatterlist *s;
31 int i;
32
33 WARN_ON(nents == 0 || sg[0].length == 0);
34
35 for_each_sg(sg, s, nents, i) {
36 BUG_ON(!sg_page(s));
37
38 dma_cache_sync(dev, sg_virt(s), s->length, dir);
39
40 s->dma_address = sg_phys(s);
41 s->dma_length = s->length;
42 }
43
44 return nents;
45}
46
47#ifdef CONFIG_DMA_NONCOHERENT
48static void nommu_sync_single(struct device *dev, dma_addr_t addr,
49 size_t size, enum dma_data_direction dir)
50{
51 dma_cache_sync(dev, phys_to_virt(addr), size, dir);
52}
53
54static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
55 int nelems, enum dma_data_direction dir)
56{
57 struct scatterlist *s;
58 int i;
59
60 for_each_sg(sg, s, nelems, i)
61 dma_cache_sync(dev, sg_virt(s), s->length, dir);
62}
63#endif
64
65struct dma_map_ops nommu_dma_ops = {
66 .alloc_coherent = dma_generic_alloc_coherent,
67 .free_coherent = dma_generic_free_coherent,
68 .map_page = nommu_map_page,
69 .map_sg = nommu_map_sg,
70#ifdef CONFIG_DMA_NONCOHERENT
71 .sync_single_for_device = nommu_sync_single,
72 .sync_sg_for_device = nommu_sync_sg,
73#endif
74 .is_phys = 1,
75};
76
77void __init no_iommu_init(void)
78{
79 if (dma_ops)
80 return;
81 dma_ops = &nommu_dma_ops;
82}
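These nommu ops back the generic DMA mapping API on parts without an IOMMU: mapping simply syncs the CPU cache and hands back the physical address. A minimal sketch of the driver-side calls that end up in nommu_map_page() here (device, buffer and length are hypothetical):

	dma_addr_t handle;

	/* syncs the cache via dma_cache_sync() and returns the physical address */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the DMA controller at 'handle' and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);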
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 000000000000..6f5ad1513409
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 * Copyright (C) 2009 Matt Fleming
5 */
6#include <linux/kallsyms.h>
7#include <linux/ftrace.h>
8#include <linux/debug_locks.h>
9#include <asm/unwinder.h>
10#include <asm/stacktrace.h>
11
12void printk_address(unsigned long address, int reliable)
13{
14 printk(" [<%p>] %s%pS\n", (void *) address,
15 reliable ? "" : "? ", (void *) address);
16}
17
18#ifdef CONFIG_FUNCTION_GRAPH_TRACER
19static void
20print_ftrace_graph_addr(unsigned long addr, void *data,
21 const struct stacktrace_ops *ops,
22 struct thread_info *tinfo, int *graph)
23{
24 struct task_struct *task = tinfo->task;
25 unsigned long ret_addr;
26 int index = task->curr_ret_stack;
27
28 if (addr != (unsigned long)return_to_handler)
29 return;
30
31 if (!task->ret_stack || index < *graph)
32 return;
33
34 index -= *graph;
35 ret_addr = task->ret_stack[index].ret;
36
37 ops->address(data, ret_addr, 1);
38
39 (*graph)++;
40}
41#else
42static inline void
43print_ftrace_graph_addr(unsigned long addr, void *data,
44 const struct stacktrace_ops *ops,
45 struct thread_info *tinfo, int *graph)
46{ }
47#endif
48
49void
50stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
51 unsigned long *sp, const struct stacktrace_ops *ops,
52 void *data)
53{
54 struct thread_info *context;
55 int graph = 0;
56
57 context = (struct thread_info *)
58 ((unsigned long)sp & (~(THREAD_SIZE - 1)));
59
60 while (!kstack_end(sp)) {
61 unsigned long addr = *sp++;
62
63 if (__kernel_text_address(addr)) {
64 ops->address(data, addr, 1);
65
66 print_ftrace_graph_addr(addr, data, ops,
67 context, &graph);
68 }
69 }
70}
71
72static void
73print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
74{
75 printk(data);
76 print_symbol(msg, symbol);
77 printk("\n");
78}
79
80static void print_trace_warning(void *data, char *msg)
81{
82 printk("%s%s\n", (char *)data, msg);
83}
84
85static int print_trace_stack(void *data, char *name)
86{
87 printk("%s <%s> ", (char *)data, name);
88 return 0;
89}
90
91/*
92 * Print one address/symbol entries per line.
93 */
94static void print_trace_address(void *data, unsigned long addr, int reliable)
95{
96 printk(data);
97 printk_address(addr, reliable);
98}
99
100static const struct stacktrace_ops print_trace_ops = {
101 .warning = print_trace_warning,
102 .warning_symbol = print_trace_warning_symbol,
103 .stack = print_trace_stack,
104 .address = print_trace_address,
105};
106
107void show_trace(struct task_struct *tsk, unsigned long *sp,
108 struct pt_regs *regs)
109{
110 if (regs && user_mode(regs))
111 return;
112
113 printk("\nCall trace:\n");
114
115 unwind_stack(tsk, regs, sp, &print_trace_ops, "");
116
117 printk("\n");
118
119 if (!tsk)
120 tsk = current;
121
122 debug_show_held_locks(tsk);
123}
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 000000000000..49c09c7d5b77
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,1220 @@
1/*
2 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * This is an implementation of a DWARF unwinder. Its main purpose is
9 * for generating stacktrace information. Based on the DWARF 3
10 * specification from http://www.dwarfstd.org.
11 *
12 * TODO:
13 * - DWARF64 doesn't work.
14 * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
15 */
16
17/* #define DEBUG */
18#include <linux/kernel.h>
19#include <linux/io.h>
20#include <linux/list.h>
21#include <linux/mempool.h>
22#include <linux/mm.h>
23#include <linux/elf.h>
24#include <linux/ftrace.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <asm/dwarf.h>
28#include <asm/unwinder.h>
29#include <asm/sections.h>
30#include <asm/unaligned.h>
31#include <asm/stacktrace.h>
32
33/* Reserve enough memory for two stack frames */
34#define DWARF_FRAME_MIN_REQ 2
35/* ... with 4 registers per frame. */
36#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
37
38static struct kmem_cache *dwarf_frame_cachep;
39static mempool_t *dwarf_frame_pool;
40
41static struct kmem_cache *dwarf_reg_cachep;
42static mempool_t *dwarf_reg_pool;
43
44static struct rb_root cie_root;
45static DEFINE_SPINLOCK(dwarf_cie_lock);
46
47static struct rb_root fde_root;
48static DEFINE_SPINLOCK(dwarf_fde_lock);
49
50static struct dwarf_cie *cached_cie;
51
52static unsigned int dwarf_unwinder_ready;
53
54/**
55 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
56 * @frame: the DWARF frame whose list of registers we insert on
57 * @reg_num: the register number
58 *
59 * Allocate space for, and initialise, a dwarf reg from
60 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
61 * dwarf registers for @frame.
62 *
63 * Return the initialised DWARF reg.
64 */
65static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
66 unsigned int reg_num)
67{
68 struct dwarf_reg *reg;
69
70 reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
71 if (!reg) {
72 printk(KERN_WARNING "Unable to allocate a DWARF register\n");
73 /*
74 * Let's just bomb hard here, we have no way to
75 * gracefully recover.
76 */
77 UNWINDER_BUG();
78 }
79
80 reg->number = reg_num;
81 reg->addr = 0;
82 reg->flags = 0;
83
84 list_add(&reg->link, &frame->reg_list);
85
86 return reg;
87}
88
89static void dwarf_frame_free_regs(struct dwarf_frame *frame)
90{
91 struct dwarf_reg *reg, *n;
92
93 list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
94 list_del(&reg->link);
95 mempool_free(reg, dwarf_reg_pool);
96 }
97}
98
99/**
100 * dwarf_frame_reg - return a DWARF register
101 * @frame: the DWARF frame to search in for @reg_num
102 * @reg_num: the register number to search for
103 *
104 * Lookup and return the dwarf reg @reg_num for this frame. Return
 105 * NULL if @reg_num is an invalid register number.
106 */
107static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
108 unsigned int reg_num)
109{
110 struct dwarf_reg *reg;
111
112 list_for_each_entry(reg, &frame->reg_list, link) {
113 if (reg->number == reg_num)
114 return reg;
115 }
116
117 return NULL;
118}
119
120/**
121 * dwarf_read_addr - read dwarf data
122 * @src: source address of data
123 * @dst: destination address to store the data to
124 *
125 * Read 'n' bytes from @src, where 'n' is the size of an address on
126 * the native machine. We return the number of bytes read, which
127 * should always be 'n'. We also have to be careful when reading
128 * from @src and writing to @dst, because they can be arbitrarily
129 * aligned. Return 'n' - the number of bytes read.
130 */
131static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
132{
133 u32 val = get_unaligned(src);
134 put_unaligned(val, dst);
135 return sizeof(unsigned long *);
136}
137
138/**
139 * dwarf_read_uleb128 - read unsigned LEB128 data
140 * @addr: the address where the ULEB128 data is stored
141 * @ret: address to store the result
142 *
143 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
144 * from Appendix C of the DWARF 3 spec. For information on the
145 * encodings refer to section "7.6 - Variable Length Data". Return
146 * the number of bytes read.
147 */
148static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
149{
150 unsigned int result;
151 unsigned char byte;
152 int shift, count;
153
154 result = 0;
155 shift = 0;
156 count = 0;
157
158 while (1) {
159 byte = __raw_readb(addr);
160 addr++;
161 count++;
162
163 result |= (byte & 0x7f) << shift;
164 shift += 7;
165
166 if (!(byte & 0x80))
167 break;
168 }
169
170 *ret = result;
171
172 return count;
173}
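For reference, a worked decode using the classic example from the DWARF spec, the three bytes 0xE5 0x8E 0x26 (what dwarf_read_uleb128() would see at @addr):

	/*
	 * byte 0: 0xE5 -> low 7 bits 0x65, continue   result  = 0x65
	 * byte 1: 0x8E -> low 7 bits 0x0E, continue   result |= 0x0E << 7   = 0x765
	 * byte 2: 0x26 -> low 7 bits 0x26, stop        result |= 0x26 << 14  = 0x98765
	 *
	 * 0x98765 == 624485 is stored in *ret and the function returns 3.
	 */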
174
175/**
176 * dwarf_read_leb128 - read signed LEB128 data
177 * @addr: the address of the LEB128 encoded data
178 * @ret: address to store the result
179 *
180 * Decode signed LEB128 data. The algorithm is taken from Appendix
181 * C of the DWARF 3 spec. Return the number of bytes read.
182 */
183static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
184{
185 unsigned char byte;
186 int result, shift;
187 int num_bits;
188 int count;
189
190 result = 0;
191 shift = 0;
192 count = 0;
193
194 while (1) {
195 byte = __raw_readb(addr);
196 addr++;
197 result |= (byte & 0x7f) << shift;
198 shift += 7;
199 count++;
200
201 if (!(byte & 0x80))
202 break;
203 }
204
205 /* The number of bits in a signed integer. */
206 num_bits = 8 * sizeof(result);
207
208 if ((shift < num_bits) && (byte & 0x40))
209 result |= (-1 << shift);
210
211 *ret = result;
212
213 return count;
214}
215
216/**
217 * dwarf_read_encoded_value - return the decoded value at @addr
218 * @addr: the address of the encoded value
219 * @val: where to write the decoded value
220 * @encoding: the encoding with which we can decode @addr
221 *
222 * GCC emits encoded address in the .eh_frame FDE entries. Decode
223 * the value at @addr using @encoding. The decoded value is written
224 * to @val and the number of bytes read is returned.
225 */
226static int dwarf_read_encoded_value(char *addr, unsigned long *val,
227 char encoding)
228{
229 unsigned long decoded_addr = 0;
230 int count = 0;
231
232 switch (encoding & 0x70) {
233 case DW_EH_PE_absptr:
234 break;
235 case DW_EH_PE_pcrel:
236 decoded_addr = (unsigned long)addr;
237 break;
238 default:
239 pr_debug("encoding=0x%x\n", (encoding & 0x70));
240 UNWINDER_BUG();
241 }
242
243 if ((encoding & 0x07) == 0x00)
244 encoding |= DW_EH_PE_udata4;
245
246 switch (encoding & 0x0f) {
247 case DW_EH_PE_sdata4:
248 case DW_EH_PE_udata4:
249 count += 4;
250 decoded_addr += get_unaligned((u32 *)addr);
251 __raw_writel(decoded_addr, val);
252 break;
253 default:
254 pr_debug("encoding=0x%x\n", encoding);
255 UNWINDER_BUG();
256 }
257
258 return count;
259}
260
261/**
262 * dwarf_entry_len - return the length of an FDE or CIE
263 * @addr: the address of the entry
264 * @len: the length of the entry
265 *
266 * Read the initial_length field of the entry and store the size of
267 * the entry in @len. We return the number of bytes read. Return a
268 * count of 0 on error.
269 */
270static inline int dwarf_entry_len(char *addr, unsigned long *len)
271{
272 u32 initial_len;
273 int count;
274
275 initial_len = get_unaligned((u32 *)addr);
276 count = 4;
277
278 /*
279 * An initial length field value in the range DW_LEN_EXT_LO -
280 * DW_LEN_EXT_HI indicates an extension, and should not be
281 * interpreted as a length. The only extension that we currently
282 * understand is the use of DWARF64 addresses.
283 */
284 if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
285 /*
286 * The 64-bit length field immediately follows the
287 * compulsory 32-bit length field.
288 */
289 if (initial_len == DW_EXT_DWARF64) {
 290 *len = get_unaligned((u64 *)(addr + 4));
291 count = 12;
292 } else {
293 printk(KERN_WARNING "Unknown DWARF extension\n");
294 count = 0;
295 }
296 } else
297 *len = initial_len;
298
299 return count;
300}
301
302/**
303 * dwarf_lookup_cie - locate the cie
304 * @cie_ptr: pointer to help with lookup
305 */
306static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
307{
308 struct rb_node **rb_node = &cie_root.rb_node;
309 struct dwarf_cie *cie = NULL;
310 unsigned long flags;
311
312 spin_lock_irqsave(&dwarf_cie_lock, flags);
313
314 /*
315 * We've cached the last CIE we looked up because chances are
316 * that the FDE wants this CIE.
317 */
318 if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
319 cie = cached_cie;
320 goto out;
321 }
322
323 while (*rb_node) {
324 struct dwarf_cie *cie_tmp;
325
326 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
327 BUG_ON(!cie_tmp);
328
329 if (cie_ptr == cie_tmp->cie_pointer) {
330 cie = cie_tmp;
331 cached_cie = cie_tmp;
332 goto out;
333 } else {
334 if (cie_ptr < cie_tmp->cie_pointer)
335 rb_node = &(*rb_node)->rb_left;
336 else
337 rb_node = &(*rb_node)->rb_right;
338 }
339 }
340
341out:
342 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
343 return cie;
344}
345
346/**
347 * dwarf_lookup_fde - locate the FDE that covers pc
348 * @pc: the program counter
349 */
350struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
351{
352 struct rb_node **rb_node = &fde_root.rb_node;
353 struct dwarf_fde *fde = NULL;
354 unsigned long flags;
355
356 spin_lock_irqsave(&dwarf_fde_lock, flags);
357
358 while (*rb_node) {
359 struct dwarf_fde *fde_tmp;
360 unsigned long tmp_start, tmp_end;
361
362 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
363 BUG_ON(!fde_tmp);
364
365 tmp_start = fde_tmp->initial_location;
366 tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
367
368 if (pc < tmp_start) {
369 rb_node = &(*rb_node)->rb_left;
370 } else {
371 if (pc < tmp_end) {
372 fde = fde_tmp;
373 goto out;
374 } else
375 rb_node = &(*rb_node)->rb_right;
376 }
377 }
378
379out:
380 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
381
382 return fde;
383}
384
385/**
386 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
387 * @insn_start: address of the first instruction
388 * @insn_end: address of the last instruction
389 * @cie: the CIE for this function
390 * @fde: the FDE for this function
391 * @frame: the instructions calculate the CFA for this frame
392 * @pc: the program counter of the address we're interested in
393 *
394 * Execute the Call Frame instruction sequence starting at
395 * @insn_start and ending at @insn_end. The instructions describe
396 * how to calculate the Canonical Frame Address of a stackframe.
397 * Store the results in @frame.
398 */
399static int dwarf_cfa_execute_insns(unsigned char *insn_start,
400 unsigned char *insn_end,
401 struct dwarf_cie *cie,
402 struct dwarf_fde *fde,
403 struct dwarf_frame *frame,
404 unsigned long pc)
405{
406 unsigned char insn;
407 unsigned char *current_insn;
408 unsigned int count, delta, reg, expr_len, offset;
409 struct dwarf_reg *regp;
410
411 current_insn = insn_start;
412
413 while (current_insn < insn_end && frame->pc <= pc) {
414 insn = __raw_readb(current_insn++);
415
416 /*
417 * Firstly, handle the opcodes that embed their operands
418 * in the instructions.
419 */
420 switch (DW_CFA_opcode(insn)) {
421 case DW_CFA_advance_loc:
422 delta = DW_CFA_operand(insn);
423 delta *= cie->code_alignment_factor;
424 frame->pc += delta;
425 continue;
426 /* NOTREACHED */
427 case DW_CFA_offset:
428 reg = DW_CFA_operand(insn);
429 count = dwarf_read_uleb128(current_insn, &offset);
430 current_insn += count;
431 offset *= cie->data_alignment_factor;
432 regp = dwarf_frame_alloc_reg(frame, reg);
433 regp->addr = offset;
434 regp->flags |= DWARF_REG_OFFSET;
435 continue;
436 /* NOTREACHED */
437 case DW_CFA_restore:
438 reg = DW_CFA_operand(insn);
439 continue;
440 /* NOTREACHED */
441 }
442
443 /*
444 * Secondly, handle the opcodes that don't embed their
445 * operands in the instruction.
446 */
447 switch (insn) {
448 case DW_CFA_nop:
449 continue;
450 case DW_CFA_advance_loc1:
451 delta = *current_insn++;
452 frame->pc += delta * cie->code_alignment_factor;
453 break;
454 case DW_CFA_advance_loc2:
455 delta = get_unaligned((u16 *)current_insn);
456 current_insn += 2;
457 frame->pc += delta * cie->code_alignment_factor;
458 break;
459 case DW_CFA_advance_loc4:
460 delta = get_unaligned((u32 *)current_insn);
461 current_insn += 4;
462 frame->pc += delta * cie->code_alignment_factor;
463 break;
464 case DW_CFA_offset_extended:
465 count = dwarf_read_uleb128(current_insn, &reg);
466 current_insn += count;
467 count = dwarf_read_uleb128(current_insn, &offset);
468 current_insn += count;
469 offset *= cie->data_alignment_factor;
470 break;
471 case DW_CFA_restore_extended:
472 count = dwarf_read_uleb128(current_insn, &reg);
473 current_insn += count;
474 break;
475 case DW_CFA_undefined:
476 count = dwarf_read_uleb128(current_insn, &reg);
477 current_insn += count;
478 regp = dwarf_frame_alloc_reg(frame, reg);
479 regp->flags |= DWARF_UNDEFINED;
480 break;
481 case DW_CFA_def_cfa:
482 count = dwarf_read_uleb128(current_insn,
483 &frame->cfa_register);
484 current_insn += count;
485 count = dwarf_read_uleb128(current_insn,
486 &frame->cfa_offset);
487 current_insn += count;
488
489 frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
490 break;
491 case DW_CFA_def_cfa_register:
492 count = dwarf_read_uleb128(current_insn,
493 &frame->cfa_register);
494 current_insn += count;
495 frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
496 break;
497 case DW_CFA_def_cfa_offset:
498 count = dwarf_read_uleb128(current_insn, &offset);
499 current_insn += count;
500 frame->cfa_offset = offset;
501 break;
502 case DW_CFA_def_cfa_expression:
503 count = dwarf_read_uleb128(current_insn, &expr_len);
504 current_insn += count;
505
506 frame->cfa_expr = current_insn;
507 frame->cfa_expr_len = expr_len;
508 current_insn += expr_len;
509
510 frame->flags |= DWARF_FRAME_CFA_REG_EXP;
511 break;
512 case DW_CFA_offset_extended_sf:
513 count = dwarf_read_uleb128(current_insn, &reg);
514 current_insn += count;
515 count = dwarf_read_leb128(current_insn, &offset);
516 current_insn += count;
517 offset *= cie->data_alignment_factor;
518 regp = dwarf_frame_alloc_reg(frame, reg);
519 regp->flags |= DWARF_REG_OFFSET;
520 regp->addr = offset;
521 break;
522 case DW_CFA_val_offset:
523 count = dwarf_read_uleb128(current_insn, &reg);
524 current_insn += count;
525 count = dwarf_read_leb128(current_insn, &offset);
526 offset *= cie->data_alignment_factor;
527 regp = dwarf_frame_alloc_reg(frame, reg);
528 regp->flags |= DWARF_VAL_OFFSET;
529 regp->addr = offset;
530 break;
531 case DW_CFA_GNU_args_size:
532 count = dwarf_read_uleb128(current_insn, &offset);
533 current_insn += count;
534 break;
535 case DW_CFA_GNU_negative_offset_extended:
536 count = dwarf_read_uleb128(current_insn, &reg);
537 current_insn += count;
538 count = dwarf_read_uleb128(current_insn, &offset);
539 offset *= cie->data_alignment_factor;
540
541 regp = dwarf_frame_alloc_reg(frame, reg);
542 regp->flags |= DWARF_REG_OFFSET;
543 regp->addr = -offset;
544 break;
545 default:
546 pr_debug("unhandled DWARF instruction 0x%x\n", insn);
547 UNWINDER_BUG();
548 break;
549 }
550 }
551
552 return 0;
553}
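To make the alignment-factor arithmetic above concrete, assume a CIE with code_alignment_factor = 1 and data_alignment_factor = -4 (typical for a 32-bit target; the real values always come from the parsed CIE):

	/*
	 * DW_CFA_advance_loc(6)   -> frame->pc += 6 * 1, i.e. pc advances 6 bytes
	 * DW_CFA_def_cfa(r15, 8)  -> CFA = r15 + 8
	 * DW_CFA_offset(r14, 2)   -> r14 was saved at CFA + 2 * (-4) = CFA - 8
	 */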
554
555/**
556 * dwarf_free_frame - free the memory allocated for @frame
557 * @frame: the frame to free
558 */
559void dwarf_free_frame(struct dwarf_frame *frame)
560{
561 dwarf_frame_free_regs(frame);
562 mempool_free(frame, dwarf_frame_pool);
563}
564
565extern void ret_from_irq(void);
566
567/**
568 * dwarf_unwind_stack - unwind the stack
569 *
570 * @pc: address of the function to unwind
571 * @prev: struct dwarf_frame of the previous stackframe on the callstack
572 *
573 * Return a struct dwarf_frame representing the most recent frame
574 * on the callstack. Each of the lower (older) stack frames are
575 * linked via the "prev" member.
576 */
577struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
578 struct dwarf_frame *prev)
579{
580 struct dwarf_frame *frame;
581 struct dwarf_cie *cie;
582 struct dwarf_fde *fde;
583 struct dwarf_reg *reg;
584 unsigned long addr;
585
586 /*
 587 * If we've been called into before initialization has
588 * completed, bail out immediately.
589 */
590 if (!dwarf_unwinder_ready)
591 return NULL;
592
593 /*
 594 * If we're starting at the top of the stack we need to get the
595 * contents of a physical register to get the CFA in order to
596 * begin the virtual unwinding of the stack.
597 *
598 * NOTE: the return address is guaranteed to be setup by the
599 * time this function makes its first function call.
600 */
601 if (!pc || !prev)
602 pc = (unsigned long)current_text_addr();
603
604#ifdef CONFIG_FUNCTION_GRAPH_TRACER
605 /*
606 * If our stack has been patched by the function graph tracer
607 * then we might see the address of return_to_handler() where we
608 * expected to find the real return address.
609 */
610 if (pc == (unsigned long)&return_to_handler) {
611 int index = current->curr_ret_stack;
612
613 /*
614 * We currently have no way of tracking how many
615 * return_to_handler()'s we've seen. If there is more
616 * than one patched return address on our stack,
617 * complain loudly.
618 */
619 WARN_ON(index > 0);
620
621 pc = current->ret_stack[index].ret;
622 }
623#endif
624
625 frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
626 if (!frame) {
627 printk(KERN_ERR "Unable to allocate a dwarf frame\n");
628 UNWINDER_BUG();
629 }
630
631 INIT_LIST_HEAD(&frame->reg_list);
632 frame->flags = 0;
633 frame->prev = prev;
634 frame->return_addr = 0;
635
636 fde = dwarf_lookup_fde(pc);
637 if (!fde) {
638 /*
639 * This is our normal exit path. There are two reasons
640 * why we might exit here,
641 *
 642 * a) pc has no associated DWARF frame info and so
643 * we don't know how to unwind this frame. This is
644 * usually the case when we're trying to unwind a
645 * frame that was called from some assembly code
646 * that has no DWARF info, e.g. syscalls.
647 *
648 * b) the DEBUG info for pc is bogus. There's
649 * really no way to distinguish this case from the
650 * case above, which sucks because we could print a
651 * warning here.
652 */
653 goto bail;
654 }
655
656 cie = dwarf_lookup_cie(fde->cie_pointer);
657
658 frame->pc = fde->initial_location;
659
660 /* CIE initial instructions */
661 dwarf_cfa_execute_insns(cie->initial_instructions,
662 cie->instructions_end, cie, fde,
663 frame, pc);
664
665 /* FDE instructions */
666 dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
667 fde, frame, pc);
668
669 /* Calculate the CFA */
670 switch (frame->flags) {
671 case DWARF_FRAME_CFA_REG_OFFSET:
672 if (prev) {
673 reg = dwarf_frame_reg(prev, frame->cfa_register);
674 UNWINDER_BUG_ON(!reg);
675 UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
676
677 addr = prev->cfa + reg->addr;
678 frame->cfa = __raw_readl(addr);
679
680 } else {
681 /*
682 * Again, we're starting from the top of the
683 * stack. We need to physically read
684 * the contents of a register in order to get
685 * the Canonical Frame Address for this
686 * function.
687 */
688 frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
689 }
690
691 frame->cfa += frame->cfa_offset;
692 break;
693 default:
694 UNWINDER_BUG();
695 }
696
697 reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
698
699 /*
700 * If we haven't seen the return address register or the return
701 * address column is undefined then we must assume that this is
702 * the end of the callstack.
703 */
704 if (!reg || reg->flags == DWARF_UNDEFINED)
705 goto bail;
706
707 UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
708
709 addr = frame->cfa + reg->addr;
710 frame->return_addr = __raw_readl(addr);
711
712 /*
713 * Ah, the joys of unwinding through interrupts.
714 *
715 * Interrupts are tricky - the DWARF info needs to be _really_
716 * accurate and unfortunately I'm seeing a lot of bogus DWARF
717 * info. For example, I've seen interrupts occur in epilogues
718 * just after the frame pointer (r14) had been restored. The
719 * problem was that the DWARF info claimed that the CFA could be
720 * reached by using the value of the frame pointer before it was
721 * restored.
722 *
723 * So until the compiler can be trusted to produce reliable
724 * DWARF info when it really matters, let's stop unwinding once
725 * we've calculated the function that was interrupted.
726 */
727 if (prev && prev->pc == (unsigned long)ret_from_irq)
728 frame->return_addr = 0;
729
730 return frame;
731
732bail:
733 dwarf_free_frame(frame);
734 return NULL;
735}
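dwarf_unwind_stack() produces one frame per call; a consumer walks the whole stack by feeding each frame's return_addr and the frame itself back in until NULL or a zero return address comes back. A rough sketch of that loop (simplified, error handling omitted):

	struct dwarf_frame *frame, *prev = NULL;
	unsigned long pc = 0;	/* 0 + NULL prev: start from the current pc */

	while (1) {
		frame = dwarf_unwind_stack(pc, prev);
		if (prev)
			dwarf_free_frame(prev);
		if (!frame || !frame->return_addr)
			break;

		printk("  [<%08lx>]\n", frame->return_addr);

		pc = frame->return_addr;
		prev = frame;
	}
	if (frame)
		dwarf_free_frame(frame);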
736
737static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
738 unsigned char *end, struct module *mod)
739{
740 struct rb_node **rb_node = &cie_root.rb_node;
741 struct rb_node *parent = *rb_node;
742 struct dwarf_cie *cie;
743 unsigned long flags;
744 int count;
745
746 cie = kzalloc(sizeof(*cie), GFP_KERNEL);
747 if (!cie)
748 return -ENOMEM;
749
750 cie->length = len;
751
752 /*
753 * Record the offset into the .eh_frame section
754 * for this CIE. It allows this CIE to be
755 * quickly and easily looked up from the
756 * corresponding FDE.
757 */
758 cie->cie_pointer = (unsigned long)entry;
759
760 cie->version = *(char *)p++;
761 UNWINDER_BUG_ON(cie->version != 1);
762
763 cie->augmentation = p;
764 p += strlen(cie->augmentation) + 1;
765
766 count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
767 p += count;
768
769 count = dwarf_read_leb128(p, &cie->data_alignment_factor);
770 p += count;
771
772 /*
773 * Which column in the rule table contains the
774 * return address?
775 */
776 if (cie->version == 1) {
777 cie->return_address_reg = __raw_readb(p);
778 p++;
779 } else {
780 count = dwarf_read_uleb128(p, &cie->return_address_reg);
781 p += count;
782 }
783
784 if (cie->augmentation[0] == 'z') {
785 unsigned int length, count;
786 cie->flags |= DWARF_CIE_Z_AUGMENTATION;
787
788 count = dwarf_read_uleb128(p, &length);
789 p += count;
790
791 UNWINDER_BUG_ON((unsigned char *)p > end);
792
793 cie->initial_instructions = p + length;
794 cie->augmentation++;
795 }
796
797 while (*cie->augmentation) {
798 /*
799 * "L" indicates a byte showing how the
800 * LSDA pointer is encoded. Skip it.
801 */
802 if (*cie->augmentation == 'L') {
803 p++;
804 cie->augmentation++;
805 } else if (*cie->augmentation == 'R') {
806 /*
807 * "R" indicates a byte showing
808 * how FDE addresses are
809 * encoded.
810 */
811 cie->encoding = *(char *)p++;
812 cie->augmentation++;
813 } else if (*cie->augmentation == 'P') {
814 /*
815 * "R" indicates a personality
816 * routine in the CIE
817 * augmentation.
818 */
819 UNWINDER_BUG();
820 } else if (*cie->augmentation == 'S') {
821 UNWINDER_BUG();
822 } else {
823 /*
824 * Unknown augmentation. Assume
825 * 'z' augmentation.
826 */
827 p = cie->initial_instructions;
828 UNWINDER_BUG_ON(!p);
829 break;
830 }
831 }
832
833 cie->initial_instructions = p;
834 cie->instructions_end = end;
835
836 /* Add to list */
837 spin_lock_irqsave(&dwarf_cie_lock, flags);
838
839 while (*rb_node) {
840 struct dwarf_cie *cie_tmp;
841
842 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
843
844 parent = *rb_node;
845
846 if (cie->cie_pointer < cie_tmp->cie_pointer)
847 rb_node = &parent->rb_left;
848 else if (cie->cie_pointer >= cie_tmp->cie_pointer)
849 rb_node = &parent->rb_right;
850 else
851 WARN_ON(1);
852 }
853
854 rb_link_node(&cie->node, parent, rb_node);
855 rb_insert_color(&cie->node, &cie_root);
856
857#ifdef CONFIG_MODULES
858 if (mod != NULL)
859 list_add_tail(&cie->link, &mod->arch.cie_list);
860#endif
861
862 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
863
864 return 0;
865}
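
dwarf_read_uleb128() and dwarf_read_leb128(), used throughout the CIE parser above, are defined earlier in dwarf.c and are not shown in this hunk. As a rough sketch of the encoding they decode (LEB128: seven payload bits per byte, the top bit set while more bytes follow, signed values sign-extended from the last payload bit; the return value is the number of bytes consumed, which is how 'count' is used above):

/* Illustrative LEB128 decoders; the in-tree helpers differ in detail. */
static int sketch_read_uleb128(const unsigned char *p, unsigned int *val)
{
	unsigned int result = 0, shift = 0;
	int count = 0;
	unsigned char byte;

	do {
		byte = p[count++];
		result |= (unsigned int)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*val = result;
	return count;		/* bytes consumed */
}

static int sketch_read_leb128(const unsigned char *p, int *val)
{
	long result = 0;
	unsigned int shift = 0;
	int count = 0;
	unsigned char byte;

	do {
		byte = p[count++];
		result |= (long)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	/* sign-extend when the final payload bit is set */
	if (shift < 8 * sizeof(result) && (byte & 0x40))
		result |= -(1L << shift);

	*val = result;
	return count;
}
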
866
867static int dwarf_parse_fde(void *entry, u32 entry_type,
868 void *start, unsigned long len,
869 unsigned char *end, struct module *mod)
870{
871 struct rb_node **rb_node = &fde_root.rb_node;
872 struct rb_node *parent = *rb_node;
873 struct dwarf_fde *fde;
874 struct dwarf_cie *cie;
875 unsigned long flags;
876 int count;
877 void *p = start;
878
879 fde = kzalloc(sizeof(*fde), GFP_KERNEL);
880 if (!fde)
881 return -ENOMEM;
882
883 fde->length = len;
884
885 /*
886 * In a .eh_frame section the CIE pointer is the delta
887 * between the CIE pointer field within the FDE and the CIE itself.
888 */
889 fde->cie_pointer = (unsigned long)(p - entry_type - 4);
890
891 cie = dwarf_lookup_cie(fde->cie_pointer);
892 fde->cie = cie;
893
894 if (cie->encoding)
895 count = dwarf_read_encoded_value(p, &fde->initial_location,
896 cie->encoding);
897 else
898 count = dwarf_read_addr(p, &fde->initial_location);
899
900 p += count;
901
902 if (cie->encoding)
903 count = dwarf_read_encoded_value(p, &fde->address_range,
904 cie->encoding & 0x0f);
905 else
906 count = dwarf_read_addr(p, &fde->address_range);
907
908 p += count;
909
910 if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
911 unsigned int length;
912 count = dwarf_read_uleb128(p, &length);
913 p += count + length;
914 }
915
916 /* Call frame instructions. */
917 fde->instructions = p;
918 fde->end = end;
919
920 /* Add to list. */
921 spin_lock_irqsave(&dwarf_fde_lock, flags);
922
923 while (*rb_node) {
924 struct dwarf_fde *fde_tmp;
925 unsigned long tmp_start, tmp_end;
926 unsigned long start, end;
927
928 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
929
930 start = fde->initial_location;
931 end = fde->initial_location + fde->address_range;
932
933 tmp_start = fde_tmp->initial_location;
934 tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
935
936 parent = *rb_node;
937
938 if (start < tmp_start)
939 rb_node = &parent->rb_left;
940 else if (start >= tmp_end)
941 rb_node = &parent->rb_right;
942 else
943 WARN_ON(1);
944 }
945
946 rb_link_node(&fde->node, parent, rb_node);
947 rb_insert_color(&fde->node, &fde_root);
948
949#ifdef CONFIG_MODULES
950 if (mod != NULL)
951 list_add_tail(&fde->link, &mod->arch.fde_list);
952#endif
953
954 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
955
956 return 0;
957}
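
The red-black tree ordering established above (a new node goes left when its range starts below an existing entry, right when it starts at or beyond that entry's end) is what makes the PC lookup used by dwarf_unwind_stack() cheap. dwarf_lookup_fde() itself is outside this hunk; a lookup over that tree would go roughly like this (sketch only, locking omitted):

static struct dwarf_fde *sketch_lookup_fde(unsigned long pc)
{
	struct rb_node *n = fde_root.rb_node;

	while (n) {
		struct dwarf_fde *fde = rb_entry(n, struct dwarf_fde, node);
		unsigned long start = fde->initial_location;
		unsigned long end = start + fde->address_range;

		if (pc < start)
			n = n->rb_left;
		else if (pc >= end)
			n = n->rb_right;
		else
			return fde;	/* start <= pc < end */
	}

	return NULL;
}
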
958
959static void dwarf_unwinder_dump(struct task_struct *task,
960 struct pt_regs *regs,
961 unsigned long *sp,
962 const struct stacktrace_ops *ops,
963 void *data)
964{
965 struct dwarf_frame *frame, *_frame;
966 unsigned long return_addr;
967
968 _frame = NULL;
969 return_addr = 0;
970
971 while (1) {
972 frame = dwarf_unwind_stack(return_addr, _frame);
973
974 if (_frame)
975 dwarf_free_frame(_frame);
976
977 _frame = frame;
978
979 if (!frame || !frame->return_addr)
980 break;
981
982 return_addr = frame->return_addr;
983 ops->address(data, return_addr, 1);
984 }
985
986 if (frame)
987 dwarf_free_frame(frame);
988}
989
990static struct unwinder dwarf_unwinder = {
991 .name = "dwarf-unwinder",
992 .dump = dwarf_unwinder_dump,
993 .rating = 150,
994};
995
996static void dwarf_unwinder_cleanup(void)
997{
998 struct rb_node **fde_rb_node = &fde_root.rb_node;
999 struct rb_node **cie_rb_node = &cie_root.rb_node;
1000
1001 /*
1002 * Deallocate all the memory allocated for the DWARF unwinder.
1003 * Traverse all the FDE/CIE lists and remove and free all the
1004 * memory associated with those data structures.
1005 */
1006 while (*fde_rb_node) {
1007 struct dwarf_fde *fde;
1008
1009 fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
1010 rb_erase(*fde_rb_node, &fde_root);
1011 kfree(fde);
1012 }
1013
1014 while (*cie_rb_node) {
1015 struct dwarf_cie *cie;
1016
1017 cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
1018 rb_erase(*cie_rb_node, &cie_root);
1019 kfree(cie);
1020 }
1021
1022 kmem_cache_destroy(dwarf_reg_cachep);
1023 kmem_cache_destroy(dwarf_frame_cachep);
1024}
1025
1026/**
1027 * dwarf_parse_section - parse DWARF section
1028 * @eh_frame_start: start address of the .eh_frame section
1029 * @eh_frame_end: end address of the .eh_frame section
1030 * @mod: the kernel module containing the .eh_frame section, or NULL for the kernel itself
1031 *
1032 * Parse the information in a .eh_frame section.
1033 */
1034static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
1035 struct module *mod)
1036{
1037 u32 entry_type;
1038 void *p, *entry;
1039 int count, err = 0;
1040 unsigned long len = 0;
1041 unsigned int c_entries, f_entries;
1042 unsigned char *end;
1043
1044 c_entries = 0;
1045 f_entries = 0;
1046 entry = eh_frame_start;
1047
1048 while ((char *)entry < eh_frame_end) {
1049 p = entry;
1050
1051 count = dwarf_entry_len(p, &len);
1052 if (count == 0) {
1053 /*
1054 * We read a bogus length field value. There is
1055 * nothing we can do here apart from disabling
1056 * the DWARF unwinder. We can't even skip this
1057 * entry and move to the next one because 'len'
1058 * tells us where our next entry is.
1059 */
1060 err = -EINVAL;
1061 goto out;
1062 } else
1063 p += count;
1064
1065 /* initial length does not include itself */
1066 end = p + len;
1067
1068 entry_type = get_unaligned((u32 *)p);
1069 p += 4;
1070
1071 if (entry_type == DW_EH_FRAME_CIE) {
1072 err = dwarf_parse_cie(entry, p, len, end, mod);
1073 if (err < 0)
1074 goto out;
1075 else
1076 c_entries++;
1077 } else {
1078 err = dwarf_parse_fde(entry, entry_type, p, len,
1079 end, mod);
1080 if (err < 0)
1081 goto out;
1082 else
1083 f_entries++;
1084 }
1085
1086 entry = (char *)entry + len + 4;
1087 }
1088
1089 printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
1090 c_entries, f_entries);
1091
1092 return 0;
1093
1094out:
1095 return err;
1096}
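
dwarf_entry_len(), which the parse loop above depends on, is defined earlier in dwarf.c. The .eh_frame "initial length" convention it deals with: a 32-bit value below 0xfffffff0 is the entry length itself (4 bytes consumed); 0xffffffff announces 64-bit DWARF, with the real length in the following 8 bytes (12 bytes consumed); anything else is reserved and maps onto the count == 0 error path above. A hedged sketch:

/* Illustrative only; the in-tree helper is stricter about extensions. */
static int sketch_entry_len(char *p, unsigned long *len)
{
	u32 initial = get_unaligned((u32 *)p);

	if (initial < 0xfffffff0) {
		*len = initial;
		return 4;
	}

	if (initial == 0xffffffff) {
		/* 64-bit DWARF: the real length follows the escape value */
		*len = get_unaligned((u64 *)(p + 4));
		return 12;
	}

	return 0;	/* reserved/bogus initial length */
}
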
1097
1098#ifdef CONFIG_MODULES
1099int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
1100 struct module *me)
1101{
1102 unsigned int i, err;
1103 unsigned long start, end;
1104 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1105
1106 start = end = 0;
1107
1108 for (i = 1; i < hdr->e_shnum; i++) {
1109 /* Alloc bit cleared means "ignore it." */
1110 if ((sechdrs[i].sh_flags & SHF_ALLOC)
1111 && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
1112 start = sechdrs[i].sh_addr;
1113 end = start + sechdrs[i].sh_size;
1114 break;
1115 }
1116 }
1117
1118 /* Did we find the .eh_frame section? */
1119 if (i != hdr->e_shnum) {
1120 INIT_LIST_HEAD(&me->arch.cie_list);
1121 INIT_LIST_HEAD(&me->arch.fde_list);
1122 err = dwarf_parse_section((char *)start, (char *)end, me);
1123 if (err) {
1124 printk(KERN_WARNING "%s: failed to parse DWARF info\n",
1125 me->name);
1126 return err;
1127 }
1128 }
1129
1130 return 0;
1131}
1132
1133/**
1134 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
1135 * @mod: the module that is being unloaded
1136 *
1137 * Remove any FDEs and CIEs from the global lists that came from
1138 * @mod's .eh_frame section because @mod is being unloaded.
1139 */
1140void module_dwarf_cleanup(struct module *mod)
1141{
1142 struct dwarf_fde *fde, *ftmp;
1143 struct dwarf_cie *cie, *ctmp;
1144 unsigned long flags;
1145
1146 spin_lock_irqsave(&dwarf_cie_lock, flags);
1147
1148 list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
1149 list_del(&cie->link);
1150 rb_erase(&cie->node, &cie_root);
1151 kfree(cie);
1152 }
1153
1154 spin_unlock_irqrestore(&dwarf_cie_lock, flags);
1155
1156 spin_lock_irqsave(&dwarf_fde_lock, flags);
1157
1158 list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
1159 list_del(&fde->link);
1160 rb_erase(&fde->node, &fde_root);
1161 kfree(fde);
1162 }
1163
1164 spin_unlock_irqrestore(&dwarf_fde_lock, flags);
1165}
1166#endif /* CONFIG_MODULES */
1167
1168/**
1169 * dwarf_unwinder_init - initialise the dwarf unwinder
1170 *
1171 * Build the data structures describing the .eh_frame section to
1172 * make it easier to look up CIE and FDE entries. Because the
1173 * .eh_frame section is packed as tightly as possible, it is not
1174 * easy to look up the FDE for a given PC, so we build a list of FDE
1175 * and CIE entries that make it easier.
1176 */
1177static int __init dwarf_unwinder_init(void)
1178{
1179 int err = -ENOMEM;
1180
1181 dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
1182 sizeof(struct dwarf_frame), 0,
1183 SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1184
1185 dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
1186 sizeof(struct dwarf_reg), 0,
1187 SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1188
1189 dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
1190 mempool_alloc_slab,
1191 mempool_free_slab,
1192 dwarf_frame_cachep);
1193 if (!dwarf_frame_pool)
1194 goto out;
1195
1196 dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
1197 mempool_alloc_slab,
1198 mempool_free_slab,
1199 dwarf_reg_cachep);
1200 if (!dwarf_reg_pool)
1201 goto out;
1202
1203 err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
1204 if (err)
1205 goto out;
1206
1207 err = unwinder_register(&dwarf_unwinder);
1208 if (err)
1209 goto out;
1210
1211 dwarf_unwinder_ready = 1;
1212
1213 return 0;
1214
1215out:
1216 printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
1217 dwarf_unwinder_cleanup();
1218 return err;
1219}
1220early_initcall(dwarf_unwinder_init);
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
deleted file mode 100644
index a952dcf9999d..000000000000
--- a/arch/sh/kernel/early_printk.c
+++ /dev/null
@@ -1,241 +0,0 @@
1/*
2 * arch/sh/kernel/early_printk.c
3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2002 M. R. Brown
6 * Copyright (C) 2004 - 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/console.h>
13#include <linux/tty.h>
14#include <linux/init.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17
18#ifdef CONFIG_SH_STANDARD_BIOS
19#include <asm/sh_bios.h>
20
21/*
22 * Print a string through the BIOS
23 */
24static void sh_console_write(struct console *co, const char *s,
25 unsigned count)
26{
27 sh_bios_console_write(s, count);
28}
29
30/*
31 * Setup initial baud/bits/parity. We do two things here:
32 * - construct a cflag setting for the first rs_open()
33 * - initialize the serial port
34 * Return non-zero if we didn't find a serial port.
35 */
36static int __init sh_console_setup(struct console *co, char *options)
37{
38 int cflag = CREAD | HUPCL | CLOCAL;
39
40 /*
41 * Now construct a cflag setting.
42 * TODO: this is a totally bogus cflag, as we have
43 * no idea what serial settings the BIOS is using, or
44 * even if it's using the serial port at all.
45 */
46 cflag |= B115200 | CS8 | /*no parity*/0;
47
48 co->cflag = cflag;
49
50 return 0;
51}
52
53static struct console bios_console = {
54 .name = "bios",
55 .write = sh_console_write,
56 .setup = sh_console_setup,
57 .flags = CON_PRINTBUFFER,
58 .index = -1,
59};
60#endif
61
62#ifdef CONFIG_EARLY_SCIF_CONSOLE
63#include <linux/serial_core.h>
64#include "../../../drivers/serial/sh-sci.h"
65
66#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
67 defined(CONFIG_CPU_SUBTYPE_SH7721)
68#define EPK_SCSMR_VALUE 0x000
69#define EPK_SCBRR_VALUE 0x00C
70#define EPK_FIFO_SIZE 64
71#define EPK_FIFO_BITS (0x7f00 >> 8)
72#else
73#define EPK_FIFO_SIZE 16
74#define EPK_FIFO_BITS (0x1f00 >> 8)
75#endif
76
77static struct uart_port scif_port = {
78 .type = PORT_SCIF,
79 .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
80 .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
81};
82
83static void scif_sercon_putc(int c)
84{
85 while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE))
86 ;
87
88 sci_in(&scif_port, SCxSR);
89 sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
90 sci_out(&scif_port, SCxTDR, c);
91
92 while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
93 ;
94
95 if (c == '\n')
96 scif_sercon_putc('\r');
97}
98
99static void scif_sercon_write(struct console *con, const char *s,
100 unsigned count)
101{
102 while (count-- > 0)
103 scif_sercon_putc(*s++);
104}
105
106static int __init scif_sercon_setup(struct console *con, char *options)
107{
108 con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
109
110 return 0;
111}
112
113static struct console scif_console = {
114 .name = "sercon",
115 .write = scif_sercon_write,
116 .setup = scif_sercon_setup,
117 .flags = CON_PRINTBUFFER,
118 .index = -1,
119};
120
121#if !defined(CONFIG_SH_STANDARD_BIOS)
122#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
123 defined(CONFIG_CPU_SUBTYPE_SH7721)
124static void scif_sercon_init(char *s)
125{
126 sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
127 sci_out(&scif_port, SCFCR, 0x4006); /* reset */
128 sci_out(&scif_port, SCSCR, 0x0000); /* select internal clock */
129 sci_out(&scif_port, SCSMR, EPK_SCSMR_VALUE);
130 sci_out(&scif_port, SCBRR, EPK_SCBRR_VALUE);
131
132 mdelay(1); /* wait 1-bit time */
133
134 sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
135 sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
136}
137#elif defined(CONFIG_CPU_SH4)
138#define DEFAULT_BAUD 115200
139/*
140 * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
141 * devices that aren't using sh-ipl+g.
142 */
143static void scif_sercon_init(char *s)
144{
145 struct uart_port *port = &scif_port;
146 unsigned baud = DEFAULT_BAUD;
147 unsigned int status;
148 char *e;
149
150 if (*s == ',')
151 ++s;
152
153 if (*s) {
154 /* ignore ioport/device name */
155 s += strcspn(s, ",");
156 if (*s == ',')
157 s++;
158 }
159
160 if (*s) {
161 baud = simple_strtoul(s, &e, 0);
162 if (baud == 0 || s == e)
163 baud = DEFAULT_BAUD;
164 }
165
166 do {
167 status = sci_in(port, SCxSR);
168 } while (!(status & SCxSR_TEND(port)));
169
170 sci_out(port, SCSCR, 0); /* TE=0, RE=0 */
171 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
172 sci_out(port, SCSMR, 0);
173
174 /* Set baud rate */
175 sci_out(port, SCBRR, (CONFIG_SH_PCLK_FREQ + 16 * baud) /
176 (32 * baud) - 1);
177 udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
178
179 sci_out(port, SCSPTR, 0);
180 sci_out(port, SCxSR, 0x60);
181 sci_out(port, SCLSR, 0);
182
183 sci_out(port, SCFCR, 0);
184 sci_out(port, SCSCR, 0x30); /* TE=1, RE=1 */
185}
186#endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */
187#endif /* !defined(CONFIG_SH_STANDARD_BIOS) */
188#endif /* CONFIG_EARLY_SCIF_CONSOLE */
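
The bit-rate register write in scif_sercon_init() above follows the usual SCIF formula, SCBRR = PCLK / (32 * baud) - 1, with the extra 16 * baud term giving round-to-nearest. A quick worked example with a hypothetical 33.33 MHz peripheral clock:

/* Worked example of the SCBRR calculation (the clock value is made up). */
unsigned int pclk = 33333333;	/* stand-in for CONFIG_SH_PCLK_FREQ */
unsigned int baud = 115200;
unsigned int scbrr = (pclk + 16 * baud) / (32 * baud) - 1;	/* 35176533 / 3686400 = 9, minus 1 = 8 */
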
189
190/*
191 * Set up a default console. If more than one is compiled in, rely on the
192 * earlyprintk= parsing to give priority.
193 */
194static struct console *early_console =
195#ifdef CONFIG_SH_STANDARD_BIOS
196 &bios_console
197#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
198 &scif_console
199#else
200 NULL
201#endif
202 ;
203
204static int __init setup_early_printk(char *buf)
205{
206 int keep_early = 0;
207
208 if (!buf)
209 return 0;
210
211 if (strstr(buf, "keep"))
212 keep_early = 1;
213
214#ifdef CONFIG_SH_STANDARD_BIOS
215 if (!strncmp(buf, "bios", 4))
216 early_console = &bios_console;
217#endif
218#if defined(CONFIG_EARLY_SCIF_CONSOLE)
219 if (!strncmp(buf, "serial", 6)) {
220 early_console = &scif_console;
221
222#if !defined(CONFIG_SH_STANDARD_BIOS)
223#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
224 defined(CONFIG_CPU_SUBTYPE_SH7721)
225 scif_sercon_init(buf + 6);
226#endif
227#endif
228 }
229#endif
230
231 if (likely(early_console)) {
232 if (keep_early)
233 early_console->flags &= ~CON_BOOT;
234 else
235 early_console->flags |= CON_BOOT;
236 register_console(early_console);
237 }
238
239 return 0;
240}
241early_param("earlyprintk", setup_early_printk);
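
For reference, the option string this (since removed) parser expected would have looked roughly like the following, where the device field is skipped over (ttySC1 here is only an example) and "keep" may appear anywhere in the string:

	earlyprintk=serial,ttySC1,115200,keep
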
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index d62359cfbbe2..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -43,9 +43,10 @@
43 * syscall # 43 * syscall #
44 * 44 *
45 */ 45 */
46#include <asm/dwarf.h>
46 47
47#if defined(CONFIG_PREEMPT) 48#if defined(CONFIG_PREEMPT)
48# define preempt_stop() cli 49# define preempt_stop() cli ; TRACE_IRQS_OFF
49#else 50#else
50# define preempt_stop() 51# define preempt_stop()
51# define resume_kernel __restore_all 52# define resume_kernel __restore_all
@@ -55,11 +56,7 @@
55 .align 2 56 .align 2
56ENTRY(exception_error) 57ENTRY(exception_error)
57 ! 58 !
58#ifdef CONFIG_TRACE_IRQFLAGS 59 TRACE_IRQS_ON
59 mov.l 2f, r0
60 jsr @r0
61 nop
62#endif
63 sti 60 sti
64 mov.l 1f, r0 61 mov.l 1f, r0
65 jmp @r0 62 jmp @r0
@@ -67,18 +64,21 @@ ENTRY(exception_error)
67 64
68 .align 2 65 .align 2
691: .long do_exception_error 661: .long do_exception_error
70#ifdef CONFIG_TRACE_IRQFLAGS
712: .long trace_hardirqs_on
72#endif
73 67
74 .align 2 68 .align 2
75ret_from_exception: 69ret_from_exception:
70 CFI_STARTPROC simple
71 CFI_DEF_CFA r14, 0
72 CFI_REL_OFFSET 17, 64
73 CFI_REL_OFFSET 15, 60
74 CFI_REL_OFFSET 14, 56
75 CFI_REL_OFFSET 13, 52
76 CFI_REL_OFFSET 12, 48
77 CFI_REL_OFFSET 11, 44
78 CFI_REL_OFFSET 10, 40
79 CFI_REL_OFFSET 9, 36
80 CFI_REL_OFFSET 8, 32
76 preempt_stop() 81 preempt_stop()
77#ifdef CONFIG_TRACE_IRQFLAGS
78 mov.l 4f, r0
79 jsr @r0
80 nop
81#endif
82ENTRY(ret_from_irq) 82ENTRY(ret_from_irq)
83 ! 83 !
84 mov #OFF_SR, r0 84 mov #OFF_SR, r0
@@ -93,6 +93,7 @@ ENTRY(ret_from_irq)
93 nop 93 nop
94ENTRY(resume_kernel) 94ENTRY(resume_kernel)
95 cli 95 cli
96 TRACE_IRQS_OFF
96 mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count 97 mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
97 tst r0, r0 98 tst r0, r0
98 bf noresched 99 bf noresched
@@ -103,8 +104,9 @@ need_resched:
103 104
104 mov #OFF_SR, r0 105 mov #OFF_SR, r0
105 mov.l @(r0,r15), r0 ! get status register 106 mov.l @(r0,r15), r0 ! get status register
106 and #0xf0, r0 ! interrupts off (exception path)? 107 shlr r0
107 cmp/eq #0xf0, r0 108 and #(0xf0>>1), r0 ! interrupts off (exception path)?
109 cmp/eq #(0xf0>>1), r0
108 bt noresched 110 bt noresched
109 mov.l 3f, r0 111 mov.l 3f, r0
110 jsr @r0 ! call preempt_schedule_irq 112 jsr @r0 ! call preempt_schedule_irq
@@ -125,13 +127,9 @@ noresched:
125ENTRY(resume_userspace) 127ENTRY(resume_userspace)
126 ! r8: current_thread_info 128 ! r8: current_thread_info
127 cli 129 cli
128#ifdef CONFIG_TRACE_IRQFLAGS 130 TRACE_IRQS_OFF
129 mov.l 5f, r0
130 jsr @r0
131 nop
132#endif
133 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags 131 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
134 tst #_TIF_WORK_MASK, r0 132 tst #(_TIF_WORK_MASK & 0xff), r0
135 bt/s __restore_all 133 bt/s __restore_all
136 tst #_TIF_NEED_RESCHED, r0 134 tst #_TIF_NEED_RESCHED, r0
137 135
@@ -141,7 +139,7 @@ work_pending:
141 ! r8: current_thread_info 139 ! r8: current_thread_info
142 ! t: result of "tst #_TIF_NEED_RESCHED, r0" 140 ! t: result of "tst #_TIF_NEED_RESCHED, r0"
143 bf/s work_resched 141 bf/s work_resched
144 tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 142 tst #_TIF_SIGPENDING, r0
145work_notifysig: 143work_notifysig:
146 bt/s __restore_all 144 bt/s __restore_all
147 mov r15, r4 145 mov r15, r4
@@ -156,14 +154,10 @@ work_resched:
156 jsr @r1 ! schedule 154 jsr @r1 ! schedule
157 nop 155 nop
158 cli 156 cli
159#ifdef CONFIG_TRACE_IRQFLAGS 157 TRACE_IRQS_OFF
160 mov.l 5f, r0
161 jsr @r0
162 nop
163#endif
164 ! 158 !
165 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags 159 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
166 tst #_TIF_WORK_MASK, r0 160 tst #(_TIF_WORK_MASK & 0xff), r0
167 bt __restore_all 161 bt __restore_all
168 bra work_pending 162 bra work_pending
169 tst #_TIF_NEED_RESCHED, r0 163 tst #_TIF_NEED_RESCHED, r0
@@ -172,23 +166,15 @@ work_resched:
1721: .long schedule 1661: .long schedule
1732: .long do_notify_resume 1672: .long do_notify_resume
1743: .long resume_userspace 1683: .long resume_userspace
175#ifdef CONFIG_TRACE_IRQFLAGS
1764: .long trace_hardirqs_on
1775: .long trace_hardirqs_off
178#endif
179 169
180 .align 2 170 .align 2
181syscall_exit_work: 171syscall_exit_work:
182 ! r0: current_thread_info->flags 172 ! r0: current_thread_info->flags
183 ! r8: current_thread_info 173 ! r8: current_thread_info
184 tst #_TIF_WORK_SYSCALL_MASK, r0 174 tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
185 bt/s work_pending 175 bt/s work_pending
186 tst #_TIF_NEED_RESCHED, r0 176 tst #_TIF_NEED_RESCHED, r0
187#ifdef CONFIG_TRACE_IRQFLAGS 177 TRACE_IRQS_ON
188 mov.l 5f, r0
189 jsr @r0
190 nop
191#endif
192 sti 178 sti
193 mov r15, r4 179 mov r15, r4
194 mov.l 8f, r0 ! do_syscall_trace_leave 180 mov.l 8f, r0 ! do_syscall_trace_leave
@@ -226,12 +212,25 @@ syscall_trace_entry:
226 mov.l r0, @(OFF_R0,r15) ! Return value 212 mov.l r0, @(OFF_R0,r15) ! Return value
227 213
228__restore_all: 214__restore_all:
229 mov.l 1f, r0 215 mov #OFF_SR, r0
216 mov.l @(r0,r15), r0 ! get status register
217
218 shlr2 r0
219 and #0x3c, r0
220 cmp/eq #0x3c, r0
221 bt 1f
222 TRACE_IRQS_ON
223 bra 2f
224 nop
2251:
226 TRACE_IRQS_OFF
2272:
228 mov.l 3f, r0
230 jmp @r0 229 jmp @r0
231 nop 230 nop
232 231
233 .align 2 232 .align 2
2341: .long restore_all 2333: .long restore_all
235 234
236 .align 2 235 .align 2
237syscall_badsys: ! Bad syscall number 236syscall_badsys: ! Bad syscall number
@@ -259,6 +258,7 @@ debug_trap:
259 nop 258 nop
260 bra __restore_all 259 bra __restore_all
261 nop 260 nop
261 CFI_ENDPROC
262 262
263 .align 2 263 .align 2
2641: .long debug_trap_table 2641: .long debug_trap_table
@@ -304,6 +304,7 @@ ret_from_fork:
304 * system calls and debug traps through their respective jump tables. 304 * system calls and debug traps through their respective jump tables.
305 */ 305 */
306ENTRY(system_call) 306ENTRY(system_call)
307 setup_frame_reg
307#if !defined(CONFIG_CPU_SH2) 308#if !defined(CONFIG_CPU_SH2)
308 mov.l 1f, r9 309 mov.l 1f, r9
309 mov.l @r9, r8 ! Read from TRA (Trap Address) Register 310 mov.l @r9, r8 ! Read from TRA (Trap Address) Register
@@ -321,18 +322,18 @@ ENTRY(system_call)
321 bt/s debug_trap ! it's a debug trap.. 322 bt/s debug_trap ! it's a debug trap..
322 nop 323 nop
323 324
324#ifdef CONFIG_TRACE_IRQFLAGS 325 TRACE_IRQS_ON
325 mov.l 5f, r10
326 jsr @r10
327 nop
328#endif
329 sti 326 sti
330 327
331 ! 328 !
332 get_current_thread_info r8, r10 329 get_current_thread_info r8, r10
333 mov.l @(TI_FLAGS,r8), r8 330 mov.l @(TI_FLAGS,r8), r8
334 mov #_TIF_WORK_SYSCALL_MASK, r10 331 mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
332 mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
335 tst r10, r8 333 tst r10, r8
334 shll8 r9
335 bf syscall_trace_entry
336 tst r9, r8
336 bf syscall_trace_entry 337 bf syscall_trace_entry
337 ! 338 !
338 mov.l 2f, r8 ! Number of syscalls 339 mov.l 2f, r8 ! Number of syscalls
@@ -351,15 +352,15 @@ syscall_call:
351 ! 352 !
352syscall_exit: 353syscall_exit:
353 cli 354 cli
354#ifdef CONFIG_TRACE_IRQFLAGS 355 TRACE_IRQS_OFF
355 mov.l 6f, r0
356 jsr @r0
357 nop
358#endif
359 ! 356 !
360 get_current_thread_info r8, r0 357 get_current_thread_info r8, r0
361 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags 358 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
362 tst #_TIF_ALLWORK_MASK, r0 359 tst #(_TIF_ALLWORK_MASK & 0xff), r0
360 mov #(_TIF_ALLWORK_MASK >> 8), r1
361 bf syscall_exit_work
362 shlr8 r0
363 tst r0, r1
363 bf syscall_exit_work 364 bf syscall_exit_work
364 bra __restore_all 365 bra __restore_all
365 nop 366 nop
@@ -369,9 +370,5 @@ syscall_exit:
369#endif 370#endif
3702: .long NR_syscalls 3712: .long NR_syscalls
3713: .long sys_call_table 3723: .long sys_call_table
372#ifdef CONFIG_TRACE_IRQFLAGS
3735: .long trace_hardirqs_on
3746: .long trace_hardirqs_off
375#endif
3767: .long do_syscall_trace_enter 3737: .long do_syscall_trace_enter
3778: .long do_syscall_trace_leave 3748: .long do_syscall_trace_leave
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 066f37dc32a9..30e13196d35b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,9 +16,13 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/kernel.h>
19#include <asm/ftrace.h> 20#include <asm/ftrace.h>
20#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/unistd.h>
23#include <trace/syscall.h>
21 24
25#ifdef CONFIG_DYNAMIC_FTRACE
22static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; 26static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
23 27
24static unsigned char ftrace_nop[4]; 28static unsigned char ftrace_nop[4];
@@ -58,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
58 return ftrace_replaced_code; 62 return ftrace_replaced_code;
59} 63}
60 64
65/*
66 * Modifying code must take extra care. On an SMP machine, if
67 * the code being modified is also being executed on another CPU
68 * that CPU will have undefined results and possibly take a GPF.
69 * We use kstop_machine to stop other CPUs from executing code.
70 * But this does not stop NMIs from happening. We still need
71 * to protect against that. We separate out the modification of
72 * the code to take care of this.
73 *
74 * Two buffers are added: An IP buffer and a "code" buffer.
75 *
76 * 1) Put the instruction pointer into the IP buffer
77 * and the new code into the "code" buffer.
78 * 2) Wait for any running NMIs to finish and set a flag that says
79 * we are modifying code, it is done in an atomic operation.
80 * 3) Write the code
81 * 4) clear the flag.
82 * 5) Wait for any running NMIs to finish.
83 *
84 * If an NMI is executed, the first thing it does is to call
85 * "ftrace_nmi_enter". This will check if the flag is set to write
86 * and if it is, it will write what is in the IP and "code" buffers.
87 *
88 * The trick is, it does not matter if everyone is writing the same
89 * content to the code location. Also, if a CPU is executing code
90 * it is OK to write to that code location if the contents being written
91 * are the same as what exists.
92 */
93#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
94static atomic_t nmi_running = ATOMIC_INIT(0);
95static int mod_code_status; /* holds return value of text write */
96static void *mod_code_ip; /* holds the IP to write to */
97static void *mod_code_newcode; /* holds the text to write to the IP */
98
99static unsigned nmi_wait_count;
100static atomic_t nmi_update_count = ATOMIC_INIT(0);
101
102int ftrace_arch_read_dyn_info(char *buf, int size)
103{
104 int r;
105
106 r = snprintf(buf, size, "%u %u",
107 nmi_wait_count,
108 atomic_read(&nmi_update_count));
109 return r;
110}
111
112static void clear_mod_flag(void)
113{
114 int old = atomic_read(&nmi_running);
115
116 for (;;) {
117 int new = old & ~MOD_CODE_WRITE_FLAG;
118
119 if (old == new)
120 break;
121
122 old = atomic_cmpxchg(&nmi_running, old, new);
123 }
124}
125
126static void ftrace_mod_code(void)
127{
128 /*
129 * Yes, more than one CPU process can be writing to mod_code_status.
130 * (and the code itself)
131 * But if one were to fail, then they all should, and if one were
132 * to succeed, then they all should.
133 */
134 mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
135 MCOUNT_INSN_SIZE);
136
137 /* if we fail, then kill any new writers */
138 if (mod_code_status)
139 clear_mod_flag();
140}
141
142void ftrace_nmi_enter(void)
143{
144 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
145 smp_rmb();
146 ftrace_mod_code();
147 atomic_inc(&nmi_update_count);
148 }
149 /* Must have previous changes seen before executions */
150 smp_mb();
151}
152
153void ftrace_nmi_exit(void)
154{
155 /* Finish all executions before clearing nmi_running */
156 smp_mb();
157 atomic_dec(&nmi_running);
158}
159
160static void wait_for_nmi_and_set_mod_flag(void)
161{
162 if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
163 return;
164
165 do {
166 cpu_relax();
167 } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
168
169 nmi_wait_count++;
170}
171
172static void wait_for_nmi(void)
173{
174 if (!atomic_read(&nmi_running))
175 return;
176
177 do {
178 cpu_relax();
179 } while (atomic_read(&nmi_running));
180
181 nmi_wait_count++;
182}
183
184static int
185do_ftrace_mod_code(unsigned long ip, void *new_code)
186{
187 mod_code_ip = (void *)ip;
188 mod_code_newcode = new_code;
189
190 /* The buffers need to be visible before we let NMIs write them */
191 smp_mb();
192
193 wait_for_nmi_and_set_mod_flag();
194
195 /* Make sure all running NMIs have finished before we write the code */
196 smp_mb();
197
198 ftrace_mod_code();
199
200 /* Make sure the write happens before clearing the bit */
201 smp_mb();
202
203 clear_mod_flag();
204 wait_for_nmi();
205
206 return mod_code_status;
207}
208
61static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 209static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
62 unsigned char *new_code) 210 unsigned char *new_code)
63{ 211{
@@ -82,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
82 return -EINVAL; 230 return -EINVAL;
83 231
84 /* replace the text with the new text */ 232 /* replace the text with the new text */
85 if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) 233 if (do_ftrace_mod_code(ip, new_code))
86 return -EPERM; 234 return -EPERM;
87 235
88 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); 236 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
@@ -131,3 +279,123 @@ int __init ftrace_dyn_arch_init(void *data)
131 279
132 return 0; 280 return 0;
133} 281}
282#endif /* CONFIG_DYNAMIC_FTRACE */
283
284#ifdef CONFIG_FUNCTION_GRAPH_TRACER
285#ifdef CONFIG_DYNAMIC_FTRACE
286extern void ftrace_graph_call(void);
287
288static int ftrace_mod(unsigned long ip, unsigned long old_addr,
289 unsigned long new_addr)
290{
291 unsigned char code[MCOUNT_INSN_SIZE];
292
293 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
294 return -EFAULT;
295
296 if (old_addr != __raw_readl((unsigned long *)code))
297 return -EINVAL;
298
299 __raw_writel(new_addr, ip);
300 return 0;
301}
302
303int ftrace_enable_ftrace_graph_caller(void)
304{
305 unsigned long ip, old_addr, new_addr;
306
307 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
308 old_addr = (unsigned long)(&skip_trace);
309 new_addr = (unsigned long)(&ftrace_graph_caller);
310
311 return ftrace_mod(ip, old_addr, new_addr);
312}
313
314int ftrace_disable_ftrace_graph_caller(void)
315{
316 unsigned long ip, old_addr, new_addr;
317
318 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
319 old_addr = (unsigned long)(&ftrace_graph_caller);
320 new_addr = (unsigned long)(&skip_trace);
321
322 return ftrace_mod(ip, old_addr, new_addr);
323}
324#endif /* CONFIG_DYNAMIC_FTRACE */
325
326/*
327 * Hook the return address and push it in the stack of return addrs
328 * in the current thread info.
329 *
330 * This is the main routine for the function graph tracer. The function
331 * graph tracer essentially works like this:
332 *
333 * parent is the stack address containing self_addr's return address.
334 * We pull the real return address out of parent and store it in
335 * current's ret_stack. Then, we replace the return address on the stack
336 * with the address of return_to_handler. self_addr is the function that
337 * called mcount.
338 *
339 * When self_addr returns, it will jump to return_to_handler which calls
340 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
341 * return address off of current's ret_stack and jump to it.
342 */
343void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
344{
345 unsigned long old;
346 int faulted, err;
347 struct ftrace_graph_ent trace;
348 unsigned long return_hooker = (unsigned long)&return_to_handler;
349
350 if (unlikely(atomic_read(&current->tracing_graph_pause)))
351 return;
352
353 /*
354 * Protect against a fault, even though it shouldn't
355 * happen. This tool is too intrusive to skip
356 * such protection.
357 */
358 __asm__ __volatile__(
359 "1: \n\t"
360 "mov.l @%2, %0 \n\t"
361 "2: \n\t"
362 "mov.l %3, @%2 \n\t"
363 "mov #0, %1 \n\t"
364 "3: \n\t"
365 ".section .fixup, \"ax\" \n\t"
366 "4: \n\t"
367 "mov.l 5f, %0 \n\t"
368 "jmp @%0 \n\t"
369 " mov #1, %1 \n\t"
370 ".balign 4 \n\t"
371 "5: .long 3b \n\t"
372 ".previous \n\t"
373 ".section __ex_table,\"a\" \n\t"
374 ".long 1b, 4b \n\t"
375 ".long 2b, 4b \n\t"
376 ".previous \n\t"
377 : "=&r" (old), "=r" (faulted)
378 : "r" (parent), "r" (return_hooker)
379 );
380
381 if (unlikely(faulted)) {
382 ftrace_graph_stop();
383 WARN_ON(1);
384 return;
385 }
386
387 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
388 if (err == -EBUSY) {
389 __raw_writel(old, parent);
390 return;
391 }
392
393 trace.func = self_addr;
394
395 /* Only trace if the calling function expects to */
396 if (!ftrace_graph_entry(&trace)) {
397 current->curr_ret_stack--;
398 __raw_writel(old, parent);
399 }
400}
401#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
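
Stripped of the exception-table plumbing, the guarded inline assembly in prepare_ftrace_return() performs the two plain C steps below; the asm form exists so that a bad 'parent' pointer faults into the fixup path instead of crashing the tracer. A sketch only, since a straight C version would lose exactly that protection (helper name is hypothetical):

static void sketch_hook_return(unsigned long *parent, unsigned long return_hooker,
			       unsigned long *old, int *faulted)
{
	*old = *parent;			/* remember the real return address */
	*parent = return_hooker;	/* divert the return through return_to_handler */
	*faulted = 0;			/* the fixup path would set this to 1 instead */
}
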
diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c
deleted file mode 100644
index d22e5af699f9..000000000000
--- a/arch/sh/kernel/gpio.c
+++ /dev/null
@@ -1,584 +0,0 @@
1/*
2 * Pinmuxed GPIO support for SuperH.
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/errno.h>
12#include <linux/kernel.h>
13#include <linux/list.h>
14#include <linux/module.h>
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/irq.h>
19#include <linux/bitops.h>
20#include <linux/gpio.h>
21
22static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
23{
24 if (enum_id < r->begin)
25 return 0;
26
27 if (enum_id > r->end)
28 return 0;
29
30 return 1;
31}
32
33static unsigned long gpio_read_raw_reg(unsigned long reg,
34 unsigned long reg_width)
35{
36 switch (reg_width) {
37 case 8:
38 return ctrl_inb(reg);
39 case 16:
40 return ctrl_inw(reg);
41 case 32:
42 return ctrl_inl(reg);
43 }
44
45 BUG();
46 return 0;
47}
48
49static void gpio_write_raw_reg(unsigned long reg,
50 unsigned long reg_width,
51 unsigned long data)
52{
53 switch (reg_width) {
54 case 8:
55 ctrl_outb(data, reg);
56 return;
57 case 16:
58 ctrl_outw(data, reg);
59 return;
60 case 32:
61 ctrl_outl(data, reg);
62 return;
63 }
64
65 BUG();
66}
67
68static void gpio_write_bit(struct pinmux_data_reg *dr,
69 unsigned long in_pos, unsigned long value)
70{
71 unsigned long pos;
72
73 pos = dr->reg_width - (in_pos + 1);
74
75#ifdef DEBUG
76 pr_info("write_bit addr = %lx, value = %ld, pos = %ld, "
77 "r_width = %ld\n",
78 dr->reg, !!value, pos, dr->reg_width);
79#endif
80
81 if (value)
82 set_bit(pos, &dr->reg_shadow);
83 else
84 clear_bit(pos, &dr->reg_shadow);
85
86 gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow);
87}
88
89static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
90 unsigned long field_width, unsigned long in_pos)
91{
92 unsigned long data, mask, pos;
93
94 data = 0;
95 mask = (1 << field_width) - 1;
96 pos = reg_width - ((in_pos + 1) * field_width);
97
98#ifdef DEBUG
99 pr_info("read_reg: addr = %lx, pos = %ld, "
100 "r_width = %ld, f_width = %ld\n",
101 reg, pos, reg_width, field_width);
102#endif
103
104 data = gpio_read_raw_reg(reg, reg_width);
105 return (data >> pos) & mask;
106}
107
108static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
109 unsigned long field_width, unsigned long in_pos,
110 unsigned long value)
111{
112 unsigned long mask, pos;
113
114 mask = (1 << field_width) - 1;
115 pos = reg_width - ((in_pos + 1) * field_width);
116
117#ifdef DEBUG
118 pr_info("write_reg addr = %lx, value = %ld, pos = %ld, "
119 "r_width = %ld, f_width = %ld\n",
120 reg, value, pos, reg_width, field_width);
121#endif
122
123 mask = ~(mask << pos);
124 value = value << pos;
125
126 switch (reg_width) {
127 case 8:
128 ctrl_outb((ctrl_inb(reg) & mask) | value, reg);
129 break;
130 case 16:
131 ctrl_outw((ctrl_inw(reg) & mask) | value, reg);
132 break;
133 case 32:
134 ctrl_outl((ctrl_inl(reg) & mask) | value, reg);
135 break;
136 }
137}
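
The position/mask arithmetic shared by gpio_read_reg() and gpio_write_reg() packs fields most-significant-first. A quick worked example with made-up values: for reg_width = 32, field_width = 2 and in_pos = 3, pos = 32 - (3 + 1) * 2 = 24, so the field occupies bits 25:24, a read returns (data >> 24) & 0x3, and a write masks with ~(0x3 << 24) before OR-ing in the new value.
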
138
139static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
140{
141 struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
142 struct pinmux_data_reg *data_reg;
143 int k, n;
144
145 if (!enum_in_range(gpiop->enum_id, &gpioc->data))
146 return -1;
147
148 k = 0;
149 while (1) {
150 data_reg = gpioc->data_regs + k;
151
152 if (!data_reg->reg_width)
153 break;
154
155 for (n = 0; n < data_reg->reg_width; n++) {
156 if (data_reg->enum_ids[n] == gpiop->enum_id) {
157 gpiop->flags &= ~PINMUX_FLAG_DREG;
158 gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
159 gpiop->flags &= ~PINMUX_FLAG_DBIT;
160 gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
161 return 0;
162 }
163 }
164 k++;
165 }
166
167 BUG();
168
169 return -1;
170}
171
172static void setup_data_regs(struct pinmux_info *gpioc)
173{
174 struct pinmux_data_reg *drp;
175 int k;
176
177 for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++)
178 setup_data_reg(gpioc, k);
179
180 k = 0;
181 while (1) {
182 drp = gpioc->data_regs + k;
183
184 if (!drp->reg_width)
185 break;
186
187 drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width);
188 k++;
189 }
190}
191
192static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
193 struct pinmux_data_reg **drp, int *bitp)
194{
195 struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
196 int k, n;
197
198 if (!enum_in_range(gpiop->enum_id, &gpioc->data))
199 return -1;
200
201 k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
202 n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
203 *drp = gpioc->data_regs + k;
204 *bitp = n;
205 return 0;
206}
207
208static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
209 struct pinmux_cfg_reg **crp, int *indexp,
210 unsigned long **cntp)
211{
212 struct pinmux_cfg_reg *config_reg;
213 unsigned long r_width, f_width;
214 int k, n;
215
216 k = 0;
217 while (1) {
218 config_reg = gpioc->cfg_regs + k;
219
220 r_width = config_reg->reg_width;
221 f_width = config_reg->field_width;
222
223 if (!r_width)
224 break;
225 for (n = 0; n < (r_width / f_width) * 1 << f_width; n++) {
226 if (config_reg->enum_ids[n] == enum_id) {
227 *crp = config_reg;
228 *indexp = n;
229 *cntp = &config_reg->cnt[n / (1 << f_width)];
230 return 0;
231 }
232 }
233 k++;
234 }
235
236 return -1;
237}
238
239static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
240 int pos, pinmux_enum_t *enum_idp)
241{
242 pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
243 pinmux_enum_t *data = gpioc->gpio_data;
244 int k;
245
246 if (!enum_in_range(enum_id, &gpioc->data)) {
247 if (!enum_in_range(enum_id, &gpioc->mark)) {
248 pr_err("non data/mark enum_id for gpio %d\n", gpio);
249 return -1;
250 }
251 }
252
253 if (pos) {
254 *enum_idp = data[pos + 1];
255 return pos + 1;
256 }
257
258 for (k = 0; k < gpioc->gpio_data_size; k++) {
259 if (data[k] == enum_id) {
260 *enum_idp = data[k + 1];
261 return k + 1;
262 }
263 }
264
265 pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
266 return -1;
267}
268
269static void write_config_reg(struct pinmux_info *gpioc,
270 struct pinmux_cfg_reg *crp,
271 int index)
272{
273 unsigned long ncomb, pos, value;
274
275 ncomb = 1 << crp->field_width;
276 pos = index / ncomb;
277 value = index % ncomb;
278
279 gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value);
280}
281
282static int check_config_reg(struct pinmux_info *gpioc,
283 struct pinmux_cfg_reg *crp,
284 int index)
285{
286 unsigned long ncomb, pos, value;
287
288 ncomb = 1 << crp->field_width;
289 pos = index / ncomb;
290 value = index % ncomb;
291
292 if (gpio_read_reg(crp->reg, crp->reg_width,
293 crp->field_width, pos) == value)
294 return 0;
295
296 return -1;
297}
298
299enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
300
301static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
302 int pinmux_type, int cfg_mode)
303{
304 struct pinmux_cfg_reg *cr = NULL;
305 pinmux_enum_t enum_id;
306 struct pinmux_range *range;
307 int in_range, pos, index;
308 unsigned long *cntp;
309
310 switch (pinmux_type) {
311
312 case PINMUX_TYPE_FUNCTION:
313 range = NULL;
314 break;
315
316 case PINMUX_TYPE_OUTPUT:
317 range = &gpioc->output;
318 break;
319
320 case PINMUX_TYPE_INPUT:
321 range = &gpioc->input;
322 break;
323
324 case PINMUX_TYPE_INPUT_PULLUP:
325 range = &gpioc->input_pu;
326 break;
327
328 case PINMUX_TYPE_INPUT_PULLDOWN:
329 range = &gpioc->input_pd;
330 break;
331
332 default:
333 goto out_err;
334 }
335
336 pos = 0;
337 enum_id = 0;
338 index = 0;
339 while (1) {
340 pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
341 if (pos <= 0)
342 goto out_err;
343
344 if (!enum_id)
345 break;
346
347 in_range = enum_in_range(enum_id, &gpioc->function);
348 if (!in_range && range) {
349 in_range = enum_in_range(enum_id, range);
350
351 if (in_range && enum_id == range->force)
352 continue;
353 }
354
355 if (!in_range)
356 continue;
357
358 if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
359 goto out_err;
360
361 switch (cfg_mode) {
362 case GPIO_CFG_DRYRUN:
363 if (!*cntp || !check_config_reg(gpioc, cr, index))
364 continue;
365 break;
366
367 case GPIO_CFG_REQ:
368 write_config_reg(gpioc, cr, index);
369 *cntp = *cntp + 1;
370 break;
371
372 case GPIO_CFG_FREE:
373 *cntp = *cntp - 1;
374 break;
375 }
376 }
377
378 return 0;
379 out_err:
380 return -1;
381}
382
383static DEFINE_SPINLOCK(gpio_lock);
384
385static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip)
386{
387 return container_of(chip, struct pinmux_info, chip);
388}
389
390static int sh_gpio_request(struct gpio_chip *chip, unsigned offset)
391{
392 struct pinmux_info *gpioc = chip_to_pinmux(chip);
393 struct pinmux_data_reg *dummy;
394 unsigned long flags;
395 int i, ret, pinmux_type;
396
397 ret = -EINVAL;
398
399 if (!gpioc)
400 goto err_out;
401
402 spin_lock_irqsave(&gpio_lock, flags);
403
404 if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
405 goto err_unlock;
406
407 /* setup pin function here if no data is associated with pin */
408
409 if (get_data_reg(gpioc, offset, &dummy, &i) != 0)
410 pinmux_type = PINMUX_TYPE_FUNCTION;
411 else
412 pinmux_type = PINMUX_TYPE_GPIO;
413
414 if (pinmux_type == PINMUX_TYPE_FUNCTION) {
415 if (pinmux_config_gpio(gpioc, offset,
416 pinmux_type,
417 GPIO_CFG_DRYRUN) != 0)
418 goto err_unlock;
419
420 if (pinmux_config_gpio(gpioc, offset,
421 pinmux_type,
422 GPIO_CFG_REQ) != 0)
423 BUG();
424 }
425
426 gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
427 gpioc->gpios[offset].flags |= pinmux_type;
428
429 ret = 0;
430 err_unlock:
431 spin_unlock_irqrestore(&gpio_lock, flags);
432 err_out:
433 return ret;
434}
435
436static void sh_gpio_free(struct gpio_chip *chip, unsigned offset)
437{
438 struct pinmux_info *gpioc = chip_to_pinmux(chip);
439 unsigned long flags;
440 int pinmux_type;
441
442 if (!gpioc)
443 return;
444
445 spin_lock_irqsave(&gpio_lock, flags);
446
447 pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE;
448 pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE);
449 gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
450 gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE;
451
452 spin_unlock_irqrestore(&gpio_lock, flags);
453}
454
455static int pinmux_direction(struct pinmux_info *gpioc,
456 unsigned gpio, int new_pinmux_type)
457{
458 int pinmux_type;
459 int ret = -EINVAL;
460
461 if (!gpioc)
462 goto err_out;
463
464 pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
465
466 switch (pinmux_type) {
467 case PINMUX_TYPE_GPIO:
468 break;
469 case PINMUX_TYPE_OUTPUT:
470 case PINMUX_TYPE_INPUT:
471 case PINMUX_TYPE_INPUT_PULLUP:
472 case PINMUX_TYPE_INPUT_PULLDOWN:
473 pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
474 break;
475 default:
476 goto err_out;
477 }
478
479 if (pinmux_config_gpio(gpioc, gpio,
480 new_pinmux_type,
481 GPIO_CFG_DRYRUN) != 0)
482 goto err_out;
483
484 if (pinmux_config_gpio(gpioc, gpio,
485 new_pinmux_type,
486 GPIO_CFG_REQ) != 0)
487 BUG();
488
489 gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE;
490 gpioc->gpios[gpio].flags |= new_pinmux_type;
491
492 ret = 0;
493 err_out:
494 return ret;
495}
496
497static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
498{
499 struct pinmux_info *gpioc = chip_to_pinmux(chip);
500 unsigned long flags;
501 int ret;
502
503 spin_lock_irqsave(&gpio_lock, flags);
504 ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT);
505 spin_unlock_irqrestore(&gpio_lock, flags);
506
507 return ret;
508}
509
510static void sh_gpio_set_value(struct pinmux_info *gpioc,
511 unsigned gpio, int value)
512{
513 struct pinmux_data_reg *dr = NULL;
514 int bit = 0;
515
516 if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
517 BUG();
518 else
519 gpio_write_bit(dr, bit, value);
520}
521
522static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
523 int value)
524{
525 struct pinmux_info *gpioc = chip_to_pinmux(chip);
526 unsigned long flags;
527 int ret;
528
529 sh_gpio_set_value(gpioc, offset, value);
530 spin_lock_irqsave(&gpio_lock, flags);
531 ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT);
532 spin_unlock_irqrestore(&gpio_lock, flags);
533
534 return ret;
535}
536
537static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
538{
539 struct pinmux_data_reg *dr = NULL;
540 int bit = 0;
541
542 if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) {
543 BUG();
544 return 0;
545 }
546
547 return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
548}
549
550static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
551{
552 return sh_gpio_get_value(chip_to_pinmux(chip), offset);
553}
554
555static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
556{
557 sh_gpio_set_value(chip_to_pinmux(chip), offset, value);
558}
559
560int register_pinmux(struct pinmux_info *pip)
561{
562 struct gpio_chip *chip = &pip->chip;
563
564 pr_info("sh pinmux: %s handling gpio %d -> %d\n",
565 pip->name, pip->first_gpio, pip->last_gpio);
566
567 setup_data_regs(pip);
568
569 chip->request = sh_gpio_request;
570 chip->free = sh_gpio_free;
571 chip->direction_input = sh_gpio_direction_input;
572 chip->get = sh_gpio_get;
573 chip->direction_output = sh_gpio_direction_output;
574 chip->set = sh_gpio_set;
575
576 WARN_ON(pip->first_gpio != 0); /* needs testing */
577
578 chip->label = pip->name;
579 chip->owner = THIS_MODULE;
580 chip->base = pip->first_gpio;
581 chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
582
583 return gpiochip_add(chip);
584}
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index a78be74b8d3e..7db248936b60 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
3 * arch/sh/kernel/head.S 3 * arch/sh/kernel/head.S
4 * 4 *
5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
6 * Copyright (C) 2010 Matt Fleming
6 * 7 *
7 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
@@ -13,6 +14,8 @@
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/linkage.h> 15#include <linux/linkage.h>
15#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/mmu.h>
18#include <cpu/mmu_context.h>
16 19
17#ifdef CONFIG_CPU_SH4A 20#ifdef CONFIG_CPU_SH4A
18#define SYNCO() synco 21#define SYNCO() synco
@@ -82,6 +85,211 @@ ENTRY(_stext)
82 ldc r0, r7_bank ! ... and initial thread_info 85 ldc r0, r7_bank ! ... and initial thread_info
83#endif 86#endif
84 87
88#ifdef CONFIG_PMB
89/*
90 * Reconfigure the initial PMB mappings setup by the hardware.
91 *
92 * When we boot in 32-bit MMU mode there are 2 PMB entries already
93 * setup for us.
94 *
95 * Entry VPN PPN V SZ C UB WT
96 * ---------------------------------------------------------------
97 * 0 0x80000000 0x00000000 1 512MB 1 0 1
98 * 1 0xA0000000 0x00000000 1 512MB 0 0 0
99 *
100 * But we reprogram them here because we want complete control over
101 * our address space and the initial mappings may not map PAGE_OFFSET
102 * to __MEMORY_START (or even map all of our RAM).
103 *
104 * Once we've set up cached and uncached mappings we clear the rest of the
105 * PMB entries. This clearing also deals with the fact that PMB entries
106 * can persist across reboots. The PMB could have been left in any state
107 * when the reboot occurred, so to be safe we clear all entries and start
108 * with a clean slate.
109 *
110 * The uncached mapping is constructed using the smallest possible
111 * mapping with a single unbufferable page. Only the kernel text needs to
112 * be covered via the uncached mapping so that certain functions can be
113 * run uncached.
114 *
115 * Drivers and the like that have previously abused the 1:1 identity
116 * mapping are unsupported in 32-bit mode and must specify their caching
117 * preference when page tables are constructed.
118 *
119 * This frees up the P2 space for more nefarious purposes.
120 *
121 * Register utilization is as follows:
122 *
123 * r0 = PMB_DATA data field
124 * r1 = PMB_DATA address field
125 * r2 = PMB_ADDR data field
126 * r3 = PMB_ADDR address field
127 * r4 = PMB_E_SHIFT
128 * r5 = remaining amount of RAM to map
129 * r6 = PMB mapping size we're trying to use
130 * r7 = cached_to_uncached
131 * r8 = scratch register
132 * r9 = scratch register
133 * r10 = number of PMB entries we've setup
134 * r11 = scratch register
135 */
136
137 mov.l .LMMUCR, r1 /* Flush the TLB */
138 mov.l @r1, r0
139 or #MMUCR_TI, r0
140 mov.l r0, @r1
141
142 mov.l .LMEMORY_SIZE, r5
143
144 mov #PMB_E_SHIFT, r0
145 mov #0x1, r4
146 shld r0, r4
147
148 mov.l .LFIRST_DATA_ENTRY, r0
149 mov.l .LPMB_DATA, r1
150 mov.l .LFIRST_ADDR_ENTRY, r2
151 mov.l .LPMB_ADDR, r3
152
153 /*
154 * First we need to walk the PMB and figure out if there are any
155 * existing mappings that match the initial mappings VPN/PPN.
156 * If these have already been established by the bootloader, we
157 * don't bother setting up new entries here, and let the late PMB
158 * initialization take care of things instead.
159 *
160 * Note that we may need to coalesce and merge entries in order
161 * to reclaim more available PMB slots, which is much more than
162 * we want to do at this early stage.
163 */
164 mov #0, r10
165 mov #NR_PMB_ENTRIES, r9
166
167 mov r1, r7 /* temporary PMB_DATA iter */
168
169.Lvalidate_existing_mappings:
170
171 mov.l .LPMB_DATA_MASK, r11
172 mov.l @r7, r8
173 and r11, r8
174 cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
175 bt .Lpmb_done
176
177 add #1, r10 /* Increment the loop counter */
178 cmp/eq r9, r10
179 bf/s .Lvalidate_existing_mappings
180 add r4, r7 /* Increment to the next PMB_DATA entry */
181
182 /*
183 * If we've fallen through, continue with setting up the initial
184 * mappings.
185 */
186
187 mov r5, r7 /* cached_to_uncached */
188 mov #0, r10
189
190#ifdef CONFIG_UNCACHED_MAPPING
191 /*
192 * Uncached mapping
193 */
194 mov #(PMB_SZ_16M >> 2), r9
195 shll2 r9
196
197 mov #(PMB_UB >> 8), r8
198 shll8 r8
199
200 or r0, r8
201 or r9, r8
202 mov.l r8, @r1
203 mov r2, r8
204 add r7, r8
205 mov.l r8, @r3
206
207 add r4, r1
208 add r4, r3
209 add #1, r10
210#endif
211
212/*
213 * Iterate over all of the available sizes from largest to
214 * smallest for constructing the cached mapping.
215 */
216#define __PMB_ITER_BY_SIZE(size) \
217.L##size: \
218 mov #(size >> 4), r6; \
219 shll16 r6; \
220 shll8 r6; \
221 \
222 cmp/hi r5, r6; \
223 bt 9999f; \
224 \
225 mov #(PMB_SZ_##size##M >> 2), r9; \
226 shll2 r9; \
227 \
228 /* \
229 * Cached mapping \
230 */ \
231 mov #PMB_C, r8; \
232 or r0, r8; \
233 or r9, r8; \
234 mov.l r8, @r1; \
235 mov.l r2, @r3; \
236 \
237 /* Increment to the next PMB_DATA entry */ \
238 add r4, r1; \
239 /* Increment to the next PMB_ADDR entry */ \
240 add r4, r3; \
241 /* Increment number of PMB entries */ \
242 add #1, r10; \
243 \
244 sub r6, r5; \
245 add r6, r0; \
246 add r6, r2; \
247 \
248 bra .L##size; \
2499999:
250
251 __PMB_ITER_BY_SIZE(512)
252 __PMB_ITER_BY_SIZE(128)
253 __PMB_ITER_BY_SIZE(64)
254 __PMB_ITER_BY_SIZE(16)
255
256#ifdef CONFIG_UNCACHED_MAPPING
257 /*
258 * Now that we can access it, update cached_to_uncached and
259 * uncached_size.
260 */
261 mov.l .Lcached_to_uncached, r0
262 mov.l r7, @r0
263
264 mov.l .Luncached_size, r0
265 mov #1, r7
266 shll16 r7
267 shll8 r7
268 mov.l r7, @r0
269#endif
270
271 /*
272 * Clear the remaining PMB entries.
273 *
274 * r3 = entry to begin clearing from
275 * r10 = number of entries we've setup so far
276 */
277 mov #0, r1
278 mov #NR_PMB_ENTRIES, r0
279
280.Lagain:
281 mov.l r1, @r3 /* Clear PMB_ADDR entry */
282 add #1, r10 /* Increment the loop counter */
283 cmp/eq r0, r10
284 bf/s .Lagain
285 add r4, r3 /* Increment to the next PMB_ADDR entry */
286
287 mov.l 6f, r0
288 icbi @r0
289
290.Lpmb_done:
291#endif /* CONFIG_PMB */
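
A rough C model of what the __PMB_ITER_BY_SIZE() assembly above does for the cached mapping: walk the available PMB page sizes from largest to smallest and cover the remaining RAM with as few entries as possible. All names below are illustrative; the real code also programs PMB_DATA/PMB_ADDR and handles the uncached entry:

static const unsigned long pmb_sizes[] = {
	512 << 20, 128 << 20, 64 << 20, 16 << 20,
};

static int sketch_map_cached(unsigned long vpn, unsigned long ppn,
			     unsigned long remaining, int entry)
{
	int i;

	for (i = 0; i < 4; i++) {
		while (remaining >= pmb_sizes[i]) {
			/* program PMB entry 'entry' with vpn/ppn/size here */
			vpn += pmb_sizes[i];
			ppn += pmb_sizes[i];
			remaining -= pmb_sizes[i];
			entry++;
		}
	}

	return entry;	/* PMB slots consumed so far */
}
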
292
85#ifndef CONFIG_SH_NO_BSS_INIT 293#ifndef CONFIG_SH_NO_BSS_INIT
86 /* 294 /*
87 * Don't clear BSS if running on slow platforms such as an RTL simulation, 295 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -122,12 +330,26 @@ ENTRY(_stext)
122#if defined(CONFIG_CPU_SH2) 330#if defined(CONFIG_CPU_SH2)
1231: .long 0x000000F0 ! IMASK=0xF 3311: .long 0x000000F0 ! IMASK=0xF
124#else 332#else
1251: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF 3331: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
126#endif 334#endif
127ENTRY(stack_start) 335ENTRY(stack_start)
1282: .long init_thread_union+THREAD_SIZE 3362: .long init_thread_union+THREAD_SIZE
1293: .long __bss_start 3373: .long __bss_start
1304: .long _end 3384: .long _end
1315: .long start_kernel 3395: .long start_kernel
1326: .long sh_cpu_init 3406: .long cpu_init
1337: .long init_thread_union 3417: .long init_thread_union
342
343#ifdef CONFIG_PMB
344.LPMB_ADDR: .long PMB_ADDR
345.LPMB_DATA: .long PMB_DATA
346.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
347.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
348.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
349.LMMUCR: .long MMUCR
350.LMEMORY_SIZE: .long __MEMORY_SIZE
351#ifdef CONFIG_UNCACHED_MAPPING
352.Lcached_to_uncached: .long cached_to_uncached
353.Luncached_size: .long uncached_size
354#endif
355#endif
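
For reference, each PMB data-array value built up by the loop above is just the physical base OR'd with a size flag, the valid bit and a cacheability attribute (PMB_C for the cached mapping, PMB_UB for the uncached one). A minimal C sketch of that composition follows; pmb_data_val() is illustrative only, and the header location of the PMB_* macros is an assumption:

#include <asm/mmu.h>	/* assumed home of the PMB_* definitions used above */

/*
 * Illustrative helper mirroring what the assembly stores into PMB_DATA:
 * physical base | size flag | valid bit, plus cached or unbuffered bits.
 */
static inline unsigned long pmb_data_val(unsigned long phys,
					 unsigned long size_flag, int cached)
{
	unsigned long val = phys | size_flag | PMB_V;

	val |= cached ? PMB_C : PMB_UB;
	return val;
}
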
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index 3ea765844c74..defd851abefa 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -220,7 +220,6 @@ clear_DTLB:
220 add.l r22, r63, r22 /* Sign extend */ 220 add.l r22, r63, r22 /* Sign extend */
221 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ 221 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
222 222
223#ifdef CONFIG_EARLY_PRINTK
224 /* 223 /*
225 * Setup a DTLB translation for SCIF phys. 224 * Setup a DTLB translation for SCIF phys.
226 */ 225 */
@@ -231,7 +230,6 @@ clear_DTLB:
231 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */ 230 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
232 shori 0x0003, r22 231 shori 0x0003, r22
233 putcfg r21, 0, r22 /* PTEH last */ 232 putcfg r21, 0, r22 /* PTEH last */
234#endif
235 233
236 /* 234 /*
237 * Set cache behaviours. 235 * Set cache behaviours.
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..efae6ab3d54c
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,420 @@
1/*
2 * arch/sh/kernel/hw_breakpoint.c
3 *
4 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
5 *
6 * Copyright (C) 2009 - 2010 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/perf_event.h>
14#include <linux/hw_breakpoint.h>
15#include <linux/percpu.h>
16#include <linux/kallsyms.h>
17#include <linux/notifier.h>
18#include <linux/kprobes.h>
19#include <linux/kdebug.h>
20#include <linux/io.h>
21#include <linux/clk.h>
22#include <asm/hw_breakpoint.h>
23#include <asm/mmu_context.h>
24#include <asm/ptrace.h>
25
26/*
27 * Stores the breakpoints currently in use on each breakpoint address
 28 * register, for each CPU
29 */
30static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
31
32/*
33 * A dummy placeholder for early accesses until the CPUs get a chance to
34 * register their UBCs later in the boot process.
35 */
36static struct sh_ubc ubc_dummy = { .num_events = 0 };
37
38static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
39
40/*
41 * Install a perf counter breakpoint.
42 *
43 * We seek a free UBC channel and use it for this breakpoint.
44 *
45 * Atomic: we hold the counter->ctx->lock and we only handle variables
46 * and registers local to this cpu.
47 */
48int arch_install_hw_breakpoint(struct perf_event *bp)
49{
50 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
51 int i;
52
53 for (i = 0; i < sh_ubc->num_events; i++) {
54 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
55
56 if (!*slot) {
57 *slot = bp;
58 break;
59 }
60 }
61
62 if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
63 return -EBUSY;
64
65 clk_enable(sh_ubc->clk);
66 sh_ubc->enable(info, i);
67
68 return 0;
69}
70
71/*
72 * Uninstall the breakpoint contained in the given counter.
73 *
74 * First we search the debug address register it uses and then we disable
75 * it.
76 *
77 * Atomic: we hold the counter->ctx->lock and we only handle variables
78 * and registers local to this cpu.
79 */
80void arch_uninstall_hw_breakpoint(struct perf_event *bp)
81{
82 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
83 int i;
84
85 for (i = 0; i < sh_ubc->num_events; i++) {
86 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
87
88 if (*slot == bp) {
89 *slot = NULL;
90 break;
91 }
92 }
93
94 if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
95 return;
96
97 sh_ubc->disable(info, i);
98 clk_disable(sh_ubc->clk);
99}
100
101static int get_hbp_len(u16 hbp_len)
102{
103 unsigned int len_in_bytes = 0;
104
105 switch (hbp_len) {
106 case SH_BREAKPOINT_LEN_1:
107 len_in_bytes = 1;
108 break;
109 case SH_BREAKPOINT_LEN_2:
110 len_in_bytes = 2;
111 break;
112 case SH_BREAKPOINT_LEN_4:
113 len_in_bytes = 4;
114 break;
115 case SH_BREAKPOINT_LEN_8:
116 len_in_bytes = 8;
117 break;
118 }
119 return len_in_bytes;
120}
121
122/*
123 * Check for virtual address in kernel space.
124 */
125int arch_check_bp_in_kernelspace(struct perf_event *bp)
126{
127 unsigned int len;
128 unsigned long va;
129 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
130
131 va = info->address;
132 len = get_hbp_len(info->len);
133
134 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
135}
136
137int arch_bp_generic_fields(int sh_len, int sh_type,
138 int *gen_len, int *gen_type)
139{
140 /* Len */
141 switch (sh_len) {
142 case SH_BREAKPOINT_LEN_1:
143 *gen_len = HW_BREAKPOINT_LEN_1;
144 break;
145 case SH_BREAKPOINT_LEN_2:
146 *gen_len = HW_BREAKPOINT_LEN_2;
147 break;
148 case SH_BREAKPOINT_LEN_4:
149 *gen_len = HW_BREAKPOINT_LEN_4;
150 break;
151 case SH_BREAKPOINT_LEN_8:
152 *gen_len = HW_BREAKPOINT_LEN_8;
153 break;
154 default:
155 return -EINVAL;
156 }
157
158 /* Type */
159 switch (sh_type) {
160 case SH_BREAKPOINT_READ:
161 *gen_type = HW_BREAKPOINT_R;
162 case SH_BREAKPOINT_WRITE:
163 *gen_type = HW_BREAKPOINT_W;
164 break;
165 case SH_BREAKPOINT_RW:
166 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 return 0;
173}
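
As rendered above, the SH_BREAKPOINT_READ case appears to fall through into the write case, so a pure read breakpoint would end up reported as HW_BREAKPOINT_W. A minimal sketch of the presumably intended type mapping, with explicit breaks (illustrative only, not a patch against this file):

#include <linux/hw_breakpoint.h>
#include <asm/hw_breakpoint.h>

/* The type half of arch_bp_generic_fields(), with explicit breaks. */
static int sh_type_to_generic(int sh_type, int *gen_type)
{
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
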
174
175static int arch_build_bp_info(struct perf_event *bp)
176{
177 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
178
179 info->address = bp->attr.bp_addr;
180
181 /* Len */
182 switch (bp->attr.bp_len) {
183 case HW_BREAKPOINT_LEN_1:
184 info->len = SH_BREAKPOINT_LEN_1;
185 break;
186 case HW_BREAKPOINT_LEN_2:
187 info->len = SH_BREAKPOINT_LEN_2;
188 break;
189 case HW_BREAKPOINT_LEN_4:
190 info->len = SH_BREAKPOINT_LEN_4;
191 break;
192 case HW_BREAKPOINT_LEN_8:
193 info->len = SH_BREAKPOINT_LEN_8;
194 break;
195 default:
196 return -EINVAL;
197 }
198
199 /* Type */
200 switch (bp->attr.bp_type) {
201 case HW_BREAKPOINT_R:
202 info->type = SH_BREAKPOINT_READ;
203 break;
204 case HW_BREAKPOINT_W:
205 info->type = SH_BREAKPOINT_WRITE;
206 break;
207 case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
208 info->type = SH_BREAKPOINT_RW;
209 break;
210 default:
211 return -EINVAL;
212 }
213
214 return 0;
215}
216
217/*
218 * Validate the arch-specific HW Breakpoint register settings
219 */
220int arch_validate_hwbkpt_settings(struct perf_event *bp)
221{
222 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
223 unsigned int align;
224 int ret;
225
226 ret = arch_build_bp_info(bp);
227 if (ret)
228 return ret;
229
230 ret = -EINVAL;
231
232 switch (info->len) {
233 case SH_BREAKPOINT_LEN_1:
234 align = 0;
235 break;
236 case SH_BREAKPOINT_LEN_2:
237 align = 1;
238 break;
239 case SH_BREAKPOINT_LEN_4:
240 align = 3;
241 break;
242 case SH_BREAKPOINT_LEN_8:
243 align = 7;
244 break;
245 default:
246 return ret;
247 }
248
249 /*
250 * For kernel-addresses, either the address or symbol name can be
251 * specified.
252 */
253 if (info->name)
254 info->address = (unsigned long)kallsyms_lookup_name(info->name);
255
256 /*
257 * Check that the low-order bits of the address are appropriate
258 * for the alignment implied by len.
259 */
260 if (info->address & align)
261 return -EINVAL;
262
263 return 0;
264}
265
266/*
267 * Release the user breakpoints used by ptrace
268 */
269void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
270{
271 int i;
272 struct thread_struct *t = &tsk->thread;
273
274 for (i = 0; i < sh_ubc->num_events; i++) {
275 unregister_hw_breakpoint(t->ptrace_bps[i]);
276 t->ptrace_bps[i] = NULL;
277 }
278}
279
280static int __kprobes hw_breakpoint_handler(struct die_args *args)
281{
282 int cpu, i, rc = NOTIFY_STOP;
283 struct perf_event *bp;
284 unsigned int cmf, resume_mask;
285
286 /*
287 * Do an early return if none of the channels triggered.
288 */
289 cmf = sh_ubc->triggered_mask();
290 if (unlikely(!cmf))
291 return NOTIFY_DONE;
292
293 /*
294 * By default, resume all of the active channels.
295 */
296 resume_mask = sh_ubc->active_mask();
297
298 /*
299 * Disable breakpoints during exception handling.
300 */
301 sh_ubc->disable_all();
302
303 cpu = get_cpu();
304 for (i = 0; i < sh_ubc->num_events; i++) {
305 unsigned long event_mask = (1 << i);
306
307 if (likely(!(cmf & event_mask)))
308 continue;
309
310 /*
311 * The counter may be concurrently released but that can only
312 * occur from a call_rcu() path. We can then safely fetch
313 * the breakpoint, use its callback, touch its counter
314 * while we are in an rcu_read_lock() path.
315 */
316 rcu_read_lock();
317
318 bp = per_cpu(bp_per_reg[i], cpu);
319 if (bp)
320 rc = NOTIFY_DONE;
321
322 /*
323 * Reset the condition match flag to denote completion of
324 * exception handling.
325 */
326 sh_ubc->clear_triggered_mask(event_mask);
327
328 /*
329 * bp can be NULL due to concurrent perf counter
 330 * removal.
331 */
332 if (!bp) {
333 rcu_read_unlock();
334 break;
335 }
336
337 /*
338 * Don't restore the channel if the breakpoint is from
339 * ptrace, as it always operates in one-shot mode.
340 */
341 if (bp->overflow_handler == ptrace_triggered)
342 resume_mask &= ~(1 << i);
343
344 perf_bp_event(bp, args->regs);
345
346 /* Deliver the signal to userspace */
347 if (!arch_check_bp_in_kernelspace(bp)) {
348 siginfo_t info;
349
350 info.si_signo = args->signr;
351 info.si_errno = notifier_to_errno(rc);
352 info.si_code = TRAP_HWBKPT;
353
354 force_sig_info(args->signr, &info, current);
355 }
356
357 rcu_read_unlock();
358 }
359
360 if (cmf == 0)
361 rc = NOTIFY_DONE;
362
363 sh_ubc->enable_all(resume_mask);
364
365 put_cpu();
366
367 return rc;
368}
369
370BUILD_TRAP_HANDLER(breakpoint)
371{
372 unsigned long ex = lookup_exception_vector();
373 TRAP_HANDLER_DECL;
374
375 notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
376}
377
378/*
379 * Handle debug exception notifications.
380 */
381int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
382 unsigned long val, void *data)
383{
384 struct die_args *args = data;
385
386 if (val != DIE_BREAKPOINT)
387 return NOTIFY_DONE;
388
389 /*
390 * If the breakpoint hasn't been triggered by the UBC, it's
391 * probably from a debugger, so don't do anything more here.
392 *
393 * This also permits the UBC interface clock to remain off for
394 * non-UBC breakpoints, as we don't need to check the triggered
395 * or active channel masks.
396 */
397 if (args->trapnr != sh_ubc->trap_nr)
398 return NOTIFY_DONE;
399
400 return hw_breakpoint_handler(data);
401}
402
403void hw_breakpoint_pmu_read(struct perf_event *bp)
404{
405 /* TODO */
406}
407
408int register_sh_ubc(struct sh_ubc *ubc)
409{
410 /* Bail if it's already assigned */
411 if (sh_ubc != &ubc_dummy)
412 return -EBUSY;
413 sh_ubc = ubc;
414
415 pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
416
417 WARN_ON(ubc->num_events > HBP_NUM);
418
419 return 0;
420}
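
A CPU-specific UBC driver is expected to hand this file a populated struct sh_ubc via register_sh_ubc(). The sketch below is hypothetical: the field names are inferred from how sh_ubc is dereferenced above (->name, ->num_events, ->trap_nr, ->clk and the enable/disable/mask operations), and all values are placeholders.

#include <linux/init.h>
#include <asm/hw_breakpoint.h>	/* assumed home of struct sh_ubc */

/* Hypothetical platform-side registration; per-channel ops and the clock
 * lookup are omitted here and would be filled in by the real driver. */
static struct sh_ubc my_ubc = {
	.name		= "SH-4A UBC",	/* reported by register_sh_ubc() */
	.num_events	= 2,		/* number of breakpoint channels */
	.trap_nr	= 0x1e0,	/* placeholder trap vector */
	/* .enable, .disable, .enable_all, .disable_all, .active_mask,
	 * .triggered_mask, .clear_triggered_mask, .clk: driver-specific */
};

static int __init my_ubc_init(void)
{
	return register_sh_ubc(&my_ubc);
}
arch_initcall(my_ubc_init);
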
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..425d604e3a28 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,11 +19,11 @@
19#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/atomic.h> 21#include <asm/atomic.h>
22#include <asm/smp.h>
23
24void (*pm_idle)(void) = NULL;
22 25
23static int hlt_counter; 26static int hlt_counter;
24void (*pm_idle)(void);
25void (*pm_power_off)(void);
26EXPORT_SYMBOL(pm_power_off);
27 27
28static int __init nohlt_setup(char *__unused) 28static int __init nohlt_setup(char *__unused)
29{ 29{
@@ -39,52 +39,110 @@ static int __init hlt_setup(char *__unused)
39} 39}
40__setup("hlt", hlt_setup); 40__setup("hlt", hlt_setup);
41 41
42static inline int hlt_works(void)
43{
44 return !hlt_counter;
45}
46
47/*
48 * On SMP it's slightly faster (but much more power-consuming!)
49 * to poll the ->work.need_resched flag instead of waiting for the
50 * cross-CPU IPI to arrive. Use this option with caution.
51 */
52static void poll_idle(void)
53{
54 local_irq_enable();
55 while (!need_resched())
56 cpu_relax();
57}
58
42void default_idle(void) 59void default_idle(void)
43{ 60{
44 if (!hlt_counter) { 61 if (hlt_works()) {
45 clear_thread_flag(TIF_POLLING_NRFLAG); 62 clear_thread_flag(TIF_POLLING_NRFLAG);
46 smp_mb__after_clear_bit(); 63 smp_mb__after_clear_bit();
47 set_bl_bit();
48 stop_critical_timings();
49 64
50 while (!need_resched()) 65 set_bl_bit();
66 if (!need_resched()) {
67 local_irq_enable();
51 cpu_sleep(); 68 cpu_sleep();
69 } else
70 local_irq_enable();
52 71
53 start_critical_timings();
54 clear_bl_bit();
55 set_thread_flag(TIF_POLLING_NRFLAG); 72 set_thread_flag(TIF_POLLING_NRFLAG);
73 clear_bl_bit();
56 } else 74 } else
57 while (!need_resched()) 75 poll_idle();
58 cpu_relax();
59} 76}
60 77
78/*
79 * The idle thread. There's no useful work to be done, so just try to conserve
80 * power and have a low exit latency (ie sit in a loop waiting for somebody to
81 * say that they'd like to reschedule)
82 */
61void cpu_idle(void) 83void cpu_idle(void)
62{ 84{
85 unsigned int cpu = smp_processor_id();
86
63 set_thread_flag(TIF_POLLING_NRFLAG); 87 set_thread_flag(TIF_POLLING_NRFLAG);
64 88
65 /* endless idle loop with no priority at all */ 89 /* endless idle loop with no priority at all */
66 while (1) { 90 while (1) {
67 void (*idle)(void) = pm_idle; 91 tick_nohz_stop_sched_tick(1);
68 92
69 if (!idle) 93 while (!need_resched()) {
70 idle = default_idle; 94 check_pgt_cache();
95 rmb();
71 96
72 tick_nohz_stop_sched_tick(1); 97 if (cpu_is_offline(cpu))
73 while (!need_resched()) 98 play_dead();
74 idle();
75 tick_nohz_restart_sched_tick();
76 99
100 local_irq_disable();
101 /* Don't trace irqs off for idle */
102 stop_critical_timings();
103 pm_idle();
104 /*
105 * Sanity check to ensure that pm_idle() returns
106 * with IRQs enabled
107 */
108 WARN_ON(irqs_disabled());
109 start_critical_timings();
110 }
111
112 tick_nohz_restart_sched_tick();
77 preempt_enable_no_resched(); 113 preempt_enable_no_resched();
78 schedule(); 114 schedule();
79 preempt_disable(); 115 preempt_disable();
80 check_pgt_cache();
81 } 116 }
82} 117}
83 118
119void __init select_idle_routine(void)
120{
121 /*
122 * If a platform has set its own idle routine, leave it alone.
123 */
124 if (pm_idle)
125 return;
126
127 if (hlt_works())
128 pm_idle = default_idle;
129 else
130 pm_idle = poll_idle;
131}
132
84static void do_nothing(void *unused) 133static void do_nothing(void *unused)
85{ 134{
86} 135}
87 136
137void stop_this_cpu(void *unused)
138{
139 local_irq_disable();
140 set_cpu_online(smp_processor_id(), false);
141
142 for (;;)
143 cpu_sleep();
144}
145
88/* 146/*
89 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of 147 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
90 * pm_idle and update to new pm_idle value. Required while changing pm_idle 148 * pm_idle and update to new pm_idle value. Required while changing pm_idle
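
Since cpu_idle() above calls pm_idle() on every pass and select_idle_routine() only fills it in when it is still NULL, a board can supply its own idle hook. A hypothetical sketch; the my_board_* names are invented and the declarations of pm_idle()/cpu_sleep() are assumed to come from the arch headers:

#include <linux/init.h>
#include <linux/irqflags.h>

/* Hypothetical board hook installed in place of default_idle()/poll_idle().
 * cpu_idle() calls pm_idle() with IRQs disabled and warns if it returns
 * with them still off, so enable them before sleeping. */
static void my_board_idle(void)
{
	local_irq_enable();
	cpu_sleep();		/* or a deeper board-specific sleep state */
}

void __init my_board_setup(void)	/* e.g. called from the machvec setup */
{
	pm_idle = my_board_idle;
}
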
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 1719957c0a69..11f2ea556a6b 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -17,9 +17,8 @@ struct pt_regs fake_swapper_regs;
17 * way process stacks are handled. This is done by having a special 17 * way process stacks are handled. This is done by having a special
18 * "init_task" linker map entry.. 18 * "init_task" linker map entry..
19 */ 19 */
20union thread_union init_thread_union 20union thread_union init_thread_union __init_task_data =
21 __attribute__((__section__(".data.init_task"))) = 21 { INIT_THREAD_INFO(init_task) };
22 { INIT_THREAD_INFO(init_task) };
23 22
24/* 23/*
25 * Initial task structure. 24 * Initial task structure.
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 4f85fffaa557..5c51b794ba2a 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,12 +1,9 @@
1/* 1/*
2 * linux/arch/sh/kernel/io.c 2 * arch/sh/kernel/io.c - Machine independent I/O functions.
3 * 3 *
4 * Copyright (C) 2000 Stuart Menefy 4 * Copyright (C) 2000 - 2009 Stuart Menefy
5 * Copyright (C) 2005 Paul Mundt 5 * Copyright (C) 2005 Paul Mundt
6 * 6 *
7 * Provide real functions which expand to whatever the header file defined.
8 * Also definitions of machine independent IO functions.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
12 * for more details. 9 * for more details.
@@ -18,33 +15,87 @@
18 15
19/* 16/*
20 * Copy data from IO memory space to "real" memory space. 17 * Copy data from IO memory space to "real" memory space.
21 * This needs to be optimized.
22 */ 18 */
23void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count) 19void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
24{ 20{
25 unsigned char *p = to; 21 /*
26 while (count) { 22 * Would it be worthwhile doing byte and long transfers first
27 count--; 23 * to try and get aligned?
28 *p = readb(from); 24 */
29 p++; 25#ifdef CONFIG_CPU_SH4
30 from++; 26 if ((count >= 0x20) &&
31 } 27 (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
28 int tmp2, tmp3, tmp4, tmp5, tmp6;
29
30 __asm__ __volatile__(
31 "1: \n\t"
32 "mov.l @%7+, r0 \n\t"
33 "mov.l @%7+, %2 \n\t"
34 "movca.l r0, @%0 \n\t"
35 "mov.l @%7+, %3 \n\t"
36 "mov.l @%7+, %4 \n\t"
37 "mov.l @%7+, %5 \n\t"
38 "mov.l @%7+, %6 \n\t"
39 "mov.l @%7+, r7 \n\t"
40 "mov.l @%7+, r0 \n\t"
41 "mov.l %2, @(0x04,%0) \n\t"
42 "mov #0x20, %2 \n\t"
43 "mov.l %3, @(0x08,%0) \n\t"
44 "sub %2, %1 \n\t"
45 "mov.l %4, @(0x0c,%0) \n\t"
46 "cmp/hi %1, %2 ! T if 32 > count \n\t"
47 "mov.l %5, @(0x10,%0) \n\t"
48 "mov.l %6, @(0x14,%0) \n\t"
49 "mov.l r7, @(0x18,%0) \n\t"
50 "mov.l r0, @(0x1c,%0) \n\t"
51 "bf.s 1b \n\t"
52 " add #0x20, %0 \n\t"
53 : "=&r" (to), "=&r" (count),
54 "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
55 "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
56 : "7"(from), "0" (to), "1" (count)
57 : "r0", "r7", "t", "memory");
58 }
59#endif
60
61 if ((((u32)to | (u32)from) & 0x3) == 0) {
62 for (; count > 3; count -= 4) {
63 *(u32 *)to = *(volatile u32 *)from;
64 to += 4;
65 from += 4;
66 }
67 }
68
69 for (; count > 0; count--) {
70 *(u8 *)to = *(volatile u8 *)from;
71 to++;
72 from++;
73 }
74
75 mb();
32} 76}
33EXPORT_SYMBOL(memcpy_fromio); 77EXPORT_SYMBOL(memcpy_fromio);
34 78
35/* 79/*
36 * Copy data from "real" memory space to IO memory space. 80 * Copy data from "real" memory space to IO memory space.
37 * This needs to be optimized.
38 */ 81 */
39void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count) 82void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
40{ 83{
41 const unsigned char *p = from; 84 if ((((u32)to | (u32)from) & 0x3) == 0) {
42 while (count) { 85 for ( ; count > 3; count -= 4) {
43 count--; 86 *(volatile u32 *)to = *(u32 *)from;
44 writeb(*p, to); 87 to += 4;
45 p++; 88 from += 4;
46 to++; 89 }
47 } 90 }
91
92 for (; count > 0; count--) {
93 *(volatile u8 *)to = *(u8 *)from;
94 to++;
95 from++;
96 }
97
98 mb();
48} 99}
49EXPORT_SYMBOL(memcpy_toio); 100EXPORT_SYMBOL(memcpy_toio);
50 101
@@ -61,21 +112,3 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
61 } 112 }
62} 113}
63EXPORT_SYMBOL(memset_io); 114EXPORT_SYMBOL(memset_io);
64
65void __iomem *ioport_map(unsigned long port, unsigned int nr)
66{
67 void __iomem *ret;
68
69 ret = __ioport_map_trapped(port, nr);
70 if (ret)
71 return ret;
72
73 return __ioport_map(port, nr);
74}
75EXPORT_SYMBOL(ioport_map);
76
77void ioport_unmap(void __iomem *addr)
78{
79 sh_mv.mv_ioport_unmap(addr);
80}
81EXPORT_SYMBOL(ioport_unmap);
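
The SH-4 fast path in the new memcpy_fromio() is only entered for transfers of at least one 32-byte cache line with a 32-byte-aligned destination and a 4-byte-aligned source; anything smaller or misaligned falls back to the word and byte loops. A small sketch of that guard (sh4_fastpath_ok() is illustrative, not part of the file):

#include <linux/io.h>

/* Mirrors the guard in memcpy_fromio(): >= 0x20 bytes, destination aligned
 * to a 32-byte cache line, source aligned to 4 bytes. */
static inline int sh4_fastpath_ok(const void *to, const void __iomem *from,
				  unsigned long count)
{
	return count >= 0x20 &&
	       ((unsigned long)to   & 0x1f) == 0 &&
	       ((unsigned long)from & 0x03) == 0;
}
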
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
deleted file mode 100644
index 5a7f554d9ca1..000000000000
--- a/arch/sh/kernel/io_generic.c
+++ /dev/null
@@ -1,189 +0,0 @@
1/*
2 * arch/sh/kernel/io_generic.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * Generic I/O routine. These can be used where a machine specific version
8 * is not required.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/module.h>
15#include <linux/io.h>
16#include <asm/machvec.h>
17
18#ifdef CONFIG_CPU_SH3
19/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
20 * workaround. */
21/* I'm not sure SH7709 has this kind of bug */
22#define dummy_read() __raw_readb(0xba000000)
23#else
24#define dummy_read()
25#endif
26
27unsigned long generic_io_base;
28
29u8 generic_inb(unsigned long port)
30{
31 return __raw_readb(__ioport_map(port, 1));
32}
33
34u16 generic_inw(unsigned long port)
35{
36 return __raw_readw(__ioport_map(port, 2));
37}
38
39u32 generic_inl(unsigned long port)
40{
41 return __raw_readl(__ioport_map(port, 4));
42}
43
44u8 generic_inb_p(unsigned long port)
45{
46 unsigned long v = generic_inb(port);
47
48 ctrl_delay();
49 return v;
50}
51
52u16 generic_inw_p(unsigned long port)
53{
54 unsigned long v = generic_inw(port);
55
56 ctrl_delay();
57 return v;
58}
59
60u32 generic_inl_p(unsigned long port)
61{
62 unsigned long v = generic_inl(port);
63
64 ctrl_delay();
65 return v;
66}
67
68/*
69 * insb/w/l all read a series of bytes/words/longs from a fixed port
70 * address. However as the port address doesn't change we only need to
71 * convert the port address to real address once.
72 */
73
74void generic_insb(unsigned long port, void *dst, unsigned long count)
75{
76 volatile u8 *port_addr;
77 u8 *buf = dst;
78
79 port_addr = (volatile u8 __force *)__ioport_map(port, 1);
80 while (count--)
81 *buf++ = *port_addr;
82}
83
84void generic_insw(unsigned long port, void *dst, unsigned long count)
85{
86 volatile u16 *port_addr;
87 u16 *buf = dst;
88
89 port_addr = (volatile u16 __force *)__ioport_map(port, 2);
90 while (count--)
91 *buf++ = *port_addr;
92
93 dummy_read();
94}
95
96void generic_insl(unsigned long port, void *dst, unsigned long count)
97{
98 volatile u32 *port_addr;
99 u32 *buf = dst;
100
101 port_addr = (volatile u32 __force *)__ioport_map(port, 4);
102 while (count--)
103 *buf++ = *port_addr;
104
105 dummy_read();
106}
107
108void generic_outb(u8 b, unsigned long port)
109{
110 __raw_writeb(b, __ioport_map(port, 1));
111}
112
113void generic_outw(u16 b, unsigned long port)
114{
115 __raw_writew(b, __ioport_map(port, 2));
116}
117
118void generic_outl(u32 b, unsigned long port)
119{
120 __raw_writel(b, __ioport_map(port, 4));
121}
122
123void generic_outb_p(u8 b, unsigned long port)
124{
125 generic_outb(b, port);
126 ctrl_delay();
127}
128
129void generic_outw_p(u16 b, unsigned long port)
130{
131 generic_outw(b, port);
132 ctrl_delay();
133}
134
135void generic_outl_p(u32 b, unsigned long port)
136{
137 generic_outl(b, port);
138 ctrl_delay();
139}
140
141/*
142 * outsb/w/l all write a series of bytes/words/longs to a fixed port
143 * address. However as the port address doesn't change we only need to
144 * convert the port address to real address once.
145 */
146void generic_outsb(unsigned long port, const void *src, unsigned long count)
147{
148 volatile u8 *port_addr;
149 const u8 *buf = src;
150
151 port_addr = (volatile u8 __force *)__ioport_map(port, 1);
152
153 while (count--)
154 *port_addr = *buf++;
155}
156
157void generic_outsw(unsigned long port, const void *src, unsigned long count)
158{
159 volatile u16 *port_addr;
160 const u16 *buf = src;
161
162 port_addr = (volatile u16 __force *)__ioport_map(port, 2);
163
164 while (count--)
165 *port_addr = *buf++;
166
167 dummy_read();
168}
169
170void generic_outsl(unsigned long port, const void *src, unsigned long count)
171{
172 volatile u32 *port_addr;
173 const u32 *buf = src;
174
175 port_addr = (volatile u32 __force *)__ioport_map(port, 4);
176 while (count--)
177 *port_addr = *buf++;
178
179 dummy_read();
180}
181
182void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
183{
184 return (void __iomem *)(addr + generic_io_base);
185}
186
187void generic_ioport_unmap(void __iomem *addr)
188{
189}
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 77dfecb64373..32c385ef1011 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -91,10 +91,14 @@ int register_trapped_io(struct trapped_io *tiop)
91 tiop->magic = IO_TRAPPED_MAGIC; 91 tiop->magic = IO_TRAPPED_MAGIC;
92 INIT_LIST_HEAD(&tiop->list); 92 INIT_LIST_HEAD(&tiop->list);
93 spin_lock_irq(&trapped_lock); 93 spin_lock_irq(&trapped_lock);
94#ifdef CONFIG_HAS_IOPORT
94 if (flags & IORESOURCE_IO) 95 if (flags & IORESOURCE_IO)
95 list_add(&tiop->list, &trapped_io); 96 list_add(&tiop->list, &trapped_io);
97#endif
98#ifdef CONFIG_HAS_IOMEM
96 if (flags & IORESOURCE_MEM) 99 if (flags & IORESOURCE_MEM)
97 list_add(&tiop->list, &trapped_mem); 100 list_add(&tiop->list, &trapped_mem);
101#endif
98 spin_unlock_irq(&trapped_lock); 102 spin_unlock_irq(&trapped_lock);
99 103
100 return 0; 104 return 0;
@@ -112,14 +116,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
112 struct trapped_io *tiop; 116 struct trapped_io *tiop;
113 struct resource *res; 117 struct resource *res;
114 int k, len; 118 int k, len;
119 unsigned long flags;
115 120
116 spin_lock_irq(&trapped_lock); 121 spin_lock_irqsave(&trapped_lock, flags);
117 list_for_each_entry(tiop, list, list) { 122 list_for_each_entry(tiop, list, list) {
118 voffs = 0; 123 voffs = 0;
119 for (k = 0; k < tiop->num_resources; k++) { 124 for (k = 0; k < tiop->num_resources; k++) {
120 res = tiop->resource + k; 125 res = tiop->resource + k;
121 if (res->start == offset) { 126 if (res->start == offset) {
122 spin_unlock_irq(&trapped_lock); 127 spin_unlock_irqrestore(&trapped_lock, flags);
123 return tiop->virt_base + voffs; 128 return tiop->virt_base + voffs;
124 } 129 }
125 130
@@ -127,7 +132,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
127 voffs += roundup(len, PAGE_SIZE); 132 voffs += roundup(len, PAGE_SIZE);
128 } 133 }
129 } 134 }
130 spin_unlock_irq(&trapped_lock); 135 spin_unlock_irqrestore(&trapped_lock, flags);
131 return NULL; 136 return NULL;
132} 137}
133EXPORT_SYMBOL_GPL(match_trapped_io_handler); 138EXPORT_SYMBOL_GPL(match_trapped_io_handler);
@@ -183,31 +188,31 @@ static unsigned long long copy_word(unsigned long src_addr, int src_len,
183 188
184 switch (src_len) { 189 switch (src_len) {
185 case 1: 190 case 1:
186 tmp = ctrl_inb(src_addr); 191 tmp = __raw_readb(src_addr);
187 break; 192 break;
188 case 2: 193 case 2:
189 tmp = ctrl_inw(src_addr); 194 tmp = __raw_readw(src_addr);
190 break; 195 break;
191 case 4: 196 case 4:
192 tmp = ctrl_inl(src_addr); 197 tmp = __raw_readl(src_addr);
193 break; 198 break;
194 case 8: 199 case 8:
195 tmp = ctrl_inq(src_addr); 200 tmp = __raw_readq(src_addr);
196 break; 201 break;
197 } 202 }
198 203
199 switch (dst_len) { 204 switch (dst_len) {
200 case 1: 205 case 1:
201 ctrl_outb(tmp, dst_addr); 206 __raw_writeb(tmp, dst_addr);
202 break; 207 break;
203 case 2: 208 case 2:
204 ctrl_outw(tmp, dst_addr); 209 __raw_writew(tmp, dst_addr);
205 break; 210 break;
206 case 4: 211 case 4:
207 ctrl_outl(tmp, dst_addr); 212 __raw_writel(tmp, dst_addr);
208 break; 213 break;
209 case 8: 214 case 8:
210 ctrl_outq(tmp, dst_addr); 215 __raw_writeq(tmp, dst_addr);
211 break; 216 break;
212 } 217 }
213 218
@@ -270,6 +275,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
270 insn_size_t instruction; 275 insn_size_t instruction;
271 int tmp; 276 int tmp;
272 277
278 if (trapped_io_disable)
279 return 0;
273 if (!lookup_tiop(address)) 280 if (!lookup_tiop(address))
274 return 0; 281 return 0;
275 282
@@ -283,7 +290,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
283 return 0; 290 return 0;
284 } 291 }
285 292
286 tmp = handle_unaligned_access(instruction, regs, &trapped_io_access); 293 tmp = handle_unaligned_access(instruction, regs,
294 &trapped_io_access, 1, address);
287 set_fs(oldfs); 295 set_fs(oldfs);
288 return tmp == 0; 296 return tmp == 0;
289} 297}
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c
new file mode 100644
index 000000000000..2e8e8b9b9cef
--- /dev/null
+++ b/arch/sh/kernel/iomap.c
@@ -0,0 +1,165 @@
1/*
2 * arch/sh/kernel/iomap.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/module.h>
12#include <linux/io.h>
13
14unsigned int ioread8(void __iomem *addr)
15{
16 return readb(addr);
17}
18EXPORT_SYMBOL(ioread8);
19
20unsigned int ioread16(void __iomem *addr)
21{
22 return readw(addr);
23}
24EXPORT_SYMBOL(ioread16);
25
26unsigned int ioread16be(void __iomem *addr)
27{
28 return be16_to_cpu(__raw_readw(addr));
29}
30EXPORT_SYMBOL(ioread16be);
31
32unsigned int ioread32(void __iomem *addr)
33{
34 return readl(addr);
35}
36EXPORT_SYMBOL(ioread32);
37
38unsigned int ioread32be(void __iomem *addr)
39{
40 return be32_to_cpu(__raw_readl(addr));
41}
42EXPORT_SYMBOL(ioread32be);
43
44void iowrite8(u8 val, void __iomem *addr)
45{
46 writeb(val, addr);
47}
48EXPORT_SYMBOL(iowrite8);
49
50void iowrite16(u16 val, void __iomem *addr)
51{
52 writew(val, addr);
53}
54EXPORT_SYMBOL(iowrite16);
55
56void iowrite16be(u16 val, void __iomem *addr)
57{
58 __raw_writew(cpu_to_be16(val), addr);
59}
60EXPORT_SYMBOL(iowrite16be);
61
62void iowrite32(u32 val, void __iomem *addr)
63{
64 writel(val, addr);
65}
66EXPORT_SYMBOL(iowrite32);
67
68void iowrite32be(u32 val, void __iomem *addr)
69{
70 __raw_writel(cpu_to_be32(val), addr);
71}
72EXPORT_SYMBOL(iowrite32be);
73
74/*
75 * These are the "repeat MMIO read/write" functions.
76 * Note the "__raw" accesses, since we don't want to
77 * convert to CPU byte order. We write in "IO byte
78 * order" (we also don't have IO barriers).
79 */
80static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
81{
82 while (--count >= 0) {
83 u8 data = __raw_readb(addr);
84 *dst = data;
85 dst++;
86 }
87}
88
89static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
90{
91 while (--count >= 0) {
92 u16 data = __raw_readw(addr);
93 *dst = data;
94 dst++;
95 }
96}
97
98static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
99{
100 while (--count >= 0) {
101 u32 data = __raw_readl(addr);
102 *dst = data;
103 dst++;
104 }
105}
106
107static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
108{
109 while (--count >= 0) {
110 __raw_writeb(*src, addr);
111 src++;
112 }
113}
114
115static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
116{
117 while (--count >= 0) {
118 __raw_writew(*src, addr);
119 src++;
120 }
121}
122
123static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
124{
125 while (--count >= 0) {
126 __raw_writel(*src, addr);
127 src++;
128 }
129}
130
131void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
132{
133 mmio_insb(addr, dst, count);
134}
135EXPORT_SYMBOL(ioread8_rep);
136
137void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
138{
139 mmio_insw(addr, dst, count);
140}
141EXPORT_SYMBOL(ioread16_rep);
142
143void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
144{
145 mmio_insl(addr, dst, count);
146}
147EXPORT_SYMBOL(ioread32_rep);
148
149void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
150{
151 mmio_outsb(addr, src, count);
152}
153EXPORT_SYMBOL(iowrite8_rep);
154
155void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
156{
157 mmio_outsw(addr, src, count);
158}
159EXPORT_SYMBOL(iowrite16_rep);
160
161void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
162{
163 mmio_outsl(addr, src, count);
164}
165EXPORT_SYMBOL(iowrite32_rep);
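
For context, a hypothetical driver fragment using the accessors above on an already-ioremap()ed register window; the offsets and FIFO layout are invented:

#include <linux/io.h>

/* base points at an ioremap()ed register block, buf/n at a caller buffer. */
static void demo_mmio_access(void __iomem *base, u8 *buf, unsigned long n)
{
	u32 status = ioread32(base + 0x04);	/* placeholder status register */

	iowrite32(status | 0x1, base + 0x04);	/* write back with bit 0 set */
	ioread8_rep(base + 0x10, buf, n);	/* drain a hypothetical byte FIFO */
}
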
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c
new file mode 100644
index 000000000000..e3ad6103e7c1
--- /dev/null
+++ b/arch/sh/kernel/ioport.c
@@ -0,0 +1,43 @@
1/*
2 * arch/sh/kernel/ioport.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/module.h>
12#include <linux/io.h>
13
14const unsigned long sh_io_port_base __read_mostly = -1;
15EXPORT_SYMBOL(sh_io_port_base);
16
17void __iomem *__ioport_map(unsigned long addr, unsigned int size)
18{
19 if (sh_mv.mv_ioport_map)
20 return sh_mv.mv_ioport_map(addr, size);
21
22 return (void __iomem *)(addr + sh_io_port_base);
23}
24EXPORT_SYMBOL(__ioport_map);
25
26void __iomem *ioport_map(unsigned long port, unsigned int nr)
27{
28 void __iomem *ret;
29
30 ret = __ioport_map_trapped(port, nr);
31 if (ret)
32 return ret;
33
34 return __ioport_map(port, nr);
35}
36EXPORT_SYMBOL(ioport_map);
37
38void ioport_unmap(void __iomem *addr)
39{
40 if (sh_mv.mv_ioport_unmap)
41 sh_mv.mv_ioport_unmap(addr);
42}
43EXPORT_SYMBOL(ioport_unmap);
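
A hypothetical use of the port helpers above; when a machvec supplies no mv_ioport_map, the mapping reduces to port + sh_io_port_base:

#include <linux/io.h>

/* Read one byte from a legacy I/O port via the mapping helpers above. */
static u8 demo_pio_read(unsigned long port)
{
	void __iomem *p = ioport_map(port, 1);
	u8 val = ioread8(p);

	ioport_unmap(p);
	return val;
}
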
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 3d09062f4682..68ecbe6c881a 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -11,6 +11,8 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel_stat.h> 12#include <linux/kernel_stat.h>
13#include <linux/seq_file.h> 13#include <linux/seq_file.h>
14#include <linux/ftrace.h>
15#include <linux/delay.h>
14#include <asm/processor.h> 16#include <asm/processor.h>
15#include <asm/machvec.h> 17#include <asm/machvec.h>
16#include <asm/uaccess.h> 18#include <asm/uaccess.h>
@@ -36,7 +38,15 @@ void ack_bad_irq(unsigned int irq)
36 */ 38 */
37static int show_other_interrupts(struct seq_file *p, int prec) 39static int show_other_interrupts(struct seq_file *p, int prec)
38{ 40{
41 int j;
42
43 seq_printf(p, "%*s: ", prec, "NMI");
44 for_each_online_cpu(j)
45 seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
46 seq_printf(p, " Non-maskable interrupts\n");
47
39 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); 48 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
49
40 return 0; 50 return 0;
41} 51}
42 52
@@ -46,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
46 int i = *(loff_t *)v, j, prec; 56 int i = *(loff_t *)v, j, prec;
47 struct irqaction *action; 57 struct irqaction *action;
48 struct irq_desc *desc; 58 struct irq_desc *desc;
59 struct irq_data *data;
60 struct irq_chip *chip;
49 61
50 if (i > nr_irqs) 62 if (i > nr_irqs)
51 return 0; 63 return 0;
@@ -67,7 +79,10 @@ int show_interrupts(struct seq_file *p, void *v)
67 if (!desc) 79 if (!desc)
68 return 0; 80 return 0;
69 81
70 spin_lock_irqsave(&desc->lock, flags); 82 data = irq_get_irq_data(i);
83 chip = irq_data_get_irq_chip(data);
84
85 raw_spin_lock_irqsave(&desc->lock, flags);
71 for_each_online_cpu(j) 86 for_each_online_cpu(j)
72 any_count |= kstat_irqs_cpu(i, j); 87 any_count |= kstat_irqs_cpu(i, j);
73 action = desc->action; 88 action = desc->action;
@@ -77,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
77 seq_printf(p, "%*d: ", prec, i); 92 seq_printf(p, "%*d: ", prec, i);
78 for_each_online_cpu(j) 93 for_each_online_cpu(j)
79 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 94 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
80 seq_printf(p, " %14s", desc->chip->name); 95 seq_printf(p, " %14s", chip->name);
81 seq_printf(p, "-%-8s", desc->name); 96 seq_printf(p, "-%-8s", desc->name);
82 97
83 if (action) { 98 if (action) {
@@ -88,7 +103,7 @@ int show_interrupts(struct seq_file *p, void *v)
88 103
89 seq_putc(p, '\n'); 104 seq_putc(p, '\n');
90out: 105out:
91 spin_unlock_irqrestore(&desc->lock, flags); 106 raw_spin_unlock_irqrestore(&desc->lock, flags);
92 return 0; 107 return 0;
93} 108}
94#endif 109#endif
@@ -104,36 +119,14 @@ union irq_ctx {
104 119
105static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; 120static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
106static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; 121static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
107#endif
108 122
109asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) 123static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
124static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
125
126static inline void handle_one_irq(unsigned int irq)
110{ 127{
111 struct pt_regs *old_regs = set_irq_regs(regs);
112#ifdef CONFIG_IRQSTACKS
113 union irq_ctx *curctx, *irqctx; 128 union irq_ctx *curctx, *irqctx;
114#endif
115
116 irq_enter();
117
118#ifdef CONFIG_DEBUG_STACKOVERFLOW
119 /* Debugging check for stack overflow: is there less than 1KB free? */
120 {
121 long sp;
122
123 __asm__ __volatile__ ("and r15, %0" :
124 "=r" (sp) : "0" (THREAD_SIZE - 1));
125
126 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
127 printk("do_IRQ: stack overflow: %ld\n",
128 sp - sizeof(struct thread_info));
129 dump_stack();
130 }
131 }
132#endif
133 129
134 irq = irq_demux(intc_evt2irq(irq));
135
136#ifdef CONFIG_IRQSTACKS
137 curctx = (union irq_ctx *)current_thread_info(); 130 curctx = (union irq_ctx *)current_thread_info();
138 irqctx = hardirq_ctx[smp_processor_id()]; 131 irqctx = hardirq_ctx[smp_processor_id()];
139 132
@@ -172,22 +165,9 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
172 "r5", "r6", "r7", "r8", "t", "pr" 165 "r5", "r6", "r7", "r8", "t", "pr"
173 ); 166 );
174 } else 167 } else
175#endif
176 generic_handle_irq(irq); 168 generic_handle_irq(irq);
177
178 irq_exit();
179
180 set_irq_regs(old_regs);
181 return 1;
182} 169}
183 170
184#ifdef CONFIG_IRQSTACKS
185static char softirq_stack[NR_CPUS * THREAD_SIZE]
186 __attribute__((__section__(".bss.page_aligned")));
187
188static char hardirq_stack[NR_CPUS * THREAD_SIZE]
189 __attribute__((__section__(".bss.page_aligned")));
190
191/* 171/*
192 * allocate per-cpu stacks for hardirq and for softirq processing 172 * allocate per-cpu stacks for hardirq and for softirq processing
193 */ 173 */
@@ -267,8 +247,33 @@ asmlinkage void do_softirq(void)
267 247
268 local_irq_restore(flags); 248 local_irq_restore(flags);
269} 249}
250#else
251static inline void handle_one_irq(unsigned int irq)
252{
253 generic_handle_irq(irq);
254}
270#endif 255#endif
271 256
257asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
258{
259 struct pt_regs *old_regs = set_irq_regs(regs);
260
261 irq_enter();
262
263 irq = irq_demux(irq_lookup(irq));
264
265 if (irq != NO_IRQ_IGNORE) {
266 handle_one_irq(irq);
267 irq_finish(irq);
268 }
269
270 irq_exit();
271
272 set_irq_regs(old_regs);
273
274 return IRQ_HANDLED;
275}
276
272void __init init_IRQ(void) 277void __init init_IRQ(void)
273{ 278{
274 plat_irq_setup(); 279 plat_irq_setup();
@@ -277,6 +282,8 @@ void __init init_IRQ(void)
277 if (sh_mv.mv_init_irq) 282 if (sh_mv.mv_init_irq)
278 sh_mv.mv_init_irq(); 283 sh_mv.mv_init_irq();
279 284
285 intc_finalize();
286
280 irq_ctx_init(smp_processor_id()); 287 irq_ctx_init(smp_processor_id());
281} 288}
282 289
@@ -284,6 +291,51 @@ void __init init_IRQ(void)
284int __init arch_probe_nr_irqs(void) 291int __init arch_probe_nr_irqs(void)
285{ 292{
286 nr_irqs = sh_mv.mv_nr_irqs; 293 nr_irqs = sh_mv.mv_nr_irqs;
287 return 0; 294 return NR_IRQS_LEGACY;
295}
296#endif
297
298#ifdef CONFIG_HOTPLUG_CPU
299static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
300{
301 struct irq_desc *desc = irq_to_desc(irq);
302 struct irq_chip *chip = irq_data_get_irq_chip(data);
303
304 printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
305 irq, data->node, cpu);
306
307 raw_spin_lock_irq(&desc->lock);
308 chip->irq_set_affinity(data, cpumask_of(cpu), false);
309 raw_spin_unlock_irq(&desc->lock);
310}
311
312/*
313 * The CPU has been marked offline. Migrate IRQs off this CPU. If
314 * the affinity settings do not allow other CPUs, force them onto any
315 * available CPU.
316 */
317void migrate_irqs(void)
318{
319 unsigned int irq, cpu = smp_processor_id();
320
321 for_each_active_irq(irq) {
322 struct irq_data *data = irq_get_irq_data(irq);
323
324 if (data->node == cpu) {
325 unsigned int newcpu = cpumask_any_and(data->affinity,
326 cpu_online_mask);
327 if (newcpu >= nr_cpu_ids) {
328 if (printk_ratelimit())
329 printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
330 irq, cpu);
331
332 cpumask_setall(data->affinity);
333 newcpu = cpumask_any_and(data->affinity,
334 cpu_online_mask);
335 }
336
337 route_irq(data, irq, newcpu);
338 }
339 }
288} 340}
289#endif 341#endif
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
new file mode 100644
index 000000000000..e5a755be9129
--- /dev/null
+++ b/arch/sh/kernel/irq_32.c
@@ -0,0 +1,57 @@
1/*
2 * SHcompact irqflags support
3 *
4 * Copyright (C) 2006 - 2009 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/irqflags.h>
11#include <linux/module.h>
12
13void notrace arch_local_irq_restore(unsigned long flags)
14{
15 unsigned long __dummy0, __dummy1;
16
17 if (flags == ARCH_IRQ_DISABLED) {
18 __asm__ __volatile__ (
19 "stc sr, %0\n\t"
20 "or #0xf0, %0\n\t"
21 "ldc %0, sr\n\t"
22 : "=&z" (__dummy0)
23 : /* no inputs */
24 : "memory"
25 );
26 } else {
27 __asm__ __volatile__ (
28 "stc sr, %0\n\t"
29 "and %1, %0\n\t"
30#ifdef CONFIG_CPU_HAS_SR_RB
31 "stc r6_bank, %1\n\t"
32 "or %1, %0\n\t"
33#endif
34 "ldc %0, sr\n\t"
35 : "=&r" (__dummy0), "=r" (__dummy1)
36 : "1" (~ARCH_IRQ_DISABLED)
37 : "memory"
38 );
39 }
40}
41EXPORT_SYMBOL(arch_local_irq_restore);
42
43unsigned long notrace arch_local_save_flags(void)
44{
45 unsigned long flags;
46
47 __asm__ __volatile__ (
48 "stc sr, %0\n\t"
49 "and #0xf0, %0\n\t"
50 : "=&z" (flags)
51 : /* no inputs */
52 : "memory"
53 );
54
55 return flags;
56}
57EXPORT_SYMBOL(arch_local_save_flags);
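
These two out-of-line helpers back the generic irqflags interface; the save/disable/restore wrappers themselves live in the irqflags headers and are not shown here. A sketch, under that assumption, of how a critical section pairs them:

#include <linux/irqflags.h>

/* Illustrative critical section built on the helpers defined above. */
static void demo_irqs_off_section(void)
{
	unsigned long flags = arch_local_save_flags();	/* current SR.IMASK bits */

	arch_local_irq_restore(ARCH_IRQ_DISABLED);	/* mask all interrupts */
	/* ... code that must not be interrupted ... */
	arch_local_irq_restore(flags);			/* put the old mask back */
}
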
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
new file mode 100644
index 000000000000..8fc05b997b6d
--- /dev/null
+++ b/arch/sh/kernel/irq_64.c
@@ -0,0 +1,51 @@
1/*
2 * SHmedia irqflags support
3 *
4 * Copyright (C) 2006 - 2009 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/irqflags.h>
11#include <linux/module.h>
12#include <cpu/registers.h>
13
14void notrace arch_local_irq_restore(unsigned long flags)
15{
16 unsigned long long __dummy;
17
18 if (flags == ARCH_IRQ_DISABLED) {
19 __asm__ __volatile__ (
20 "getcon " __SR ", %0\n\t"
21 "or %0, %1, %0\n\t"
22 "putcon %0, " __SR "\n\t"
23 : "=&r" (__dummy)
24 : "r" (ARCH_IRQ_DISABLED)
25 );
26 } else {
27 __asm__ __volatile__ (
28 "getcon " __SR ", %0\n\t"
29 "and %0, %1, %0\n\t"
30 "putcon %0, " __SR "\n\t"
31 : "=&r" (__dummy)
32 : "r" (~ARCH_IRQ_DISABLED)
33 );
34 }
35}
36EXPORT_SYMBOL(arch_local_irq_restore);
37
38unsigned long notrace arch_local_save_flags(void)
39{
40 unsigned long flags;
41
42 __asm__ __volatile__ (
43 "getcon " __SR ", %0\n\t"
44 "and %0, %1, %0"
45 : "=&r" (flags)
46 : "r" (ARCH_IRQ_DISABLED)
47 );
48
49 return flags;
50}
51EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/kdebugfs.c b/arch/sh/kernel/kdebugfs.c
new file mode 100644
index 000000000000..e11c30bb100c
--- /dev/null
+++ b/arch/sh/kernel/kdebugfs.c
@@ -0,0 +1,16 @@
1#include <linux/module.h>
2#include <linux/init.h>
3#include <linux/debugfs.h>
4
5struct dentry *arch_debugfs_dir;
6EXPORT_SYMBOL(arch_debugfs_dir);
7
8static int __init arch_kdebugfs_init(void)
9{
10 arch_debugfs_dir = debugfs_create_dir("sh", NULL);
11 if (!arch_debugfs_dir)
12 return -ENOMEM;
13
14 return 0;
15}
16arch_initcall(arch_kdebugfs_init);
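
Other code can then create entries beneath the new "sh" directory; a hypothetical example, with demo_value and the file name invented:

#include <linux/debugfs.h>
#include <linux/init.h>

static u32 demo_value;	/* invented counter exposed for debugging */

static int __init demo_debugfs_init(void)
{
	/* arch_debugfs_dir is the "sh" directory registered above */
	debugfs_create_u32("demo_value", 0444, arch_debugfs_dir, &demo_value);
	return 0;
}
late_initcall(demo_debugfs_init);
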
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 305aad742aec..efb6d398dec3 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SuperH KGDB support 2 * SuperH KGDB support
3 * 3 *
4 * Copyright (C) 2008 Paul Mundt 4 * Copyright (C) 2008 - 2009 Paul Mundt
5 * 5 *
6 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. 6 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
7 * 7 *
@@ -15,8 +15,6 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17 17
18char in_nmi = 0; /* Set during NMI to prevent re-entry */
19
20/* Macros for single step instruction identification */ 18/* Macros for single step instruction identification */
21#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) 19#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
22#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00) 20#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
@@ -195,8 +193,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
195 regs->gbr = gdb_regs[GDB_GBR]; 193 regs->gbr = gdb_regs[GDB_GBR];
196 regs->mach = gdb_regs[GDB_MACH]; 194 regs->mach = gdb_regs[GDB_MACH];
197 regs->macl = gdb_regs[GDB_MACL]; 195 regs->macl = gdb_regs[GDB_MACL];
198
199 __asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR]));
200} 196}
201 197
202void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) 198void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
@@ -241,6 +237,18 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
241 return -1; 237 return -1;
242} 238}
243 239
240unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
241{
242 if (exception == 60)
243 return instruction_pointer(regs) - 2;
244 return instruction_pointer(regs);
245}
246
247void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
248{
249 regs->pc = ip;
250}
251
244/* 252/*
245 * The primary entry points for the kgdb debug trap table entries. 253 * The primary entry points for the kgdb debug trap table entries.
246 */ 254 */
@@ -251,28 +259,64 @@ BUILD_TRAP_HANDLER(singlestep)
251 259
252 local_irq_save(flags); 260 local_irq_save(flags);
253 regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); 261 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
254 kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs); 262 kgdb_handle_exception(0, SIGTRAP, 0, regs);
255 local_irq_restore(flags); 263 local_irq_restore(flags);
256} 264}
257 265
266static int __kgdb_notify(struct die_args *args, unsigned long cmd)
267{
268 int ret;
269
270 switch (cmd) {
271 case DIE_BREAKPOINT:
272 /*
273 * This means a user thread is single stepping
274 * a system call which should be ignored
275 */
276 if (test_thread_flag(TIF_SINGLESTEP))
277 return NOTIFY_DONE;
278
279 ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
280 args->err, args->regs);
281 if (ret)
282 return NOTIFY_DONE;
283
284 break;
285 }
286
287 return NOTIFY_STOP;
288}
258 289
259BUILD_TRAP_HANDLER(breakpoint) 290static int
291kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
260{ 292{
261 unsigned long flags; 293 unsigned long flags;
262 TRAP_HANDLER_DECL; 294 int ret;
263 295
264 local_irq_save(flags); 296 local_irq_save(flags);
265 kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs); 297 ret = __kgdb_notify(ptr, cmd);
266 local_irq_restore(flags); 298 local_irq_restore(flags);
299
300 return ret;
267} 301}
268 302
303static struct notifier_block kgdb_notifier = {
304 .notifier_call = kgdb_notify,
305
306 /*
307 * Lowest-prio notifier priority, we want to be notified last:
308 */
309 .priority = -INT_MAX,
310};
311
269int kgdb_arch_init(void) 312int kgdb_arch_init(void)
270{ 313{
271 return 0; 314 return register_die_notifier(&kgdb_notifier);
272} 315}
273 316
274void kgdb_arch_exit(void) 317void kgdb_arch_exit(void)
275{ 318{
319 unregister_die_notifier(&kgdb_notifier);
276} 320}
277 321
278struct kgdb_arch arch_kgdb_ops = { 322struct kgdb_arch arch_kgdb_ops = {
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index c96850b061fb..1208b09e95c3 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -13,15 +13,16 @@
13#include <linux/ptrace.h> 13#include <linux/ptrace.h>
14#include <linux/preempt.h> 14#include <linux/preempt.h>
15#include <linux/kdebug.h> 15#include <linux/kdebug.h>
16#include <linux/slab.h>
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18 19
19DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 20DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
20DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 21DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
21 22
22static struct kprobe saved_current_opcode; 23static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
23static struct kprobe saved_next_opcode; 24static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
24static struct kprobe saved_next_opcode2; 25static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
25 26
26#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b) 27#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
27#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b) 28#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
@@ -101,16 +102,21 @@ int __kprobes kprobe_handle_illslot(unsigned long pc)
101 102
102void __kprobes arch_remove_kprobe(struct kprobe *p) 103void __kprobes arch_remove_kprobe(struct kprobe *p)
103{ 104{
104 if (saved_next_opcode.addr != 0x0) { 105 struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
106
107 if (saved->addr) {
105 arch_disarm_kprobe(p); 108 arch_disarm_kprobe(p);
106 arch_disarm_kprobe(&saved_next_opcode); 109 arch_disarm_kprobe(saved);
107 saved_next_opcode.addr = 0x0; 110
108 saved_next_opcode.opcode = 0x0; 111 saved->addr = NULL;
109 112 saved->opcode = 0;
110 if (saved_next_opcode2.addr != 0x0) { 113
111 arch_disarm_kprobe(&saved_next_opcode2); 114 saved = &__get_cpu_var(saved_next_opcode2);
112 saved_next_opcode2.addr = 0x0; 115 if (saved->addr) {
113 saved_next_opcode2.opcode = 0x0; 116 arch_disarm_kprobe(saved);
117
118 saved->addr = NULL;
119 saved->opcode = 0;
114 } 120 }
115 } 121 }
116} 122}
@@ -140,57 +146,59 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
140 */ 146 */
141static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 147static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
142{ 148{
143 kprobe_opcode_t *addr = NULL; 149 __get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
144 saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
145 addr = saved_current_opcode.addr;
146 150
147 if (p != NULL) { 151 if (p != NULL) {
152 struct kprobe *op1, *op2;
153
148 arch_disarm_kprobe(p); 154 arch_disarm_kprobe(p);
149 155
156 op1 = &__get_cpu_var(saved_next_opcode);
157 op2 = &__get_cpu_var(saved_next_opcode2);
158
150 if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) { 159 if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
151 unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); 160 unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
152 saved_next_opcode.addr = 161 op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
153 (kprobe_opcode_t *) regs->regs[reg_nr];
154 } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) { 162 } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
155 unsigned long disp = (p->opcode & 0x0FFF); 163 unsigned long disp = (p->opcode & 0x0FFF);
156 saved_next_opcode.addr = 164 op1->addr =
157 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); 165 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
158 166
159 } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) { 167 } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
160 unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); 168 unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
161 saved_next_opcode.addr = 169 op1->addr =
162 (kprobe_opcode_t *) (regs->pc + 4 + 170 (kprobe_opcode_t *) (regs->pc + 4 +
163 regs->regs[reg_nr]); 171 regs->regs[reg_nr]);
164 172
165 } else if (OPCODE_RTS(p->opcode)) { 173 } else if (OPCODE_RTS(p->opcode)) {
166 saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr; 174 op1->addr = (kprobe_opcode_t *) regs->pr;
167 175
168 } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) { 176 } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
169 unsigned long disp = (p->opcode & 0x00FF); 177 unsigned long disp = (p->opcode & 0x00FF);
170 /* case 1 */ 178 /* case 1 */
171 saved_next_opcode.addr = p->addr + 1; 179 op1->addr = p->addr + 1;
172 /* case 2 */ 180 /* case 2 */
173 saved_next_opcode2.addr = 181 op2->addr =
174 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); 182 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
175 saved_next_opcode2.opcode = *(saved_next_opcode2.addr); 183 op2->opcode = *(op2->addr);
176 arch_arm_kprobe(&saved_next_opcode2); 184 arch_arm_kprobe(op2);
177 185
178 } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) { 186 } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
179 unsigned long disp = (p->opcode & 0x00FF); 187 unsigned long disp = (p->opcode & 0x00FF);
180 /* case 1 */ 188 /* case 1 */
181 saved_next_opcode.addr = p->addr + 2; 189 op1->addr = p->addr + 2;
182 /* case 2 */ 190 /* case 2 */
183 saved_next_opcode2.addr = 191 op2->addr =
184 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); 192 (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
185 saved_next_opcode2.opcode = *(saved_next_opcode2.addr); 193 op2->opcode = *(op2->addr);
186 arch_arm_kprobe(&saved_next_opcode2); 194 arch_arm_kprobe(op2);
187 195
188 } else { 196 } else {
189 saved_next_opcode.addr = p->addr + 1; 197 op1->addr = p->addr + 1;
190 } 198 }
191 199
192 saved_next_opcode.opcode = *(saved_next_opcode.addr); 200 op1->opcode = *(op1->addr);
193 arch_arm_kprobe(&saved_next_opcode); 201 arch_arm_kprobe(op1);
194 } 202 }
195} 203}
196 204
@@ -375,21 +383,23 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
375 cur->post_handler(cur, regs, 0); 383 cur->post_handler(cur, regs, 0);
376 } 384 }
377 385
378 if (saved_next_opcode.addr != 0x0) { 386 p = &__get_cpu_var(saved_next_opcode);
379 arch_disarm_kprobe(&saved_next_opcode); 387 if (p->addr) {
380 saved_next_opcode.addr = 0x0; 388 arch_disarm_kprobe(p);
381 saved_next_opcode.opcode = 0x0; 389 p->addr = NULL;
390 p->opcode = 0;
382 391
383 addr = saved_current_opcode.addr; 392 addr = __get_cpu_var(saved_current_opcode).addr;
384 saved_current_opcode.addr = 0x0; 393 __get_cpu_var(saved_current_opcode).addr = NULL;
385 394
386 p = get_kprobe(addr); 395 p = get_kprobe(addr);
387 arch_arm_kprobe(p); 396 arch_arm_kprobe(p);
388 397
389 if (saved_next_opcode2.addr != 0x0) { 398 p = &__get_cpu_var(saved_next_opcode2);
390 arch_disarm_kprobe(&saved_next_opcode2); 399 if (p->addr) {
391 saved_next_opcode2.addr = 0x0; 400 arch_disarm_kprobe(p);
392 saved_next_opcode2.opcode = 0x0; 401 p->addr = NULL;
402 p->opcode = 0;
393 } 403 }
394 } 404 }
395 405
@@ -571,14 +581,5 @@ static struct kprobe trampoline_p = {
571 581
572int __init arch_init_kprobes(void) 582int __init arch_init_kprobes(void)
573{ 583{
574 saved_next_opcode.addr = 0x0;
575 saved_next_opcode.opcode = 0x0;
576
577 saved_current_opcode.addr = 0x0;
578 saved_current_opcode.opcode = 0x0;
579
580 saved_next_opcode2.addr = 0x0;
581 saved_next_opcode2.opcode = 0x0;
582
583 return register_kprobe(&trampoline_p); 584 return register_kprobe(&trampoline_p);
584} 585}
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 96e8eaea1e62..8bfc6dfa8b94 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -22,6 +22,7 @@
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/clockchips.h> 24#include <linux/clockchips.h>
25#include <linux/hardirq.h>
25#include <linux/irq.h> 26#include <linux/irq.h>
26 27
27static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); 28static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
@@ -33,7 +34,9 @@ void local_timer_interrupt(void)
33{ 34{
34 struct clock_event_device *clk = &__get_cpu_var(local_clockevent); 35 struct clock_event_device *clk = &__get_cpu_var(local_clockevent);
35 36
37 irq_enter();
36 clk->event_handler(clk); 38 clk->event_handler(clk);
39 irq_exit();
37} 40}
38 41
39static void dummy_timer_set_mode(enum clock_event_mode mode, 42static void dummy_timer_set_mode(enum clock_event_mode mode,
@@ -41,13 +44,15 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
41{ 44{
42} 45}
43 46
44void __cpuinit local_timer_setup(unsigned int cpu) 47void local_timer_setup(unsigned int cpu)
45{ 48{
46 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 49 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
47 50
48 clk->name = "dummy_timer"; 51 clk->name = "dummy_timer";
49 clk->features = CLOCK_EVT_FEAT_DUMMY; 52 clk->features = CLOCK_EVT_FEAT_ONESHOT |
50 clk->rating = 200; 53 CLOCK_EVT_FEAT_PERIODIC |
54 CLOCK_EVT_FEAT_DUMMY;
55 clk->rating = 400;
51 clk->mult = 1; 56 clk->mult = 1;
52 clk->set_mode = dummy_timer_set_mode; 57 clk->set_mode = dummy_timer_set_mode;
53 clk->broadcast = smp_timer_broadcast; 58 clk->broadcast = smp_timer_broadcast;
@@ -55,3 +60,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
55 60
56 clockevents_register_device(clk); 61 clockevents_register_device(clk);
57} 62}
63
64void local_timer_stop(unsigned int cpu)
65{
66}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 7ea2704ea033..e2a3af31ff99 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -8,7 +8,6 @@
8 * This source code is licensed under the GNU General Public License, 8 * This source code is licensed under the GNU General Public License,
9 * Version 2. See the file COPYING for more details. 9 * Version 2. See the file COPYING for more details.
10 */ 10 */
11
12#include <linux/mm.h> 11#include <linux/mm.h>
13#include <linux/kexec.h> 12#include <linux/kexec.h>
14#include <linux/delay.h> 13#include <linux/delay.h>
@@ -16,11 +15,14 @@
16#include <linux/numa.h> 15#include <linux/numa.h>
17#include <linux/ftrace.h> 16#include <linux/ftrace.h>
18#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/memblock.h>
19#include <asm/pgtable.h> 19#include <asm/pgtable.h>
20#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
21#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
24#include <asm/sh_bios.h>
25#include <asm/reboot.h>
24 26
25typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, 27typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
26 unsigned long reboot_code_buffer, 28 unsigned long reboot_code_buffer,
@@ -28,15 +30,11 @@ typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
28 30
29extern const unsigned char relocate_new_kernel[]; 31extern const unsigned char relocate_new_kernel[];
30extern const unsigned int relocate_new_kernel_size; 32extern const unsigned int relocate_new_kernel_size;
31extern void *gdb_vbr_vector;
32extern void *vbr_base; 33extern void *vbr_base;
33 34
34void machine_shutdown(void) 35void native_machine_crash_shutdown(struct pt_regs *regs)
35{
36}
37
38void machine_crash_shutdown(struct pt_regs *regs)
39{ 36{
37 /* Nothing to do for UP, but definitely broken for SMP.. */
40} 38}
41 39
42/* 40/*
@@ -46,12 +44,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
46 */ 44 */
47int machine_kexec_prepare(struct kimage *image) 45int machine_kexec_prepare(struct kimage *image)
48{ 46{
49 /* older versions of kexec-tools are passing
50 * the zImage entry point as a virtual address.
51 */
52 if (image->start != PHYSADDR(image->start))
53 return -EINVAL; /* upgrade your kexec-tools */
54
55 return 0; 47 return 0;
56} 48}
57 49
@@ -123,11 +115,7 @@ void machine_kexec(struct kimage *image)
123 kexec_info(image); 115 kexec_info(image);
124 flush_cache_all(); 116 flush_cache_all();
125 117
126#if defined(CONFIG_SH_STANDARD_BIOS) 118 sh_bios_vbr_reload();
127 asm volatile("ldc %0, vbr" :
128 : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
129 : "memory");
130#endif
131 119
132 /* now call it */ 120 /* now call it */
133 rnk = (relocate_new_kernel_t) reboot_code_buffer; 121 rnk = (relocate_new_kernel_t) reboot_code_buffer;
@@ -159,4 +147,64 @@ void arch_crash_save_vmcoreinfo(void)
159 VMCOREINFO_SYMBOL(node_data); 147 VMCOREINFO_SYMBOL(node_data);
160 VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); 148 VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
161#endif 149#endif
150#ifdef CONFIG_X2TLB
151 VMCOREINFO_CONFIG(X2TLB);
152#endif
153}
154
155void __init reserve_crashkernel(void)
156{
157 unsigned long long crash_size, crash_base;
158 int ret;
159
160 /* this is necessary because of memblock_phys_mem_size() */
161 memblock_analyze();
162
163 ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
164 &crash_size, &crash_base);
165 if (ret == 0 && crash_size > 0) {
166 crashk_res.start = crash_base;
167 crashk_res.end = crash_base + crash_size - 1;
168 }
169
170 if (crashk_res.end == crashk_res.start)
171 goto disable;
172
173 crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
174 if (!crashk_res.start) {
175 unsigned long max = memblock_end_of_DRAM() - memory_limit;
176 crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
177 if (!crashk_res.start) {
178 pr_err("crashkernel allocation failed\n");
179 goto disable;
180 }
181 } else {
182 ret = memblock_reserve(crashk_res.start, crash_size);
183 if (unlikely(ret < 0)) {
184 pr_err("crashkernel reservation failed - "
185 "memory is in use\n");
186 goto disable;
187 }
188 }
189
190 crashk_res.end = crashk_res.start + crash_size - 1;
191
192 /*
193 * Crash kernel trumps memory limit
194 */
195 if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
196 memory_limit = 0;
197 pr_info("Disabled memory limit for crashkernel\n");
198 }
199
200 pr_info("Reserving %ldMB of memory at 0x%08lx "
201 "for crashkernel (System RAM: %ldMB)\n",
202 (unsigned long)(crash_size >> 20),
203 (unsigned long)(crashk_res.start),
204 (unsigned long)(memblock_phys_mem_size() >> 20));
205
206 return;
207
208disable:
209 crashk_res.start = crashk_res.end = 0;
162} 210}
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index 548f6607fd0f..3d722e49db08 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <asm/machvec.h> 15#include <asm/machvec.h>
16#include <asm/sections.h> 16#include <asm/sections.h>
17#include <asm/addrspace.h>
17#include <asm/setup.h> 18#include <asm/setup.h>
18#include <asm/io.h> 19#include <asm/io.h>
19#include <asm/irq.h> 20#include <asm/irq.h>
@@ -117,19 +118,9 @@ void __init sh_mv_setup(void)
117 sh_mv.mv_##elem = generic_##elem; \ 118 sh_mv.mv_##elem = generic_##elem; \
118} while (0) 119} while (0)
119 120
120 mv_set(inb); mv_set(inw); mv_set(inl);
121 mv_set(outb); mv_set(outw); mv_set(outl);
122
123 mv_set(inb_p); mv_set(inw_p); mv_set(inl_p);
124 mv_set(outb_p); mv_set(outw_p); mv_set(outl_p);
125
126 mv_set(insb); mv_set(insw); mv_set(insl);
127 mv_set(outsb); mv_set(outsw); mv_set(outsl);
128
129 mv_set(ioport_map);
130 mv_set(ioport_unmap);
131 mv_set(irq_demux); 121 mv_set(irq_demux);
132 mv_set(mode_pins); 122 mv_set(mode_pins);
123 mv_set(mem_init);
133 124
134 if (!sh_mv.mv_nr_irqs) 125 if (!sh_mv.mv_nr_irqs)
135 sh_mv.mv_nr_irqs = NR_IRQS; 126 sh_mv.mv_nr_irqs = NR_IRQS;
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index c2efdcde266f..ae0be697a89e 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -32,6 +32,7 @@
32#include <linux/string.h> 32#include <linux/string.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <asm/unaligned.h> 34#include <asm/unaligned.h>
35#include <asm/dwarf.h>
35 36
36void *module_alloc(unsigned long size) 37void *module_alloc(unsigned long size)
37{ 38{
@@ -145,10 +146,14 @@ int module_finalize(const Elf_Ehdr *hdr,
145 const Elf_Shdr *sechdrs, 146 const Elf_Shdr *sechdrs,
146 struct module *me) 147 struct module *me)
147{ 148{
148 return module_bug_finalize(hdr, sechdrs, me); 149 int ret = 0;
150
151 ret |= module_dwarf_finalize(hdr, sechdrs, me);
152
153 return ret;
149} 154}
150 155
151void module_arch_cleanup(struct module *mod) 156void module_arch_cleanup(struct module *mod)
152{ 157{
153 module_bug_cleanup(mod); 158 module_dwarf_cleanup(mod);
154} 159}
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
new file mode 100644
index 000000000000..ff0abbd1e652
--- /dev/null
+++ b/arch/sh/kernel/nmi_debug.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2007 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/delay.h>
9#include <linux/kdebug.h>
10#include <linux/notifier.h>
11#include <linux/sched.h>
12#include <linux/hardirq.h>
13
14enum nmi_action {
15 NMI_SHOW_STATE = 1 << 0,
16 NMI_SHOW_REGS = 1 << 1,
17 NMI_DIE = 1 << 2,
18 NMI_DEBOUNCE = 1 << 3,
19};
20
21static unsigned long nmi_actions;
22
23static int nmi_debug_notify(struct notifier_block *self,
24 unsigned long val, void *data)
25{
26 struct die_args *args = data;
27
28 if (likely(val != DIE_NMI))
29 return NOTIFY_DONE;
30
31 if (nmi_actions & NMI_SHOW_STATE)
32 show_state();
33 if (nmi_actions & NMI_SHOW_REGS)
34 show_regs(args->regs);
35 if (nmi_actions & NMI_DEBOUNCE)
36 mdelay(10);
37 if (nmi_actions & NMI_DIE)
38 return NOTIFY_BAD;
39
40 return NOTIFY_OK;
41}
42
43static struct notifier_block nmi_debug_nb = {
44 .notifier_call = nmi_debug_notify,
45};
46
47static int __init nmi_debug_setup(char *str)
48{
49 char *p, *sep;
50
51 register_die_notifier(&nmi_debug_nb);
52
53 if (*str != '=')
54 return 0;
55
56 for (p = str + 1; *p; p = sep + 1) {
57 sep = strchr(p, ',');
58 if (sep)
59 *sep = 0;
60 if (strcmp(p, "state") == 0)
61 nmi_actions |= NMI_SHOW_STATE;
62 else if (strcmp(p, "regs") == 0)
63 nmi_actions |= NMI_SHOW_REGS;
64 else if (strcmp(p, "debounce") == 0)
65 nmi_actions |= NMI_DEBOUNCE;
66 else if (strcmp(p, "die") == 0)
67 nmi_actions |= NMI_DIE;
68 else
69 printk(KERN_WARNING "NMI: Unrecognized action `%s'\n",
70 p);
71 if (!sep)
72 break;
73 }
74
75 return 0;
76}
77__setup("nmi_debug", nmi_debug_setup);
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
new file mode 100644
index 000000000000..d5ca1ef50fa9
--- /dev/null
+++ b/arch/sh/kernel/perf_callchain.c
@@ -0,0 +1,53 @@
1/*
2 * Performance event callchain support - SuperH architecture code
3 *
4 * Copyright (C) 2009 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/perf_event.h>
13#include <linux/percpu.h>
14#include <asm/unwinder.h>
15#include <asm/ptrace.h>
16
17
18static void callchain_warning(void *data, char *msg)
19{
20}
21
22static void
23callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
24{
25}
26
27static int callchain_stack(void *data, char *name)
28{
29 return 0;
30}
31
32static void callchain_address(void *data, unsigned long addr, int reliable)
33{
34 struct perf_callchain_entry *entry = data;
35
36 if (reliable)
37 perf_callchain_store(entry, addr);
38}
39
40static const struct stacktrace_ops callchain_ops = {
41 .warning = callchain_warning,
42 .warning_symbol = callchain_warning_symbol,
43 .stack = callchain_stack,
44 .address = callchain_address,
45};
46
47void
48perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
49{
50 perf_callchain_store(entry, regs->pc);
51
52 unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
53}
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
new file mode 100644
index 000000000000..2ee21a47b5af
--- /dev/null
+++ b/arch/sh/kernel/perf_event.c
@@ -0,0 +1,395 @@
1/*
2 * Performance event support framework for SuperH hardware counters.
3 *
4 * Copyright (C) 2009 Paul Mundt
5 *
6 * Heavily based on the x86 and PowerPC implementations.
7 *
8 * x86:
9 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
10 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
11 * Copyright (C) 2009 Jaswinder Singh Rajput
12 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
13 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
14 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
15 *
16 * ppc:
17 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
18 *
19 * This file is subject to the terms and conditions of the GNU General Public
20 * License. See the file "COPYING" in the main directory of this archive
21 * for more details.
22 */
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/perf_event.h>
28#include <asm/processor.h>
29
30struct cpu_hw_events {
31 struct perf_event *events[MAX_HWEVENTS];
32 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
33 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
34};
35
36DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
37
38static struct sh_pmu *sh_pmu __read_mostly;
39
40/* Number of perf_events counting hardware events */
41static atomic_t num_events;
42/* Used to avoid races in calling reserve/release_pmc_hardware */
43static DEFINE_MUTEX(pmc_reserve_mutex);
44
45/*
46 * Stub these out for now, do something more profound later.
47 */
48int reserve_pmc_hardware(void)
49{
50 return 0;
51}
52
53void release_pmc_hardware(void)
54{
55}
56
57static inline int sh_pmu_initialized(void)
58{
59 return !!sh_pmu;
60}
61
62const char *perf_pmu_name(void)
63{
64 if (!sh_pmu)
65 return NULL;
66
67 return sh_pmu->name;
68}
69EXPORT_SYMBOL_GPL(perf_pmu_name);
70
71int perf_num_counters(void)
72{
73 if (!sh_pmu)
74 return 0;
75
76 return sh_pmu->num_events;
77}
78EXPORT_SYMBOL_GPL(perf_num_counters);
79
80/*
81 * Release the PMU if this is the last perf_event.
82 */
83static void hw_perf_event_destroy(struct perf_event *event)
84{
85 if (!atomic_add_unless(&num_events, -1, 1)) {
86 mutex_lock(&pmc_reserve_mutex);
87 if (atomic_dec_return(&num_events) == 0)
88 release_pmc_hardware();
89 mutex_unlock(&pmc_reserve_mutex);
90 }
91}
92
93static int hw_perf_cache_event(int config, int *evp)
94{
95 unsigned long type, op, result;
96 int ev;
97
98 if (!sh_pmu->cache_events)
99 return -EINVAL;
100
101 /* unpack config */
102 type = config & 0xff;
103 op = (config >> 8) & 0xff;
104 result = (config >> 16) & 0xff;
105
106 if (type >= PERF_COUNT_HW_CACHE_MAX ||
107 op >= PERF_COUNT_HW_CACHE_OP_MAX ||
108 result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
109 return -EINVAL;
110
111 ev = (*sh_pmu->cache_events)[type][op][result];
112 if (ev == 0)
113 return -EOPNOTSUPP;
114 if (ev == -1)
115 return -EINVAL;
116 *evp = ev;
117 return 0;
118}
119
120static int __hw_perf_event_init(struct perf_event *event)
121{
122 struct perf_event_attr *attr = &event->attr;
123 struct hw_perf_event *hwc = &event->hw;
124 int config = -1;
125 int err;
126
127 if (!sh_pmu_initialized())
128 return -ENODEV;
129
130 /*
131 * All of the on-chip counters are "limited", in that they have
132 * no interrupts, and are therefore unable to do sampling without
133 * further work and timer assistance.
134 */
135 if (hwc->sample_period)
136 return -EINVAL;
137
138 /*
139 * See if we need to reserve the counter.
140 *
141 * If no events are currently in use, then we have to take a
142 * mutex to ensure that we don't race with another task doing
143 * reserve_pmc_hardware or release_pmc_hardware.
144 */
145 err = 0;
146 if (!atomic_inc_not_zero(&num_events)) {
147 mutex_lock(&pmc_reserve_mutex);
148 if (atomic_read(&num_events) == 0 &&
149 reserve_pmc_hardware())
150 err = -EBUSY;
151 else
152 atomic_inc(&num_events);
153 mutex_unlock(&pmc_reserve_mutex);
154 }
155
156 if (err)
157 return err;
158
159 event->destroy = hw_perf_event_destroy;
160
161 switch (attr->type) {
162 case PERF_TYPE_RAW:
163 config = attr->config & sh_pmu->raw_event_mask;
164 break;
165 case PERF_TYPE_HW_CACHE:
166 err = hw_perf_cache_event(attr->config, &config);
167 if (err)
168 return err;
169 break;
170 case PERF_TYPE_HARDWARE:
171 if (attr->config >= sh_pmu->max_events)
172 return -EINVAL;
173
174 config = sh_pmu->event_map(attr->config);
175 break;
176 }
177
178 if (config == -1)
179 return -EINVAL;
180
181 hwc->config |= config;
182
183 return 0;
184}
185
186static void sh_perf_event_update(struct perf_event *event,
187 struct hw_perf_event *hwc, int idx)
188{
189 u64 prev_raw_count, new_raw_count;
190 s64 delta;
191 int shift = 0;
192
193 /*
194 * Depending on the counter configuration, they may or may not
195 * be chained, in which case the previous counter value can be
196 * updated underneath us if the lower-half overflows.
197 *
198 * Our tactic to handle this is to first atomically read and
199 * exchange a new raw count - then add that new-prev delta
200 * count to the generic counter atomically.
201 *
202 * As there is no interrupt associated with the overflow events,
203 * this is the simplest approach for maintaining consistency.
204 */
205again:
206 prev_raw_count = local64_read(&hwc->prev_count);
207 new_raw_count = sh_pmu->read(idx);
208
209 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
210 new_raw_count) != prev_raw_count)
211 goto again;
212
213 /*
214 * Now we have the new raw value and have updated the prev
215 * timestamp already. We can now calculate the elapsed delta
216 * (counter-)time and add that to the generic counter.
217 *
218 * Careful, not all hw sign-extends above the physical width
219 * of the count.
220 */
221 delta = (new_raw_count << shift) - (prev_raw_count << shift);
222 delta >>= shift;
223
224 local64_add(delta, &event->count);
225}
226
227static void sh_pmu_stop(struct perf_event *event, int flags)
228{
229 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
230 struct hw_perf_event *hwc = &event->hw;
231 int idx = hwc->idx;
232
233 if (!(event->hw.state & PERF_HES_STOPPED)) {
234 sh_pmu->disable(hwc, idx);
235 cpuc->events[idx] = NULL;
236 event->hw.state |= PERF_HES_STOPPED;
237 }
238
239 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
240 sh_perf_event_update(event, &event->hw, idx);
241 event->hw.state |= PERF_HES_UPTODATE;
242 }
243}
244
245static void sh_pmu_start(struct perf_event *event, int flags)
246{
247 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
248 struct hw_perf_event *hwc = &event->hw;
249 int idx = hwc->idx;
250
251 if (WARN_ON_ONCE(idx == -1))
252 return;
253
254 if (flags & PERF_EF_RELOAD)
255 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
256
257 cpuc->events[idx] = event;
258 event->hw.state = 0;
259 sh_pmu->enable(hwc, idx);
260}
261
262static void sh_pmu_del(struct perf_event *event, int flags)
263{
264 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
265
266 sh_pmu_stop(event, PERF_EF_UPDATE);
267 __clear_bit(event->hw.idx, cpuc->used_mask);
268
269 perf_event_update_userpage(event);
270}
271
272static int sh_pmu_add(struct perf_event *event, int flags)
273{
274 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
275 struct hw_perf_event *hwc = &event->hw;
276 int idx = hwc->idx;
277 int ret = -EAGAIN;
278
279 perf_pmu_disable(event->pmu);
280
281 if (__test_and_set_bit(idx, cpuc->used_mask)) {
282 idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
283 if (idx == sh_pmu->num_events)
284 goto out;
285
286 __set_bit(idx, cpuc->used_mask);
287 hwc->idx = idx;
288 }
289
290 sh_pmu->disable(hwc, idx);
291
292 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
293 if (flags & PERF_EF_START)
294 sh_pmu_start(event, PERF_EF_RELOAD);
295
296 perf_event_update_userpage(event);
297 ret = 0;
298out:
299 perf_pmu_enable(event->pmu);
300 return ret;
301}
302
303static void sh_pmu_read(struct perf_event *event)
304{
305 sh_perf_event_update(event, &event->hw, event->hw.idx);
306}
307
308static int sh_pmu_event_init(struct perf_event *event)
309{
310 int err;
311
312 switch (event->attr.type) {
313 case PERF_TYPE_RAW:
314 case PERF_TYPE_HW_CACHE:
315 case PERF_TYPE_HARDWARE:
316 err = __hw_perf_event_init(event);
317 break;
318
319 default:
320 return -ENOENT;
321 }
322
323 if (unlikely(err)) {
324 if (event->destroy)
325 event->destroy(event);
326 }
327
328 return err;
329}
330
331static void sh_pmu_enable(struct pmu *pmu)
332{
333 if (!sh_pmu_initialized())
334 return;
335
336 sh_pmu->enable_all();
337}
338
339static void sh_pmu_disable(struct pmu *pmu)
340{
341 if (!sh_pmu_initialized())
342 return;
343
344 sh_pmu->disable_all();
345}
346
347static struct pmu pmu = {
348 .pmu_enable = sh_pmu_enable,
349 .pmu_disable = sh_pmu_disable,
350 .event_init = sh_pmu_event_init,
351 .add = sh_pmu_add,
352 .del = sh_pmu_del,
353 .start = sh_pmu_start,
354 .stop = sh_pmu_stop,
355 .read = sh_pmu_read,
356};
357
358static void sh_pmu_setup(int cpu)
359{
360 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
361
362 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
363}
364
365static int __cpuinit
366sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
367{
368 unsigned int cpu = (long)hcpu;
369
370 switch (action & ~CPU_TASKS_FROZEN) {
371 case CPU_UP_PREPARE:
372 sh_pmu_setup(cpu);
373 break;
374
375 default:
376 break;
377 }
378
379 return NOTIFY_OK;
380}
381
382int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
383{
384 if (sh_pmu)
385 return -EBUSY;
386 sh_pmu = _pmu;
387
388 pr_info("Performance Events: %s support registered\n", _pmu->name);
389
390 WARN_ON(_pmu->num_events > MAX_HWEVENTS);
391
392 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
393 perf_cpu_notifier(sh_pmu_notifier);
394 return 0;
395}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 000000000000..dcb126dc76fd
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,101 @@
1#include <linux/mm.h>
2#include <linux/kernel.h>
3#include <linux/slab.h>
4#include <linux/sched.h>
5
6struct kmem_cache *task_xstate_cachep = NULL;
7unsigned int xstate_size;
8
9int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
10{
11 *dst = *src;
12
13 if (src->thread.xstate) {
14 dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
15 GFP_KERNEL);
16 if (!dst->thread.xstate)
17 return -ENOMEM;
18 memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
19 }
20
21 return 0;
22}
23
24void free_thread_xstate(struct task_struct *tsk)
25{
26 if (tsk->thread.xstate) {
27 kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
28 tsk->thread.xstate = NULL;
29 }
30}
31
32#if THREAD_SHIFT < PAGE_SHIFT
33static struct kmem_cache *thread_info_cache;
34
35struct thread_info *alloc_thread_info(struct task_struct *tsk)
36{
37 struct thread_info *ti;
38
39 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
40 if (unlikely(ti == NULL))
41 return NULL;
42#ifdef CONFIG_DEBUG_STACK_USAGE
43 memset(ti, 0, THREAD_SIZE);
44#endif
45 return ti;
46}
47
48void free_thread_info(struct thread_info *ti)
49{
50 free_thread_xstate(ti->task);
51 kmem_cache_free(thread_info_cache, ti);
52}
53
54void thread_info_cache_init(void)
55{
56 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
57 THREAD_SIZE, SLAB_PANIC, NULL);
58}
59#else
60struct thread_info *alloc_thread_info(struct task_struct *tsk)
61{
62#ifdef CONFIG_DEBUG_STACK_USAGE
63 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
64#else
65 gfp_t mask = GFP_KERNEL;
66#endif
67 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
68}
69
70void free_thread_info(struct thread_info *ti)
71{
72 free_thread_xstate(ti->task);
73 free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
74}
75#endif /* THREAD_SHIFT < PAGE_SHIFT */
76
77void arch_task_cache_init(void)
78{
79 if (!xstate_size)
80 return;
81
82 task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
83 __alignof__(union thread_xstate),
84 SLAB_PANIC | SLAB_NOTRACK, NULL);
85}
86
87#ifdef CONFIG_SH_FPU_EMU
88# define HAVE_SOFTFP 1
89#else
90# define HAVE_SOFTFP 0
91#endif
92
93void __cpuinit init_thread_xstate(void)
94{
95 if (boot_cpu_data.flags & CPU_HAS_FPU)
96 xstate_size = sizeof(struct sh_fpu_hard_struct);
97 else if (HAVE_SOFTFP)
98 xstate_size = sizeof(struct sh_fpu_soft_struct);
99 else
100 xstate_size = 0;
101}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92d7740faab1..762a13984bbd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -15,46 +15,18 @@
15 */ 15 */
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/slab.h>
18#include <linux/elfcore.h> 19#include <linux/elfcore.h>
19#include <linux/pm.h>
20#include <linux/kallsyms.h> 20#include <linux/kallsyms.h>
21#include <linux/kexec.h>
22#include <linux/kdebug.h>
23#include <linux/tick.h>
24#include <linux/reboot.h>
25#include <linux/fs.h> 21#include <linux/fs.h>
26#include <linux/preempt.h> 22#include <linux/ftrace.h>
23#include <linux/hw_breakpoint.h>
27#include <asm/uaccess.h> 24#include <asm/uaccess.h>
28#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
29#include <asm/pgalloc.h>
30#include <asm/system.h> 26#include <asm/system.h>
31#include <asm/ubc.h>
32#include <asm/fpu.h> 27#include <asm/fpu.h>
33#include <asm/syscalls.h> 28#include <asm/syscalls.h>
34 29
35int ubc_usercnt = 0;
36
37void machine_restart(char * __unused)
38{
39 /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
40 asm volatile("ldc %0, sr\n\t"
41 "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
42}
43
44void machine_halt(void)
45{
46 local_irq_disable();
47
48 while (1)
49 cpu_sleep();
50}
51
52void machine_power_off(void)
53{
54 if (pm_power_off)
55 pm_power_off();
56}
57
58void show_regs(struct pt_regs * regs) 30void show_regs(struct pt_regs * regs)
59{ 31{
60 printk("\n"); 32 printk("\n");
@@ -70,7 +42,7 @@ void show_regs(struct pt_regs * regs)
70 printk("PC : %08lx SP : %08lx SR : %08lx ", 42 printk("PC : %08lx SP : %08lx SR : %08lx ",
71 regs->pc, regs->regs[15], regs->sr); 43 regs->pc, regs->regs[15], regs->sr);
72#ifdef CONFIG_MMU 44#ifdef CONFIG_MMU
73 printk("TEA : %08x\n", ctrl_inl(MMU_TEA)); 45 printk("TEA : %08x\n", __raw_readl(MMU_TEA));
74#else 46#else
75 printk("\n"); 47 printk("\n");
76#endif 48#endif
@@ -113,7 +85,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
113 regs.regs[5] = (unsigned long)fn; 85 regs.regs[5] = (unsigned long)fn;
114 86
115 regs.pc = (unsigned long)kernel_thread_helper; 87 regs.pc = (unsigned long)kernel_thread_helper;
116 regs.sr = (1 << 30); 88 regs.sr = SR_MD;
89#if defined(CONFIG_SH_FPU)
90 regs.sr |= SR_FD;
91#endif
117 92
118 /* Ok, create the new process.. */ 93 /* Ok, create the new process.. */
119 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 94 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -121,22 +96,36 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
121 96
122 return pid; 97 return pid;
123} 98}
99EXPORT_SYMBOL(kernel_thread);
100
101void start_thread(struct pt_regs *regs, unsigned long new_pc,
102 unsigned long new_sp)
103{
104 set_fs(USER_DS);
105
106 regs->pr = 0;
107 regs->sr = SR_FD;
108 regs->pc = new_pc;
109 regs->regs[15] = new_sp;
110
111 free_thread_xstate(current);
112}
113EXPORT_SYMBOL(start_thread);
124 114
125/* 115/*
126 * Free current thread data structures etc.. 116 * Free current thread data structures etc..
127 */ 117 */
128void exit_thread(void) 118void exit_thread(void)
129{ 119{
130 if (current->thread.ubc_pc) {
131 current->thread.ubc_pc = 0;
132 ubc_usercnt -= 1;
133 }
134} 120}
135 121
136void flush_thread(void) 122void flush_thread(void)
137{ 123{
138#if defined(CONFIG_SH_FPU)
139 struct task_struct *tsk = current; 124 struct task_struct *tsk = current;
125
126 flush_ptrace_hw_breakpoint(tsk);
127
128#if defined(CONFIG_SH_FPU)
140 /* Forget lazy FPU state */ 129 /* Forget lazy FPU state */
141 clear_fpu(tsk, task_pt_regs(tsk)); 130 clear_fpu(tsk, task_pt_regs(tsk));
142 clear_used_math(); 131 clear_used_math();
@@ -165,6 +154,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
165 154
166 return fpvalid; 155 return fpvalid;
167} 156}
157EXPORT_SYMBOL(dump_fpu);
158
159/*
160 * This gets called before we allocate a new thread and copy
161 * the current task into it.
162 */
163void prepare_to_copy(struct task_struct *tsk)
164{
165 unlazy_fpu(tsk, task_pt_regs(tsk));
166}
168 167
169asmlinkage void ret_from_fork(void); 168asmlinkage void ret_from_fork(void);
170 169
@@ -174,17 +173,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
174{ 173{
175 struct thread_info *ti = task_thread_info(p); 174 struct thread_info *ti = task_thread_info(p);
176 struct pt_regs *childregs; 175 struct pt_regs *childregs;
177#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
178 struct task_struct *tsk = current;
179#endif
180
181#if defined(CONFIG_SH_FPU)
182 unlazy_fpu(tsk, regs);
183 p->thread.fpu = tsk->thread.fpu;
184 copy_to_stopped_child_used_math(p);
185#endif
186 176
187#if defined(CONFIG_SH_DSP) 177#if defined(CONFIG_SH_DSP)
178 struct task_struct *tsk = current;
179
188 if (is_dsp_enabled(tsk)) { 180 if (is_dsp_enabled(tsk)) {
189 /* We can use the __save_dsp or just copy the struct: 181 /* We can use the __save_dsp or just copy the struct:
190 * __save_dsp(p); 182 * __save_dsp(p);
@@ -203,6 +195,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
203 } else { 195 } else {
204 childregs->regs[15] = (unsigned long)childregs; 196 childregs->regs[15] = (unsigned long)childregs;
205 ti->addr_limit = KERNEL_DS; 197 ti->addr_limit = KERNEL_DS;
198 ti->status &= ~TS_USEDFPU;
199 p->fpu_counter = 0;
206 } 200 }
207 201
208 if (clone_flags & CLONE_SETTLS) 202 if (clone_flags & CLONE_SETTLS)
@@ -213,63 +207,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
213 p->thread.sp = (unsigned long) childregs; 207 p->thread.sp = (unsigned long) childregs;
214 p->thread.pc = (unsigned long) ret_from_fork; 208 p->thread.pc = (unsigned long) ret_from_fork;
215 209
216 p->thread.ubc_pc = 0; 210 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
217 211
218 return 0; 212 return 0;
219} 213}
220 214
221/* Tracing by user break controller. */
222static void ubc_set_tracing(int asid, unsigned long pc)
223{
224#if defined(CONFIG_CPU_SH4A)
225 unsigned long val;
226
227 val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
228 val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
229
230 ctrl_outl(val, UBC_CBR0);
231 ctrl_outl(pc, UBC_CAR0);
232 ctrl_outl(0x0, UBC_CAMR0);
233 ctrl_outl(0x0, UBC_CBCR);
234
235 val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
236 ctrl_outl(val, UBC_CRR0);
237
238 /* Read UBC register that we wrote last, for checking update */
239 val = ctrl_inl(UBC_CRR0);
240
241#else /* CONFIG_CPU_SH4A */
242 ctrl_outl(pc, UBC_BARA);
243
244#ifdef CONFIG_MMU
245 ctrl_outb(asid, UBC_BASRA);
246#endif
247
248 ctrl_outl(0, UBC_BAMRA);
249
250 if (current_cpu_data.type == CPU_SH7729 ||
251 current_cpu_data.type == CPU_SH7710 ||
252 current_cpu_data.type == CPU_SH7712 ||
253 current_cpu_data.type == CPU_SH7203){
254 ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
255 ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
256 } else {
257 ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
258 ctrl_outw(BRCR_PCBA, UBC_BRCR);
259 }
260#endif /* CONFIG_CPU_SH4A */
261}
262
263/* 215/*
264 * switch_to(x,y) should switch tasks from x to y. 216 * switch_to(x,y) should switch tasks from x to y.
265 * 217 *
266 */ 218 */
267struct task_struct *__switch_to(struct task_struct *prev, 219__notrace_funcgraph struct task_struct *
268 struct task_struct *next) 220__switch_to(struct task_struct *prev, struct task_struct *next)
269{ 221{
270#if defined(CONFIG_SH_FPU) 222 struct thread_struct *next_t = &next->thread;
223
271 unlazy_fpu(prev, task_pt_regs(prev)); 224 unlazy_fpu(prev, task_pt_regs(prev));
272#endif 225
226 /* we're going to use this soon, after a few expensive things */
227 if (next->fpu_counter > 5)
228 prefetch(next_t->xstate);
273 229
274#ifdef CONFIG_MMU 230#ifdef CONFIG_MMU
275 /* 231 /*
@@ -281,24 +237,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
281 : "r" (task_thread_info(next))); 237 : "r" (task_thread_info(next)));
282#endif 238#endif
283 239
284 /* If no tasks are using the UBC, we're done */ 240 /*
285 if (ubc_usercnt == 0) 241 * If the task has used fpu the last 5 timeslices, just do a full
286 /* If no tasks are using the UBC, we're done */; 242 * restore of the math state immediately to avoid the trap; the
287 else if (next->thread.ubc_pc && next->mm) { 243 * chances of needing FPU soon are obviously high now
288 int asid = 0; 244 */
289#ifdef CONFIG_MMU 245 if (next->fpu_counter > 5)
290 asid |= cpu_asid(smp_processor_id(), next->mm); 246 __fpu_state_restore();
291#endif
292 ubc_set_tracing(asid, next->thread.ubc_pc);
293 } else {
294#if defined(CONFIG_CPU_SH4A)
295 ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
296 ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
297#else
298 ctrl_outw(0, UBC_BBRA);
299 ctrl_outw(0, UBC_BBRB);
300#endif
301 }
302 247
303 return prev; 248 return prev;
304} 249}
@@ -351,9 +296,10 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
351/* 296/*
352 * sys_execve() executes a new program. 297 * sys_execve() executes a new program.
353 */ 298 */
354asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, 299asmlinkage int sys_execve(const char __user *ufilename,
355 char __user * __user *uenvp, unsigned long r7, 300 const char __user *const __user *uargv,
356 struct pt_regs __regs) 301 const char __user *const __user *uenvp,
302 unsigned long r7, struct pt_regs __regs)
357{ 303{
358 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 304 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
359 int error; 305 int error;
@@ -391,20 +337,3 @@ unsigned long get_wchan(struct task_struct *p)
391 337
392 return pc; 338 return pc;
393} 339}
394
395asmlinkage void break_point_trap(void)
396{
397 /* Clear tracing. */
398#if defined(CONFIG_CPU_SH4A)
399 ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
400 ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
401#else
402 ctrl_outw(0, UBC_BBRA);
403 ctrl_outw(0, UBC_BBRB);
404 ctrl_outl(0, UBC_BRCR);
405#endif
406 current->thread.ubc_pc = 0;
407 ubc_usercnt -= 1;
408
409 force_sig(SIGTRAP, current);
410}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 24de74214940..210c1cabcb7f 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/reboot.h> 23#include <linux/reboot.h>
24#include <linux/slab.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/io.h> 27#include <linux/io.h>
@@ -32,30 +33,7 @@
32 33
33struct task_struct *last_task_used_math = NULL; 34struct task_struct *last_task_used_math = NULL;
34 35
35void machine_restart(char * __unused) 36void show_regs(struct pt_regs *regs)
36{
37 extern void phys_stext(void);
38
39 phys_stext();
40}
41
42void machine_halt(void)
43{
44 for (;;);
45}
46
47void machine_power_off(void)
48{
49 __asm__ __volatile__ (
50 "sleep\n\t"
51 "synci\n\t"
52 "nop;nop;nop;nop\n\t"
53 );
54
55 panic("Unexpected wakeup!\n");
56}
57
58void show_regs(struct pt_regs * regs)
59{ 37{
60 unsigned long long ah, al, bh, bl, ch, cl; 38 unsigned long long ah, al, bh, bl, ch, cl;
61 39
@@ -335,6 +313,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
335 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 313 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
336 &regs, 0, NULL, NULL); 314 &regs, 0, NULL, NULL);
337} 315}
316EXPORT_SYMBOL(kernel_thread);
338 317
339/* 318/*
340 * Free current thread data structures etc.. 319 * Free current thread data structures etc..
@@ -367,7 +346,7 @@ void exit_thread(void)
367void flush_thread(void) 346void flush_thread(void)
368{ 347{
369 348
370 /* Called by fs/exec.c (flush_old_exec) to remove traces of a 349 /* Called by fs/exec.c (setup_new_exec) to remove traces of a
371 * previously running executable. */ 350 * previously running executable. */
372#ifdef CONFIG_SH_FPU 351#ifdef CONFIG_SH_FPU
373 if (last_task_used_math == current) { 352 if (last_task_used_math == current) {
@@ -403,13 +382,13 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
403 if (fpvalid) { 382 if (fpvalid) {
404 if (current == last_task_used_math) { 383 if (current == last_task_used_math) {
405 enable_fpu(); 384 enable_fpu();
406 save_fpu(tsk, regs); 385 save_fpu(tsk);
407 disable_fpu(); 386 disable_fpu();
408 last_task_used_math = 0; 387 last_task_used_math = 0;
409 regs->sr |= SR_FD; 388 regs->sr |= SR_FD;
410 } 389 }
411 390
412 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); 391 memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
413 } 392 }
414 393
415 return fpvalid; 394 return fpvalid;
@@ -417,6 +396,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
417 return 0; /* Task didn't use the fpu at all. */ 396 return 0; /* Task didn't use the fpu at all. */
418#endif 397#endif
419} 398}
399EXPORT_SYMBOL(dump_fpu);
420 400
421asmlinkage void ret_from_fork(void); 401asmlinkage void ret_from_fork(void);
422 402
@@ -425,12 +405,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
425 struct task_struct *p, struct pt_regs *regs) 405 struct task_struct *p, struct pt_regs *regs)
426{ 406{
427 struct pt_regs *childregs; 407 struct pt_regs *childregs;
428 unsigned long long se; /* Sign extension */
429 408
430#ifdef CONFIG_SH_FPU 409#ifdef CONFIG_SH_FPU
431 if(last_task_used_math == current) { 410 if(last_task_used_math == current) {
432 enable_fpu(); 411 enable_fpu();
433 save_fpu(current, regs); 412 save_fpu(current);
434 disable_fpu(); 413 disable_fpu();
435 last_task_used_math = NULL; 414 last_task_used_math = NULL;
436 regs->sr |= SR_FD; 415 regs->sr |= SR_FD;
@@ -441,11 +420,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
441 420
442 *childregs = *regs; 421 *childregs = *regs;
443 422
423 /*
424 * Sign extend the edited stack.
425 * Note that thread.pc and thread.pc will stay
426 * 32-bit wide and context switch must take care
427 * of NEFF sign extension.
428 */
444 if (user_mode(regs)) { 429 if (user_mode(regs)) {
445 childregs->regs[15] = usp; 430 childregs->regs[15] = neff_sign_extend(usp);
446 p->thread.uregs = childregs; 431 p->thread.uregs = childregs;
447 } else { 432 } else {
448 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE; 433 childregs->regs[15] =
434 neff_sign_extend((unsigned long)task_stack_page(p) +
435 THREAD_SIZE);
449 } 436 }
450 437
451 childregs->regs[9] = 0; /* Set return value for child */ 438 childregs->regs[9] = 0; /* Set return value for child */
@@ -454,17 +441,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
454 p->thread.sp = (unsigned long) childregs; 441 p->thread.sp = (unsigned long) childregs;
455 p->thread.pc = (unsigned long) ret_from_fork; 442 p->thread.pc = (unsigned long) ret_from_fork;
456 443
457 /*
458 * Sign extend the edited stack.
459 * Note that thread.pc and thread.pc will stay
460 * 32-bit wide and context switch must take care
461 * of NEFF sign extension.
462 */
463
464 se = childregs->regs[15];
465 se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
466 childregs->regs[15] = se;
467
468 return 0; 444 return 0;
469} 445}
470 446
@@ -507,7 +483,7 @@ asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
507/* 483/*
508 * sys_execve() executes a new program. 484 * sys_execve() executes a new program.
509 */ 485 */
510asmlinkage int sys_execve(char *ufilename, char **uargv, 486asmlinkage int sys_execve(const char *ufilename, char **uargv,
511 char **uenvp, unsigned long r5, 487 char **uenvp, unsigned long r5,
512 unsigned long r6, unsigned long r7, 488 unsigned long r6, unsigned long r7,
513 struct pt_regs *pregs) 489 struct pt_regs *pregs)
@@ -521,21 +497,14 @@ asmlinkage int sys_execve(char *ufilename, char **uargv,
521 goto out; 497 goto out;
522 498
523 error = do_execve(filename, 499 error = do_execve(filename,
524 (char __user * __user *)uargv, 500 (const char __user *const __user *)uargv,
525 (char __user * __user *)uenvp, 501 (const char __user *const __user *)uenvp,
526 pregs); 502 pregs);
527 putname(filename); 503 putname(filename);
528out: 504out:
529 return error; 505 return error;
530} 506}
531 507
532/*
533 * These bracket the sleeping functions..
534 */
535extern void interruptible_sleep_on(wait_queue_head_t *q);
536
537#define mid_sched ((unsigned long) interruptible_sleep_on)
538
539#ifdef CONFIG_FRAME_POINTER 508#ifdef CONFIG_FRAME_POINTER
540static int in_sh64_switch_to(unsigned long pc) 509static int in_sh64_switch_to(unsigned long pc)
541{ 510{
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 000000000000..0a05983633ca
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,33 @@
1#include <linux/ptrace.h>
2
3/**
4 * regs_query_register_offset() - query register offset from its name
5 * @name: the name of a register
6 *
7 * regs_query_register_offset() returns the offset of a register in struct
8 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
9 */
10int regs_query_register_offset(const char *name)
11{
12 const struct pt_regs_offset *roff;
13 for (roff = regoffset_table; roff->name != NULL; roff++)
14 if (!strcmp(roff->name, name))
15 return roff->offset;
16 return -EINVAL;
17}
18
19/**
20 * regs_query_register_name() - query register name from its offset
21 * @offset: the offset of a register in struct pt_regs.
22 *
23 * regs_query_register_name() returns the name of a register from its
24 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
25 */
26const char *regs_query_register_name(unsigned int offset)
27{
28 const struct pt_regs_offset *roff;
29 for (roff = regoffset_table; roff->name != NULL; roff++)
30 if (roff->offset == offset)
31 return roff->name;
32 return NULL;
33}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3392e835a374..90a15d29feeb 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
2 * SuperH process tracing 2 * SuperH process tracing
3 * 3 *
4 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka 4 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
5 * Copyright (C) 2002 - 2008 Paul Mundt 5 * Copyright (C) 2002 - 2009 Paul Mundt
6 * 6 *
7 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> 7 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
8 * 8 *
@@ -17,7 +17,6 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/ptrace.h> 18#include <linux/ptrace.h>
19#include <linux/user.h> 19#include <linux/user.h>
20#include <linux/slab.h>
21#include <linux/security.h> 20#include <linux/security.h>
22#include <linux/signal.h> 21#include <linux/signal.h>
23#include <linux/io.h> 22#include <linux/io.h>
@@ -26,6 +25,7 @@
26#include <linux/tracehook.h> 25#include <linux/tracehook.h>
27#include <linux/elf.h> 26#include <linux/elf.h>
28#include <linux/regset.h> 27#include <linux/regset.h>
28#include <linux/hw_breakpoint.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/pgtable.h> 30#include <asm/pgtable.h>
31#include <asm/system.h> 31#include <asm/system.h>
@@ -34,6 +34,9 @@
34#include <asm/syscalls.h> 34#include <asm/syscalls.h>
35#include <asm/fpu.h> 35#include <asm/fpu.h>
36 36
37#define CREATE_TRACE_POINTS
38#include <trace/events/syscalls.h>
39
37/* 40/*
38 * This routine will get a word off of the process kernel stack. 41 * This routine will get a word off of the process kernel stack.
39 */ 42 */
@@ -60,33 +63,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
60 return 0; 63 return 0;
61} 64}
62 65
63void user_enable_single_step(struct task_struct *child) 66void ptrace_triggered(struct perf_event *bp, int nmi,
67 struct perf_sample_data *data, struct pt_regs *regs)
64{ 68{
65 /* Next scheduling will set up UBC */ 69 struct perf_event_attr attr;
66 if (child->thread.ubc_pc == 0)
67 ubc_usercnt += 1;
68 70
69 child->thread.ubc_pc = get_stack_long(child, 71 /*
70 offsetof(struct pt_regs, pc)); 72 * Disable the breakpoint request here since ptrace has defined a
73 * one-shot behaviour for breakpoint exceptions.
74 */
75 attr = bp->attr;
76 attr.disabled = true;
77 modify_user_hw_breakpoint(bp, &attr);
78}
79
80static int set_single_step(struct task_struct *tsk, unsigned long addr)
81{
82 struct thread_struct *thread = &tsk->thread;
83 struct perf_event *bp;
84 struct perf_event_attr attr;
85
86 bp = thread->ptrace_bps[0];
87 if (!bp) {
88 ptrace_breakpoint_init(&attr);
89
90 attr.bp_addr = addr;
91 attr.bp_len = HW_BREAKPOINT_LEN_2;
92 attr.bp_type = HW_BREAKPOINT_R;
93
94 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
95 if (IS_ERR(bp))
96 return PTR_ERR(bp);
97
98 thread->ptrace_bps[0] = bp;
99 } else {
100 int err;
101
102 attr = bp->attr;
103 attr.bp_addr = addr;
104 err = modify_user_hw_breakpoint(bp, &attr);
105 if (unlikely(err))
106 return err;
107 }
108
109 return 0;
110}
111
112void user_enable_single_step(struct task_struct *child)
113{
114 unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
71 115
72 set_tsk_thread_flag(child, TIF_SINGLESTEP); 116 set_tsk_thread_flag(child, TIF_SINGLESTEP);
117
118 set_single_step(child, pc);
73} 119}
74 120
75void user_disable_single_step(struct task_struct *child) 121void user_disable_single_step(struct task_struct *child)
76{ 122{
77 clear_tsk_thread_flag(child, TIF_SINGLESTEP); 123 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
78
79 /*
80 * Ensure the UBC is not programmed at the next context switch.
81 *
82 * Normally this is not needed but there are sequences such as
83 * singlestep, signal delivery, and continue that leave the
84 * ubc_pc non-zero leading to spurious SIGTRAPs.
85 */
86 if (child->thread.ubc_pc != 0) {
87 ubc_usercnt -= 1;
88 child->thread.ubc_pc = 0;
89 }
90} 124}
91 125
92/* 126/*
@@ -160,10 +194,10 @@ int fpregs_get(struct task_struct *target,
160 194
161 if ((boot_cpu_data.flags & CPU_HAS_FPU)) 195 if ((boot_cpu_data.flags & CPU_HAS_FPU))
162 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 196 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
163 &target->thread.fpu.hard, 0, -1); 197 &target->thread.xstate->hardfpu, 0, -1);
164 198
165 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 199 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
166 &target->thread.fpu.soft, 0, -1); 200 &target->thread.xstate->softfpu, 0, -1);
167} 201}
168 202
169static int fpregs_set(struct task_struct *target, 203static int fpregs_set(struct task_struct *target,
@@ -181,10 +215,10 @@ static int fpregs_set(struct task_struct *target,
181 215
182 if ((boot_cpu_data.flags & CPU_HAS_FPU)) 216 if ((boot_cpu_data.flags & CPU_HAS_FPU))
183 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 217 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
184 &target->thread.fpu.hard, 0, -1); 218 &target->thread.xstate->hardfpu, 0, -1);
185 219
186 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 220 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
187 &target->thread.fpu.soft, 0, -1); 221 &target->thread.xstate->softfpu, 0, -1);
188} 222}
189 223
190static int fpregs_active(struct task_struct *target, 224static int fpregs_active(struct task_struct *target,
@@ -240,6 +274,33 @@ static int dspregs_active(struct task_struct *target,
240} 274}
241#endif 275#endif
242 276
277const struct pt_regs_offset regoffset_table[] = {
278 REGS_OFFSET_NAME(0),
279 REGS_OFFSET_NAME(1),
280 REGS_OFFSET_NAME(2),
281 REGS_OFFSET_NAME(3),
282 REGS_OFFSET_NAME(4),
283 REGS_OFFSET_NAME(5),
284 REGS_OFFSET_NAME(6),
285 REGS_OFFSET_NAME(7),
286 REGS_OFFSET_NAME(8),
287 REGS_OFFSET_NAME(9),
288 REGS_OFFSET_NAME(10),
289 REGS_OFFSET_NAME(11),
290 REGS_OFFSET_NAME(12),
291 REGS_OFFSET_NAME(13),
292 REGS_OFFSET_NAME(14),
293 REGS_OFFSET_NAME(15),
294 REG_OFFSET_NAME(pc),
295 REG_OFFSET_NAME(pr),
296 REG_OFFSET_NAME(sr),
297 REG_OFFSET_NAME(gbr),
298 REG_OFFSET_NAME(mach),
299 REG_OFFSET_NAME(macl),
300 REG_OFFSET_NAME(tra),
301 REG_OFFSET_END,
302};
303
243/* 304/*
244 * These are our native regset flavours. 305 * These are our native regset flavours.
245 */ 306 */
@@ -304,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
304 return &user_sh_native_view; 365 return &user_sh_native_view;
305} 366}
306 367
307long arch_ptrace(struct task_struct *child, long request, long addr, long data) 368long arch_ptrace(struct task_struct *child, long request,
369 unsigned long addr, unsigned long data)
308{ 370{
309 struct user * dummy = NULL;
310 unsigned long __user *datap = (unsigned long __user *)data; 371 unsigned long __user *datap = (unsigned long __user *)data;
311 int ret; 372 int ret;
312 373
@@ -322,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
322 383
323 if (addr < sizeof(struct pt_regs)) 384 if (addr < sizeof(struct pt_regs))
324 tmp = get_stack_long(child, addr); 385 tmp = get_stack_long(child, addr);
325 else if (addr >= (long) &dummy->fpu && 386 else if (addr >= offsetof(struct user, fpu) &&
326 addr < (long) &dummy->u_fpvalid) { 387 addr < offsetof(struct user, u_fpvalid)) {
327 if (!tsk_used_math(child)) { 388 if (!tsk_used_math(child)) {
328 if (addr == (long)&dummy->fpu.fpscr) 389 if (addr == offsetof(struct user, fpu.fpscr))
329 tmp = FPSCR_INIT; 390 tmp = FPSCR_INIT;
330 else 391 else
331 tmp = 0; 392 tmp = 0;
332 } else 393 } else {
333 tmp = ((long *)&child->thread.fpu) 394 unsigned long index;
334 [(addr - (long)&dummy->fpu) >> 2]; 395 index = addr - offsetof(struct user, fpu);
335 } else if (addr == (long) &dummy->u_fpvalid) 396 tmp = ((unsigned long *)child->thread.xstate)
397 [index >> 2];
398 }
399 } else if (addr == offsetof(struct user, u_fpvalid))
336 tmp = !!tsk_used_math(child); 400 tmp = !!tsk_used_math(child);
337 else if (addr == PT_TEXT_ADDR) 401 else if (addr == PT_TEXT_ADDR)
338 tmp = child->mm->start_code; 402 tmp = child->mm->start_code;
@@ -356,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
356 420
357 if (addr < sizeof(struct pt_regs)) 421 if (addr < sizeof(struct pt_regs))
358 ret = put_stack_long(child, addr, data); 422 ret = put_stack_long(child, addr, data);
359 else if (addr >= (long) &dummy->fpu && 423 else if (addr >= offsetof(struct user, fpu) &&
360 addr < (long) &dummy->u_fpvalid) { 424 addr < offsetof(struct user, u_fpvalid)) {
425 unsigned long index;
426 index = addr - offsetof(struct user, fpu);
361 set_stopped_child_used_math(child); 427 set_stopped_child_used_math(child);
362 ((long *)&child->thread.fpu) 428 ((unsigned long *)child->thread.xstate)
363 [(addr - (long)&dummy->fpu) >> 2] = data; 429 [index >> 2] = data;
364 ret = 0; 430 ret = 0;
365 } else if (addr == (long) &dummy->u_fpvalid) { 431 } else if (addr == offsetof(struct user, u_fpvalid)) {
366 conditional_stopped_child_used_math(data, child); 432 conditional_stopped_child_used_math(data, child);
367 ret = 0; 433 ret = 0;
368 } 434 }
@@ -372,58 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
372 return copy_regset_to_user(child, &user_sh_native_view, 438 return copy_regset_to_user(child, &user_sh_native_view,
373 REGSET_GENERAL, 439 REGSET_GENERAL,
374 0, sizeof(struct pt_regs), 440 0, sizeof(struct pt_regs),
375 (void __user *)data); 441 datap);
376 case PTRACE_SETREGS: 442 case PTRACE_SETREGS:
377 return copy_regset_from_user(child, &user_sh_native_view, 443 return copy_regset_from_user(child, &user_sh_native_view,
378 REGSET_GENERAL, 444 REGSET_GENERAL,
379 0, sizeof(struct pt_regs), 445 0, sizeof(struct pt_regs),
380 (const void __user *)data); 446 datap);
381#ifdef CONFIG_SH_FPU 447#ifdef CONFIG_SH_FPU
382 case PTRACE_GETFPREGS: 448 case PTRACE_GETFPREGS:
383 return copy_regset_to_user(child, &user_sh_native_view, 449 return copy_regset_to_user(child, &user_sh_native_view,
384 REGSET_FPU, 450 REGSET_FPU,
385 0, sizeof(struct user_fpu_struct), 451 0, sizeof(struct user_fpu_struct),
386 (void __user *)data); 452 datap);
387 case PTRACE_SETFPREGS: 453 case PTRACE_SETFPREGS:
388 return copy_regset_from_user(child, &user_sh_native_view, 454 return copy_regset_from_user(child, &user_sh_native_view,
389 REGSET_FPU, 455 REGSET_FPU,
390 0, sizeof(struct user_fpu_struct), 456 0, sizeof(struct user_fpu_struct),
391 (const void __user *)data); 457 datap);
392#endif 458#endif
393#ifdef CONFIG_SH_DSP 459#ifdef CONFIG_SH_DSP
394 case PTRACE_GETDSPREGS: 460 case PTRACE_GETDSPREGS:
395 return copy_regset_to_user(child, &user_sh_native_view, 461 return copy_regset_to_user(child, &user_sh_native_view,
396 REGSET_DSP, 462 REGSET_DSP,
397 0, sizeof(struct pt_dspregs), 463 0, sizeof(struct pt_dspregs),
398 (void __user *)data); 464 datap);
399 case PTRACE_SETDSPREGS: 465 case PTRACE_SETDSPREGS:
400 return copy_regset_from_user(child, &user_sh_native_view, 466 return copy_regset_from_user(child, &user_sh_native_view,
401 REGSET_DSP, 467 REGSET_DSP,
402 0, sizeof(struct pt_dspregs), 468 0, sizeof(struct pt_dspregs),
403 (const void __user *)data); 469 datap);
404#endif
405#ifdef CONFIG_BINFMT_ELF_FDPIC
406 case PTRACE_GETFDPIC: {
407 unsigned long tmp = 0;
408
409 switch (addr) {
410 case PTRACE_GETFDPIC_EXEC:
411 tmp = child->mm->context.exec_fdpic_loadmap;
412 break;
413 case PTRACE_GETFDPIC_INTERP:
414 tmp = child->mm->context.interp_fdpic_loadmap;
415 break;
416 default:
417 break;
418 }
419
420 ret = 0;
421 if (put_user(tmp, datap)) {
422 ret = -EFAULT;
423 break;
424 }
425 break;
426 }
427#endif 470#endif
428 default: 471 default:
429 ret = ptrace_request(child, request, addr, data); 472 ret = ptrace_request(child, request, addr, data);
@@ -459,6 +502,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
459 */ 502 */
460 ret = -1L; 503 ret = -1L;
461 504
505 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
506 trace_sys_enter(regs, regs->regs[0]);
507
462 if (unlikely(current->audit_context)) 508 if (unlikely(current->audit_context))
463 audit_syscall_entry(audit_arch(), regs->regs[3], 509 audit_syscall_entry(audit_arch(), regs->regs[3],
464 regs->regs[4], regs->regs[5], 510 regs->regs[4], regs->regs[5],
@@ -475,6 +521,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
475 audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), 521 audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
476 regs->regs[0]); 522 regs->regs[0]);
477 523
524 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
525 trace_sys_exit(regs, regs->regs[0]);
526
478 step = test_thread_flag(TIF_SINGLESTEP); 527 step = test_thread_flag(TIF_SINGLESTEP);
479 if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 528 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
480 tracehook_report_syscall_exit(regs, step); 529 tracehook_report_syscall_exit(regs, step);
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 695097438f02..4436eacddb15 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -20,7 +20,7 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/smp_lock.h> 23#include <linux/bitops.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/ptrace.h> 25#include <linux/ptrace.h>
26#include <linux/user.h> 26#include <linux/user.h>
@@ -40,6 +40,9 @@
40#include <asm/syscalls.h> 40#include <asm/syscalls.h>
41#include <asm/fpu.h> 41#include <asm/fpu.h>
42 42
43#define CREATE_TRACE_POINTS
44#include <trace/events/syscalls.h>
45
43/* This mask defines the bits of the SR which the user is not allowed to 46/* This mask defines the bits of the SR which the user is not allowed to
44 change, which are everything except S, Q, M, PR, SZ, FR. */ 47 change, which are everything except S, Q, M, PR, SZ, FR. */
45#define SR_MASK (0xffff8cfd) 48#define SR_MASK (0xffff8cfd)
@@ -79,13 +82,13 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
79 82
80 if (last_task_used_math == task) { 83 if (last_task_used_math == task) {
81 enable_fpu(); 84 enable_fpu();
82 save_fpu(task, regs); 85 save_fpu(task);
83 disable_fpu(); 86 disable_fpu();
84 last_task_used_math = 0; 87 last_task_used_math = 0;
85 regs->sr |= SR_FD; 88 regs->sr |= SR_FD;
86 } 89 }
87 90
88 tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)]; 91 tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
89 return tmp; 92 return tmp;
90} 93}
91 94
@@ -111,17 +114,16 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
111 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; 114 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
112 115
113 if (!tsk_used_math(task)) { 116 if (!tsk_used_math(task)) {
114 fpinit(&task->thread.fpu.hard); 117 init_fpu(task);
115 set_stopped_child_used_math(task);
116 } else if (last_task_used_math == task) { 118 } else if (last_task_used_math == task) {
117 enable_fpu(); 119 enable_fpu();
118 save_fpu(task, regs); 120 save_fpu(task);
119 disable_fpu(); 121 disable_fpu();
120 last_task_used_math = 0; 122 last_task_used_math = 0;
121 regs->sr |= SR_FD; 123 regs->sr |= SR_FD;
122 } 124 }
123 125
124 ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data; 126 ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
125 return 0; 127 return 0;
126} 128}
127 129
@@ -130,6 +132,8 @@ void user_enable_single_step(struct task_struct *child)
130 struct pt_regs *regs = child->thread.uregs; 132 struct pt_regs *regs = child->thread.uregs;
131 133
132 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */ 134 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
135
136 set_tsk_thread_flag(child, TIF_SINGLESTEP);
133} 137}
134 138
135void user_disable_single_step(struct task_struct *child) 139void user_disable_single_step(struct task_struct *child)
@@ -137,6 +141,8 @@ void user_disable_single_step(struct task_struct *child)
137 struct pt_regs *regs = child->thread.uregs; 141 struct pt_regs *regs = child->thread.uregs;
138 142
139 regs->sr &= ~SR_SSTEP; 143 regs->sr &= ~SR_SSTEP;
144
145 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
140} 146}
141 147
142static int genregs_get(struct task_struct *target, 148static int genregs_get(struct task_struct *target,
@@ -219,7 +225,7 @@ int fpregs_get(struct task_struct *target,
219 return ret; 225 return ret;
220 226
221 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 227 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
222 &target->thread.fpu.hard, 0, -1); 228 &target->thread.xstate->hardfpu, 0, -1);
223} 229}
224 230
225static int fpregs_set(struct task_struct *target, 231static int fpregs_set(struct task_struct *target,
@@ -236,7 +242,7 @@ static int fpregs_set(struct task_struct *target,
236 set_stopped_child_used_math(target); 242 set_stopped_child_used_math(target);
237 243
238 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 244 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
239 &target->thread.fpu.hard, 0, -1); 245 &target->thread.xstate->hardfpu, 0, -1);
240} 246}
241 247
242static int fpregs_active(struct task_struct *target, 248static int fpregs_active(struct task_struct *target,
@@ -246,6 +252,85 @@ static int fpregs_active(struct task_struct *target,
246} 252}
247#endif 253#endif
248 254
255const struct pt_regs_offset regoffset_table[] = {
256 REG_OFFSET_NAME(pc),
257 REG_OFFSET_NAME(sr),
258 REG_OFFSET_NAME(syscall_nr),
259 REGS_OFFSET_NAME(0),
260 REGS_OFFSET_NAME(1),
261 REGS_OFFSET_NAME(2),
262 REGS_OFFSET_NAME(3),
263 REGS_OFFSET_NAME(4),
264 REGS_OFFSET_NAME(5),
265 REGS_OFFSET_NAME(6),
266 REGS_OFFSET_NAME(7),
267 REGS_OFFSET_NAME(8),
268 REGS_OFFSET_NAME(9),
269 REGS_OFFSET_NAME(10),
270 REGS_OFFSET_NAME(11),
271 REGS_OFFSET_NAME(12),
272 REGS_OFFSET_NAME(13),
273 REGS_OFFSET_NAME(14),
274 REGS_OFFSET_NAME(15),
275 REGS_OFFSET_NAME(16),
276 REGS_OFFSET_NAME(17),
277 REGS_OFFSET_NAME(18),
278 REGS_OFFSET_NAME(19),
279 REGS_OFFSET_NAME(20),
280 REGS_OFFSET_NAME(21),
281 REGS_OFFSET_NAME(22),
282 REGS_OFFSET_NAME(23),
283 REGS_OFFSET_NAME(24),
284 REGS_OFFSET_NAME(25),
285 REGS_OFFSET_NAME(26),
286 REGS_OFFSET_NAME(27),
287 REGS_OFFSET_NAME(28),
288 REGS_OFFSET_NAME(29),
289 REGS_OFFSET_NAME(30),
290 REGS_OFFSET_NAME(31),
291 REGS_OFFSET_NAME(32),
292 REGS_OFFSET_NAME(33),
293 REGS_OFFSET_NAME(34),
294 REGS_OFFSET_NAME(35),
295 REGS_OFFSET_NAME(36),
296 REGS_OFFSET_NAME(37),
297 REGS_OFFSET_NAME(38),
298 REGS_OFFSET_NAME(39),
299 REGS_OFFSET_NAME(40),
300 REGS_OFFSET_NAME(41),
301 REGS_OFFSET_NAME(42),
302 REGS_OFFSET_NAME(43),
303 REGS_OFFSET_NAME(44),
304 REGS_OFFSET_NAME(45),
305 REGS_OFFSET_NAME(46),
306 REGS_OFFSET_NAME(47),
307 REGS_OFFSET_NAME(48),
308 REGS_OFFSET_NAME(49),
309 REGS_OFFSET_NAME(50),
310 REGS_OFFSET_NAME(51),
311 REGS_OFFSET_NAME(52),
312 REGS_OFFSET_NAME(53),
313 REGS_OFFSET_NAME(54),
314 REGS_OFFSET_NAME(55),
315 REGS_OFFSET_NAME(56),
316 REGS_OFFSET_NAME(57),
317 REGS_OFFSET_NAME(58),
318 REGS_OFFSET_NAME(59),
319 REGS_OFFSET_NAME(60),
320 REGS_OFFSET_NAME(61),
321 REGS_OFFSET_NAME(62),
322 REGS_OFFSET_NAME(63),
323 TREGS_OFFSET_NAME(0),
324 TREGS_OFFSET_NAME(1),
325 TREGS_OFFSET_NAME(2),
326 TREGS_OFFSET_NAME(3),
327 TREGS_OFFSET_NAME(4),
328 TREGS_OFFSET_NAME(5),
329 TREGS_OFFSET_NAME(6),
330 TREGS_OFFSET_NAME(7),
331 REG_OFFSET_END,
332};
333
249/* 334/*
250 * These are our native regset flavours. 335 * These are our native regset flavours.
251 */ 336 */
@@ -298,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
298 return &user_sh64_native_view; 383 return &user_sh64_native_view;
299} 384}
300 385
301long arch_ptrace(struct task_struct *child, long request, long addr, long data) 386long arch_ptrace(struct task_struct *child, long request,
387 unsigned long addr, unsigned long data)
302{ 388{
303 int ret; 389 int ret;
390 unsigned long __user *datap = (unsigned long __user *) data;
304 391
305 switch (request) { 392 switch (request) {
306 /* read the word at location addr in the USER area. */ 393 /* read the word at location addr in the USER area. */
@@ -315,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
315 tmp = get_stack_long(child, addr); 402 tmp = get_stack_long(child, addr);
316 else if ((addr >= offsetof(struct user, fpu)) && 403 else if ((addr >= offsetof(struct user, fpu)) &&
317 (addr < offsetof(struct user, u_fpvalid))) { 404 (addr < offsetof(struct user, u_fpvalid))) {
318 tmp = get_fpu_long(child, addr - offsetof(struct user, fpu)); 405 unsigned long index;
406 index = addr - offsetof(struct user, fpu);
407 tmp = get_fpu_long(child, index);
319 } else if (addr == offsetof(struct user, u_fpvalid)) { 408 } else if (addr == offsetof(struct user, u_fpvalid)) {
320 tmp = !!tsk_used_math(child); 409 tmp = !!tsk_used_math(child);
321 } else { 410 } else {
322 break; 411 break;
323 } 412 }
324 ret = put_user(tmp, (unsigned long *)data); 413 ret = put_user(tmp, datap);
325 break; 414 break;
326 } 415 }
327 416
@@ -352,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
352 } 441 }
353 else if ((addr >= offsetof(struct user, fpu)) && 442 else if ((addr >= offsetof(struct user, fpu)) &&
354 (addr < offsetof(struct user, u_fpvalid))) { 443 (addr < offsetof(struct user, u_fpvalid))) {
355 ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data); 444 unsigned long index;
445 index = addr - offsetof(struct user, fpu);
446 ret = put_fpu_long(child, index, data);
356 } 447 }
357 break; 448 break;
358 449
@@ -360,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
360 return copy_regset_to_user(child, &user_sh64_native_view, 451 return copy_regset_to_user(child, &user_sh64_native_view,
361 REGSET_GENERAL, 452 REGSET_GENERAL,
362 0, sizeof(struct pt_regs), 453 0, sizeof(struct pt_regs),
363 (void __user *)data); 454 datap);
364 case PTRACE_SETREGS: 455 case PTRACE_SETREGS:
365 return copy_regset_from_user(child, &user_sh64_native_view, 456 return copy_regset_from_user(child, &user_sh64_native_view,
366 REGSET_GENERAL, 457 REGSET_GENERAL,
367 0, sizeof(struct pt_regs), 458 0, sizeof(struct pt_regs),
368 (const void __user *)data); 459 datap);
369#ifdef CONFIG_SH_FPU 460#ifdef CONFIG_SH_FPU
370 case PTRACE_GETFPREGS: 461 case PTRACE_GETFPREGS:
371 return copy_regset_to_user(child, &user_sh64_native_view, 462 return copy_regset_to_user(child, &user_sh64_native_view,
372 REGSET_FPU, 463 REGSET_FPU,
373 0, sizeof(struct user_fpu_struct), 464 0, sizeof(struct user_fpu_struct),
374 (void __user *)data); 465 datap);
375 case PTRACE_SETFPREGS: 466 case PTRACE_SETFPREGS:
376 return copy_regset_from_user(child, &user_sh64_native_view, 467 return copy_regset_from_user(child, &user_sh64_native_view,
377 REGSET_FPU, 468 REGSET_FPU,
378 0, sizeof(struct user_fpu_struct), 469 0, sizeof(struct user_fpu_struct),
379 (const void __user *)data); 470 datap);
380#endif 471#endif
381 default: 472 default:
382 ret = ptrace_request(child, request, addr, data); 473 ret = ptrace_request(child, request, addr, data);
@@ -386,13 +477,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
386 return ret; 477 return ret;
387} 478}
388 479
389asmlinkage int sh64_ptrace(long request, long pid, long addr, long data) 480asmlinkage int sh64_ptrace(long request, long pid,
481 unsigned long addr, unsigned long data)
390{ 482{
391#define WPC_DBRMODE 0x0d104008 483#define WPC_DBRMODE 0x0d104008
392 static int first_call = 1; 484 static unsigned long first_call;
393 485
394 lock_kernel(); 486 if (!test_and_set_bit(0, &first_call)) {
395 if (first_call) {
396 /* Set WPC.DBRMODE to 0. This makes all debug events get 487 /* Set WPC.DBRMODE to 0. This makes all debug events get
397 * delivered through RESVEC, i.e. into the handlers in entry.S. 488 * delivered through RESVEC, i.e. into the handlers in entry.S.
398 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE 489 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
@@ -402,9 +493,7 @@ asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
402 * the remote gdb.) */ 493 * the remote gdb.) */
403 printk("DBRMODE set to 0 to permit native debugging\n"); 494 printk("DBRMODE set to 0 to permit native debugging\n");
404 poke_real_address_q(WPC_DBRMODE, 0); 495 poke_real_address_q(WPC_DBRMODE, 0);
405 first_call = 0;
406 } 496 }
407 unlock_kernel();
408 497
409 return sys_ptrace(request, pid, addr, data); 498 return sys_ptrace(request, pid, addr, data);
410} 499}
@@ -438,6 +527,9 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
438 */ 527 */
439 ret = -1LL; 528 ret = -1LL;
440 529
530 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
531 trace_sys_enter(regs, regs->regs[9]);
532
441 if (unlikely(current->audit_context)) 533 if (unlikely(current->audit_context))
442 audit_syscall_entry(audit_arch(), regs->regs[1], 534 audit_syscall_entry(audit_arch(), regs->regs[1],
443 regs->regs[2], regs->regs[3], 535 regs->regs[2], regs->regs[3],
@@ -448,12 +540,18 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
448 540
449asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) 541asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
450{ 542{
543 int step;
544
451 if (unlikely(current->audit_context)) 545 if (unlikely(current->audit_context))
452 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]), 546 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
453 regs->regs[9]); 547 regs->regs[9]);
454 548
455 if (test_thread_flag(TIF_SYSCALL_TRACE)) 549 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
456 tracehook_report_syscall_exit(regs, 0); 550 trace_sys_exit(regs, regs->regs[9]);
551
552 step = test_thread_flag(TIF_SINGLESTEP);
553 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
554 tracehook_report_syscall_exit(regs, step);
457} 555}
458 556
459/* Called with interrupts disabled */ 557/* Called with interrupts disabled */
@@ -470,9 +568,10 @@ asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
470} 568}
471 569
472/* Called with interrupts disabled */ 570/* Called with interrupts disabled */
473asmlinkage void do_software_break_point(unsigned long long vec, 571BUILD_TRAP_HANDLER(breakpoint)
474 struct pt_regs *regs)
475{ 572{
573 TRAP_HANDLER_DECL;
574
476 /* We need to forward step the PC, to counteract the backstep done 575 /* We need to forward step the PC, to counteract the backstep done
477 in signal.c. */ 576 in signal.c. */
478 local_irq_enable(); 577 local_irq_enable();
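
The sh64_ptrace() rework above drops lock_kernel()/unlock_kernel() in favour of test_and_set_bit() on a static word, which is enough to make the WPC.DBRMODE poke a one-shot operation. A minimal sketch of the idiom, with do_one_time_setup() standing in as a hypothetical helper:

#include <linux/bitops.h>

static unsigned long init_done;		/* bit 0: has setup run? */

static void maybe_do_one_time_setup(void)
{
	/* test_and_set_bit() atomically claims bit 0, so only the
	 * first caller ever executes the initialisation */
	if (!test_and_set_bit(0, &init_done))
		do_one_time_setup();	/* hypothetical helper */
}
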
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 000000000000..ca6a5ca64015
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,102 @@
1#include <linux/pm.h>
2#include <linux/kexec.h>
3#include <linux/kernel.h>
4#include <linux/reboot.h>
5#include <linux/module.h>
6#ifdef CONFIG_SUPERH32
7#include <asm/watchdog.h>
8#endif
9#include <asm/addrspace.h>
10#include <asm/reboot.h>
11#include <asm/system.h>
12#include <asm/tlbflush.h>
13
14void (*pm_power_off)(void);
15EXPORT_SYMBOL(pm_power_off);
16
17#ifdef CONFIG_SUPERH32
18static void watchdog_trigger_immediate(void)
19{
20 sh_wdt_write_cnt(0xFF);
21 sh_wdt_write_csr(0xC2);
22}
23#endif
24
25static void native_machine_restart(char * __unused)
26{
27 local_irq_disable();
28
29 /* Destroy all of the TLBs in preparation for reset by MMU */
30 __flush_tlb_global();
31
32 /* Address error with SR.BL=1 first. */
33 trigger_address_error();
34
35#ifdef CONFIG_SUPERH32
36 /* If that fails or is unsupported, go for the watchdog next. */
37 watchdog_trigger_immediate();
38#endif
39
40 /*
41 * Give up and sleep.
42 */
43 while (1)
44 cpu_sleep();
45}
46
47static void native_machine_shutdown(void)
48{
49 smp_send_stop();
50}
51
52static void native_machine_power_off(void)
53{
54 if (pm_power_off)
55 pm_power_off();
56}
57
58static void native_machine_halt(void)
59{
60 /* stop other cpus */
61 machine_shutdown();
62
63 /* stop this cpu */
64 stop_this_cpu(NULL);
65}
66
67struct machine_ops machine_ops = {
68 .power_off = native_machine_power_off,
69 .shutdown = native_machine_shutdown,
70 .restart = native_machine_restart,
71 .halt = native_machine_halt,
72#ifdef CONFIG_KEXEC
73 .crash_shutdown = native_machine_crash_shutdown,
74#endif
75};
76
77void machine_power_off(void)
78{
79 machine_ops.power_off();
80}
81
82void machine_shutdown(void)
83{
84 machine_ops.shutdown();
85}
86
87void machine_restart(char *cmd)
88{
89 machine_ops.restart(cmd);
90}
91
92void machine_halt(void)
93{
94 machine_ops.halt();
95}
96
97#ifdef CONFIG_KEXEC
98void machine_crash_shutdown(struct pt_regs *regs)
99{
100 machine_ops.crash_shutdown(regs);
101}
102#endif
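
The new reboot.c routes machine_power_off()/machine_restart() and friends through the machine_ops table, so a platform can override individual hooks. A hedged sketch of what a board file might do; sh_board_restart() and BOARD_RESET_REG are hypothetical:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/reboot.h>

#define BOARD_RESET_REG 0xb8000000	/* hypothetical board register */

static void sh_board_restart(char *cmd)
{
	/* hit a board-level reset register instead of the MMU/watchdog path */
	__raw_writel(1, (void __iomem *)BOARD_RESET_REG);
}

static int __init board_reboot_init(void)
{
	machine_ops.restart = sh_board_restart;
	return 0;
}
arch_initcall(board_reboot_init);
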
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
new file mode 100644
index 000000000000..5124aeb28c3f
--- /dev/null
+++ b/arch/sh/kernel/return_address.c
@@ -0,0 +1,59 @@
1/*
2 * arch/sh/kernel/return_address.c
3 *
4 * Copyright (C) 2009 Matt Fleming
5 * Copyright (C) 2009 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <asm/dwarf.h>
14
15#ifdef CONFIG_DWARF_UNWINDER
16
17void *return_address(unsigned int depth)
18{
19 struct dwarf_frame *frame;
20 unsigned long ra;
21 int i;
22
23 for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
24 struct dwarf_frame *tmp;
25
26 tmp = dwarf_unwind_stack(ra, frame);
27 if (!tmp)
28 return NULL;
29
30 if (frame)
31 dwarf_free_frame(frame);
32
33 frame = tmp;
34
35 if (!frame || !frame->return_addr)
36 break;
37
38 ra = frame->return_addr;
39 }
40
41 /* Failed to unwind the stack to the specified depth. */
42 WARN_ON(i != depth + 1);
43
44 if (frame)
45 dwarf_free_frame(frame);
46
47 return (void *)ra;
48}
49
50#else
51
52void *return_address(unsigned int depth)
53{
54 return NULL;
55}
56
57#endif
58
59EXPORT_SYMBOL_GPL(return_address);
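
return_address(depth) above unwinds DWARF frames one call level at a time and hands back the caller's address at the requested depth (or NULL when the unwind fails or the unwinder is compiled out). A minimal sketch of how it might be used for ad-hoc diagnostics, assuming the prototype is visible; nothing here is taken from the patch itself:

#include <linux/kernel.h>

static void dump_callers(void)
{
	unsigned int depth;

	for (depth = 0; depth < 4; depth++) {
		void *ra = return_address(depth);

		if (!ra)
			break;

		/* %pS resolves the address to a symbol name */
		pr_info("caller[%u]: %pS\n", depth, ra);
	}
}
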
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index dd38338553ef..4f267160c515 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
4 * This file handles the architecture-dependent parts of initialization 4 * This file handles the architecture-dependent parts of initialization
5 * 5 *
6 * Copyright (C) 1999 Niibe Yutaka 6 * Copyright (C) 1999 Niibe Yutaka
7 * Copyright (C) 2002 - 2007 Paul Mundt 7 * Copyright (C) 2002 - 2010 Paul Mundt
8 */ 8 */
9#include <linux/screen_info.h> 9#include <linux/screen_info.h>
10#include <linux/ioport.h> 10#include <linux/ioport.h>
@@ -12,7 +12,6 @@
12#include <linux/initrd.h> 12#include <linux/initrd.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/console.h> 14#include <linux/console.h>
15#include <linux/seq_file.h>
16#include <linux/root_dev.h> 15#include <linux/root_dev.h>
17#include <linux/utsname.h> 16#include <linux/utsname.h>
18#include <linux/nodemask.h> 17#include <linux/nodemask.h>
@@ -24,12 +23,12 @@
24#include <linux/module.h> 23#include <linux/module.h>
25#include <linux/smp.h> 24#include <linux/smp.h>
26#include <linux/err.h> 25#include <linux/err.h>
27#include <linux/debugfs.h>
28#include <linux/crash_dump.h> 26#include <linux/crash_dump.h>
29#include <linux/mmzone.h> 27#include <linux/mmzone.h>
30#include <linux/clk.h> 28#include <linux/clk.h>
31#include <linux/delay.h> 29#include <linux/delay.h>
32#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/memblock.h>
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
34#include <asm/io.h> 33#include <asm/io.h>
35#include <asm/page.h> 34#include <asm/page.h>
@@ -38,7 +37,10 @@
38#include <asm/irq.h> 37#include <asm/irq.h>
39#include <asm/setup.h> 38#include <asm/setup.h>
40#include <asm/clock.h> 39#include <asm/clock.h>
40#include <asm/smp.h>
41#include <asm/mmu_context.h> 41#include <asm/mmu_context.h>
42#include <asm/mmzone.h>
43#include <asm/sparsemem.h>
42 44
43/* 45/*
44 * Initialize loops_per_jiffy as 10000000 (1000MIPS). 46 * Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -48,7 +50,9 @@
48struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = { 50struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
49 [0] = { 51 [0] = {
50 .type = CPU_SH_NONE, 52 .type = CPU_SH_NONE,
53 .family = CPU_FAMILY_UNKNOWN,
51 .loops_per_jiffy = 10000000, 54 .loops_per_jiffy = 10000000,
55 .phys_bits = MAX_PHYSMEM_BITS,
52 }, 56 },
53}; 57};
54EXPORT_SYMBOL(cpu_data); 58EXPORT_SYMBOL(cpu_data);
@@ -91,6 +95,7 @@ unsigned long memory_start;
91EXPORT_SYMBOL(memory_start); 95EXPORT_SYMBOL(memory_start);
92unsigned long memory_end = 0; 96unsigned long memory_end = 0;
93EXPORT_SYMBOL(memory_end); 97EXPORT_SYMBOL(memory_end);
98unsigned long memory_limit = 0;
94 99
95static struct resource mem_resources[MAX_NUMNODES]; 100static struct resource mem_resources[MAX_NUMNODES];
96 101
@@ -98,92 +103,74 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
98 103
99static int __init early_parse_mem(char *p) 104static int __init early_parse_mem(char *p)
100{ 105{
101 unsigned long size; 106 if (!p)
102 107 return 1;
103 memory_start = (unsigned long)__va(__MEMORY_START); 108
104 size = memparse(p, &p); 109 memory_limit = PAGE_ALIGN(memparse(p, &p));
105
106 if (size > __MEMORY_SIZE) {
107 printk(KERN_ERR
108 "Using mem= to increase the size of kernel memory "
109 "is not allowed.\n"
110 " Recompile the kernel with the correct value for "
111 "CONFIG_MEMORY_SIZE.\n");
112 return 0;
113 }
114 110
115 memory_end = memory_start + size; 111 pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
116 112
117 return 0; 113 return 0;
118} 114}
119early_param("mem", early_parse_mem); 115early_param("mem", early_parse_mem);
120 116
121/* 117void __init check_for_initrd(void)
122 * Register fully available low RAM pages with the bootmem allocator.
123 */
124static void __init register_bootmem_low_pages(void)
125{ 118{
126 unsigned long curr_pfn, last_pfn, pages; 119#ifdef CONFIG_BLK_DEV_INITRD
120 unsigned long start, end;
127 121
128 /* 122 /*
129 * We are rounding up the start address of usable memory: 123 * Check for the rare cases where boot loaders adhere to the boot
124 * ABI.
130 */ 125 */
131 curr_pfn = PFN_UP(__MEMORY_START); 126 if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
127 goto disable;
128
129 start = INITRD_START + __MEMORY_START;
130 end = start + INITRD_SIZE;
131
132 if (unlikely(end <= start))
133 goto disable;
134 if (unlikely(start & ~PAGE_MASK)) {
135 pr_err("initrd must be page aligned\n");
136 goto disable;
137 }
138
139 if (unlikely(start < __MEMORY_START)) {
140 pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
141 start, __MEMORY_START);
142 goto disable;
143 }
144
145 if (unlikely(end > memblock_end_of_DRAM())) {
146 pr_err("initrd extends beyond end of memory "
147 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
148 end, (unsigned long)memblock_end_of_DRAM());
149 goto disable;
150 }
132 151
133 /* 152 /*
134 * ... and at the end of the usable range downwards: 153 * If we got this far in spite of the boot loader's best efforts
154 * to the contrary, assume we actually have a valid initrd and
155 * fix up the root dev.
135 */ 156 */
136 last_pfn = PFN_DOWN(__pa(memory_end)); 157 ROOT_DEV = Root_RAM0;
137 158
138 if (last_pfn > max_low_pfn) 159 /*
139 last_pfn = max_low_pfn; 160 * Address sanitization
161 */
162 initrd_start = (unsigned long)__va(start);
163 initrd_end = initrd_start + INITRD_SIZE;
140 164
141 pages = last_pfn - curr_pfn; 165 memblock_reserve(__pa(initrd_start), INITRD_SIZE);
142 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
143}
144 166
145#ifdef CONFIG_KEXEC 167 return;
146static void __init reserve_crashkernel(void) 168
147{ 169disable:
148 unsigned long long free_mem; 170 pr_info("initrd disabled\n");
149 unsigned long long crash_size, crash_base; 171 initrd_start = initrd_end = 0;
150 void *vp;
151 int ret;
152
153 free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
154
155 ret = parse_crashkernel(boot_command_line, free_mem,
156 &crash_size, &crash_base);
157 if (ret == 0 && crash_size) {
158 if (crash_base <= 0) {
159 vp = alloc_bootmem_nopanic(crash_size);
160 if (!vp) {
161 printk(KERN_INFO "crashkernel allocation "
162 "failed\n");
163 return;
164 }
165 crash_base = __pa(vp);
166 } else if (reserve_bootmem(crash_base, crash_size,
167 BOOTMEM_EXCLUSIVE) < 0) {
168 printk(KERN_INFO "crashkernel reservation failed - "
169 "memory is in use\n");
170 return;
171 }
172
173 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
174 "for crashkernel (System RAM: %ldMB)\n",
175 (unsigned long)(crash_size >> 20),
176 (unsigned long)(crash_base >> 20),
177 (unsigned long)(free_mem >> 20));
178 crashk_res.start = crash_base;
179 crashk_res.end = crash_base + crash_size - 1;
180 insert_resource(&iomem_resource, &crashk_res);
181 }
182}
183#else
184static inline void __init reserve_crashkernel(void)
185{}
186#endif 172#endif
173}
187 174
188void __cpuinit calibrate_delay(void) 175void __cpuinit calibrate_delay(void)
189{ 176{
@@ -205,13 +192,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
205 unsigned long end_pfn) 192 unsigned long end_pfn)
206{ 193{
207 struct resource *res = &mem_resources[nid]; 194 struct resource *res = &mem_resources[nid];
195 unsigned long start, end;
208 196
209 WARN_ON(res->name); /* max one active range per node for now */ 197 WARN_ON(res->name); /* max one active range per node for now */
210 198
199 start = start_pfn << PAGE_SHIFT;
200 end = end_pfn << PAGE_SHIFT;
201
211 res->name = "System RAM"; 202 res->name = "System RAM";
212 res->start = start_pfn << PAGE_SHIFT; 203 res->start = start;
213 res->end = (end_pfn << PAGE_SHIFT) - 1; 204 res->end = end - 1;
214 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 205 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
206
215 if (request_resource(&iomem_resource, res)) { 207 if (request_resource(&iomem_resource, res)) {
216 pr_err("unable to request memory_resource 0x%lx 0x%lx\n", 208 pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
217 start_pfn, end_pfn); 209 start_pfn, end_pfn);
@@ -227,107 +219,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
227 request_resource(res, &data_resource); 219 request_resource(res, &data_resource);
228 request_resource(res, &bss_resource); 220 request_resource(res, &bss_resource);
229 221
230 add_active_range(nid, start_pfn, end_pfn);
231}
232
233void __init setup_bootmem_allocator(unsigned long free_pfn)
234{
235 unsigned long bootmap_size;
236
237 /* 222 /*
238 * Find a proper area for the bootmem bitmap. After this 223 * Also make sure that there is a PMB mapping that covers this
239 * bootstrap step all allocations (until the page allocator 224 * range before we attempt to activate it, to avoid reset by MMU.
240 * is intact) must be done via bootmem_alloc(). 225 * We can hit this path with NUMA or memory hot-add.
241 */ 226 */
242 bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn, 227 pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
243 min_low_pfn, max_low_pfn); 228 PAGE_KERNEL);
244
245 __add_active_range(0, min_low_pfn, max_low_pfn);
246 register_bootmem_low_pages();
247 229
248 node_set_online(0); 230 add_active_range(nid, start_pfn, end_pfn);
249
250 /*
251 * Reserve the kernel text and
252 * Reserve the bootmem bitmap. We do this in two steps (first step
253 * was init_bootmem()), because this catches the (definitely buggy)
254 * case of us accidentally initializing the bootmem allocator with
255 * an invalid RAM area.
256 */
257 reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
258 (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
259 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
260 BOOTMEM_DEFAULT);
261
262 /*
263 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
264 */
265 if (CONFIG_ZERO_PAGE_OFFSET != 0)
266 reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
267 BOOTMEM_DEFAULT);
268
269 sparse_memory_present_with_active_regions(0);
270
271#ifdef CONFIG_BLK_DEV_INITRD
272 ROOT_DEV = Root_RAM0;
273
274 if (LOADER_TYPE && INITRD_START) {
275 unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
276
277 if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
278 reserve_bootmem(initrd_start_phys, INITRD_SIZE,
279 BOOTMEM_DEFAULT);
280 initrd_start = (unsigned long)__va(initrd_start_phys);
281 initrd_end = initrd_start + INITRD_SIZE;
282 } else {
283 printk("initrd extends beyond end of memory "
284 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
285 initrd_start_phys + INITRD_SIZE,
286 (unsigned long)PFN_PHYS(max_low_pfn));
287 initrd_start = 0;
288 }
289 }
290#endif
291
292 reserve_crashkernel();
293}
294
295#ifndef CONFIG_NEED_MULTIPLE_NODES
296static void __init setup_memory(void)
297{
298 unsigned long start_pfn;
299
300 /*
301 * Partially used pages are not usable - thus
302 * we are rounding upwards:
303 */
304 start_pfn = PFN_UP(__pa(_end));
305 setup_bootmem_allocator(start_pfn);
306}
307#else
308extern void __init setup_memory(void);
309#endif
310
311/*
312 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
313 * is_kdump_kernel() to determine if we are booting after a panic. Hence
314 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
315 */
316#ifdef CONFIG_CRASH_DUMP
317/* elfcorehdr= specifies the location of elf core header
318 * stored by the crashed kernel.
319 */
320static int __init parse_elfcorehdr(char *arg)
321{
322 if (!arg)
323 return -EINVAL;
324 elfcorehdr_addr = memparse(arg, &arg);
325 return 0;
326} 231}
327early_param("elfcorehdr", parse_elfcorehdr);
328#endif
329 232
330void __init __attribute__ ((weak)) plat_early_device_setup(void) 233void __init __weak plat_early_device_setup(void)
331{ 234{
332} 235}
333 236
@@ -368,14 +271,14 @@ void __init setup_arch(char **cmdline_p)
368 bss_resource.start = virt_to_phys(__bss_start); 271 bss_resource.start = virt_to_phys(__bss_start);
369 bss_resource.end = virt_to_phys(_ebss)-1; 272 bss_resource.end = virt_to_phys(_ebss)-1;
370 273
371 memory_start = (unsigned long)__va(__MEMORY_START); 274#ifdef CONFIG_CMDLINE_OVERWRITE
372 if (!memory_end)
373 memory_end = memory_start + __MEMORY_SIZE;
374
375#ifdef CONFIG_CMDLINE_BOOL
376 strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); 275 strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
377#else 276#else
378 strlcpy(command_line, COMMAND_LINE, sizeof(command_line)); 277 strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
278#ifdef CONFIG_CMDLINE_EXTEND
279 strlcat(command_line, " ", sizeof(command_line));
280 strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
281#endif
379#endif 282#endif
380 283
381 /* Save unparsed command line copy for /proc/cmdline */ 284 /* Save unparsed command line copy for /proc/cmdline */
@@ -388,22 +291,10 @@ void __init setup_arch(char **cmdline_p)
388 291
389 sh_mv_setup(); 292 sh_mv_setup();
390 293
391 /* 294 /* Let earlyprintk output early console messages */
392 * Find the highest page frame number we have available 295 early_platform_driver_probe("earlyprintk", 1, 1);
393 */
394 max_pfn = PFN_DOWN(__pa(memory_end));
395
396 /*
397 * Determine low and high memory ranges:
398 */
399 max_low_pfn = max_pfn;
400 min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
401
402 nodes_clear(node_online_map);
403 296
404 /* Setup bootmem with available RAM */ 297 paging_init();
405 setup_memory();
406 sparse_init();
407 298
408#ifdef CONFIG_DUMMY_CONSOLE 299#ifdef CONFIG_DUMMY_CONSOLE
409 conswitchp = &dummy_con; 300 conswitchp = &dummy_con;
@@ -413,11 +304,7 @@ void __init setup_arch(char **cmdline_p)
413 if (likely(sh_mv.mv_setup)) 304 if (likely(sh_mv.mv_setup))
414 sh_mv.mv_setup(cmdline_p); 305 sh_mv.mv_setup(cmdline_p);
415 306
416 paging_init();
417
418#ifdef CONFIG_SMP
419 plat_smp_setup(); 307 plat_smp_setup();
420#endif
421} 308}
422 309
423/* processor boot mode configuration */ 310/* processor boot mode configuration */
@@ -431,156 +318,3 @@ int test_mode_pin(int pin)
431{ 318{
432 return sh_mv.mv_mode_pins() & pin; 319 return sh_mv.mv_mode_pins() & pin;
433} 320}
434
435static const char *cpu_name[] = {
436 [CPU_SH7201] = "SH7201",
437 [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
438 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
439 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
440 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
441 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
442 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
443 [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
444 [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
445 [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
446 [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
447 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
448 [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
449 [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
450 [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
451 [CPU_SH7786] = "SH7786",
452 [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
453 [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
454 [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
455 [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
456 [CPU_SH_NONE] = "Unknown"
457};
458
459const char *get_cpu_subtype(struct sh_cpuinfo *c)
460{
461 return cpu_name[c->type];
462}
463EXPORT_SYMBOL(get_cpu_subtype);
464
465#ifdef CONFIG_PROC_FS
466/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
467static const char *cpu_flags[] = {
468 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
469 "ptea", "llsc", "l2", "op32", "pteaex", NULL
470};
471
472static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
473{
474 unsigned long i;
475
476 seq_printf(m, "cpu flags\t:");
477
478 if (!c->flags) {
479 seq_printf(m, " %s\n", cpu_flags[0]);
480 return;
481 }
482
483 for (i = 0; cpu_flags[i]; i++)
484 if ((c->flags & (1 << i)))
485 seq_printf(m, " %s", cpu_flags[i+1]);
486
487 seq_printf(m, "\n");
488}
489
490static void show_cacheinfo(struct seq_file *m, const char *type,
491 struct cache_info info)
492{
493 unsigned int cache_size;
494
495 cache_size = info.ways * info.sets * info.linesz;
496
497 seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
498 type, cache_size >> 10, info.ways);
499}
500
501/*
502 * Get CPU information for use by the procfs.
503 */
504static int show_cpuinfo(struct seq_file *m, void *v)
505{
506 struct sh_cpuinfo *c = v;
507 unsigned int cpu = c - cpu_data;
508
509 if (!cpu_online(cpu))
510 return 0;
511
512 if (cpu == 0)
513 seq_printf(m, "machine\t\t: %s\n", get_system_type());
514
515 seq_printf(m, "processor\t: %d\n", cpu);
516 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
517 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
518 if (c->cut_major == -1)
519 seq_printf(m, "cut\t\t: unknown\n");
520 else if (c->cut_minor == -1)
521 seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
522 else
523 seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
524
525 show_cpuflags(m, c);
526
527 seq_printf(m, "cache type\t: ");
528
529 /*
530 * Check for what type of cache we have, we support both the
531 * unified cache on the SH-2 and SH-3, as well as the harvard
532 * style cache on the SH-4.
533 */
534 if (c->icache.flags & SH_CACHE_COMBINED) {
535 seq_printf(m, "unified\n");
536 show_cacheinfo(m, "cache", c->icache);
537 } else {
538 seq_printf(m, "split (harvard)\n");
539 show_cacheinfo(m, "icache", c->icache);
540 show_cacheinfo(m, "dcache", c->dcache);
541 }
542
543 /* Optional secondary cache */
544 if (c->flags & CPU_HAS_L2_CACHE)
545 show_cacheinfo(m, "scache", c->scache);
546
547 seq_printf(m, "bogomips\t: %lu.%02lu\n",
548 c->loops_per_jiffy/(500000/HZ),
549 (c->loops_per_jiffy/(5000/HZ)) % 100);
550
551 return 0;
552}
553
554static void *c_start(struct seq_file *m, loff_t *pos)
555{
556 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
557}
558static void *c_next(struct seq_file *m, void *v, loff_t *pos)
559{
560 ++*pos;
561 return c_start(m, pos);
562}
563static void c_stop(struct seq_file *m, void *v)
564{
565}
566const struct seq_operations cpuinfo_op = {
567 .start = c_start,
568 .next = c_next,
569 .stop = c_stop,
570 .show = show_cpuinfo,
571};
572#endif /* CONFIG_PROC_FS */
573
574struct dentry *sh_debugfs_root;
575
576static int __init sh_debugfs_init(void)
577{
578 sh_debugfs_root = debugfs_create_dir("sh", NULL);
579 if (!sh_debugfs_root)
580 return -ENOMEM;
581 if (IS_ERR(sh_debugfs_root))
582 return PTR_ERR(sh_debugfs_root);
583
584 return 0;
585}
586arch_initcall(sh_debugfs_init);
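
The reworked early_parse_mem() above no longer adjusts memory_end directly; it just records a page-aligned memory_limit parsed with memparse(), which understands the usual K/M/G suffixes. A small worked illustration of that parsing, not taken from the patch:

#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned long parse_mem_limit(char *arg)
{
	unsigned long limit;

	if (!arg)
		return 0;

	/* "64M" -> 64 << 20 = 0x04000000; PAGE_ALIGN() is a no-op here
	 * since the value is already a multiple of PAGE_SIZE */
	limit = PAGE_ALIGN(memparse(arg, &arg));

	return limit;
}
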
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index c852f7805728..47475cca068a 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,19 +1,30 @@
1/* 1/*
2 * linux/arch/sh/kernel/sh_bios.c
3 * C interface for trapping into the standard LinuxSH BIOS. 2 * C interface for trapping into the standard LinuxSH BIOS.
4 * 3 *
5 * Copyright (C) 2000 Greg Banks, Mitch Davis 4 * Copyright (C) 2000 Greg Banks, Mitch Davis
5 * Copyright (C) 1999, 2000 Niibe Yutaka
6 * Copyright (C) 2002 M. R. Brown
7 * Copyright (C) 2004 - 2010 Paul Mundt
6 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
7 */ 12 */
8#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/console.h>
15#include <linux/tty.h>
16#include <linux/init.h>
17#include <linux/io.h>
18#include <linux/delay.h>
9#include <asm/sh_bios.h> 19#include <asm/sh_bios.h>
10 20
11#define BIOS_CALL_CONSOLE_WRITE 0 21#define BIOS_CALL_CONSOLE_WRITE 0
12#define BIOS_CALL_ETH_NODE_ADDR 10 22#define BIOS_CALL_ETH_NODE_ADDR 10
13#define BIOS_CALL_SHUTDOWN 11 23#define BIOS_CALL_SHUTDOWN 11
14#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
15#define BIOS_CALL_GDB_DETACH 0xff 24#define BIOS_CALL_GDB_DETACH 0xff
16 25
26void *gdb_vbr_vector = NULL;
27
17static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, 28static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
18 long arg3) 29 long arg3)
19{ 30{
@@ -23,6 +34,9 @@ static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
23 register long r6 __asm__("r6") = arg2; 34 register long r6 __asm__("r6") = arg2;
24 register long r7 __asm__("r7") = arg3; 35 register long r7 __asm__("r7") = arg3;
25 36
37 if (!gdb_vbr_vector)
38 return -ENOSYS;
39
26 __asm__ __volatile__("trapa #0x3f":"=z"(r0) 40 __asm__ __volatile__("trapa #0x3f":"=z"(r0)
27 :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7) 41 :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
28 :"memory"); 42 :"memory");
@@ -34,11 +48,6 @@ void sh_bios_console_write(const char *buf, unsigned int len)
34 sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); 48 sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
35} 49}
36 50
37void sh_bios_char_out(char ch)
38{
39 sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
40}
41
42void sh_bios_gdb_detach(void) 51void sh_bios_gdb_detach(void)
43{ 52{
44 sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); 53 sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
@@ -55,3 +64,109 @@ void sh_bios_shutdown(unsigned int how)
55{ 64{
56 sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); 65 sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
57} 66}
67
68/*
69 * Read the old value of the VBR register to initialise the vector
70 * through which debug and BIOS traps are delegated by the Linux trap
71 * handler.
72 */
73void sh_bios_vbr_init(void)
74{
75 unsigned long vbr;
76
77 if (unlikely(gdb_vbr_vector))
78 return;
79
80 __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
81
82 if (vbr) {
83 gdb_vbr_vector = (void *)(vbr + 0x100);
84 printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
85 gdb_vbr_vector);
86 } else
87 printk(KERN_NOTICE "SH-BIOS not detected\n");
88}
89
90/**
91 * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
92 *
93 * This can be used by save/restore code to reinitialize the system VBR
94 * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
95 */
96void sh_bios_vbr_reload(void)
97{
98 if (gdb_vbr_vector)
99 __asm__ __volatile__ (
100 "ldc %0, vbr"
101 :
102 : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
103 : "memory"
104 );
105}
106
107/*
108 * Print a string through the BIOS
109 */
110static void sh_console_write(struct console *co, const char *s,
111 unsigned count)
112{
113 sh_bios_console_write(s, count);
114}
115
116/*
117 * Setup initial baud/bits/parity. We do two things here:
118 * - construct a cflag setting for the first rs_open()
119 * - initialize the serial port
120 * Return non-zero if we didn't find a serial port.
121 */
122static int __init sh_console_setup(struct console *co, char *options)
123{
124 int cflag = CREAD | HUPCL | CLOCAL;
125
126 /*
127 * Now construct a cflag setting.
128 * TODO: this is a totally bogus cflag, as we have
129 * no idea what serial settings the BIOS is using, or
130 * even if it's using the serial port at all.
131 */
132 cflag |= B115200 | CS8 | /*no parity*/0;
133
134 co->cflag = cflag;
135
136 return 0;
137}
138
139static struct console bios_console = {
140 .name = "bios",
141 .write = sh_console_write,
142 .setup = sh_console_setup,
143 .flags = CON_PRINTBUFFER,
144 .index = -1,
145};
146
147static struct console *early_console;
148
149static int __init setup_early_printk(char *buf)
150{
151 int keep_early = 0;
152
153 if (!buf)
154 return 0;
155
156 if (strstr(buf, "keep"))
157 keep_early = 1;
158
159 if (!strncmp(buf, "bios", 4))
160 early_console = &bios_console;
161
162 if (likely(early_console)) {
163 if (keep_early)
164 early_console->flags &= ~CON_BOOT;
165 else
166 early_console->flags |= CON_BOOT;
167 register_console(early_console);
168 }
169
170 return 0;
171}
172early_param("earlyprintk", setup_early_printk);
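
As a usage note, setup_early_printk() above only looks for the substrings "bios" and "keep" in the earlyprintk= argument: "bios" selects the BIOS boot console, and "keep" clears CON_BOOT so the console is not unregistered once a real console attaches. An illustrative boot command line (not taken from the patch) would be:

    console=ttySC0,115200 earlyprintk=bios,keep
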
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index fcc5de31f83b..3896f26efa4a 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -1,37 +1,11 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/smp.h> 2#include <linux/string.h>
3#include <linux/user.h> 3#include <linux/uaccess.h>
4#include <linux/elfcore.h> 4#include <linux/delay.h>
5#include <linux/sched.h> 5#include <linux/mm.h>
6#include <linux/in6.h>
7#include <linux/interrupt.h>
8#include <linux/vmalloc.h>
9#include <linux/pci.h>
10#include <linux/irq.h>
11#include <asm/sections.h>
12#include <asm/processor.h>
13#include <asm/uaccess.h>
14#include <asm/checksum.h> 6#include <asm/checksum.h>
15#include <asm/io.h> 7#include <asm/sections.h>
16#include <asm/delay.h>
17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h>
19#include <asm/ftrace.h>
20
21extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
22
23/* platform dependent support */
24EXPORT_SYMBOL(dump_fpu);
25EXPORT_SYMBOL(kernel_thread);
26EXPORT_SYMBOL(strlen);
27
28/* PCI exports */
29#ifdef CONFIG_PCI
30EXPORT_SYMBOL(pci_alloc_consistent);
31EXPORT_SYMBOL(pci_free_consistent);
32#endif
33 8
34/* mem exports */
35EXPORT_SYMBOL(memchr); 9EXPORT_SYMBOL(memchr);
36EXPORT_SYMBOL(memcpy); 10EXPORT_SYMBOL(memcpy);
37EXPORT_SYMBOL(memset); 11EXPORT_SYMBOL(memset);
@@ -40,6 +14,13 @@ EXPORT_SYMBOL(__copy_user);
40EXPORT_SYMBOL(__udelay); 14EXPORT_SYMBOL(__udelay);
41EXPORT_SYMBOL(__ndelay); 15EXPORT_SYMBOL(__ndelay);
42EXPORT_SYMBOL(__const_udelay); 16EXPORT_SYMBOL(__const_udelay);
17EXPORT_SYMBOL(strlen);
18EXPORT_SYMBOL(csum_partial);
19EXPORT_SYMBOL(csum_partial_copy_generic);
20EXPORT_SYMBOL(copy_page);
21EXPORT_SYMBOL(__clear_user);
22EXPORT_SYMBOL(_ebss);
23EXPORT_SYMBOL(empty_zero_page);
43 24
44#define DECLARE_EXPORT(name) \ 25#define DECLARE_EXPORT(name) \
45 extern void name(void);EXPORT_SYMBOL(name) 26 extern void name(void);EXPORT_SYMBOL(name)
@@ -84,6 +65,21 @@ DECLARE_EXPORT(__movstrSI60);
84DECLARE_EXPORT(__movstr_i4_even); 65DECLARE_EXPORT(__movstr_i4_even);
85DECLARE_EXPORT(__movstr_i4_odd); 66DECLARE_EXPORT(__movstr_i4_odd);
86DECLARE_EXPORT(__movstrSI12_i4); 67DECLARE_EXPORT(__movstrSI12_i4);
68DECLARE_EXPORT(__movmem);
69DECLARE_EXPORT(__movmemSI8);
70DECLARE_EXPORT(__movmemSI12);
71DECLARE_EXPORT(__movmemSI16);
72DECLARE_EXPORT(__movmemSI20);
73DECLARE_EXPORT(__movmemSI24);
74DECLARE_EXPORT(__movmemSI28);
75DECLARE_EXPORT(__movmemSI32);
76DECLARE_EXPORT(__movmemSI36);
77DECLARE_EXPORT(__movmemSI40);
78DECLARE_EXPORT(__movmemSI44);
79DECLARE_EXPORT(__movmemSI48);
80DECLARE_EXPORT(__movmemSI52);
81DECLARE_EXPORT(__movmemSI56);
82DECLARE_EXPORT(__movmemSI60);
87DECLARE_EXPORT(__movmem_i4_even); 83DECLARE_EXPORT(__movmem_i4_even);
88DECLARE_EXPORT(__movmem_i4_odd); 84DECLARE_EXPORT(__movmem_i4_odd);
89DECLARE_EXPORT(__movmemSI12_i4); 85DECLARE_EXPORT(__movmemSI12_i4);
@@ -92,36 +88,6 @@ DECLARE_EXPORT(__sdivsi3_i4);
92DECLARE_EXPORT(__udivsi3_i4); 88DECLARE_EXPORT(__udivsi3_i4);
93DECLARE_EXPORT(__sdivsi3_i4i); 89DECLARE_EXPORT(__sdivsi3_i4i);
94DECLARE_EXPORT(__udivsi3_i4i); 90DECLARE_EXPORT(__udivsi3_i4i);
95 91#ifdef CONFIG_MCOUNT
96#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ 92DECLARE_EXPORT(mcount);
97 defined(CONFIG_SH7705_CACHE_32KB))
98/* needed by some modules */
99EXPORT_SYMBOL(flush_cache_all);
100EXPORT_SYMBOL(flush_cache_range);
101EXPORT_SYMBOL(flush_dcache_page);
102#endif
103
104#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
105 (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
106EXPORT_SYMBOL(clear_user_page);
107#endif
108
109#ifdef CONFIG_FUNCTION_TRACER
110EXPORT_SYMBOL(mcount);
111#endif
112EXPORT_SYMBOL(csum_partial);
113EXPORT_SYMBOL(csum_partial_copy_generic);
114#ifdef CONFIG_IPV6
115EXPORT_SYMBOL(csum_ipv6_magic);
116#endif
117EXPORT_SYMBOL(clear_page);
118EXPORT_SYMBOL(copy_page);
119EXPORT_SYMBOL(__clear_user);
120EXPORT_SYMBOL(_ebss);
121EXPORT_SYMBOL(empty_zero_page);
122
123#ifndef CONFIG_CACHE_OFF
124EXPORT_SYMBOL(__flush_purge_region);
125EXPORT_SYMBOL(__flush_wback_region);
126EXPORT_SYMBOL(__flush_invalidate_region);
127#endif 93#endif
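
The DECLARE_EXPORT() macro used throughout this file simply gives each compiler-generated libgcc helper a dummy prototype so that EXPORT_SYMBOL() has a declaration to hang off. For example, DECLARE_EXPORT(__movmemSI8) above expands to roughly:

extern void __movmemSI8(void);	/* dummy prototype, never called as such */
EXPORT_SYMBOL(__movmemSI8);
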
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f5bd156ea504..45afa5c51f67 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -24,24 +24,6 @@
24#include <asm/delay.h> 24#include <asm/delay.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26 26
27extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
28
29/* platform dependent support */
30EXPORT_SYMBOL(dump_fpu);
31EXPORT_SYMBOL(kernel_thread);
32
33#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
34EXPORT_SYMBOL(clear_user_page);
35#endif
36
37#ifndef CONFIG_CACHE_OFF
38EXPORT_SYMBOL(flush_dcache_page);
39#endif
40
41#ifdef CONFIG_VT
42EXPORT_SYMBOL(screen_info);
43#endif
44
45EXPORT_SYMBOL(__put_user_asm_b); 27EXPORT_SYMBOL(__put_user_asm_b);
46EXPORT_SYMBOL(__put_user_asm_w); 28EXPORT_SYMBOL(__put_user_asm_w);
47EXPORT_SYMBOL(__put_user_asm_l); 29EXPORT_SYMBOL(__put_user_asm_l);
@@ -52,7 +34,6 @@ EXPORT_SYMBOL(__get_user_asm_l);
52EXPORT_SYMBOL(__get_user_asm_q); 34EXPORT_SYMBOL(__get_user_asm_q);
53EXPORT_SYMBOL(__strnlen_user); 35EXPORT_SYMBOL(__strnlen_user);
54EXPORT_SYMBOL(__strncpy_from_user); 36EXPORT_SYMBOL(__strncpy_from_user);
55EXPORT_SYMBOL(clear_page);
56EXPORT_SYMBOL(__clear_user); 37EXPORT_SYMBOL(__clear_user);
57EXPORT_SYMBOL(copy_page); 38EXPORT_SYMBOL(copy_page);
58EXPORT_SYMBOL(__copy_user); 39EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index b5afbec1db59..579cd2ca358d 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -41,6 +41,16 @@ struct fdpic_func_descriptor {
41}; 41};
42 42
43/* 43/*
44 * The following define adds a 64 byte gap between the signal
45 * stack frame and previous contents of the stack. This allows
46 * frame unwinding in a function epilogue but only if a frame
47 * pointer is used in the function. This is necessary because
48 * current gcc compilers (<4.3) do not generate unwind info on
49 * SH for function epilogues.
50 */
51#define UNWINDGUARD 64
52
53/*
44 * Atomically swap in the new signal mask, and wait for a signal. 54 * Atomically swap in the new signal mask, and wait for a signal.
45 */ 55 */
46asmlinkage int 56asmlinkage int
@@ -57,7 +67,8 @@ sys_sigsuspend(old_sigset_t mask,
57 67
58 current->state = TASK_INTERRUPTIBLE; 68 current->state = TASK_INTERRUPTIBLE;
59 schedule(); 69 schedule();
60 set_thread_flag(TIF_RESTORE_SIGMASK); 70 set_restore_sigmask();
71
61 return -ERESTARTNOHAND; 72 return -ERESTARTNOHAND;
62} 73}
63 74
@@ -135,11 +146,11 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
135{ 146{
136 struct task_struct *tsk = current; 147 struct task_struct *tsk = current;
137 148
138 if (!(current_cpu_data.flags & CPU_HAS_FPU)) 149 if (!(boot_cpu_data.flags & CPU_HAS_FPU))
139 return 0; 150 return 0;
140 151
141 set_used_math(); 152 set_used_math();
142 return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0], 153 return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
143 sizeof(long)*(16*2+2)); 154 sizeof(long)*(16*2+2));
144} 155}
145 156
@@ -148,7 +159,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
148{ 159{
149 struct task_struct *tsk = current; 160 struct task_struct *tsk = current;
150 161
151 if (!(current_cpu_data.flags & CPU_HAS_FPU)) 162 if (!(boot_cpu_data.flags & CPU_HAS_FPU))
152 return 0; 163 return 0;
153 164
154 if (!used_math()) { 165 if (!used_math()) {
@@ -164,7 +175,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
164 clear_used_math(); 175 clear_used_math();
165 176
166 unlazy_fpu(tsk, regs); 177 unlazy_fpu(tsk, regs);
167 return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard, 178 return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
168 sizeof(long)*(16*2+2)); 179 sizeof(long)*(16*2+2));
169} 180}
170#endif /* CONFIG_SH_FPU */ 181#endif /* CONFIG_SH_FPU */
@@ -189,7 +200,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
189#undef COPY 200#undef COPY
190 201
191#ifdef CONFIG_SH_FPU 202#ifdef CONFIG_SH_FPU
192 if (current_cpu_data.flags & CPU_HAS_FPU) { 203 if (boot_cpu_data.flags & CPU_HAS_FPU) {
193 int owned_fp; 204 int owned_fp;
194 struct task_struct *tsk = current; 205 struct task_struct *tsk = current;
195 206
@@ -327,7 +338,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
327 sp = current->sas_ss_sp + current->sas_ss_size; 338 sp = current->sas_ss_sp + current->sas_ss_size;
328 } 339 }
329 340
330 return (void __user *)((sp - frame_size) & -8ul); 341 return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
331} 342}
332 343
333/* These symbols are defined with the addresses in the vsyscall page. 344/* These symbols are defined with the addresses in the vsyscall page.
@@ -462,6 +473,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
462 err |= __put_user(OR_R0_R0, &frame->retcode[6]); 473 err |= __put_user(OR_R0_R0, &frame->retcode[6]);
463 err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); 474 err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
464 regs->pr = (unsigned long) frame->retcode; 475 regs->pr = (unsigned long) frame->retcode;
476 flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
465 } 477 }
466 478
467 if (err) 479 if (err)
@@ -487,8 +499,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
487 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", 499 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
488 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); 500 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
489 501
490 flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
491
492 return 0; 502 return 0;
493 503
494give_sigsegv: 504give_sigsegv:
@@ -518,7 +528,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
518 /* fallthrough */ 528 /* fallthrough */
519 case -ERESTARTNOINTR: 529 case -ERESTARTNOINTR:
520 regs->regs[0] = save_r0; 530 regs->regs[0] = save_r0;
521 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); 531 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
522 break; 532 break;
523 } 533 }
524} 534}
@@ -581,7 +591,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
581 if (try_to_freeze()) 591 if (try_to_freeze())
582 goto no_signal; 592 goto no_signal;
583 593
584 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 594 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
585 oldset = &current->saved_sigmask; 595 oldset = &current->saved_sigmask;
586 else 596 else
587 oldset = &current->blocked; 597 oldset = &current->blocked;
@@ -593,12 +603,13 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
593 /* Whee! Actually deliver the signal. */ 603 /* Whee! Actually deliver the signal. */
594 if (handle_signal(signr, &ka, &info, oldset, 604 if (handle_signal(signr, &ka, &info, oldset,
595 regs, save_r0) == 0) { 605 regs, save_r0) == 0) {
596 /* a signal was successfully delivered; the saved 606 /*
607 * A signal was successfully delivered; the saved
597 * sigmask will have been stored in the signal frame, 608 * sigmask will have been stored in the signal frame,
598 * and will be restored by sigreturn, so we can simply 609 * and will be restored by sigreturn, so we can simply
599 * clear the TIF_RESTORE_SIGMASK flag */ 610 * clear the TS_RESTORE_SIGMASK flag
600 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 611 */
601 clear_thread_flag(TIF_RESTORE_SIGMASK); 612 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
602 613
603 tracehook_signal_handler(signr, &info, &ka, regs, 614 tracehook_signal_handler(signr, &info, &ka, regs,
604 test_thread_flag(TIF_SINGLESTEP)); 615 test_thread_flag(TIF_SINGLESTEP));
@@ -615,17 +626,19 @@ no_signal:
615 regs->regs[0] == -ERESTARTSYS || 626 regs->regs[0] == -ERESTARTSYS ||
616 regs->regs[0] == -ERESTARTNOINTR) { 627 regs->regs[0] == -ERESTARTNOINTR) {
617 regs->regs[0] = save_r0; 628 regs->regs[0] = save_r0;
618 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); 629 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
619 } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { 630 } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
620 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); 631 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
621 regs->regs[3] = __NR_restart_syscall; 632 regs->regs[3] = __NR_restart_syscall;
622 } 633 }
623 } 634 }
624 635
625 /* if there's no signal to deliver, we just put the saved sigmask 636 /*
626 * back */ 637 * If there's no signal to deliver, we just put the saved sigmask
627 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 638 * back.
628 clear_thread_flag(TIF_RESTORE_SIGMASK); 639 */
640 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
641 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
629 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 642 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
630 } 643 }
631} 644}
@@ -640,5 +653,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
640 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 653 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
641 clear_thread_flag(TIF_NOTIFY_RESUME); 654 clear_thread_flag(TIF_NOTIFY_RESUME);
642 tracehook_notify_resume(regs); 655 tracehook_notify_resume(regs);
656 if (current->replacement_session_keyring)
657 key_replace_session_keyring();
643 } 658 }
644} 659}
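
The UNWINDGUARD change in get_sigframe() above leaves a 64-byte gap between the signal frame and whatever was previously on the stack, then rounds down to the usual 8-byte alignment. A small sketch of that stack arithmetic, with the concrete numbers chosen purely for illustration:

#define UNWINDGUARD 64

static unsigned long place_sigframe(unsigned long sp, unsigned long frame_size)
{
	/* e.g. sp = 0x7fffff2c, frame_size = 0x188:
	 *   0x7fffff2c - 0x188 - 64 = 0x7ffffd64, masked down to 0x7ffffd60 */
	return (sp - (frame_size + UNWINDGUARD)) & ~7ul;
}
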
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 0663a0ee6021..5a9f1f10ebf4 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -101,7 +101,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
101 if (try_to_freeze()) 101 if (try_to_freeze())
102 goto no_signal; 102 goto no_signal;
103 103
104 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 104 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
105 oldset = &current->saved_sigmask; 105 oldset = &current->saved_sigmask;
106 else if (!oldset) 106 else if (!oldset)
107 oldset = &current->blocked; 107 oldset = &current->blocked;
@@ -115,12 +115,12 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
115 /* 115 /*
116 * If a signal was successfully delivered, the 116 * If a signal was successfully delivered, the
117 * saved sigmask is in its frame, and we can 117 * saved sigmask is in its frame, and we can
118 * clear the TIF_RESTORE_SIGMASK flag. 118 * clear the TS_RESTORE_SIGMASK flag.
119 */ 119 */
120 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 120 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
121 clear_thread_flag(TIF_RESTORE_SIGMASK);
122 121
123 tracehook_signal_handler(signr, &info, &ka, regs, 0); 122 tracehook_signal_handler(signr, &info, &ka, regs,
123 test_thread_flag(TIF_SINGLESTEP));
124 return 1; 124 return 1;
125 } 125 }
126 } 126 }
@@ -146,8 +146,8 @@ no_signal:
146 } 146 }
147 147
148 /* No signal to deliver -- put the saved sigmask back */ 148 /* No signal to deliver -- put the saved sigmask back */
149 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 149 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
150 clear_thread_flag(TIF_RESTORE_SIGMASK); 150 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
151 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 151 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
152 } 152 }
153 153
@@ -176,6 +176,7 @@ sys_sigsuspend(old_sigset_t mask,
176 while (1) { 176 while (1) {
177 current->state = TASK_INTERRUPTIBLE; 177 current->state = TASK_INTERRUPTIBLE;
178 schedule(); 178 schedule();
179 set_restore_sigmask();
179 regs->pc += 4; /* because sys_sigreturn decrements the pc */ 180 regs->pc += 4; /* because sys_sigreturn decrements the pc */
180 if (do_signal(regs, &saveset)) { 181 if (do_signal(regs, &saveset)) {
181 /* pc now points at signal handler. Need to decrement 182 /* pc now points at signal handler. Need to decrement
@@ -296,7 +297,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
296 regs->sr |= SR_FD; 297 regs->sr |= SR_FD;
297 } 298 }
298 299
299 err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0], 300 err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
300 (sizeof(long long) * 32) + (sizeof(int) * 1)); 301 (sizeof(long long) * 32) + (sizeof(int) * 1));
301 302
302 return err; 303 return err;
@@ -315,13 +316,13 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
315 316
316 if (current == last_task_used_math) { 317 if (current == last_task_used_math) {
317 enable_fpu(); 318 enable_fpu();
318 save_fpu(current, regs); 319 save_fpu(current);
319 disable_fpu(); 320 disable_fpu();
320 last_task_used_math = NULL; 321 last_task_used_math = NULL;
321 regs->sr |= SR_FD; 322 regs->sr |= SR_FD;
322 } 323 }
323 324
324 err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard, 325 err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
325 (sizeof(long long) * 32) + (sizeof(int) * 1)); 326 (sizeof(long long) * 32) + (sizeof(int) * 1));
326 clear_used_math(); 327 clear_used_math();
327 328
@@ -561,13 +562,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
561 /* Set up to return from userspace. If provided, use a stub 562 /* Set up to return from userspace. If provided, use a stub
562 already in userspace. */ 563 already in userspace. */
563 if (ka->sa.sa_flags & SA_RESTORER) { 564 if (ka->sa.sa_flags & SA_RESTORER) {
564 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
565
566 /* 565 /*
567 * On SH5 all edited pointers are subject to NEFF 566 * On SH5 all edited pointers are subject to NEFF
568 */ 567 */
569 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? 568 DEREF_REG_PR = neff_sign_extend((unsigned long)
570 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; 569 ka->sa.sa_restorer | 0x1);
571 } else { 570 } else {
572 /* 571 /*
573 * Different approach on SH5. 572 * Different approach on SH5.
@@ -580,9 +579,8 @@ static int setup_frame(int sig, struct k_sigaction *ka,
580 * . being code, linker turns ShMedia bit on, always 579 * . being code, linker turns ShMedia bit on, always
581 * dereference index -1. 580 * dereference index -1.
582 */ 581 */
583 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; 582 DEREF_REG_PR = neff_sign_extend((unsigned long)
584 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? 583 frame->retcode | 0x01);
585 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
586 584
587 if (__copy_to_user(frame->retcode, 585 if (__copy_to_user(frame->retcode,
588 (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) 586 (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
@@ -596,9 +594,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
596 * Set up registers for signal handler. 594 * Set up registers for signal handler.
597 * All edited pointers are subject to NEFF. 595 * All edited pointers are subject to NEFF.
598 */ 596 */
599 regs->regs[REG_SP] = (unsigned long) frame; 597 regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
600 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
601 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
602 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ 598 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
603 599
604 /* FIXME: 600 /* FIXME:
@@ -613,8 +609,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
613 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; 609 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
614 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; 610 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
615 611
616 regs->pc = (unsigned long) ka->sa.sa_handler; 612 regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
617 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
618 613
619 set_fs(USER_DS); 614 set_fs(USER_DS);
620 615
@@ -676,13 +671,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
676 /* Set up to return from userspace. If provided, use a stub 671 /* Set up to return from userspace. If provided, use a stub
677 already in userspace. */ 672 already in userspace. */
678 if (ka->sa.sa_flags & SA_RESTORER) { 673 if (ka->sa.sa_flags & SA_RESTORER) {
679 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
680
681 /* 674 /*
682 * On SH5 all edited pointers are subject to NEFF 675 * On SH5 all edited pointers are subject to NEFF
683 */ 676 */
684 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? 677 DEREF_REG_PR = neff_sign_extend((unsigned long)
685 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; 678 ka->sa.sa_restorer | 0x1);
686 } else { 679 } else {
687 /* 680 /*
688 * Different approach on SH5. 681 * Different approach on SH5.
@@ -695,15 +688,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
695 * . being code, linker turns ShMedia bit on, always 688 * . being code, linker turns ShMedia bit on, always
696 * dereference index -1. 689 * dereference index -1.
697 */ 690 */
698 691 DEREF_REG_PR = neff_sign_extend((unsigned long)
699 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; 692 frame->retcode | 0x01);
700 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
701 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
702 693
703 if (__copy_to_user(frame->retcode, 694 if (__copy_to_user(frame->retcode,
704 (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) 695 (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
705 goto give_sigsegv; 696 goto give_sigsegv;
706 697
698 /* Cohere the trampoline with the I-cache. */
707 flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); 699 flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
708 } 700 }
709 701
@@ -711,14 +703,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
711 * Set up registers for signal handler. 703 * Set up registers for signal handler.
712 * All edited pointers are subject to NEFF. 704 * All edited pointers are subject to NEFF.
713 */ 705 */
714 regs->regs[REG_SP] = (unsigned long) frame; 706 regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
715 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
716 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
717 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ 707 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
718 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; 708 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
719 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; 709 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
720 regs->pc = (unsigned long) ka->sa.sa_handler; 710 regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
721 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
722 711
723 set_fs(USER_DS); 712 set_fs(USER_DS);
724 713
@@ -772,5 +761,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
772 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 761 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
773 clear_thread_flag(TIF_NOTIFY_RESUME); 762 clear_thread_flag(TIF_NOTIFY_RESUME);
774 tracehook_notify_resume(regs); 763 tracehook_notify_resume(regs);
764 if (current->replacement_session_keyring)
765 key_replace_session_keyring();
775 } 766 }
776} 767}
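The signal_32.c and signal_64.c hunks above repeatedly replace the open-coded NEFF sign-extension sequence with a neff_sign_extend() helper whose definition is outside this diff. A minimal sketch, assuming it simply mirrors the removed pattern:

    /*
     * Hypothetical sketch only -- the real helper lives in an SH-5 header
     * that is not part of this diff. It reproduces the open-coded sequence
     * removed above: if the NEFF sign bit is set, propagate it upwards.
     */
    static inline unsigned long neff_sign_extend(unsigned long val)
    {
            return (val & NEFF_SIGN) ? (val | NEFF_MASK) : val;
    }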
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 442d8d47a41e..509b36b45115 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * SMP support for the SuperH processors. 4 * SMP support for the SuperH processors.
5 * 5 *
6 * Copyright (C) 2002 - 2008 Paul Mundt 6 * Copyright (C) 2002 - 2010 Paul Mundt
7 * Copyright (C) 2006 - 2007 Akio Idehara 7 * Copyright (C) 2006 - 2007 Akio Idehara
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
@@ -31,10 +31,25 @@
31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 31int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 32int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
33 33
34static inline void __init smp_store_cpu_info(unsigned int cpu) 34struct plat_smp_ops *mp_ops = NULL;
35
36/* State of each CPU */
37DEFINE_PER_CPU(int, cpu_state) = { 0 };
38
39void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
40{
41 if (mp_ops)
42 printk(KERN_WARNING "Overriding previously set SMP ops\n");
43
44 mp_ops = ops;
45}
46
47static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
35{ 48{
36 struct sh_cpuinfo *c = cpu_data + cpu; 49 struct sh_cpuinfo *c = cpu_data + cpu;
37 50
51 memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
52
38 c->loops_per_jiffy = loops_per_jiffy; 53 c->loops_per_jiffy = loops_per_jiffy;
39} 54}
40 55
@@ -44,14 +59,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
44 59
45 init_new_context(current, &init_mm); 60 init_new_context(current, &init_mm);
46 current_thread_info()->cpu = cpu; 61 current_thread_info()->cpu = cpu;
47 plat_prepare_cpus(max_cpus); 62 mp_ops->prepare_cpus(max_cpus);
48 63
49#ifndef CONFIG_HOTPLUG_CPU 64#ifndef CONFIG_HOTPLUG_CPU
50 init_cpu_present(&cpu_possible_map); 65 init_cpu_present(&cpu_possible_map);
51#endif 66#endif
52} 67}
53 68
54void __devinit smp_prepare_boot_cpu(void) 69void __init smp_prepare_boot_cpu(void)
55{ 70{
56 unsigned int cpu = smp_processor_id(); 71 unsigned int cpu = smp_processor_id();
57 72
@@ -60,36 +75,137 @@ void __devinit smp_prepare_boot_cpu(void)
60 75
61 set_cpu_online(cpu, true); 76 set_cpu_online(cpu, true);
62 set_cpu_possible(cpu, true); 77 set_cpu_possible(cpu, true);
78
79 per_cpu(cpu_state, cpu) = CPU_ONLINE;
80}
81
82#ifdef CONFIG_HOTPLUG_CPU
83void native_cpu_die(unsigned int cpu)
84{
85 unsigned int i;
86
87 for (i = 0; i < 10; i++) {
88 smp_rmb();
89 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
90 if (system_state == SYSTEM_RUNNING)
91 pr_info("CPU %u is now offline\n", cpu);
92
93 return;
94 }
95
96 msleep(100);
97 }
98
99 pr_err("CPU %u didn't die...\n", cpu);
100}
101
102int native_cpu_disable(unsigned int cpu)
103{
104 return cpu == 0 ? -EPERM : 0;
105}
106
107void play_dead_common(void)
108{
109 idle_task_exit();
110 irq_ctx_exit(raw_smp_processor_id());
111 mb();
112
113 __get_cpu_var(cpu_state) = CPU_DEAD;
114 local_irq_disable();
115}
116
117void native_play_dead(void)
118{
119 play_dead_common();
120}
121
122int __cpu_disable(void)
123{
124 unsigned int cpu = smp_processor_id();
125 struct task_struct *p;
126 int ret;
127
128 ret = mp_ops->cpu_disable(cpu);
129 if (ret)
130 return ret;
131
132 /*
133 * Take this CPU offline. Once we clear this, we can't return,
134 * and we must not schedule until we're ready to give up the cpu.
135 */
136 set_cpu_online(cpu, false);
137
138 /*
139 * OK - migrate IRQs away from this CPU
140 */
141 migrate_irqs();
142
143 /*
144 * Stop the local timer for this CPU.
145 */
146 local_timer_stop(cpu);
147
148 /*
149 * Flush user cache and TLB mappings, and then remove this CPU
150 * from the vm mask set of all processes.
151 */
152 flush_cache_all();
153 local_flush_tlb_all();
154
155 read_lock(&tasklist_lock);
156 for_each_process(p)
157 if (p->mm)
158 cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
159 read_unlock(&tasklist_lock);
160
161 return 0;
162}
163#else /* ... !CONFIG_HOTPLUG_CPU */
164int native_cpu_disable(unsigned int cpu)
165{
166 return -ENOSYS;
167}
168
169void native_cpu_die(unsigned int cpu)
170{
171 /* We said "no" in __cpu_disable */
172 BUG();
63} 173}
64 174
175void native_play_dead(void)
176{
177 BUG();
178}
179#endif
180
65asmlinkage void __cpuinit start_secondary(void) 181asmlinkage void __cpuinit start_secondary(void)
66{ 182{
67 unsigned int cpu; 183 unsigned int cpu = smp_processor_id();
68 struct mm_struct *mm = &init_mm; 184 struct mm_struct *mm = &init_mm;
69 185
186 enable_mmu();
70 atomic_inc(&mm->mm_count); 187 atomic_inc(&mm->mm_count);
71 atomic_inc(&mm->mm_users); 188 atomic_inc(&mm->mm_users);
72 current->active_mm = mm; 189 current->active_mm = mm;
73 BUG_ON(current->mm);
74 enter_lazy_tlb(mm, current); 190 enter_lazy_tlb(mm, current);
191 local_flush_tlb_all();
75 192
76 per_cpu_trap_init(); 193 per_cpu_trap_init();
77 194
78 preempt_disable(); 195 preempt_disable();
79 196
80 notify_cpu_starting(smp_processor_id()); 197 notify_cpu_starting(cpu);
81 198
82 local_irq_enable(); 199 local_irq_enable();
83 200
84 cpu = smp_processor_id();
85
86 /* Enable local timers */ 201 /* Enable local timers */
87 local_timer_setup(cpu); 202 local_timer_setup(cpu);
88 calibrate_delay(); 203 calibrate_delay();
89 204
90 smp_store_cpu_info(cpu); 205 smp_store_cpu_info(cpu);
91 206
92 cpu_set(cpu, cpu_online_map); 207 set_cpu_online(cpu, true);
208 per_cpu(cpu_state, cpu) = CPU_ONLINE;
93 209
94 cpu_idle(); 210 cpu_idle();
95} 211}
@@ -108,21 +224,30 @@ int __cpuinit __cpu_up(unsigned int cpu)
108 struct task_struct *tsk; 224 struct task_struct *tsk;
109 unsigned long timeout; 225 unsigned long timeout;
110 226
111 tsk = fork_idle(cpu); 227 tsk = cpu_data[cpu].idle;
112 if (IS_ERR(tsk)) { 228 if (!tsk) {
113 printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu); 229 tsk = fork_idle(cpu);
114 return PTR_ERR(tsk); 230 if (IS_ERR(tsk)) {
231 pr_err("Failed forking idle task for cpu %d\n", cpu);
232 return PTR_ERR(tsk);
233 }
234
235 cpu_data[cpu].idle = tsk;
115 } 236 }
116 237
238 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
239
117 /* Fill in data in head.S for secondary cpus */ 240 /* Fill in data in head.S for secondary cpus */
118 stack_start.sp = tsk->thread.sp; 241 stack_start.sp = tsk->thread.sp;
119 stack_start.thread_info = tsk->stack; 242 stack_start.thread_info = tsk->stack;
120 stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ 243 stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
121 stack_start.start_kernel_fn = start_secondary; 244 stack_start.start_kernel_fn = start_secondary;
122 245
123 flush_cache_all(); 246 flush_icache_range((unsigned long)&stack_start,
247 (unsigned long)&stack_start + sizeof(stack_start));
248 wmb();
124 249
125 plat_start_cpu(cpu, (unsigned long)_stext); 250 mp_ops->start_cpu(cpu, (unsigned long)_stext);
126 251
127 timeout = jiffies + HZ; 252 timeout = jiffies + HZ;
128 while (time_before(jiffies, timeout)) { 253 while (time_before(jiffies, timeout)) {
@@ -130,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
130 break; 255 break;
131 256
132 udelay(10); 257 udelay(10);
258 barrier();
133 } 259 }
134 260
135 if (cpu_online(cpu)) 261 if (cpu_online(cpu))
@@ -154,16 +280,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
154 280
155void smp_send_reschedule(int cpu) 281void smp_send_reschedule(int cpu)
156{ 282{
157 plat_send_ipi(cpu, SMP_MSG_RESCHEDULE); 283 mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
158}
159
160static void stop_this_cpu(void *unused)
161{
162 cpu_clear(smp_processor_id(), cpu_online_map);
163 local_irq_disable();
164
165 for (;;)
166 cpu_relax();
167} 284}
168 285
169void smp_send_stop(void) 286void smp_send_stop(void)
@@ -176,12 +293,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
176 int cpu; 293 int cpu;
177 294
178 for_each_cpu(cpu, mask) 295 for_each_cpu(cpu, mask)
179 plat_send_ipi(cpu, SMP_MSG_FUNCTION); 296 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
180} 297}
181 298
182void arch_send_call_function_single_ipi(int cpu) 299void arch_send_call_function_single_ipi(int cpu)
183{ 300{
184 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 301 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
185} 302}
186 303
187void smp_timer_broadcast(const struct cpumask *mask) 304void smp_timer_broadcast(const struct cpumask *mask)
@@ -189,7 +306,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
189 int cpu; 306 int cpu;
190 307
191 for_each_cpu(cpu, mask) 308 for_each_cpu(cpu, mask)
192 plat_send_ipi(cpu, SMP_MSG_TIMER); 309 mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
193} 310}
194 311
195static void ipi_timer(void) 312static void ipi_timer(void)
@@ -253,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
253 * behalf of debugees, kswapd stealing pages from another process etc). 370 * behalf of debugees, kswapd stealing pages from another process etc).
254 * Kanoj 07/00. 371 * Kanoj 07/00.
255 */ 372 */
256
257void flush_tlb_mm(struct mm_struct *mm) 373void flush_tlb_mm(struct mm_struct *mm)
258{ 374{
259 preempt_disable(); 375 preempt_disable();
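The smp.c rework replaces the plat_*() entry points with a registered struct plat_smp_ops. The struct definition is outside this diff, but the calls above imply at least prepare_cpus, start_cpu, send_ipi and cpu_disable hooks. A hedged sketch of how a platform might register its ops (the shx3_* handler names and plat_smp_setup() are purely illustrative):

    /* Hypothetical platform code; only the hook names used above are known. */
    static struct plat_smp_ops shx3_smp_ops = {
            .prepare_cpus   = shx3_prepare_cpus,
            .start_cpu      = shx3_start_cpu,
            .send_ipi       = shx3_send_ipi,
            .cpu_disable    = native_cpu_disable,
    };

    void __init plat_smp_setup(void)
    {
            register_smp_ops(&shx3_smp_ops);
    }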
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 1a2a5eb76e41..c2e45c48409c 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -13,47 +13,93 @@
13#include <linux/stacktrace.h> 13#include <linux/stacktrace.h>
14#include <linux/thread_info.h> 14#include <linux/thread_info.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <asm/unwinder.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/stacktrace.h>
19
20static void save_stack_warning(void *data, char *msg)
21{
22}
23
24static void
25save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
26{
27}
28
29static int save_stack_stack(void *data, char *name)
30{
31 return 0;
32}
17 33
18/* 34/*
19 * Save stack-backtrace addresses into a stack_trace buffer. 35 * Save stack-backtrace addresses into a stack_trace buffer.
20 */ 36 */
37static void save_stack_address(void *data, unsigned long addr, int reliable)
38{
39 struct stack_trace *trace = data;
40
41 if (!reliable)
42 return;
43
44 if (trace->skip > 0) {
45 trace->skip--;
46 return;
47 }
48
49 if (trace->nr_entries < trace->max_entries)
50 trace->entries[trace->nr_entries++] = addr;
51}
52
53static const struct stacktrace_ops save_stack_ops = {
54 .warning = save_stack_warning,
55 .warning_symbol = save_stack_warning_symbol,
56 .stack = save_stack_stack,
57 .address = save_stack_address,
58};
59
21void save_stack_trace(struct stack_trace *trace) 60void save_stack_trace(struct stack_trace *trace)
22{ 61{
23 unsigned long *sp = (unsigned long *)current_stack_pointer; 62 unsigned long *sp = (unsigned long *)current_stack_pointer;
24 63
25 while (!kstack_end(sp)) { 64 unwind_stack(current, NULL, sp, &save_stack_ops, trace);
26 unsigned long addr = *sp++; 65 if (trace->nr_entries < trace->max_entries)
27 66 trace->entries[trace->nr_entries++] = ULONG_MAX;
28 if (__kernel_text_address(addr)) {
29 if (trace->skip > 0)
30 trace->skip--;
31 else
32 trace->entries[trace->nr_entries++] = addr;
33 if (trace->nr_entries >= trace->max_entries)
34 break;
35 }
36 }
37} 67}
38EXPORT_SYMBOL_GPL(save_stack_trace); 68EXPORT_SYMBOL_GPL(save_stack_trace);
39 69
70static void
71save_stack_address_nosched(void *data, unsigned long addr, int reliable)
72{
73 struct stack_trace *trace = (struct stack_trace *)data;
74
75 if (!reliable)
76 return;
77
78 if (in_sched_functions(addr))
79 return;
80
81 if (trace->skip > 0) {
82 trace->skip--;
83 return;
84 }
85
86 if (trace->nr_entries < trace->max_entries)
87 trace->entries[trace->nr_entries++] = addr;
88}
89
90static const struct stacktrace_ops save_stack_ops_nosched = {
91 .warning = save_stack_warning,
92 .warning_symbol = save_stack_warning_symbol,
93 .stack = save_stack_stack,
94 .address = save_stack_address_nosched,
95};
96
40void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 97void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
41{ 98{
42 unsigned long *sp = (unsigned long *)tsk->thread.sp; 99 unsigned long *sp = (unsigned long *)tsk->thread.sp;
43 100
44 while (!kstack_end(sp)) { 101 unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
45 unsigned long addr = *sp++; 102 if (trace->nr_entries < trace->max_entries)
46 103 trace->entries[trace->nr_entries++] = ULONG_MAX;
47 if (__kernel_text_address(addr)) {
48 if (in_sched_functions(addr))
49 break;
50 if (trace->skip > 0)
51 trace->skip--;
52 else
53 trace->entries[trace->nr_entries++] = addr;
54 if (trace->nr_entries >= trace->max_entries)
55 break;
56 }
57 }
58} 104}
59EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 105EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
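The stacktrace.c conversion keeps the exported save_stack_trace() API unchanged; only the stack walking is now delegated to unwind_stack() through a stacktrace_ops callback table. A minimal caller-side sketch of that unchanged API (buffer size and skip count are arbitrary):

    static unsigned long entries[16];

    static void dump_current_backtrace(void)
    {
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 1,       /* drop this function itself */
            };

            save_stack_trace(&trace);
            print_stack_trace(&trace, 0);
    }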
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 90d00e47264d..8c6a350df751 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -25,30 +25,8 @@
25#include <asm/syscalls.h> 25#include <asm/syscalls.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/unistd.h> 27#include <asm/unistd.h>
28 28#include <asm/cacheflush.h>
29static inline long 29#include <asm/cachectl.h>
30do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
31 unsigned long flags, int fd, unsigned long pgoff)
32{
33 int error = -EBADF;
34 struct file *file = NULL;
35
36 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
37 if (!(flags & MAP_ANONYMOUS)) {
38 file = fget(fd);
39 if (!file)
40 goto out;
41 }
42
43 down_write(&current->mm->mmap_sem);
44 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
45 up_write(&current->mm->mmap_sem);
46
47 if (file)
48 fput(file);
49out:
50 return error;
51}
52 30
53asmlinkage int old_mmap(unsigned long addr, unsigned long len, 31asmlinkage int old_mmap(unsigned long addr, unsigned long len,
54 unsigned long prot, unsigned long flags, 32 unsigned long prot, unsigned long flags,
@@ -56,7 +34,7 @@ asmlinkage int old_mmap(unsigned long addr, unsigned long len,
56{ 34{
57 if (off & ~PAGE_MASK) 35 if (off & ~PAGE_MASK)
58 return -EINVAL; 36 return -EINVAL;
59 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); 37 return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
60} 38}
61 39
62asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 40asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -72,120 +50,46 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
72 50
73 pgoff >>= PAGE_SHIFT - 12; 51 pgoff >>= PAGE_SHIFT - 12;
74 52
75 return do_mmap2(addr, len, prot, flags, fd, pgoff); 53 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
76} 54}
77 55
78/* 56/* sys_cacheflush -- flush (part of) the processor cache. */
79 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 57asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
80 *
81 * This is really horribly ugly.
82 */
83asmlinkage int sys_ipc(uint call, int first, int second,
84 int third, void __user *ptr, long fifth)
85{ 58{
86 int version, ret; 59 struct vm_area_struct *vma;
87
88 version = call >> 16; /* hack for backward compatibility */
89 call &= 0xffff;
90 60
91 if (call <= SEMTIMEDOP) 61 if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
92 switch (call) { 62 return -EINVAL;
93 case SEMOP:
94 return sys_semtimedop(first,
95 (struct sembuf __user *)ptr,
96 second, NULL);
97 case SEMTIMEDOP:
98 return sys_semtimedop(first,
99 (struct sembuf __user *)ptr, second,
100 (const struct timespec __user *)fifth);
101 case SEMGET:
102 return sys_semget (first, second, third);
103 case SEMCTL: {
104 union semun fourth;
105 if (!ptr)
106 return -EINVAL;
107 if (get_user(fourth.__pad, (void __user * __user *) ptr))
108 return -EFAULT;
109 return sys_semctl (first, second, third, fourth);
110 }
111 default:
112 return -EINVAL;
113 }
114
115 if (call <= MSGCTL)
116 switch (call) {
117 case MSGSND:
118 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
119 second, third);
120 case MSGRCV:
121 switch (version) {
122 case 0:
123 {
124 struct ipc_kludge tmp;
125 63
126 if (!ptr) 64 /*
127 return -EINVAL; 65 * Verify that the specified address region actually belongs
66 * to this process.
67 */
68 if (addr + len < addr)
69 return -EFAULT;
128 70
129 if (copy_from_user(&tmp, 71 down_read(&current->mm->mmap_sem);
130 (struct ipc_kludge __user *) ptr, 72 vma = find_vma (current->mm, addr);
131 sizeof (tmp))) 73 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
132 return -EFAULT; 74 up_read(&current->mm->mmap_sem);
75 return -EFAULT;
76 }
133 77
134 return sys_msgrcv (first, tmp.msgp, second, 78 switch (op & CACHEFLUSH_D_PURGE) {
135 tmp.msgtyp, third); 79 case CACHEFLUSH_D_INVAL:
136 } 80 __flush_invalidate_region((void *)addr, len);
137 default: 81 break;
138 return sys_msgrcv (first, 82 case CACHEFLUSH_D_WB:
139 (struct msgbuf __user *) ptr, 83 __flush_wback_region((void *)addr, len);
140 second, fifth, third); 84 break;
141 } 85 case CACHEFLUSH_D_PURGE:
142 case MSGGET: 86 __flush_purge_region((void *)addr, len);
143 return sys_msgget ((key_t) first, second); 87 break;
144 case MSGCTL: 88 }
145 return sys_msgctl (first, second,
146 (struct msqid_ds __user *) ptr);
147 default:
148 return -EINVAL;
149 }
150 if (call <= SHMCTL)
151 switch (call) {
152 case SHMAT:
153 switch (version) {
154 default: {
155 ulong raddr;
156 ret = do_shmat (first, (char __user *) ptr,
157 second, &raddr);
158 if (ret)
159 return ret;
160 return put_user (raddr, (ulong __user *) third);
161 }
162 case 1: /* iBCS2 emulator entry point */
163 if (!segment_eq(get_fs(), get_ds()))
164 return -EINVAL;
165 return do_shmat (first, (char __user *) ptr,
166 second, (ulong *) third);
167 }
168 case SHMDT:
169 return sys_shmdt ((char __user *)ptr);
170 case SHMGET:
171 return sys_shmget (first, second, third);
172 case SHMCTL:
173 return sys_shmctl (first, second,
174 (struct shmid_ds __user *) ptr);
175 default:
176 return -EINVAL;
177 }
178 89
179 return -EINVAL; 90 if (op & CACHEFLUSH_I)
180} 91 flush_icache_range(addr, addr+len);
181 92
182asmlinkage int sys_uname(struct old_utsname __user *name) 93 up_read(&current->mm->mmap_sem);
183{ 94 return 0;
184 int err;
185 if (!name)
186 return -EFAULT;
187 down_read(&uts_sem);
188 err = copy_to_user(name, utsname(), sizeof(*name));
189 up_read(&uts_sem);
190 return err?-EFAULT:0;
191} 95}
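The new sys_cacheflush() is wired into slot 123 of the syscall tables further down. A userspace sketch of how a JIT might use it, assuming __NR_cacheflush is defined for that slot; the CACHEFLUSH_* flags come from <asm/cachectl.h> as included above:

    /* Not part of this patch: write back the D-cache and invalidate the
     * I-cache over a freshly generated code buffer. */
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/cachectl.h>

    static int flush_jit_buffer(void *buf, unsigned long len)
    {
            return syscall(__NR_cacheflush, (unsigned long)buf, len,
                           CACHEFLUSH_D_WB | CACHEFLUSH_I);
    }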
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 63ba12836eae..f56b6fe5c5d0 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -9,7 +9,6 @@
9#include <linux/syscalls.h> 9#include <linux/syscalls.h>
10#include <linux/mman.h> 10#include <linux/mman.h>
11#include <linux/file.h> 11#include <linux/file.h>
12#include <linux/utsname.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/fs.h> 13#include <linux/fs.h>
15#include <linux/ipc.h> 14#include <linux/ipc.h>
@@ -72,7 +71,9 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
72 * Do a system call from kernel instead of calling sys_execve so we 71 * Do a system call from kernel instead of calling sys_execve so we
73 * end up with proper pt_regs. 72 * end up with proper pt_regs.
74 */ 73 */
75int kernel_execve(const char *filename, char *const argv[], char *const envp[]) 74int kernel_execve(const char *filename,
75 const char *const argv[],
76 const char *const envp[])
76{ 77{
77 register long __sc0 __asm__ ("r3") = __NR_execve; 78 register long __sc0 __asm__ ("r3") = __NR_execve;
78 register long __sc4 __asm__ ("r4") = (long) filename; 79 register long __sc4 __asm__ ("r4") = (long) filename;
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
index 91fb8445a5a0..c5a38c4bf410 100644
--- a/arch/sh/kernel/sys_sh64.c
+++ b/arch/sh/kernel/sys_sh64.c
@@ -23,7 +23,6 @@
23#include <linux/stat.h> 23#include <linux/stat.h>
24#include <linux/mman.h> 24#include <linux/mman.h>
25#include <linux/file.h> 25#include <linux/file.h>
26#include <linux/utsname.h>
27#include <linux/syscalls.h> 26#include <linux/syscalls.h>
28#include <linux/ipc.h> 27#include <linux/ipc.h>
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
@@ -34,7 +33,9 @@
34 * Do a system call from kernel instead of calling sys_execve so we 33 * Do a system call from kernel instead of calling sys_execve so we
35 * end up with proper pt_regs. 34 * end up with proper pt_regs.
36 */ 35 */
37int kernel_execve(const char *filename, char *const argv[], char *const envp[]) 36int kernel_execve(const char *filename,
37 const char *const argv[],
38 const char *const envp[])
38{ 39{
39 register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve); 40 register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
40 register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename; 41 register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
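Both kernel_execve() variants now take fully const-qualified argument vectors. An illustrative caller matching the new prototype (the path and environment values are examples only):

    static const char *const init_argv[] = { "/sbin/init", NULL };
    static const char *const init_envp[] = { "HOME=/", "TERM=linux", NULL };

    static int run_init(void)
    {
            return kernel_execve("/sbin/init", init_argv, init_envp);
    }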
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index f9e21fa2f592..6fc347ebe59d 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -139,7 +139,7 @@ ENTRY(sys_call_table)
139 .long sys_clone /* 120 */ 139 .long sys_clone /* 120 */
140 .long sys_setdomainname 140 .long sys_setdomainname
141 .long sys_newuname 141 .long sys_newuname
142 .long sys_ni_syscall /* sys_modify_ldt */ 142 .long sys_cacheflush /* x86: sys_modify_ldt */
143 .long sys_adjtimex 143 .long sys_adjtimex
144 .long sys_mprotect /* 125 */ 144 .long sys_mprotect /* 125 */
145 .long sys_sigprocmask 145 .long sys_sigprocmask
@@ -352,4 +352,27 @@ ENTRY(sys_call_table)
352 .long sys_preadv 352 .long sys_preadv
353 .long sys_pwritev 353 .long sys_pwritev
354 .long sys_rt_tgsigqueueinfo /* 335 */ 354 .long sys_rt_tgsigqueueinfo /* 335 */
355 .long sys_perf_counter_open 355 .long sys_perf_event_open
356 .long sys_fanotify_init
357 .long sys_fanotify_mark
358 .long sys_prlimit64
359 /* Broken-out socket family */
360 .long sys_socket /* 340 */
361 .long sys_bind
362 .long sys_connect
363 .long sys_listen
364 .long sys_accept
365 .long sys_getsockname /* 345 */
366 .long sys_getpeername
367 .long sys_socketpair
368 .long sys_send
369 .long sys_sendto
370 .long sys_recv /* 350 */
371 .long sys_recvfrom
372 .long sys_shutdown
373 .long sys_setsockopt
374 .long sys_getsockopt
375 .long sys_sendmsg /* 355 */
376 .long sys_recvmsg
377 .long sys_recvmmsg
378 .long sys_accept4
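The new table entries break the socket family out of the multiplexed socketcall() path. A userspace sketch, assuming a __NR_socket definition matching slot 340 noted in the table comment:

    /* Not part of this patch: issue socket(2) directly instead of via
     * socketcall(). */
    #include <unistd.h>
    #include <sys/syscall.h>

    static int raw_socket(int family, int type, int protocol)
    {
            return syscall(__NR_socket, family, type, protocol);
    }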
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index bf420b616ae0..66585708ce90 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -143,7 +143,7 @@ sys_call_table:
143 .long sys_clone /* 120 */ 143 .long sys_clone /* 120 */
144 .long sys_setdomainname 144 .long sys_setdomainname
145 .long sys_newuname 145 .long sys_newuname
146 .long sys_ni_syscall /* sys_modify_ldt */ 146 .long sys_cacheflush /* x86: sys_modify_ldt */
147 .long sys_adjtimex 147 .long sys_adjtimex
148 .long sys_mprotect /* 125 */ 148 .long sys_mprotect /* 125 */
149 .long sys_sigprocmask 149 .long sys_sigprocmask
@@ -390,4 +390,9 @@ sys_call_table:
390 .long sys_preadv 390 .long sys_preadv
391 .long sys_pwritev 391 .long sys_pwritev
392 .long sys_rt_tgsigqueueinfo 392 .long sys_rt_tgsigqueueinfo
393 .long sys_perf_counter_open 393 .long sys_perf_event_open
394 .long sys_recvmmsg /* 365 */
395 .long sys_accept4
396 .long sys_fanotify_init
397 .long sys_fanotify_mark
398 .long sys_prlimit64
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 9b352a1e3fb4..8a0072de2bcc 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -21,6 +21,7 @@
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/rtc.h> 22#include <linux/rtc.h>
23#include <asm/clock.h> 23#include <asm/clock.h>
24#include <asm/hwblk.h>
24#include <asm/rtc.h> 25#include <asm/rtc.h>
25 26
26/* Dummy RTC ops */ 27/* Dummy RTC ops */
@@ -38,14 +39,12 @@ static int null_rtc_set_time(const time_t secs)
38void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; 39void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
39int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; 40int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
40 41
41#ifdef CONFIG_GENERIC_CMOS_UPDATE 42void read_persistent_clock(struct timespec *ts)
42unsigned long read_persistent_clock(void)
43{ 43{
44 struct timespec tv; 44 rtc_sh_get_time(ts);
45 rtc_sh_get_time(&tv);
46 return tv.tv_sec;
47} 45}
48 46
47#ifdef CONFIG_GENERIC_CMOS_UPDATE
49int update_persistent_clock(struct timespec now) 48int update_persistent_clock(struct timespec now)
50{ 49{
51 return rtc_sh_set_time(now.tv_sec); 50 return rtc_sh_set_time(now.tv_sec);
@@ -91,21 +90,8 @@ module_init(rtc_generic_init);
91 90
92void (*board_time_init)(void); 91void (*board_time_init)(void);
93 92
94void __init time_init(void) 93static void __init sh_late_time_init(void)
95{ 94{
96 if (board_time_init)
97 board_time_init();
98
99 clk_init();
100
101 rtc_sh_get_time(&xtime);
102 set_normalized_timespec(&wall_to_monotonic,
103 -xtime.tv_sec, -xtime.tv_nsec);
104
105#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
106 local_timer_setup(smp_processor_id());
107#endif
108
109 /* 95 /*
110 * Make sure all compiled-in early timers register themselves. 96 * Make sure all compiled-in early timers register themselves.
111 * 97 *
@@ -118,3 +104,14 @@ void __init time_init(void)
118 early_platform_driver_register_all("earlytimer"); 104 early_platform_driver_register_all("earlytimer");
119 early_platform_driver_probe("earlytimer", 2, 0); 105 early_platform_driver_probe("earlytimer", 2, 0);
120} 106}
107
108void __init time_init(void)
109{
110 if (board_time_init)
111 board_time_init();
112
113 hwblk_init();
114 clk_init();
115
116 late_time_init = sh_late_time_init;
117}
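read_persistent_clock() now fills a timespec through whatever rtc_sh_get_time points at, and time_init() defers the early-timer probing to late_time_init. A hypothetical board hook showing how those pointers are meant to be overridden (the myboard_* names are invented for illustration):

    static void myboard_rtc_get(struct timespec *ts)
    {
            ts->tv_sec  = myboard_read_rtc_seconds();   /* board-specific */
            ts->tv_nsec = 0;
    }

    static void __init myboard_time_init(void)
    {
            rtc_sh_get_time = myboard_rtc_get;
    }
    /* the board setup code would point board_time_init at myboard_time_init */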
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 0838942b7083..948fdb656933 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -16,6 +16,32 @@
16 16
17static DEFINE_PER_CPU(struct cpu, cpu_devices); 17static DEFINE_PER_CPU(struct cpu, cpu_devices);
18 18
19cpumask_t cpu_core_map[NR_CPUS];
20
21static cpumask_t cpu_coregroup_map(unsigned int cpu)
22{
23 /*
24 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
25 * simple until we have a method for determining topology..
26 */
27 return cpu_possible_map;
28}
29
30const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
31{
32 return &cpu_core_map[cpu];
33}
34
35int arch_update_cpu_topology(void)
36{
37 unsigned int cpu;
38
39 for_each_possible_cpu(cpu)
40 cpu_core_map[cpu] = cpu_coregroup_map(cpu);
41
42 return 0;
43}
44
19static int __init topology_init(void) 45static int __init topology_init(void)
20{ 46{
21 int i, ret; 47 int i, ret;
@@ -26,7 +52,11 @@ static int __init topology_init(void)
26#endif 52#endif
27 53
28 for_each_present_cpu(i) { 54 for_each_present_cpu(i) {
29 ret = register_cpu(&per_cpu(cpu_devices, i), i); 55 struct cpu *c = &per_cpu(cpu_devices, i);
56
57 c->hotpluggable = 1;
58
59 ret = register_cpu(c, i);
30 if (unlikely(ret)) 60 if (unlikely(ret))
31 printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n", 61 printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
32 __func__, i, ret); 62 __func__, i, ret);
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index b3e0067db358..0830c2a9f712 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -5,18 +5,33 @@
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/hardirq.h>
9#include <asm/unwinder.h>
8#include <asm/system.h> 10#include <asm/system.h>
9 11
10#ifdef CONFIG_BUG 12#ifdef CONFIG_GENERIC_BUG
11static void handle_BUG(struct pt_regs *regs) 13static void handle_BUG(struct pt_regs *regs)
12{ 14{
15 const struct bug_entry *bug;
16 unsigned long bugaddr = regs->pc;
13 enum bug_trap_type tt; 17 enum bug_trap_type tt;
14 tt = report_bug(regs->pc, regs); 18
19 if (!is_valid_bugaddr(bugaddr))
20 goto invalid;
21
22 bug = find_bug(bugaddr);
23
24 /* Switch unwinders when unwind_stack() is called */
25 if (bug->flags & BUGFLAG_UNWINDER)
26 unwinder_faulted = 1;
27
28 tt = report_bug(bugaddr, regs);
15 if (tt == BUG_TRAP_TYPE_WARN) { 29 if (tt == BUG_TRAP_TYPE_WARN) {
16 regs->pc += instruction_size(regs->pc); 30 regs->pc += instruction_size(bugaddr);
17 return; 31 return;
18 } 32 }
19 33
34invalid:
20 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); 35 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
21} 36}
22 37
@@ -28,8 +43,10 @@ int is_valid_bugaddr(unsigned long addr)
28 return 0; 43 return 0;
29 if (probe_kernel_address((insn_size_t *)addr, opcode)) 44 if (probe_kernel_address((insn_size_t *)addr, opcode))
30 return 0; 45 return 0;
46 if (opcode == TRAPA_BUG_OPCODE)
47 return 1;
31 48
32 return opcode == TRAPA_BUG_OPCODE; 49 return 0;
33} 50}
34#endif 51#endif
35 52
@@ -41,7 +58,7 @@ BUILD_TRAP_HANDLER(debug)
41 TRAP_HANDLER_DECL; 58 TRAP_HANDLER_DECL;
42 59
43 /* Rewind */ 60 /* Rewind */
44 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); 61 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
45 62
46 if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff, 63 if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
47 SIGTRAP) == NOTIFY_STOP) 64 SIGTRAP) == NOTIFY_STOP)
@@ -58,13 +75,13 @@ BUILD_TRAP_HANDLER(bug)
58 TRAP_HANDLER_DECL; 75 TRAP_HANDLER_DECL;
59 76
60 /* Rewind */ 77 /* Rewind */
61 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); 78 regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
62 79
63 if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, 80 if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
64 SIGTRAP) == NOTIFY_STOP) 81 SIGTRAP) == NOTIFY_STOP)
65 return; 82 return;
66 83
67#ifdef CONFIG_BUG 84#ifdef CONFIG_GENERIC_BUG
68 if (__kernel_text_address(instruction_pointer(regs))) { 85 if (__kernel_text_address(instruction_pointer(regs))) {
69 insn_size_t insn = *(insn_size_t *)instruction_pointer(regs); 86 insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
70 if (insn == TRAPA_BUG_OPCODE) 87 if (insn == TRAPA_BUG_OPCODE)
@@ -75,3 +92,25 @@ BUILD_TRAP_HANDLER(bug)
75 92
76 force_sig(SIGTRAP, current); 93 force_sig(SIGTRAP, current);
77} 94}
95
96BUILD_TRAP_HANDLER(nmi)
97{
98 unsigned int cpu = smp_processor_id();
99 TRAP_HANDLER_DECL;
100
101 nmi_enter();
102 nmi_count(cpu)++;
103
104 switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
105 case NOTIFY_OK:
106 case NOTIFY_STOP:
107 break;
108 case NOTIFY_BAD:
109 die("Fatal Non-Maskable Interrupt", regs, SIGINT);
110 default:
111 printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
112 break;
113 }
114
115 nmi_exit();
116}
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2b772776fcda..3484c2f65aba 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -5,7 +5,7 @@
5 * SuperH version: Copyright (C) 1999 Niibe Yutaka 5 * SuperH version: Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2000 Philipp Rumpf 6 * Copyright (C) 2000 Philipp Rumpf
7 * Copyright (C) 2000 David Howells 7 * Copyright (C) 2000 David Howells
8 * Copyright (C) 2002 - 2007 Paul Mundt 8 * Copyright (C) 2002 - 2010 Paul Mundt
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive 11 * License. See the file "COPYING" in the main directory of this archive
@@ -24,8 +24,11 @@
24#include <linux/kdebug.h> 24#include <linux/kdebug.h>
25#include <linux/kexec.h> 25#include <linux/kexec.h>
26#include <linux/limits.h> 26#include <linux/limits.h>
27#include <linux/sysfs.h>
28#include <linux/uaccess.h>
29#include <linux/perf_event.h>
27#include <asm/system.h> 30#include <asm/system.h>
28#include <asm/uaccess.h> 31#include <asm/alignment.h>
29#include <asm/fpu.h> 32#include <asm/fpu.h>
30#include <asm/kprobes.h> 33#include <asm/kprobes.h>
31 34
@@ -79,12 +82,12 @@ void die(const char * str, struct pt_regs * regs, long err)
79 82
80 oops_enter(); 83 oops_enter();
81 84
82 console_verbose();
83 spin_lock_irq(&die_lock); 85 spin_lock_irq(&die_lock);
86 console_verbose();
84 bust_spinlocks(1); 87 bust_spinlocks(1);
85 88
86 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 89 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
87 90 sysfs_printk_last_file();
88 print_modules(); 91 print_modules();
89 show_regs(regs); 92 show_regs(regs);
90 93
@@ -100,6 +103,7 @@ void die(const char * str, struct pt_regs * regs, long err)
100 bust_spinlocks(0); 103 bust_spinlocks(0);
101 add_taint(TAINT_DIE); 104 add_taint(TAINT_DIE);
102 spin_unlock_irq(&die_lock); 105 spin_unlock_irq(&die_lock);
106 oops_exit();
103 107
104 if (kexec_should_crash(current)) 108 if (kexec_should_crash(current))
105 crash_kexec(regs); 109 crash_kexec(regs);
@@ -110,7 +114,6 @@ void die(const char * str, struct pt_regs * regs, long err)
110 if (panic_on_oops) 114 if (panic_on_oops)
111 panic("Fatal exception"); 115 panic("Fatal exception");
112 116
113 oops_exit();
114 do_exit(SIGSEGV); 117 do_exit(SIGSEGV);
115} 118}
116 119
@@ -136,6 +139,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
136 regs->pc = fixup->fixup; 139 regs->pc = fixup->fixup;
137 return; 140 return;
138 } 141 }
142
139 die(str, regs, err); 143 die(str, regs, err);
140 } 144 }
141} 145}
@@ -193,6 +197,13 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
193 197
194 count = 1<<(instruction&3); 198 count = 1<<(instruction&3);
195 199
200 switch (count) {
201 case 1: inc_unaligned_byte_access(); break;
202 case 2: inc_unaligned_word_access(); break;
203 case 4: inc_unaligned_dword_access(); break;
204 case 8: inc_unaligned_multi_access(); break;
205 }
206
196 ret = -EFAULT; 207 ret = -EFAULT;
197 switch (instruction>>12) { 208 switch (instruction>>12) {
198 case 0: /* mov.[bwl] to/from memory via r0+rn */ 209 case 0: /* mov.[bwl] to/from memory via r0+rn */
@@ -358,30 +369,33 @@ static inline int handle_delayslot(struct pt_regs *regs,
358#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) 369#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
359#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) 370#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
360 371
361/*
362 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
363 * opcodes..
364 */
365
366static int handle_unaligned_notify_count = 10;
367
368int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, 372int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
369 struct mem_access *ma) 373 struct mem_access *ma, int expected,
374 unsigned long address)
370{ 375{
371 u_int rm; 376 u_int rm;
372 int ret, index; 377 int ret, index;
373 378
379 /*
380 * XXX: We can't handle mixed 16/32-bit instructions yet
381 */
382 if (instruction_size(instruction) != 2)
383 return -EINVAL;
384
374 index = (instruction>>8)&15; /* 0x0F00 */ 385 index = (instruction>>8)&15; /* 0x0F00 */
375 rm = regs->regs[index]; 386 rm = regs->regs[index];
376 387
377 /* shout about the first ten userspace fixups */ 388 /*
378 if (user_mode(regs) && handle_unaligned_notify_count>0) { 389 * Log the unexpected fixups, and then pass them on to perf.
379 handle_unaligned_notify_count--; 390 *
380 391 * We intentionally don't report the expected cases to perf as
381 printk(KERN_NOTICE "Fixing up unaligned userspace access " 392 * otherwise the trapped I/O case will skew the results too much
382 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", 393 * to be useful.
383 current->comm, task_pid_nr(current), 394 */
384 (void *)regs->pc, instruction); 395 if (!expected) {
396 unaligned_fixups_notify(current, instruction, regs);
397 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
398 regs, address);
385 } 399 }
386 400
387 ret = -EFAULT; 401 ret = -EFAULT;
@@ -535,9 +549,34 @@ asmlinkage void do_address_error(struct pt_regs *regs,
535 549
536 if (user_mode(regs)) { 550 if (user_mode(regs)) {
537 int si_code = BUS_ADRERR; 551 int si_code = BUS_ADRERR;
552 unsigned int user_action;
538 553
539 local_irq_enable(); 554 local_irq_enable();
555 inc_unaligned_user_access();
540 556
557 set_fs(USER_DS);
558 if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
559 sizeof(instruction))) {
560 set_fs(oldfs);
561 goto uspace_segv;
562 }
563 set_fs(oldfs);
564
565 /* shout about userspace fixups */
566 unaligned_fixups_notify(current, instruction, regs);
567
568 user_action = unaligned_user_action();
569 if (user_action & UM_FIXUP)
570 goto fixup;
571 if (user_action & UM_SIGNAL)
572 goto uspace_segv;
573 else {
574 /* ignore */
575 regs->pc += instruction_size(instruction);
576 return;
577 }
578
579fixup:
541 /* bad PC is not something we can fix */ 580 /* bad PC is not something we can fix */
542 if (regs->pc & 1) { 581 if (regs->pc & 1) {
543 si_code = BUS_ADRALN; 582 si_code = BUS_ADRALN;
@@ -545,20 +584,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
545 } 584 }
546 585
547 set_fs(USER_DS); 586 set_fs(USER_DS);
548 if (copy_from_user(&instruction, (void __user *)(regs->pc),
549 sizeof(instruction))) {
550 /* Argh. Fault on the instruction itself.
551 This should never happen non-SMP
552 */
553 set_fs(oldfs);
554 goto uspace_segv;
555 }
556
557 tmp = handle_unaligned_access(instruction, regs, 587 tmp = handle_unaligned_access(instruction, regs,
558 &user_mem_access); 588 &user_mem_access, 0,
589 address);
559 set_fs(oldfs); 590 set_fs(oldfs);
560 591
561 if (tmp==0) 592 if (tmp == 0)
562 return; /* sorted */ 593 return; /* sorted */
563uspace_segv: 594uspace_segv:
564 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " 595 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
@@ -571,6 +602,8 @@ uspace_segv:
571 info.si_addr = (void __user *)address; 602 info.si_addr = (void __user *)address;
572 force_sig_info(SIGBUS, &info, current); 603 force_sig_info(SIGBUS, &info, current);
573 } else { 604 } else {
605 inc_unaligned_kernel_access();
606
574 if (regs->pc & 1) 607 if (regs->pc & 1)
575 die("unaligned program counter", regs, error_code); 608 die("unaligned program counter", regs, error_code);
576 609
@@ -584,7 +617,10 @@ uspace_segv:
584 die("insn faulting in do_address_error", regs, 0); 617 die("insn faulting in do_address_error", regs, 0);
585 } 618 }
586 619
587 handle_unaligned_access(instruction, regs, &user_mem_access); 620 unaligned_fixups_notify(current, instruction, regs);
621
622 handle_unaligned_access(instruction, regs, &user_mem_access,
623 0, address);
588 set_fs(oldfs); 624 set_fs(oldfs);
589 } 625 }
590} 626}
@@ -766,35 +802,10 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
766 die_if_kernel("exception", regs, ex); 802 die_if_kernel("exception", regs, ex);
767} 803}
768 804
769#if defined(CONFIG_SH_STANDARD_BIOS)
770void *gdb_vbr_vector;
771
772static inline void __init gdb_vbr_init(void)
773{
774 register unsigned long vbr;
775
776 /*
777 * Read the old value of the VBR register to initialise
778 * the vector through which debug and BIOS traps are
779 * delegated by the Linux trap handler.
780 */
781 asm volatile("stc vbr, %0" : "=r" (vbr));
782
783 gdb_vbr_vector = (void *)(vbr + 0x100);
784 printk("Setting GDB trap vector to 0x%08lx\n",
785 (unsigned long)gdb_vbr_vector);
786}
787#endif
788
789void __cpuinit per_cpu_trap_init(void) 805void __cpuinit per_cpu_trap_init(void)
790{ 806{
791 extern void *vbr_base; 807 extern void *vbr_base;
792 808
793#ifdef CONFIG_SH_STANDARD_BIOS
794 if (raw_smp_processor_id() == 0)
795 gdb_vbr_init();
796#endif
797
798 /* NOTE: The VBR value should be at P1 809 /* NOTE: The VBR value should be at P1
799 (or P2, virtural "fixed" address space). 810 (or P2, virtural "fixed" address space).
800 It's definitely should not in physical address. */ 811 It's definitely should not in physical address. */
@@ -803,6 +814,9 @@ void __cpuinit per_cpu_trap_init(void)
803 : /* no output */ 814 : /* no output */
804 : "r" (&vbr_base) 815 : "r" (&vbr_base)
805 : "memory"); 816 : "memory");
817
818 /* disable exception blocking now when the vbr has been setup */
819 clear_bl_bit();
806} 820}
807 821
808void *set_exception_table_vec(unsigned int vec, void *handler) 822void *set_exception_table_vec(unsigned int vec, void *handler)
@@ -830,14 +844,9 @@ void __init trap_init(void)
830 set_exception_table_evt(0x800, do_reserved_inst); 844 set_exception_table_evt(0x800, do_reserved_inst);
831 set_exception_table_evt(0x820, do_illegal_slot_inst); 845 set_exception_table_evt(0x820, do_illegal_slot_inst);
832#elif defined(CONFIG_SH_FPU) 846#elif defined(CONFIG_SH_FPU)
833#ifdef CONFIG_CPU_SUBTYPE_SHX3
834 set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
835 set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
836#else
837 set_exception_table_evt(0x800, fpu_state_restore_trap_handler); 847 set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
838 set_exception_table_evt(0x820, fpu_state_restore_trap_handler); 848 set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
839#endif 849#endif
840#endif
841 850
842#ifdef CONFIG_CPU_SH2 851#ifdef CONFIG_CPU_SH2
843 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); 852 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
@@ -851,35 +860,8 @@ void __init trap_init(void)
851#endif 860#endif
852 861
853#ifdef TRAP_UBC 862#ifdef TRAP_UBC
854 set_exception_table_vec(TRAP_UBC, break_point_trap); 863 set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
855#endif 864#endif
856
857 /* Setup VBR for boot cpu */
858 per_cpu_trap_init();
859}
860
861void show_trace(struct task_struct *tsk, unsigned long *sp,
862 struct pt_regs *regs)
863{
864 unsigned long addr;
865
866 if (regs && user_mode(regs))
867 return;
868
869 printk("\nCall trace:\n");
870
871 while (!kstack_end(sp)) {
872 addr = *sp++;
873 if (kernel_text_address(addr))
874 print_ip_sym(addr);
875 }
876
877 printk("\n");
878
879 if (!tsk)
880 tsk = current;
881
882 debug_show_held_locks(tsk);
883} 865}
884 866
885void show_stack(struct task_struct *tsk, unsigned long *sp) 867void show_stack(struct task_struct *tsk, unsigned long *sp)
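The traps_32.c changes route unexpected unaligned fixups into the perf software event PERF_COUNT_SW_ALIGNMENT_FAULTS (expected trapped-I/O cases are deliberately skipped, per the comment above). A userspace sketch for observing those counts through the standard perf_event_open() interface:

    /* Not part of this patch: count alignment faults for the calling task. */
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_alignment_counter(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

            /* this task, any CPU; read() on the fd returns a u64 count */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }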
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 267e5ebbb475..6713ca97e553 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/sysctl.h> 25#include <linux/sysctl.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/perf_event.h>
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/io.h> 30#include <asm/io.h>
@@ -50,7 +51,7 @@ asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
50 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ 51 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
51} 52}
52 53
53spinlock_t die_lock; 54static DEFINE_SPINLOCK(die_lock);
54 55
55void die(const char * str, struct pt_regs * regs, long err) 56void die(const char * str, struct pt_regs * regs, long err)
56{ 57{
@@ -433,6 +434,8 @@ static int misaligned_load(struct pt_regs *regs,
433 return error; 434 return error;
434 } 435 }
435 436
437 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
438
436 destreg = (opcode >> 4) & 0x3f; 439 destreg = (opcode >> 4) & 0x3f;
437 if (user_mode(regs)) { 440 if (user_mode(regs)) {
438 __u64 buffer; 441 __u64 buffer;
@@ -509,6 +512,8 @@ static int misaligned_store(struct pt_regs *regs,
509 return error; 512 return error;
510 } 513 }
511 514
515 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
516
512 srcreg = (opcode >> 4) & 0x3f; 517 srcreg = (opcode >> 4) & 0x3f;
513 if (user_mode(regs)) { 518 if (user_mode(regs)) {
514 __u64 buffer; 519 __u64 buffer;
@@ -583,6 +588,8 @@ static int misaligned_fpu_load(struct pt_regs *regs,
583 return error; 588 return error;
584 } 589 }
585 590
591 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
592
586 destreg = (opcode >> 4) & 0x3f; 593 destreg = (opcode >> 4) & 0x3f;
587 if (user_mode(regs)) { 594 if (user_mode(regs)) {
588 __u64 buffer; 595 __u64 buffer;
@@ -600,7 +607,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
600 indexed by register number. */ 607 indexed by register number. */
601 if (last_task_used_math == current) { 608 if (last_task_used_math == current) {
602 enable_fpu(); 609 enable_fpu();
603 save_fpu(current, regs); 610 save_fpu(current);
604 disable_fpu(); 611 disable_fpu();
605 last_task_used_math = NULL; 612 last_task_used_math = NULL;
606 regs->sr |= SR_FD; 613 regs->sr |= SR_FD;
@@ -611,19 +618,19 @@ static int misaligned_fpu_load(struct pt_regs *regs,
611 618
612 switch (width_shift) { 619 switch (width_shift) {
613 case 2: 620 case 2:
614 current->thread.fpu.hard.fp_regs[destreg] = buflo; 621 current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
615 break; 622 break;
616 case 3: 623 case 3:
617 if (do_paired_load) { 624 if (do_paired_load) {
618 current->thread.fpu.hard.fp_regs[destreg] = buflo; 625 current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
619 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; 626 current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
620 } else { 627 } else {
621#if defined(CONFIG_CPU_LITTLE_ENDIAN) 628#if defined(CONFIG_CPU_LITTLE_ENDIAN)
622 current->thread.fpu.hard.fp_regs[destreg] = bufhi; 629 current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
623 current->thread.fpu.hard.fp_regs[destreg+1] = buflo; 630 current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
624#else 631#else
625 current->thread.fpu.hard.fp_regs[destreg] = buflo; 632 current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
626 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; 633 current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
627#endif 634#endif
628 } 635 }
629 break; 636 break;
@@ -658,6 +665,8 @@ static int misaligned_fpu_store(struct pt_regs *regs,
658 return error; 665 return error;
659 } 666 }
660 667
668 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
669
661 srcreg = (opcode >> 4) & 0x3f; 670 srcreg = (opcode >> 4) & 0x3f;
662 if (user_mode(regs)) { 671 if (user_mode(regs)) {
663 __u64 buffer; 672 __u64 buffer;
@@ -673,7 +682,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
673 indexed by register number. */ 682 indexed by register number. */
674 if (last_task_used_math == current) { 683 if (last_task_used_math == current) {
675 enable_fpu(); 684 enable_fpu();
676 save_fpu(current, regs); 685 save_fpu(current);
677 disable_fpu(); 686 disable_fpu();
678 last_task_used_math = NULL; 687 last_task_used_math = NULL;
679 regs->sr |= SR_FD; 688 regs->sr |= SR_FD;
@@ -681,19 +690,19 @@ static int misaligned_fpu_store(struct pt_regs *regs,
681 690
682 switch (width_shift) { 691 switch (width_shift) {
683 case 2: 692 case 2:
684 buflo = current->thread.fpu.hard.fp_regs[srcreg]; 693 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
685 break; 694 break;
686 case 3: 695 case 3:
687 if (do_paired_load) { 696 if (do_paired_load) {
688 buflo = current->thread.fpu.hard.fp_regs[srcreg]; 697 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
689 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; 698 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
690 } else { 699 } else {
691#if defined(CONFIG_CPU_LITTLE_ENDIAN) 700#if defined(CONFIG_CPU_LITTLE_ENDIAN)
692 bufhi = current->thread.fpu.hard.fp_regs[srcreg]; 701 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
693 buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; 702 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
694#else 703#else
695 buflo = current->thread.fpu.hard.fp_regs[srcreg]; 704 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
696 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; 705 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
697#endif 706#endif
698 } 707 }
699 break; 708 break;
@@ -877,44 +886,39 @@ static int misaligned_fixup(struct pt_regs *regs)
 
 static ctl_table unaligned_table[] = {
         {
-                .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "kernel_reports",
                 .data           = &kernel_mode_unaligned_fixup_count,
                 .maxlen         = sizeof(int),
                 .mode           = 0644,
-                .proc_handler   = &proc_dointvec
+                .proc_handler   = proc_dointvec
         },
         {
-                .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "user_reports",
                 .data           = &user_mode_unaligned_fixup_count,
                 .maxlen         = sizeof(int),
                 .mode           = 0644,
-                .proc_handler   = &proc_dointvec
+                .proc_handler   = proc_dointvec
         },
         {
-                .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "user_enable",
                 .data           = &user_mode_unaligned_fixup_enable,
                 .maxlen         = sizeof(int),
                 .mode           = 0644,
-                .proc_handler   = &proc_dointvec},
+                .proc_handler   = proc_dointvec},
         {}
 };
 
 static ctl_table unaligned_root[] = {
         {
-                .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "unaligned_fixup",
                 .mode           = 0555,
-                unaligned_table
+                .child          = unaligned_table
         },
         {}
 };
 
 static ctl_table sh64_root[] = {
         {
-                .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "sh64",
                 .mode           = 0555,
                 .child          = unaligned_root
@@ -949,3 +953,8 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
         /* Clear all DEBUGINT causes */
         poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
 }
+
+void __cpuinit per_cpu_trap_init(void)
+{
+        /* Nothing to do for now, VBR initialization later. */
+}
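
The sysctl hunks above drop the deprecated .ctl_name = CTL_UNNUMBERED entries, pass proc_dointvec by name rather than by address, and give unaligned_root an explicit .child link to unaligned_table. The registration of this hierarchy is not part of the quoted hunks; the following is a minimal, hypothetical sketch of how such a table tree is typically hooked up from the same file (the function and variable names here are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

/* Hypothetical registration hook; the real one lives elsewhere in traps_64.c. */
static struct ctl_table_header *sh64_sysctl_header;

static int __init sh64_unaligned_sysctl_init(void)
{
        /*
         * Exposes /proc/sys/sh64/unaligned_fixup/kernel_reports,
         * .../user_reports and .../user_enable through the .child chain
         * sh64_root -> unaligned_root -> unaligned_table.
         */
        sh64_sysctl_header = register_sysctl_table(sh64_root);
        return sh64_sysctl_header ? 0 : -ENOMEM;
}
__initcall(sh64_unaligned_sysctl_init);

With .ctl_name gone, the entries are addressed purely by procname path, which is what the sysctl core expects once the binary sysctl numbers are retired.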
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 000000000000..468889d958f4
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <asm/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+        .name = "stack-reader",
+        .dump = stack_reader_dump,
+        .rating = 50,
+        .list = {
+                .next = &unwinder_list,
+                .prev = &unwinder_list,
+        },
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+        .next = &stack_reader.list,
+        .prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+        struct unwinder *best;
+
+        if (list_empty(&unwinder_list))
+                return NULL;
+
+        best = list_entry(unwinder_list.next, struct unwinder, list);
+        if (best == curr_unwinder)
+                return NULL;
+
+        return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+        struct list_head *tmp, *entry = &unwinder_list;
+
+        list_for_each(tmp, &unwinder_list) {
+                struct unwinder *o;
+
+                o = list_entry(tmp, struct unwinder, list);
+                if (o == ops)
+                        return -EBUSY;
+                /* Keep track of the place, where to insert */
+                if (o->rating >= ops->rating)
+                        entry = tmp;
+        }
+        list_add(&ops->list, entry);
+
+        return 0;
+}
+
+/**
+ * unwinder_register - Used to install new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&unwinder_lock, flags);
+        ret = unwinder_enqueue(u);
+        if (!ret)
+                curr_unwinder = select_unwinder();
+        spin_unlock_irqrestore(&unwinder_lock, flags);
+
+        return ret;
+}
+
+int unwinder_faulted = 0;
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+                  unsigned long *sp, const struct stacktrace_ops *ops,
+                  void *data)
+{
+        unsigned long flags;
+
+        /*
+         * The problem with unwinders with high ratings is that they are
+         * inherently more complicated than the simple ones with lower
+         * ratings. We are therefore more likely to fault in the
+         * complicated ones, e.g. hitting BUG()s. If we fault in the
+         * code for the current stack unwinder we try to downgrade to
+         * one with a lower rating.
+         *
+         * Hopefully this will give us a semi-reliable stacktrace so we
+         * can diagnose why curr_unwinder->dump() faulted.
+         */
+        if (unwinder_faulted) {
+                spin_lock_irqsave(&unwinder_lock, flags);
+
+                /* Make sure no one beat us to changing the unwinder */
+                if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
+                        list_del(&curr_unwinder->list);
+                        curr_unwinder = select_unwinder();
+
+                        unwinder_faulted = 0;
+                }
+
+                spin_unlock_irqrestore(&unwinder_lock, flags);
+        }
+
+        curr_unwinder->dump(task, regs, sp, ops, data);
+}
+EXPORT_SYMBOL_GPL(unwind_stack);
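
The new arbitration code above is the whole interface an unwinder has to supply: fill in a struct unwinder (name, dump callback, rating) and call unwinder_register(); list insertion and curr_unwinder selection are handled by the code shown. A hedged sketch of a hypothetical higher-rated unwinder registering itself (the DWARF-flavoured names and the rating value are illustrative, not part of this file):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/unwinder.h>

/* Illustrative dump callback; the signature mirrors curr_unwinder->dump() above. */
static void example_dwarf_dump(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *sp, const struct stacktrace_ops *ops,
                               void *data)
{
        /* Walk the frames here, reporting each return address through ops. */
}

static struct unwinder example_dwarf_unwinder = {
        .name   = "example-dwarf-unwinder",
        .dump   = example_dwarf_dump,
        .rating = 150,  /* outranks the rating-50 stack-reader fallback */
};

static int __init example_unwinder_init(void)
{
        /* unwinder_enqueue() keeps the list sorted by rating, so the best one wins. */
        return unwinder_register(&example_dwarf_unwinder);
}
early_initcall(example_unwinder_init);

If the active dump() implementation traps, something outside this file presumably sets unwinder_faulted, after which unwind_stack() drops the faulting entry and falls back to the next-best rating; that is why even an illustrative high-rated unwinder should leave the stack-reader fallback registered.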
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f53c76acaede..7f8a709c3ada 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,7 +3,7 @@
  * Written by Niibe Yutaka and Paul Mundt
  */
 #ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET     CONFIG_PAGE_OFFSET
+#define LOAD_OFFSET     PAGE_OFFSET
 OUTPUT_ARCH(sh:sh5)
 #else
 #define LOAD_OFFSET     0
@@ -12,19 +12,18 @@ OUTPUT_ARCH(sh)
 
 #include <asm/thread_info.h>
 #include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
+
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET  0
+#else
+ #define MEMORY_OFFSET  __MEMORY_START
+#endif
 
 ENTRY(_start)
 SECTIONS
 {
-#ifdef CONFIG_PMB_FIXED
-        . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
-            CONFIG_ZERO_PAGE_OFFSET;
-#elif defined(CONFIG_32BIT)
-        . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
-        . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
+        . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 
         _text = .;              /* Text and read-only data */
 
@@ -35,12 +34,7 @@ SECTIONS
         .text : AT(ADDR(.text) - LOAD_OFFSET) {
                 HEAD_TEXT
                 TEXT_TEXT
-
-#ifdef CONFIG_SUPERH64
-                *(.text64)
-                *(.text..SHmedia32)
-#endif
-
+                EXTRA_TEXT
                 SCHED_TEXT
                 LOCK_TEXT
                 KPROBES_TEXT
@@ -50,90 +44,20 @@ SECTIONS
                 _etext = .;             /* End of text section */
         } = 0x0009
 
-        . = ALIGN(16);          /* Exception table */
-        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-                __start___ex_table = .;
-                *(__ex_table)
-                __stop___ex_table = .;
-        }
-
+        EXCEPTION_TABLE(16)
         NOTES
-        RO_DATA(PAGE_SIZE)
-
-        /*
-         * Code which must be executed uncached and the associated data
-         */
-        . = ALIGN(PAGE_SIZE);
-        .uncached : AT(ADDR(.uncached) - LOAD_OFFSET) {
-                __uncached_start = .;
-                *(.uncached.text)
-                *(.uncached.data)
-                __uncached_end = .;
-        }
-
-        . = ALIGN(THREAD_SIZE);
-        .data : AT(ADDR(.data) - LOAD_OFFSET) {         /* Data */
-                *(.data.init_task)
-
-                . = ALIGN(L1_CACHE_BYTES);
-                *(.data.cacheline_aligned)
-
-                . = ALIGN(L1_CACHE_BYTES);
-                *(.data.read_mostly)
-
-                . = ALIGN(PAGE_SIZE);
-                *(.data.page_aligned)
-
-                __nosave_begin = .;
-                *(.data.nosave)
-                . = ALIGN(PAGE_SIZE);
-                __nosave_end = .;
 
-                DATA_DATA
-                CONSTRUCTORS
-        }
+        _sdata = .;
+        RO_DATA(PAGE_SIZE)
+        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+        _edata = .;
 
-        _edata = .;                     /* End of data section */
+        DWARF_EH_FRAME
 
         . = ALIGN(PAGE_SIZE);           /* Init code and data */
-        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-                __init_begin = .;
-                _sinittext = .;
-                INIT_TEXT
-                _einittext = .;
-        }
-
-        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA }
-
-        . = ALIGN(16);
-        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-                __setup_start = .;
-                *(.init.setup)
-                __setup_end = .;
-        }
-
-        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-                __initcall_start = .;
-                INITCALLS
-                __initcall_end = .;
-        }
-
-        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
-                __con_initcall_start = .;
-                *(.con_initcall.init)
-                __con_initcall_end = .;
-        }
-
-        SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
-        . = ALIGN(PAGE_SIZE);
-        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-                __initramfs_start = .;
-                *(.init.ramfs)
-                __initramfs_end = .;
-        }
-#endif
+        __init_begin = .;
+        INIT_TEXT_SECTION(PAGE_SIZE)
+        INIT_DATA_SECTION(16)
 
         . = ALIGN(4);
         .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
@@ -152,27 +76,13 @@ SECTIONS
         .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
 
         . = ALIGN(PAGE_SIZE);
-        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-                __init_end = .;
-                __bss_start = .;                /* BSS */
-                *(.bss.page_aligned)
-                *(.bss)
-                *(COMMON)
-                . = ALIGN(4);
-                _ebss = .;                      /* uClinux MTD sucks */
-                _end = . ;
-        }
-
-        /*
-         * When something in the kernel is NOT compiled as a module, the
-         * module cleanup code and data are put into these segments. Both
-         * can then be thrown away, as cleanup code is never called unless
-         * it's a module.
-         */
-        /DISCARD/ : {
-                *(.exitcall.exit)
-        }
+        __init_end = .;
+        BSS_SECTION(0, PAGE_SIZE, 4)
+        _ebss = .;                      /* uClinux MTD sucks */
+        _end = . ;
 
         STABS_DEBUG
         DWARF_DEBUG
+
+        DISCARDS
 }
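
The rewritten script delegates most of the layout to the generic macros (EXCEPTION_TABLE, RW_DATA_SECTION, BSS_SECTION, INIT_TEXT_SECTION, INIT_DATA_SECTION) but still emits the traditional boundary symbols: _sdata, _edata, __init_begin, __init_end, _ebss and _end. From C these symbols carry no storage, only an address, so the usual idiom is to declare them as character arrays and do pointer arithmetic on them. A small hedged sketch (the helper function is illustrative, not from the patch; in-tree code normally picks these declarations up from a sections header):

#include <linux/kernel.h>

/* Linker-provided boundary symbols; only their addresses are meaningful. */
extern char _sdata[], _edata[], __init_begin[], __init_end[];

/* Illustrative helper: report how much space the sections laid out above occupy. */
static void __maybe_unused report_section_sizes(void)
{
        pr_info("data: %lu bytes, init: %lu bytes\n",
                (unsigned long)(_edata - _sdata),
                (unsigned long)(__init_end - __init_begin));
}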
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 4bbce1cfa359..8f0ea5fc835c 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_syscall = SYSCALL $@
 export CPPFLAGS_vsyscall.lds += -P -C -Ush
 
 vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
-                 $(call ld-option, -Wl$(comma)--hash-style=sysv)
+                 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 
 SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
 
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3b6eb34c43fa..3e70f851cdc6 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -8,9 +8,9 @@ __kernel_vsyscall:
          * fill out .eh_frame -- PFM. */
 .LEND_vsyscall:
         .size __kernel_vsyscall,.-.LSTART_vsyscall
-        .previous
 
         .section .eh_frame,"a",@progbits
+        .previous
 .LCIE:
         .ualong .LCIE_end - .LCIE_start
 .LCIE_start:
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 3f7e415be86a..242117cbad67 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -11,7 +11,6 @@
  * for more details.
  */
 #include <linux/mm.h>
-#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/gfp.h>