diff options
Diffstat (limited to 'arch/sh64')
75 files changed, 17847 insertions, 0 deletions
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig new file mode 100644 index 000000000000..76eb81fba45e --- /dev/null +++ b/arch/sh64/Kconfig | |||
@@ -0,0 +1,293 @@ | |||
1 | # | ||
2 | # For a description of the syntax of this configuration file, | ||
3 | # see Documentation/kbuild/config-language.txt. | ||
4 | # | ||
5 | |||
6 | mainmenu "Linux/SH64 Kernel Configuration" | ||
7 | |||
8 | config SUPERH | ||
9 | bool | ||
10 | default y | ||
11 | |||
12 | config SUPERH64 | ||
13 | bool | ||
14 | default y | ||
15 | |||
16 | config MMU | ||
17 | bool | ||
18 | default y | ||
19 | |||
20 | config UID16 | ||
21 | bool | ||
22 | default y | ||
23 | |||
24 | config RWSEM_GENERIC_SPINLOCK | ||
25 | bool | ||
26 | default y | ||
27 | |||
28 | config GENERIC_CALIBRATE_DELAY | ||
29 | bool | ||
30 | default y | ||
31 | |||
32 | config LOG_BUF_SHIFT | ||
33 | int | ||
34 | default 14 | ||
35 | |||
36 | config RWSEM_XCHGADD_ALGORITHM | ||
37 | bool | ||
38 | |||
39 | config GENERIC_ISA_DMA | ||
40 | bool | ||
41 | |||
42 | source init/Kconfig | ||
43 | |||
44 | menu "System type" | ||
45 | |||
46 | choice | ||
47 | prompt "SuperH system type" | ||
48 | default SH_SIMULATOR | ||
49 | |||
50 | config SH_GENERIC | ||
51 | bool "Generic" | ||
52 | |||
53 | config SH_SIMULATOR | ||
54 | bool "Simulator" | ||
55 | |||
56 | config SH_CAYMAN | ||
57 | bool "Cayman" | ||
58 | |||
59 | config SH_ROMRAM | ||
60 | bool "ROM/RAM" | ||
61 | |||
62 | config SH_HARP | ||
63 | bool "ST50-Harp" | ||
64 | |||
65 | endchoice | ||
66 | |||
67 | choice | ||
68 | prompt "Processor family" | ||
69 | default CPU_SH5 | ||
70 | |||
71 | config CPU_SH5 | ||
72 | bool "SH-5" | ||
73 | |||
74 | endchoice | ||
75 | |||
76 | choice | ||
77 | prompt "Processor type" | ||
78 | |||
79 | config CPU_SUBTYPE_SH5_101 | ||
80 | bool "SH5-101" | ||
81 | depends on CPU_SH5 | ||
82 | |||
83 | config CPU_SUBTYPE_SH5_103 | ||
84 | bool "SH5-103" | ||
85 | depends on CPU_SH5 | ||
86 | |||
87 | endchoice | ||
88 | |||
89 | choice | ||
90 | prompt "Endianness" | ||
91 | default LITTLE_ENDIAN | ||
92 | |||
93 | config LITTLE_ENDIAN | ||
94 | bool "Little-Endian" | ||
95 | |||
96 | config BIG_ENDIAN | ||
97 | bool "Big-Endian" | ||
98 | |||
99 | endchoice | ||
100 | |||
101 | config SH_FPU | ||
102 | bool "FPU support" | ||
103 | default y | ||
104 | |||
105 | config SH64_FPU_DENORM_FLUSH | ||
106 | depends on SH_FPU | ||
107 | bool "Flush floating point denorms to zero" | ||
108 | |||
109 | choice | ||
110 | prompt "Page table levels" | ||
111 | default SH64_PGTABLE_2_LEVEL | ||
112 | |||
113 | config SH64_PGTABLE_2_LEVEL | ||
114 | bool "2" | ||
115 | |||
116 | config SH64_PGTABLE_3_LEVEL | ||
117 | bool "3" | ||
118 | |||
119 | endchoice | ||
120 | |||
121 | choice | ||
122 | prompt "HugeTLB page size" | ||
123 | depends on HUGETLB_PAGE && MMU | ||
124 | default HUGETLB_PAGE_SIZE_64K | ||
125 | |||
126 | config HUGETLB_PAGE_SIZE_64K | ||
127 | bool "64K" | ||
128 | |||
129 | config HUGETLB_PAGE_SIZE_1MB | ||
130 | bool "1MB" | ||
131 | |||
132 | config HUGETLB_PAGE_SIZE_512MB | ||
133 | bool "512MB" | ||
134 | |||
135 | endchoice | ||
136 | |||
137 | config SH64_USER_MISALIGNED_FIXUP | ||
138 | bool "Fixup misaligned loads/stores occurring in user mode" | ||
139 | |||
140 | comment "Memory options" | ||
141 | |||
142 | config CACHED_MEMORY_OFFSET | ||
143 | hex "Cached Area Offset" | ||
144 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
145 | default "20000000" | ||
146 | |||
147 | config MEMORY_START | ||
148 | hex "Physical memory start address" | ||
149 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
150 | default "80000000" | ||
151 | |||
152 | config MEMORY_SIZE_IN_MB | ||
153 | int "Memory size (in MB)" if SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
154 | default "64" if SH_HARP || SH_CAYMAN | ||
155 | default "8" if SH_SIMULATOR | ||
156 | |||
157 | comment "Cache options" | ||
158 | |||
159 | config DCACHE_DISABLED | ||
160 | bool "DCache Disabling" | ||
161 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
162 | |||
163 | choice | ||
164 | prompt "DCache mode" | ||
165 | depends on !DCACHE_DISABLED && !SH_SIMULATOR | ||
166 | default DCACHE_WRITE_BACK | ||
167 | |||
168 | config DCACHE_WRITE_BACK | ||
169 | bool "Write-back" | ||
170 | |||
171 | config DCACHE_WRITE_THROUGH | ||
172 | bool "Write-through" | ||
173 | |||
174 | endchoice | ||
175 | |||
176 | config ICACHE_DISABLED | ||
177 | bool "ICache Disabling" | ||
178 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
179 | |||
180 | config PCIDEVICE_MEMORY_START | ||
181 | hex | ||
182 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
183 | default "C0000000" | ||
184 | |||
185 | config DEVICE_MEMORY_START | ||
186 | hex | ||
187 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
188 | default "E0000000" | ||
189 | |||
190 | config FLASH_MEMORY_START | ||
191 | hex "Flash memory/on-chip devices start address" | ||
192 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
193 | default "00000000" | ||
194 | |||
195 | config PCI_BLOCK_START | ||
196 | hex "PCI block start address" | ||
197 | depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR | ||
198 | default "40000000" | ||
199 | |||
200 | comment "CPU Subtype specific options" | ||
201 | |||
202 | config SH64_ID2815_WORKAROUND | ||
203 | bool "Include workaround for SH5-101 cut2 silicon defect ID2815" | ||
204 | |||
205 | comment "Misc options" | ||
206 | config HEARTBEAT | ||
207 | bool "Heartbeat LED" | ||
208 | |||
209 | config HDSP253_LED | ||
210 | bool "Support for HDSP-253 LED" | ||
211 | depends on SH_CAYMAN | ||
212 | |||
213 | config SH_DMA | ||
214 | tristate "DMA controller (DMAC) support" | ||
215 | |||
216 | config PREEMPT | ||
217 | bool "Preemptible Kernel (EXPERIMENTAL)" | ||
218 | depends on EXPERIMENTAL | ||
219 | |||
220 | endmenu | ||
221 | |||
222 | menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" | ||
223 | |||
224 | config ISA | ||
225 | bool | ||
226 | |||
227 | config SBUS | ||
228 | bool | ||
229 | |||
230 | config PCI | ||
231 | bool "PCI support" | ||
232 | help | ||
233 | Find out whether you have a PCI motherboard. PCI is the name of a | ||
234 | bus system, i.e. the way the CPU talks to the other stuff inside | ||
235 | your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or | ||
236 | VESA. If you have PCI, say Y, otherwise N. | ||
237 | |||
238 | The PCI-HOWTO, available from | ||
239 | <http://www.tldp.org/docs.html#howto>, contains valuable | ||
240 | information about which PCI hardware does work under Linux and which | ||
241 | doesn't. | ||
242 | |||
243 | config SH_PCIDMA_NONCOHERENT | ||
244 | bool "Cache and PCI noncoherent" | ||
245 | depends on PCI | ||
246 | default y | ||
247 | help | ||
248 | Enable this option if your platform does not have a CPU cache which | ||
249 | remains coherent with PCI DMA. It is safest to say 'Y', although you | ||
250 | will see better performance if you can say 'N', because the PCI DMA | ||
251 | code will not have to flush the CPU's caches. If you have a PCI host | ||
252 | bridge integrated with your SH CPU, refer carefully to the chip specs | ||
253 | to see if you can say 'N' here. Otherwise, leave it as 'Y'. | ||
254 | |||
255 | source "drivers/pci/Kconfig" | ||
256 | |||
257 | source "drivers/pcmcia/Kconfig" | ||
258 | |||
259 | source "drivers/pci/hotplug/Kconfig" | ||
260 | |||
261 | endmenu | ||
262 | |||
263 | menu "Executable file formats" | ||
264 | |||
265 | source "fs/Kconfig.binfmt" | ||
266 | |||
267 | endmenu | ||
268 | |||
269 | source "drivers/Kconfig" | ||
270 | |||
271 | source "fs/Kconfig" | ||
272 | |||
273 | source "arch/sh64/oprofile/Kconfig" | ||
274 | |||
275 | source "arch/sh64/Kconfig.debug" | ||
276 | |||
277 | source "security/Kconfig" | ||
278 | |||
279 | source "crypto/Kconfig" | ||
280 | |||
281 | source "lib/Kconfig" | ||
282 | |||
283 | # | ||
284 | # Use the generic interrupt handling code in kernel/irq/: | ||
285 | # | ||
286 | config GENERIC_HARDIRQS | ||
287 | bool | ||
288 | default y | ||
289 | |||
290 | config GENERIC_IRQ_PROBE | ||
291 | bool | ||
292 | default y | ||
293 | |||
diff --git a/arch/sh64/Kconfig.debug b/arch/sh64/Kconfig.debug new file mode 100644 index 000000000000..26d842c07139 --- /dev/null +++ b/arch/sh64/Kconfig.debug | |||
@@ -0,0 +1,44 @@ | |||
1 | menu "Kernel hacking" | ||
2 | |||
3 | source "lib/Kconfig.debug" | ||
4 | |||
5 | config EARLY_PRINTK | ||
6 | bool "Early SCIF console support" | ||
7 | |||
8 | config DEBUG_KERNEL_WITH_GDB_STUB | ||
9 | bool "GDB Stub kernel debug" | ||
10 | |||
11 | config SH64_PROC_TLB | ||
12 | bool "Debug: report TLB fill/purge activity through /proc/tlb" | ||
13 | depends on PROC_FS | ||
14 | |||
15 | config SH64_PROC_ASIDS | ||
16 | bool "Debug: report ASIDs through /proc/asids" | ||
17 | depends on PROC_FS | ||
18 | |||
19 | config SH64_SR_WATCH | ||
20 | bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" | ||
21 | |||
22 | config POOR_MANS_STRACE | ||
23 | bool "Debug: enable rudimentary strace facility" | ||
24 | help | ||
25 | This option allows system calls to be traced to the console. It also | ||
26 | aids in detecting kernel stack underflow. It is useful for debugging | ||
27 | early-userland problems (e.g. init incurring fatal exceptions.) | ||
28 | |||
29 | config SH_ALPHANUMERIC | ||
30 | bool "Enable debug outputs to on-board alphanumeric display" | ||
31 | |||
32 | config SH_NO_BSS_INIT | ||
33 | bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)" | ||
34 | |||
35 | config FRAME_POINTER | ||
36 | bool "Compile the kernel with frame pointers" | ||
37 | default y if KGDB | ||
38 | help | ||
39 | If you say Y here the resulting kernel image will be slightly larger | ||
40 | and slower, but it will give very useful debugging information. | ||
41 | If you don't debug the kernel, you can say N, but we may not be able | ||
42 | to solve problems without frame pointers. | ||
43 | |||
44 | endmenu | ||
diff --git a/arch/sh64/Makefile b/arch/sh64/Makefile new file mode 100644 index 000000000000..b4fd8e13fea9 --- /dev/null +++ b/arch/sh64/Makefile | |||
@@ -0,0 +1,116 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2000, 2001 Paolo Alberelli | ||
7 | # Copyright (C) 2003, 2004 Paul Mundt | ||
8 | # | ||
9 | # This file is included by the global makefile so that you can add your own | ||
# architecture-specific flags and dependencies. Remember to have actions
11 | # for "archclean" and "archdep" for cleaning up and making dependencies for | ||
12 | # this architecture | ||
13 | # | ||
14 | |||
15 | cpu-y := -mb | ||
16 | cpu-$(CONFIG_LITTLE_ENDIAN) := -ml | ||
17 | |||
18 | cpu-$(CONFIG_CPU_SH5) += -m5-32media-nofpu | ||
19 | |||
20 | ifdef CONFIG_LITTLE_ENDIAN | ||
21 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' | ||
22 | LDFLAGS += -EL -mshlelf32_linux | ||
23 | else | ||
24 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' | ||
25 | LDFLAGS += -EB -mshelf32_linux | ||
26 | endif | ||
27 | |||
# No requirements for endianness support from AFLAGS, 'as' always run through gcc
29 | AFLAGS += -m5 -isa=sh64 -traditional | ||
30 | CFLAGS += $(cpu-y) | ||
31 | |||
32 | LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_CACHED_MEMORY_OFFSET) \ | ||
33 | --defsym phys_stext_shmedia=phys_stext+1 \ | ||
34 | -e phys_stext_shmedia | ||
35 | |||
36 | OBJCOPYFLAGS := -O binary -R .note -R .comment -R .stab -R .stabstr -S | ||
37 | |||
38 | # | ||
39 | # arch/sh64/defconfig never had any hope of being | ||
40 | # frequently updated, so use one that does | ||
41 | # | ||
42 | KBUILD_DEFCONFIG := cayman_defconfig | ||
43 | |||
44 | ifdef LOADADDR | ||
45 | LINKFLAGS += -Ttext $(word 1,$(LOADADDR)) | ||
46 | endif | ||
47 | |||
48 | machine-$(CONFIG_SH_CAYMAN) := cayman | ||
49 | machine-$(CONFIG_SH_SIMULATOR) := sim | ||
50 | machine-$(CONFIG_SH_HARP) := harp | ||
51 | machine-$(CONFIG_SH_ROMRAM) := romram | ||
52 | |||
53 | head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o | ||
54 | |||
55 | core-y += arch/sh64/kernel/ arch/sh64/mm/ | ||
56 | |||
57 | ifneq ($(machine-y),) | ||
58 | core-y += arch/sh64/mach-$(machine-y)/ | ||
59 | endif | ||
60 | |||
61 | LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name) | ||
62 | libs-y += arch/$(ARCH)/lib/ $(LIBGCC) | ||
63 | |||
64 | drivers-$(CONFIG_OPROFILE) += arch/sh64/oprofile/ | ||
65 | |||
66 | boot := arch/$(ARCH)/boot | ||
67 | |||
68 | zImage: vmlinux | ||
69 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
70 | |||
71 | compressed: zImage | ||
72 | |||
73 | archclean: | ||
74 | $(Q)$(MAKE) $(clean)=$(boot) | ||
75 | |||
76 | prepare: include/asm-$(ARCH)/asm-offsets.h arch/$(ARCH)/lib/syscalltab.h | ||
77 | |||
78 | include/asm-$(ARCH)/asm-offsets.h: arch/$(ARCH)/kernel/asm-offsets.s \ | ||
79 | include/asm include/linux/version.h | ||
80 | $(call filechk,gen-asm-offsets) | ||
81 | |||
82 | define filechk_gen-syscalltab | ||
83 | (set -e; \ | ||
84 | echo "/*"; \ | ||
85 | echo " * DO NOT MODIFY."; \ | ||
86 | echo " *"; \ | ||
87 | echo " * This file was generated by arch/$(ARCH)/Makefile"; \ | ||
88 | echo " * Any changes will be reverted at build time."; \ | ||
89 | echo " */"; \ | ||
90 | echo ""; \ | ||
91 | echo "#ifndef __SYSCALLTAB_H"; \ | ||
92 | echo "#define __SYSCALLTAB_H"; \ | ||
93 | echo ""; \ | ||
94 | echo "#include <linux/kernel.h>"; \ | ||
95 | echo ""; \ | ||
96 | echo "struct syscall_info {"; \ | ||
97 | echo " const char *name;"; \ | ||
98 | echo "} syscall_info_table[] = {"; \ | ||
99 | sed -e '/^.*\.long /!d;s// { "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \ | ||
100 | s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \ | ||
101 | echo "};"; \ | ||
102 | echo ""; \ | ||
103 | echo "#define NUM_SYSCALL_INFO_ENTRIES ARRAY_SIZE(syscall_info_table)"; \ | ||
104 | echo ""; \ | ||
105 | echo "#endif /* __SYSCALLTAB_H */" ) | ||
106 | endef | ||
107 | |||
108 | arch/$(ARCH)/lib/syscalltab.h: arch/sh64/kernel/syscalls.S | ||
109 | $(call filechk,gen-syscalltab) | ||
110 | |||
111 | CLEAN_FILES += include/asm-$(ARCH)/asm-offsets.h arch/$(ARCH)/lib/syscalltab.h | ||
112 | |||
113 | define archhelp | ||
114 | @echo ' zImage - Compressed kernel image (arch/sh64/boot/zImage)' | ||
115 | endef | ||
116 | |||
diff --git a/arch/sh64/boot/Makefile b/arch/sh64/boot/Makefile new file mode 100644 index 000000000000..fb71087b7b8a --- /dev/null +++ b/arch/sh64/boot/Makefile | |||
@@ -0,0 +1,20 @@ | |||
1 | # | ||
2 | # arch/sh64/boot/Makefile | ||
3 | # | ||
4 | # This file is subject to the terms and conditions of the GNU General Public | ||
5 | # License. See the file "COPYING" in the main directory of this archive | ||
6 | # for more details. | ||
7 | # | ||
8 | # Copyright (C) 2002 Stuart Menefy | ||
9 | # | ||
10 | |||
11 | targets := zImage | ||
12 | subdir- := compressed | ||
13 | |||
14 | $(obj)/zImage: $(obj)/compressed/vmlinux FORCE | ||
15 | $(call if_changed,objcopy) | ||
16 | @echo 'Kernel: $@ is ready' | ||
17 | |||
18 | $(obj)/compressed/vmlinux: FORCE | ||
19 | $(Q)$(MAKE) $(build)=$(obj)/compressed $@ | ||
20 | |||
diff --git a/arch/sh64/boot/compressed/Makefile b/arch/sh64/boot/compressed/Makefile new file mode 100644 index 000000000000..9cd216718856 --- /dev/null +++ b/arch/sh64/boot/compressed/Makefile | |||
@@ -0,0 +1,46 @@ | |||
#
# linux/arch/sh64/boot/compressed/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002 Stuart Menefy
# Copyright (C) 2004 Paul Mundt
#
# create a compressed vmlinux image from the original vmlinux
#

targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
		   head.o misc.o cache.o piggy.o vmlinux.lds

EXTRA_AFLAGS	:= -traditional

OBJECTS		:= $(obj)/head.o $(obj)/misc.o $(obj)/cache.o

#
# ZIMAGE_OFFSET is the load offset of the compression loader
# (4M for the kernel plus 64K for this loader)
#
# Use POSIX shell arithmetic $((...)) here: the old $[...] form is a
# bash/ksh extension that is not understood by dash and other strict
# POSIX /bin/sh implementations, yielding an empty ZIMAGE_OFFSET.
#
ZIMAGE_OFFSET = $(shell printf "0x%8x" $$(($(CONFIG_MEMORY_START)+0x400000+0x10000)))

LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
		   -T $(obj)/../../kernel/vmlinux.lds \
		   --no-warn-mismatch

# Link the decompression loader: head/misc/cache plus the piggy-backed
# compressed kernel image.
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
	$(call if_changed,ld)
	@:

# vmlinux.bin is deliberately made from the *top-level* vmlinux (the
# real kernel), not $(obj)/vmlinux: it is the payload that gets
# compressed and embedded into piggy.o below.
$(obj)/vmlinux.bin: vmlinux FORCE
	$(call if_changed,objcopy)

$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
	$(call if_changed,gzip)

LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
OBJCOPYFLAGS += -R .empty_zero_page

$(obj)/piggy.o: $(obj)/vmlinux.lds $(obj)/vmlinux.bin.gz FORCE
	$(call if_changed,ld)

diff --git a/arch/sh64/boot/compressed/cache.c b/arch/sh64/boot/compressed/cache.c new file mode 100644 index 000000000000..708707355ffa --- /dev/null +++ b/arch/sh64/boot/compressed/cache.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * arch/shmedia/boot/compressed/cache.c -- simple cache management functions | ||
3 | * | ||
4 | * Code extracted from sh-ipl+g, sh-stub.c, which has the copyright: | ||
5 | * | ||
6 | * This is originally based on an m68k software stub written by Glenn | ||
7 | * Engel at HP, but has changed quite a bit. | ||
8 | * | ||
9 | * Modifications for the SH by Ben Lee and Steve Chamberlain | ||
10 | * | ||
11 | **************************************************************************** | ||
12 | |||
13 | THIS SOFTWARE IS NOT COPYRIGHTED | ||
14 | |||
15 | HP offers the following for use in the public domain. HP makes no | ||
16 | warranty with regard to the software or it's performance and the | ||
17 | user accepts the software "AS IS" with all faults. | ||
18 | |||
19 | HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD | ||
20 | TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
21 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | |||
23 | ****************************************************************************/ | ||
24 | |||
25 | #define CACHE_ENABLE 0 | ||
26 | #define CACHE_DISABLE 1 | ||
27 | |||
28 | int cache_control(unsigned int command) | ||
29 | { | ||
30 | volatile unsigned int *p = (volatile unsigned int *) 0x80000000; | ||
31 | int i; | ||
32 | |||
33 | for (i = 0; i < (32 * 1024); i += 32) { | ||
34 | (void *) *p; | ||
35 | p += (32 / sizeof (int)); | ||
36 | } | ||
37 | |||
38 | return 0; | ||
39 | } | ||
diff --git a/arch/sh64/boot/compressed/head.S b/arch/sh64/boot/compressed/head.S new file mode 100644 index 000000000000..82040b1a29cf --- /dev/null +++ b/arch/sh64/boot/compressed/head.S | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/shmedia/boot/compressed/head.S | ||
7 | * | ||
8 | * Copied from | ||
9 | * arch/shmedia/kernel/head.S | ||
10 | * which carried the copyright: | ||
11 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
12 | * | ||
13 | * Modification for compressed loader: | ||
14 | * Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com) | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <asm/registers.h> | ||
19 | #include <asm/cache.h> | ||
20 | #include <asm/mmu_context.h> | ||
21 | |||
22 | /* | ||
23 | * Fixed TLB entries to identity map the beginning of RAM | ||
24 | */ | ||
25 | #define MMUIR_TEXT_H 0x0000000000000003 | CONFIG_MEMORY_START | ||
26 | /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ | ||
27 | #define MMUIR_TEXT_L 0x000000000000009a | CONFIG_MEMORY_START | ||
28 | /* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */ | ||
29 | |||
30 | #define MMUDR_CACHED_H 0x0000000000000003 | CONFIG_MEMORY_START | ||
31 | /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ | ||
32 | #define MMUDR_CACHED_L 0x000000000000015a | CONFIG_MEMORY_START | ||
33 | /* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */ | ||
34 | |||
35 | #define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */ | ||
36 | #define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */ | ||
37 | |||
38 | #if 1 | ||
39 | #define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* OCE + OCI + WB */ | ||
40 | #else | ||
41 | #define OCCR0_INIT_VAL OCCR0_OFF | ||
42 | #endif | ||
43 | #define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */ | ||
44 | |||
45 | .text | ||
46 | |||
47 | .global startup | ||
48 | startup: | ||
49 | /* | ||
50 | * Prevent speculative fetch on device memory due to | ||
51 | * uninitialized target registers. | ||
52 | * This must be executed before the first branch. | ||
53 | */ | ||
54 | ptabs/u ZERO, tr0 | ||
55 | ptabs/u ZERO, tr1 | ||
56 | ptabs/u ZERO, tr2 | ||
57 | ptabs/u ZERO, tr3 | ||
58 | ptabs/u ZERO, tr4 | ||
59 | ptabs/u ZERO, tr5 | ||
60 | ptabs/u ZERO, tr6 | ||
61 | ptabs/u ZERO, tr7 | ||
62 | synci | ||
63 | |||
64 | /* | ||
65 | * Set initial TLB entries for cached and uncached regions. | ||
66 | * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't ! | ||
67 | */ | ||
68 | /* Clear ITLBs */ | ||
69 | pta 1f, tr1 | ||
70 | movi ITLB_FIXED, r21 | ||
71 | movi ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 | ||
72 | 1: putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */ | ||
73 | addi r21, TLB_STEP, r21 | ||
74 | bne r21, r22, tr1 | ||
75 | |||
76 | /* Clear DTLBs */ | ||
77 | pta 1f, tr1 | ||
78 | movi DTLB_FIXED, r21 | ||
79 | movi DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 | ||
80 | 1: putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */ | ||
81 | addi r21, TLB_STEP, r21 | ||
82 | bne r21, r22, tr1 | ||
83 | |||
84 | /* Map one big (512Mb) page for ITLB */ | ||
85 | movi ITLB_FIXED, r21 | ||
86 | movi MMUIR_TEXT_L, r22 /* PTEL first */ | ||
87 | putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */ | ||
88 | movi MMUIR_TEXT_H, r22 /* PTEH last */ | ||
89 | putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */ | ||
90 | |||
91 | /* Map one big CACHED (512Mb) page for DTLB */ | ||
92 | movi DTLB_FIXED, r21 | ||
93 | movi MMUDR_CACHED_L, r22 /* PTEL first */ | ||
94 | putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */ | ||
95 | movi MMUDR_CACHED_H, r22 /* PTEH last */ | ||
96 | putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ | ||
97 | |||
98 | /* ICache */ | ||
99 | movi ICCR_BASE, r21 | ||
100 | movi ICCR0_INIT_VAL, r22 | ||
101 | movi ICCR1_INIT_VAL, r23 | ||
102 | putcfg r21, ICCR_REG0, r22 | ||
103 | putcfg r21, ICCR_REG1, r23 | ||
104 | synci | ||
105 | |||
106 | /* OCache */ | ||
107 | movi OCCR_BASE, r21 | ||
108 | movi OCCR0_INIT_VAL, r22 | ||
109 | movi OCCR1_INIT_VAL, r23 | ||
110 | putcfg r21, OCCR_REG0, r22 | ||
111 | putcfg r21, OCCR_REG1, r23 | ||
112 | synco | ||
113 | |||
114 | /* | ||
115 | * Enable the MMU. | ||
116 | * From here-on code can be non-PIC. | ||
117 | */ | ||
118 | movi SR_HARMLESS | SR_ENABLE_MMU, r22 | ||
119 | putcon r22, SSR | ||
120 | movi 1f, r22 | ||
121 | putcon r22, SPC | ||
122 | synco | ||
123 | rte /* And now go into the hyperspace ... */ | ||
124 | 1: /* ... that's the next instruction ! */ | ||
125 | |||
126 | /* Set initial stack pointer */ | ||
127 | movi datalabel stack_start, r0 | ||
128 | ld.l r0, 0, r15 | ||
129 | |||
130 | /* | ||
131 | * Clear bss | ||
132 | */ | ||
133 | pt 1f, tr1 | ||
134 | movi datalabel __bss_start, r22 | ||
135 | movi datalabel _end, r23 | ||
136 | 1: st.l r22, 0, ZERO | ||
137 | addi r22, 4, r22 | ||
138 | bne r22, r23, tr1 | ||
139 | |||
140 | /* | ||
141 | * Decompress the kernel. | ||
142 | */ | ||
143 | pt decompress_kernel, tr0 | ||
144 | blink tr0, r18 | ||
145 | |||
146 | /* | ||
147 | * Disable the MMU. | ||
148 | */ | ||
149 | movi SR_HARMLESS, r22 | ||
150 | putcon r22, SSR | ||
151 | movi 1f, r22 | ||
152 | putcon r22, SPC | ||
153 | synco | ||
154 | rte /* And now go into the hyperspace ... */ | ||
155 | 1: /* ... that's the next instruction ! */ | ||
156 | |||
157 | /* Jump into the decompressed kernel */ | ||
158 | movi datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19 | ||
159 | ptabs r19, tr0 | ||
160 | blink tr0, r18 | ||
161 | |||
162 | /* Shouldn't return here, but just in case, loop forever */ | ||
163 | pt 1f, tr0 | ||
164 | 1: blink tr0, ZERO | ||
diff --git a/arch/sh64/boot/compressed/install.sh b/arch/sh64/boot/compressed/install.sh new file mode 100644 index 000000000000..90589f0fec12 --- /dev/null +++ b/arch/sh64/boot/compressed/install.sh | |||
@@ -0,0 +1,56 @@ | |||
#!/bin/sh
#
# arch/sh/boot/install.sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
# Adapted from code in arch/i386/boot/install.sh by Russell King
# Adapted from code in arch/arm/boot/install.sh by Stuart Menefy
#
# "make install" script for sh architecture
#
# Arguments:
#   $1 - kernel version
#   $2 - kernel image file
#   $3 - kernel map file
#   $4 - default install path (blank if root directory)
#
# All parameter expansions are quoted so the script works when the
# install path or image names contain whitespace.

# User may have a custom install script

if [ -x /sbin/installkernel ]; then
	exec /sbin/installkernel "$@"
fi

if [ "$2" = "zImage" ]; then
# Compressed install
  echo "Installing compressed kernel"

  # Keep one backup of the previous image and map.
  if [ -f "$4/vmlinuz-$1" ]; then
    mv "$4/vmlinuz-$1" "$4/vmlinuz.old"
  fi

  if [ -f "$4/System.map-$1" ]; then
    mv "$4/System.map-$1" "$4/System.old"
  fi

  cat "$2" > "$4/vmlinuz-$1"
  cp "$3" "$4/System.map-$1"
else
# Normal install
  echo "Installing normal kernel"
  if [ -f "$4/vmlinux-$1" ]; then
    mv "$4/vmlinux-$1" "$4/vmlinux.old"
  fi

  # NOTE(review): unlike the compressed branch, this branch installs an
  # unversioned System.map -- preserved as-is, but confirm it is
  # intentional.
  if [ -f "$4/System.map" ]; then
    mv "$4/System.map" "$4/System.old"
  fi

  cat "$2" > "$4/vmlinux-$1"
  cp "$3" "$4/System.map"
fi
diff --git a/arch/sh64/boot/compressed/misc.c b/arch/sh64/boot/compressed/misc.c new file mode 100644 index 000000000000..89dbf45df3c8 --- /dev/null +++ b/arch/sh64/boot/compressed/misc.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * arch/shmedia/boot/compressed/misc.c | ||
3 | * | ||
4 | * This is a collection of several routines from gzip-1.0.3 | ||
5 | * adapted for Linux. | ||
6 | * | ||
7 | * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 | ||
8 | * | ||
9 | * Adapted for SHmedia from sh by Stuart Menefy, May 2002 | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* cache.c */ | ||
16 | #define CACHE_ENABLE 0 | ||
17 | #define CACHE_DISABLE 1 | ||
18 | int cache_control(unsigned int command); | ||
19 | |||
20 | /* | ||
21 | * gzip declarations | ||
22 | */ | ||
23 | |||
24 | #define OF(args) args | ||
25 | #define STATIC static | ||
26 | |||
27 | #undef memset | ||
28 | #undef memcpy | ||
29 | #define memzero(s, n) memset ((s), 0, (n)) | ||
30 | |||
31 | typedef unsigned char uch; | ||
32 | typedef unsigned short ush; | ||
33 | typedef unsigned long ulg; | ||
34 | |||
35 | #define WSIZE 0x8000 /* Window size must be at least 32k, */ | ||
36 | /* and a power of two */ | ||
37 | |||
38 | static uch *inbuf; /* input buffer */ | ||
39 | static uch window[WSIZE]; /* Sliding window buffer */ | ||
40 | |||
41 | static unsigned insize = 0; /* valid bytes in inbuf */ | ||
42 | static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ | ||
43 | static unsigned outcnt = 0; /* bytes in output buffer */ | ||
44 | |||
45 | /* gzip flag byte */ | ||
46 | #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ | ||
47 | #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ | ||
48 | #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ | ||
49 | #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ | ||
50 | #define COMMENT 0x10 /* bit 4 set: file comment present */ | ||
51 | #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ | ||
52 | #define RESERVED 0xC0 /* bit 6,7: reserved */ | ||
53 | |||
54 | #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) | ||
55 | |||
56 | /* Diagnostic functions */ | ||
57 | #ifdef DEBUG | ||
58 | # define Assert(cond,msg) {if(!(cond)) error(msg);} | ||
59 | # define Trace(x) fprintf x | ||
60 | # define Tracev(x) {if (verbose) fprintf x ;} | ||
61 | # define Tracevv(x) {if (verbose>1) fprintf x ;} | ||
62 | # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} | ||
63 | # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} | ||
64 | #else | ||
65 | # define Assert(cond,msg) | ||
66 | # define Trace(x) | ||
67 | # define Tracev(x) | ||
68 | # define Tracevv(x) | ||
69 | # define Tracec(c,x) | ||
70 | # define Tracecv(c,x) | ||
71 | #endif | ||
72 | |||
73 | static int fill_inbuf(void); | ||
74 | static void flush_window(void); | ||
75 | static void error(char *m); | ||
76 | static void gzip_mark(void **); | ||
77 | static void gzip_release(void **); | ||
78 | |||
79 | extern char input_data[]; | ||
80 | extern int input_len; | ||
81 | |||
82 | static long bytes_out = 0; | ||
83 | static uch *output_data; | ||
84 | static unsigned long output_ptr = 0; | ||
85 | |||
86 | static void *malloc(int size); | ||
87 | static void free(void *where); | ||
88 | static void error(char *m); | ||
89 | static void gzip_mark(void **); | ||
90 | static void gzip_release(void **); | ||
91 | |||
92 | static void puts(const char *); | ||
93 | |||
94 | extern int _text; /* Defined in vmlinux.lds.S */ | ||
95 | extern int _end; | ||
96 | static unsigned long free_mem_ptr; | ||
97 | static unsigned long free_mem_end_ptr; | ||
98 | |||
99 | #define HEAP_SIZE 0x10000 | ||
100 | |||
101 | #include "../../../../lib/inflate.c" | ||
102 | |||
103 | static void *malloc(int size) | ||
104 | { | ||
105 | void *p; | ||
106 | |||
107 | if (size < 0) | ||
108 | error("Malloc error\n"); | ||
109 | if (free_mem_ptr == 0) | ||
110 | error("Memory error\n"); | ||
111 | |||
112 | free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ | ||
113 | |||
114 | p = (void *) free_mem_ptr; | ||
115 | free_mem_ptr += size; | ||
116 | |||
117 | if (free_mem_ptr >= free_mem_end_ptr) | ||
118 | error("\nOut of memory\n"); | ||
119 | |||
120 | return p; | ||
121 | } | ||
122 | |||
123 | static void free(void *where) | ||
124 | { /* Don't care */ | ||
125 | } | ||
126 | |||
127 | static void gzip_mark(void **ptr) | ||
128 | { | ||
129 | *ptr = (void *) free_mem_ptr; | ||
130 | } | ||
131 | |||
132 | static void gzip_release(void **ptr) | ||
133 | { | ||
134 | free_mem_ptr = (long) *ptr; | ||
135 | } | ||
136 | |||
137 | void puts(const char *s) | ||
138 | { | ||
139 | } | ||
140 | |||
/*
 * Minimal memset for this freestanding decompressor (no libc here).
 *
 * Fills the first n bytes of s with the value of c converted to
 * unsigned char (as the standard contract requires) and returns s.
 * The index is a size_t: the original used a signed int, which both
 * triggered a signed/unsigned comparison against n and could not
 * cover sizes above INT_MAX.
 */
void *memset(void *s, int c, size_t n)
{
	size_t i;
	unsigned char *ss = (unsigned char *) s;

	for (i = 0; i < n; i++)
		ss[i] = (unsigned char) c;
	return s;
}
150 | |||
/*
 * Minimal memcpy for this freestanding decompressor (no libc here).
 *
 * Copies __n bytes from __src to __dest and returns __dest; the
 * regions must not overlap.  The source is now read through a
 * const-qualified pointer -- the original cast silently discarded the
 * const qualifier -- and the index is a size_t to match __n (the
 * original's signed int could not cover sizes above INT_MAX).
 */
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
	size_t i;
	char *d = (char *) __dest;
	const char *s = (const char *) __src;

	for (i = 0; i < __n; i++)
		d[i] = s[i];
	return __dest;
}
160 | |||
161 | /* =========================================================================== | ||
162 | * Fill the input buffer. This is called only when the buffer is empty | ||
163 | * and at least one byte is really needed. | ||
164 | */ | ||
165 | static int fill_inbuf(void) | ||
166 | { | ||
167 | if (insize != 0) { | ||
168 | error("ran out of input data\n"); | ||
169 | } | ||
170 | |||
171 | inbuf = input_data; | ||
172 | insize = input_len; | ||
173 | inptr = 1; | ||
174 | return inbuf[0]; | ||
175 | } | ||
176 | |||
177 | /* =========================================================================== | ||
178 | * Write the output window window[0..outcnt-1] and update crc and bytes_out. | ||
179 | * (Used for the decompressed data only.) | ||
180 | */ | ||
181 | static void flush_window(void) | ||
182 | { | ||
183 | ulg c = crc; /* temporary variable */ | ||
184 | unsigned n; | ||
185 | uch *in, *out, ch; | ||
186 | |||
187 | in = window; | ||
188 | out = &output_data[output_ptr]; | ||
189 | for (n = 0; n < outcnt; n++) { | ||
190 | ch = *out++ = *in++; | ||
191 | c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8); | ||
192 | } | ||
193 | crc = c; | ||
194 | bytes_out += (ulg) outcnt; | ||
195 | output_ptr += (ulg) outcnt; | ||
196 | outcnt = 0; | ||
197 | puts("."); | ||
198 | } | ||
199 | |||
/*
 * Report a fatal condition and stop.  The message is bracketed by
 * blank lines, then the CPU is parked in an infinite loop -- there is
 * nothing to return to inside the decompressor.  Never returns.
 */
static void error(char *msg)
{
	puts("\n\n");
	puts(msg);
	puts("\n\n -- System halted");

	for (;;)
		;		/* Halt */
}
208 | |||
209 | #define STACK_SIZE (4096) | ||
210 | long __attribute__ ((aligned(8))) user_stack[STACK_SIZE]; | ||
211 | long *stack_start = &user_stack[STACK_SIZE]; | ||
212 | |||
213 | void decompress_kernel(void) | ||
214 | { | ||
215 | output_data = (uch *) (CONFIG_MEMORY_START + 0x2000); | ||
216 | free_mem_ptr = (unsigned long) &_end; | ||
217 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | ||
218 | |||
219 | makecrc(); | ||
220 | puts("Uncompressing Linux... "); | ||
221 | cache_control(CACHE_ENABLE); | ||
222 | gunzip(); | ||
223 | puts("\n"); | ||
224 | |||
225 | #if 0 | ||
226 | /* When booting from ROM may want to do something like this if the | ||
227 | * boot loader doesn't. | ||
228 | */ | ||
229 | |||
230 | /* Set up the parameters and command line */ | ||
231 | { | ||
232 | volatile unsigned int *parambase = | ||
233 | (int *) (CONFIG_MEMORY_START + 0x1000); | ||
234 | |||
235 | parambase[0] = 0x1; /* MOUNT_ROOT_RDONLY */ | ||
236 | parambase[1] = 0x0; /* RAMDISK_FLAGS */ | ||
237 | parambase[2] = 0x0200; /* ORIG_ROOT_DEV */ | ||
238 | parambase[3] = 0x0; /* LOADER_TYPE */ | ||
239 | parambase[4] = 0x0; /* INITRD_START */ | ||
240 | parambase[5] = 0x0; /* INITRD_SIZE */ | ||
241 | parambase[6] = 0; | ||
242 | |||
243 | strcpy((char *) ((int) parambase + 0x100), | ||
244 | "console=ttySC0,38400"); | ||
245 | } | ||
246 | #endif | ||
247 | |||
248 | puts("Ok, booting the kernel.\n"); | ||
249 | |||
250 | cache_control(CACHE_DISABLE); | ||
251 | } | ||
diff --git a/arch/sh64/boot/compressed/vmlinux.lds.S b/arch/sh64/boot/compressed/vmlinux.lds.S new file mode 100644 index 000000000000..15a737d9bba8 --- /dev/null +++ b/arch/sh64/boot/compressed/vmlinux.lds.S | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * ld script to make compressed SuperH/shmedia Linux kernel+decompression | ||
3 | * bootstrap | ||
4 | * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | |||
9 | #ifdef CONFIG_LITTLE_ENDIAN | ||
10 | /* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */ | ||
11 | #define NOP 0x6ff0fff0 | ||
12 | #else | ||
13 | /* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */ | ||
14 | #define NOP 0xf0fff06f | ||
15 | #endif | ||
16 | |||
17 | OUTPUT_FORMAT("elf32-sh64-linux") | ||
18 | OUTPUT_ARCH(sh) | ||
19 | ENTRY(_start) | ||
20 | |||
21 | #define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section)) | ||
22 | #define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align)) | ||
23 | |||
24 | SECTIONS | ||
25 | { | ||
26 | _text = .; /* Text and read-only data */ | ||
27 | |||
28 | .text : { | ||
29 | *(.text) | ||
30 | *(.text64) | ||
31 | *(.text..SHmedia32) | ||
32 | *(.fixup) | ||
33 | *(.gnu.warning) | ||
34 | } = NOP | ||
35 | . = ALIGN(4); | ||
36 | .rodata : { *(.rodata) } | ||
37 | |||
38 | /* There is no 'real' reason for eight byte alignment, four would work | ||
39 | * as well, but gdb downloads much (*4) faster with this. | ||
40 | */ | ||
41 | . = ALIGN(8); | ||
42 | .image : { *(.image) } | ||
43 | . = ALIGN(4); | ||
44 | _etext = .; /* End of text section */ | ||
45 | |||
46 | .data : /* Data */ | ||
47 | FOLLOWING(.image, 4) | ||
48 | { | ||
49 | _data = .; | ||
50 | *(.data) | ||
51 | } | ||
52 | _data_image = LOADADDR(.data);/* Address of data section in ROM */ | ||
53 | |||
54 | _edata = .; /* End of data section */ | ||
55 | |||
56 | .stack : { stack = .; _stack = .; } | ||
57 | |||
58 | . = ALIGN(4); | ||
59 | __bss_start = .; /* BSS */ | ||
60 | .bss : { | ||
61 | *(.bss) | ||
62 | } | ||
63 | . = ALIGN(4); | ||
64 | _end = . ; | ||
65 | } | ||
diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig new file mode 100644 index 000000000000..48f27407d5e4 --- /dev/null +++ b/arch/sh64/configs/cayman_defconfig | |||
@@ -0,0 +1,837 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.11 | ||
4 | # Fri Feb 25 18:14:31 2005 | ||
5 | # | ||
6 | CONFIG_SUPERH=y | ||
7 | CONFIG_SUPERH64=y | ||
8 | CONFIG_MMU=y | ||
9 | CONFIG_UID16=y | ||
10 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
11 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
12 | CONFIG_LOG_BUF_SHIFT=14 | ||
13 | |||
14 | # | ||
15 | # Code maturity level options | ||
16 | # | ||
17 | CONFIG_EXPERIMENTAL=y | ||
18 | CONFIG_CLEAN_COMPILE=y | ||
19 | CONFIG_BROKEN_ON_SMP=y | ||
20 | |||
21 | # | ||
22 | # General setup | ||
23 | # | ||
24 | CONFIG_LOCALVERSION="" | ||
25 | CONFIG_SWAP=y | ||
26 | # CONFIG_SYSVIPC is not set | ||
27 | CONFIG_POSIX_MQUEUE=y | ||
28 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
29 | CONFIG_SYSCTL=y | ||
30 | # CONFIG_AUDIT is not set | ||
31 | # CONFIG_HOTPLUG is not set | ||
32 | CONFIG_KOBJECT_UEVENT=y | ||
33 | # CONFIG_IKCONFIG is not set | ||
34 | # CONFIG_EMBEDDED is not set | ||
35 | CONFIG_KALLSYMS=y | ||
36 | # CONFIG_KALLSYMS_ALL is not set | ||
37 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
38 | CONFIG_FUTEX=y | ||
39 | CONFIG_EPOLL=y | ||
40 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
41 | CONFIG_SHMEM=y | ||
42 | CONFIG_CC_ALIGN_FUNCTIONS=0 | ||
43 | CONFIG_CC_ALIGN_LABELS=0 | ||
44 | CONFIG_CC_ALIGN_LOOPS=0 | ||
45 | CONFIG_CC_ALIGN_JUMPS=0 | ||
46 | # CONFIG_TINY_SHMEM is not set | ||
47 | |||
48 | # | ||
49 | # Loadable module support | ||
50 | # | ||
51 | # CONFIG_MODULES is not set | ||
52 | |||
53 | # | ||
54 | # System type | ||
55 | # | ||
56 | # CONFIG_SH_GENERIC is not set | ||
57 | # CONFIG_SH_SIMULATOR is not set | ||
58 | CONFIG_SH_CAYMAN=y | ||
59 | # CONFIG_SH_ROMRAM is not set | ||
60 | # CONFIG_SH_HARP is not set | ||
61 | CONFIG_CPU_SH5=y | ||
62 | CONFIG_CPU_SUBTYPE_SH5_101=y | ||
63 | # CONFIG_CPU_SUBTYPE_SH5_103 is not set | ||
64 | CONFIG_LITTLE_ENDIAN=y | ||
65 | # CONFIG_BIG_ENDIAN is not set | ||
66 | CONFIG_SH_FPU=y | ||
67 | # CONFIG_SH64_FPU_DENORM_FLUSH is not set | ||
68 | CONFIG_SH64_PGTABLE_2_LEVEL=y | ||
69 | # CONFIG_SH64_PGTABLE_3_LEVEL is not set | ||
70 | CONFIG_HUGETLB_PAGE_SIZE_64K=y | ||
71 | # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set | ||
72 | # CONFIG_HUGETLB_PAGE_SIZE_512MB is not set | ||
73 | CONFIG_SH64_USER_MISALIGNED_FIXUP=y | ||
74 | |||
75 | # | ||
76 | # Memory options | ||
77 | # | ||
78 | CONFIG_CACHED_MEMORY_OFFSET=0x20000000 | ||
79 | CONFIG_MEMORY_START=0x80000000 | ||
80 | CONFIG_MEMORY_SIZE_IN_MB=128 | ||
81 | |||
82 | # | ||
83 | # Cache options | ||
84 | # | ||
85 | # CONFIG_DCACHE_DISABLED is not set | ||
86 | CONFIG_DCACHE_WRITE_BACK=y | ||
87 | # CONFIG_DCACHE_WRITE_THROUGH is not set | ||
88 | # CONFIG_ICACHE_DISABLED is not set | ||
89 | CONFIG_PCIDEVICE_MEMORY_START=C0000000 | ||
90 | CONFIG_DEVICE_MEMORY_START=E0000000 | ||
91 | CONFIG_FLASH_MEMORY_START=0x00000000 | ||
92 | CONFIG_PCI_BLOCK_START=0x40000000 | ||
93 | |||
94 | # | ||
95 | # CPU Subtype specific options | ||
96 | # | ||
97 | CONFIG_SH64_ID2815_WORKAROUND=y | ||
98 | |||
99 | # | ||
100 | # Misc options | ||
101 | # | ||
102 | CONFIG_HEARTBEAT=y | ||
103 | CONFIG_HDSP253_LED=y | ||
104 | CONFIG_SH_DMA=y | ||
105 | CONFIG_PREEMPT=y | ||
106 | |||
107 | # | ||
108 | # Bus options (PCI, PCMCIA, EISA, MCA, ISA) | ||
109 | # | ||
110 | CONFIG_SUPERHYWAY=y | ||
111 | CONFIG_PCI=y | ||
112 | CONFIG_SH_PCIDMA_NONCOHERENT=y | ||
113 | CONFIG_PCI_LEGACY_PROC=y | ||
114 | CONFIG_PCI_NAMES=y | ||
115 | |||
116 | # | ||
117 | # PCCARD (PCMCIA/CardBus) support | ||
118 | # | ||
119 | # CONFIG_PCCARD is not set | ||
120 | |||
121 | # | ||
122 | # PC-card bridges | ||
123 | # | ||
124 | |||
125 | # | ||
126 | # PCI Hotplug Support | ||
127 | # | ||
128 | # CONFIG_HOTPLUG_PCI is not set | ||
129 | |||
130 | # | ||
131 | # Executable file formats | ||
132 | # | ||
133 | CONFIG_BINFMT_ELF=y | ||
134 | # CONFIG_BINFMT_FLAT is not set | ||
135 | # CONFIG_BINFMT_MISC is not set | ||
136 | |||
137 | # | ||
138 | # Device Drivers | ||
139 | # | ||
140 | |||
141 | # | ||
142 | # Generic Driver Options | ||
143 | # | ||
144 | CONFIG_STANDALONE=y | ||
145 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
146 | # CONFIG_FW_LOADER is not set | ||
147 | # CONFIG_DEBUG_DRIVER is not set | ||
148 | |||
149 | # | ||
150 | # Memory Technology Devices (MTD) | ||
151 | # | ||
152 | # CONFIG_MTD is not set | ||
153 | |||
154 | # | ||
155 | # Parallel port support | ||
156 | # | ||
157 | # CONFIG_PARPORT is not set | ||
158 | |||
159 | # | ||
160 | # Plug and Play support | ||
161 | # | ||
162 | |||
163 | # | ||
164 | # Block devices | ||
165 | # | ||
166 | # CONFIG_BLK_DEV_FD is not set | ||
167 | # CONFIG_BLK_CPQ_DA is not set | ||
168 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
169 | # CONFIG_BLK_DEV_DAC960 is not set | ||
170 | # CONFIG_BLK_DEV_UMEM is not set | ||
171 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
172 | CONFIG_BLK_DEV_LOOP=y | ||
173 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
174 | # CONFIG_BLK_DEV_NBD is not set | ||
175 | # CONFIG_BLK_DEV_SX8 is not set | ||
176 | CONFIG_BLK_DEV_RAM=y | ||
177 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
178 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
179 | # CONFIG_BLK_DEV_INITRD is not set | ||
180 | CONFIG_INITRAMFS_SOURCE="" | ||
181 | # CONFIG_LBD is not set | ||
182 | # CONFIG_CDROM_PKTCDVD is not set | ||
183 | |||
184 | # | ||
185 | # IO Schedulers | ||
186 | # | ||
187 | CONFIG_IOSCHED_NOOP=y | ||
188 | CONFIG_IOSCHED_AS=y | ||
189 | CONFIG_IOSCHED_DEADLINE=y | ||
190 | CONFIG_IOSCHED_CFQ=y | ||
191 | # CONFIG_ATA_OVER_ETH is not set | ||
192 | |||
193 | # | ||
194 | # ATA/ATAPI/MFM/RLL support | ||
195 | # | ||
196 | # CONFIG_IDE is not set | ||
197 | |||
198 | # | ||
199 | # SCSI device support | ||
200 | # | ||
201 | CONFIG_SCSI=y | ||
202 | CONFIG_SCSI_PROC_FS=y | ||
203 | |||
204 | # | ||
205 | # SCSI support type (disk, tape, CD-ROM) | ||
206 | # | ||
207 | CONFIG_BLK_DEV_SD=y | ||
208 | # CONFIG_CHR_DEV_ST is not set | ||
209 | # CONFIG_CHR_DEV_OSST is not set | ||
210 | # CONFIG_BLK_DEV_SR is not set | ||
211 | # CONFIG_CHR_DEV_SG is not set | ||
212 | |||
213 | # | ||
214 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
215 | # | ||
216 | CONFIG_SCSI_MULTI_LUN=y | ||
217 | # CONFIG_SCSI_CONSTANTS is not set | ||
218 | # CONFIG_SCSI_LOGGING is not set | ||
219 | |||
220 | # | ||
221 | # SCSI Transport Attributes | ||
222 | # | ||
223 | CONFIG_SCSI_SPI_ATTRS=y | ||
224 | # CONFIG_SCSI_FC_ATTRS is not set | ||
225 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
226 | |||
227 | # | ||
228 | # SCSI low-level drivers | ||
229 | # | ||
230 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | ||
231 | # CONFIG_SCSI_3W_9XXX is not set | ||
232 | # CONFIG_SCSI_ACARD is not set | ||
233 | # CONFIG_SCSI_AACRAID is not set | ||
234 | # CONFIG_SCSI_AIC7XXX is not set | ||
235 | # CONFIG_SCSI_AIC7XXX_OLD is not set | ||
236 | # CONFIG_SCSI_AIC79XX is not set | ||
237 | # CONFIG_SCSI_DPT_I2O is not set | ||
238 | # CONFIG_MEGARAID_NEWGEN is not set | ||
239 | # CONFIG_MEGARAID_LEGACY is not set | ||
240 | # CONFIG_SCSI_SATA is not set | ||
241 | # CONFIG_SCSI_BUSLOGIC is not set | ||
242 | # CONFIG_SCSI_DMX3191D is not set | ||
243 | # CONFIG_SCSI_EATA is not set | ||
244 | # CONFIG_SCSI_EATA_PIO is not set | ||
245 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | ||
246 | # CONFIG_SCSI_GDTH is not set | ||
247 | # CONFIG_SCSI_IPS is not set | ||
248 | # CONFIG_SCSI_INITIO is not set | ||
249 | # CONFIG_SCSI_INIA100 is not set | ||
250 | CONFIG_SCSI_SYM53C8XX_2=y | ||
251 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 | ||
252 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 | ||
253 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 | ||
254 | # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set | ||
255 | # CONFIG_SCSI_IPR is not set | ||
256 | # CONFIG_SCSI_QLOGIC_ISP is not set | ||
257 | # CONFIG_SCSI_QLOGIC_FC is not set | ||
258 | # CONFIG_SCSI_QLOGIC_1280 is not set | ||
259 | CONFIG_SCSI_QLA2XXX=y | ||
260 | # CONFIG_SCSI_QLA21XX is not set | ||
261 | # CONFIG_SCSI_QLA22XX is not set | ||
262 | # CONFIG_SCSI_QLA2300 is not set | ||
263 | # CONFIG_SCSI_QLA2322 is not set | ||
264 | # CONFIG_SCSI_QLA6312 is not set | ||
265 | # CONFIG_SCSI_DC395x is not set | ||
266 | # CONFIG_SCSI_DC390T is not set | ||
267 | # CONFIG_SCSI_NSP32 is not set | ||
268 | # CONFIG_SCSI_DEBUG is not set | ||
269 | |||
270 | # | ||
271 | # Multi-device support (RAID and LVM) | ||
272 | # | ||
273 | # CONFIG_MD is not set | ||
274 | |||
275 | # | ||
276 | # Fusion MPT device support | ||
277 | # | ||
278 | # CONFIG_FUSION is not set | ||
279 | |||
280 | # | ||
281 | # IEEE 1394 (FireWire) support | ||
282 | # | ||
283 | # CONFIG_IEEE1394 is not set | ||
284 | |||
285 | # | ||
286 | # I2O device support | ||
287 | # | ||
288 | # CONFIG_I2O is not set | ||
289 | |||
290 | # | ||
291 | # Networking support | ||
292 | # | ||
293 | CONFIG_NET=y | ||
294 | |||
295 | # | ||
296 | # Networking options | ||
297 | # | ||
298 | CONFIG_PACKET=y | ||
299 | # CONFIG_PACKET_MMAP is not set | ||
300 | # CONFIG_NETLINK_DEV is not set | ||
301 | CONFIG_UNIX=y | ||
302 | # CONFIG_NET_KEY is not set | ||
303 | CONFIG_INET=y | ||
304 | # CONFIG_IP_MULTICAST is not set | ||
305 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
306 | CONFIG_IP_PNP=y | ||
307 | # CONFIG_IP_PNP_DHCP is not set | ||
308 | # CONFIG_IP_PNP_BOOTP is not set | ||
309 | # CONFIG_IP_PNP_RARP is not set | ||
310 | # CONFIG_NET_IPIP is not set | ||
311 | # CONFIG_NET_IPGRE is not set | ||
312 | # CONFIG_ARPD is not set | ||
313 | # CONFIG_SYN_COOKIES is not set | ||
314 | # CONFIG_INET_AH is not set | ||
315 | # CONFIG_INET_ESP is not set | ||
316 | # CONFIG_INET_IPCOMP is not set | ||
317 | # CONFIG_INET_TUNNEL is not set | ||
318 | CONFIG_IP_TCPDIAG=y | ||
319 | # CONFIG_IP_TCPDIAG_IPV6 is not set | ||
320 | # CONFIG_IPV6 is not set | ||
321 | # CONFIG_NETFILTER is not set | ||
322 | |||
323 | # | ||
324 | # SCTP Configuration (EXPERIMENTAL) | ||
325 | # | ||
326 | # CONFIG_IP_SCTP is not set | ||
327 | # CONFIG_ATM is not set | ||
328 | # CONFIG_BRIDGE is not set | ||
329 | # CONFIG_VLAN_8021Q is not set | ||
330 | # CONFIG_DECNET is not set | ||
331 | # CONFIG_LLC2 is not set | ||
332 | # CONFIG_IPX is not set | ||
333 | # CONFIG_ATALK is not set | ||
334 | # CONFIG_X25 is not set | ||
335 | # CONFIG_LAPB is not set | ||
336 | # CONFIG_NET_DIVERT is not set | ||
337 | # CONFIG_ECONET is not set | ||
338 | # CONFIG_WAN_ROUTER is not set | ||
339 | |||
340 | # | ||
341 | # QoS and/or fair queueing | ||
342 | # | ||
343 | # CONFIG_NET_SCHED is not set | ||
344 | # CONFIG_NET_CLS_ROUTE is not set | ||
345 | |||
346 | # | ||
347 | # Network testing | ||
348 | # | ||
349 | # CONFIG_NET_PKTGEN is not set | ||
350 | # CONFIG_NETPOLL is not set | ||
351 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
352 | # CONFIG_HAMRADIO is not set | ||
353 | # CONFIG_IRDA is not set | ||
354 | # CONFIG_BT is not set | ||
355 | CONFIG_NETDEVICES=y | ||
356 | # CONFIG_DUMMY is not set | ||
357 | # CONFIG_BONDING is not set | ||
358 | # CONFIG_EQUALIZER is not set | ||
359 | # CONFIG_TUN is not set | ||
360 | |||
361 | # | ||
362 | # ARCnet devices | ||
363 | # | ||
364 | # CONFIG_ARCNET is not set | ||
365 | |||
366 | # | ||
367 | # Ethernet (10 or 100Mbit) | ||
368 | # | ||
369 | CONFIG_NET_ETHERNET=y | ||
370 | # CONFIG_MII is not set | ||
371 | # CONFIG_STNIC is not set | ||
372 | # CONFIG_HAPPYMEAL is not set | ||
373 | # CONFIG_SUNGEM is not set | ||
374 | # CONFIG_NET_VENDOR_3COM is not set | ||
375 | |||
376 | # | ||
377 | # Tulip family network device support | ||
378 | # | ||
379 | CONFIG_NET_TULIP=y | ||
380 | # CONFIG_DE2104X is not set | ||
381 | CONFIG_TULIP=y | ||
382 | # CONFIG_TULIP_MWI is not set | ||
383 | # CONFIG_TULIP_MMIO is not set | ||
384 | # CONFIG_TULIP_NAPI is not set | ||
385 | # CONFIG_DE4X5 is not set | ||
386 | # CONFIG_WINBOND_840 is not set | ||
387 | # CONFIG_DM9102 is not set | ||
388 | # CONFIG_HP100 is not set | ||
389 | CONFIG_NET_PCI=y | ||
390 | # CONFIG_PCNET32 is not set | ||
391 | # CONFIG_AMD8111_ETH is not set | ||
392 | # CONFIG_ADAPTEC_STARFIRE is not set | ||
393 | # CONFIG_B44 is not set | ||
394 | # CONFIG_FORCEDETH is not set | ||
395 | # CONFIG_DGRS is not set | ||
396 | # CONFIG_EEPRO100 is not set | ||
397 | # CONFIG_E100 is not set | ||
398 | # CONFIG_FEALNX is not set | ||
399 | # CONFIG_NATSEMI is not set | ||
400 | # CONFIG_NE2K_PCI is not set | ||
401 | # CONFIG_8139CP is not set | ||
402 | # CONFIG_8139TOO is not set | ||
403 | # CONFIG_SIS900 is not set | ||
404 | # CONFIG_EPIC100 is not set | ||
405 | # CONFIG_SUNDANCE is not set | ||
406 | # CONFIG_TLAN is not set | ||
407 | # CONFIG_VIA_RHINE is not set | ||
408 | |||
409 | # | ||
410 | # Ethernet (1000 Mbit) | ||
411 | # | ||
412 | # CONFIG_ACENIC is not set | ||
413 | # CONFIG_DL2K is not set | ||
414 | # CONFIG_E1000 is not set | ||
415 | # CONFIG_NS83820 is not set | ||
416 | # CONFIG_HAMACHI is not set | ||
417 | # CONFIG_YELLOWFIN is not set | ||
418 | # CONFIG_R8169 is not set | ||
419 | # CONFIG_SK98LIN is not set | ||
420 | # CONFIG_VIA_VELOCITY is not set | ||
421 | # CONFIG_TIGON3 is not set | ||
422 | |||
423 | # | ||
424 | # Ethernet (10000 Mbit) | ||
425 | # | ||
426 | # CONFIG_IXGB is not set | ||
427 | # CONFIG_S2IO is not set | ||
428 | |||
429 | # | ||
430 | # Token Ring devices | ||
431 | # | ||
432 | # CONFIG_TR is not set | ||
433 | |||
434 | # | ||
435 | # Wireless LAN (non-hamradio) | ||
436 | # | ||
437 | # CONFIG_NET_RADIO is not set | ||
438 | |||
439 | # | ||
440 | # Wan interfaces | ||
441 | # | ||
442 | # CONFIG_WAN is not set | ||
443 | # CONFIG_FDDI is not set | ||
444 | # CONFIG_HIPPI is not set | ||
445 | # CONFIG_PPP is not set | ||
446 | # CONFIG_SLIP is not set | ||
447 | # CONFIG_NET_FC is not set | ||
448 | # CONFIG_SHAPER is not set | ||
449 | # CONFIG_NETCONSOLE is not set | ||
450 | |||
451 | # | ||
452 | # ISDN subsystem | ||
453 | # | ||
454 | # CONFIG_ISDN is not set | ||
455 | |||
456 | # | ||
457 | # Telephony Support | ||
458 | # | ||
459 | # CONFIG_PHONE is not set | ||
460 | |||
461 | # | ||
462 | # Input device support | ||
463 | # | ||
464 | CONFIG_INPUT=y | ||
465 | |||
466 | # | ||
467 | # Userland interfaces | ||
468 | # | ||
469 | CONFIG_INPUT_MOUSEDEV=y | ||
470 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
471 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
472 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
473 | # CONFIG_INPUT_JOYDEV is not set | ||
474 | # CONFIG_INPUT_TSDEV is not set | ||
475 | # CONFIG_INPUT_EVDEV is not set | ||
476 | # CONFIG_INPUT_EVBUG is not set | ||
477 | |||
478 | # | ||
479 | # Input I/O drivers | ||
480 | # | ||
481 | # CONFIG_GAMEPORT is not set | ||
482 | CONFIG_SOUND_GAMEPORT=y | ||
483 | CONFIG_SERIO=y | ||
484 | CONFIG_SERIO_I8042=y | ||
485 | CONFIG_SERIO_SERPORT=y | ||
486 | # CONFIG_SERIO_CT82C710 is not set | ||
487 | # CONFIG_SERIO_PCIPS2 is not set | ||
488 | CONFIG_SERIO_LIBPS2=y | ||
489 | # CONFIG_SERIO_RAW is not set | ||
490 | |||
491 | # | ||
492 | # Input Device Drivers | ||
493 | # | ||
494 | CONFIG_INPUT_KEYBOARD=y | ||
495 | CONFIG_KEYBOARD_ATKBD=y | ||
496 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
497 | # CONFIG_KEYBOARD_LKKBD is not set | ||
498 | # CONFIG_KEYBOARD_XTKBD is not set | ||
499 | # CONFIG_KEYBOARD_NEWTON is not set | ||
500 | CONFIG_INPUT_MOUSE=y | ||
501 | CONFIG_MOUSE_PS2=y | ||
502 | # CONFIG_MOUSE_SERIAL is not set | ||
503 | # CONFIG_MOUSE_VSXXXAA is not set | ||
504 | # CONFIG_INPUT_JOYSTICK is not set | ||
505 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
506 | # CONFIG_INPUT_MISC is not set | ||
507 | |||
508 | # | ||
509 | # Character devices | ||
510 | # | ||
511 | CONFIG_VT=y | ||
512 | CONFIG_VT_CONSOLE=y | ||
513 | CONFIG_HW_CONSOLE=y | ||
514 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
515 | |||
516 | # | ||
517 | # Serial drivers | ||
518 | # | ||
519 | # CONFIG_SERIAL_8250 is not set | ||
520 | |||
521 | # | ||
522 | # Non-8250 serial port support | ||
523 | # | ||
524 | CONFIG_SERIAL_SH_SCI=y | ||
525 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | ||
526 | CONFIG_SERIAL_CORE=y | ||
527 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
528 | CONFIG_UNIX98_PTYS=y | ||
529 | CONFIG_LEGACY_PTYS=y | ||
530 | CONFIG_LEGACY_PTY_COUNT=256 | ||
531 | |||
532 | # | ||
533 | # IPMI | ||
534 | # | ||
535 | # CONFIG_IPMI_HANDLER is not set | ||
536 | |||
537 | # | ||
538 | # Watchdog Cards | ||
539 | # | ||
540 | CONFIG_WATCHDOG=y | ||
541 | # CONFIG_WATCHDOG_NOWAYOUT is not set | ||
542 | |||
543 | # | ||
544 | # Watchdog Device Drivers | ||
545 | # | ||
546 | # CONFIG_SOFT_WATCHDOG is not set | ||
547 | # CONFIG_SH_WDT is not set | ||
548 | |||
549 | # | ||
550 | # PCI-based Watchdog Cards | ||
551 | # | ||
552 | # CONFIG_PCIPCWATCHDOG is not set | ||
553 | # CONFIG_WDTPCI is not set | ||
554 | # CONFIG_RTC is not set | ||
555 | # CONFIG_GEN_RTC is not set | ||
556 | # CONFIG_DTLK is not set | ||
557 | # CONFIG_R3964 is not set | ||
558 | # CONFIG_APPLICOM is not set | ||
559 | |||
560 | # | ||
561 | # Ftape, the floppy tape device driver | ||
562 | # | ||
563 | # CONFIG_DRM is not set | ||
564 | # CONFIG_RAW_DRIVER is not set | ||
565 | |||
566 | # | ||
567 | # I2C support | ||
568 | # | ||
569 | # CONFIG_I2C is not set | ||
570 | |||
571 | # | ||
572 | # Dallas's 1-wire bus | ||
573 | # | ||
574 | # CONFIG_W1 is not set | ||
575 | |||
576 | # | ||
577 | # Misc devices | ||
578 | # | ||
579 | |||
580 | # | ||
581 | # Multimedia devices | ||
582 | # | ||
583 | # CONFIG_VIDEO_DEV is not set | ||
584 | |||
585 | # | ||
586 | # Digital Video Broadcasting Devices | ||
587 | # | ||
588 | # CONFIG_DVB is not set | ||
589 | |||
590 | # | ||
591 | # Graphics support | ||
592 | # | ||
593 | CONFIG_FB=y | ||
594 | CONFIG_FB_MODE_HELPERS=y | ||
595 | # CONFIG_FB_TILEBLITTING is not set | ||
596 | # CONFIG_FB_CIRRUS is not set | ||
597 | # CONFIG_FB_PM2 is not set | ||
598 | # CONFIG_FB_CYBER2000 is not set | ||
599 | # CONFIG_FB_ASILIANT is not set | ||
600 | # CONFIG_FB_IMSTT is not set | ||
601 | # CONFIG_FB_EPSON1355 is not set | ||
602 | # CONFIG_FB_RIVA is not set | ||
603 | # CONFIG_FB_MATROX is not set | ||
604 | # CONFIG_FB_RADEON_OLD is not set | ||
605 | # CONFIG_FB_RADEON is not set | ||
606 | # CONFIG_FB_ATY128 is not set | ||
607 | # CONFIG_FB_ATY is not set | ||
608 | # CONFIG_FB_SAVAGE is not set | ||
609 | # CONFIG_FB_SIS is not set | ||
610 | # CONFIG_FB_NEOMAGIC is not set | ||
611 | CONFIG_FB_KYRO=y | ||
612 | # CONFIG_FB_3DFX is not set | ||
613 | # CONFIG_FB_VOODOO1 is not set | ||
614 | # CONFIG_FB_TRIDENT is not set | ||
615 | # CONFIG_FB_VIRTUAL is not set | ||
616 | |||
617 | # | ||
618 | # Console display driver support | ||
619 | # | ||
620 | # CONFIG_VGA_CONSOLE is not set | ||
621 | CONFIG_DUMMY_CONSOLE=y | ||
622 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
623 | CONFIG_FONTS=y | ||
624 | # CONFIG_FONT_8x8 is not set | ||
625 | CONFIG_FONT_8x16=y | ||
626 | # CONFIG_FONT_6x11 is not set | ||
627 | # CONFIG_FONT_PEARL_8x8 is not set | ||
628 | # CONFIG_FONT_ACORN_8x8 is not set | ||
629 | # CONFIG_FONT_MINI_4x6 is not set | ||
630 | # CONFIG_FONT_SUN8x16 is not set | ||
631 | # CONFIG_FONT_SUN12x22 is not set | ||
632 | |||
633 | # | ||
634 | # Logo configuration | ||
635 | # | ||
636 | CONFIG_LOGO=y | ||
637 | # CONFIG_LOGO_LINUX_MONO is not set | ||
638 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
639 | # CONFIG_LOGO_LINUX_CLUT224 is not set | ||
640 | # CONFIG_LOGO_SUPERH_MONO is not set | ||
641 | # CONFIG_LOGO_SUPERH_VGA16 is not set | ||
642 | CONFIG_LOGO_SUPERH_CLUT224=y | ||
643 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
644 | |||
645 | # | ||
646 | # Sound | ||
647 | # | ||
648 | # CONFIG_SOUND is not set | ||
649 | |||
650 | # | ||
651 | # USB support | ||
652 | # | ||
653 | # CONFIG_USB is not set | ||
654 | CONFIG_USB_ARCH_HAS_HCD=y | ||
655 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
656 | |||
657 | # | ||
658 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information | ||
659 | # | ||
660 | |||
661 | # | ||
662 | # USB Gadget Support | ||
663 | # | ||
664 | # CONFIG_USB_GADGET is not set | ||
665 | |||
666 | # | ||
667 | # MMC/SD Card support | ||
668 | # | ||
669 | # CONFIG_MMC is not set | ||
670 | |||
671 | # | ||
672 | # InfiniBand support | ||
673 | # | ||
674 | # CONFIG_INFINIBAND is not set | ||
675 | |||
676 | # | ||
677 | # File systems | ||
678 | # | ||
679 | CONFIG_EXT2_FS=y | ||
680 | # CONFIG_EXT2_FS_XATTR is not set | ||
681 | CONFIG_EXT3_FS=y | ||
682 | CONFIG_EXT3_FS_XATTR=y | ||
683 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
684 | # CONFIG_EXT3_FS_SECURITY is not set | ||
685 | CONFIG_JBD=y | ||
686 | # CONFIG_JBD_DEBUG is not set | ||
687 | CONFIG_FS_MBCACHE=y | ||
688 | # CONFIG_REISERFS_FS is not set | ||
689 | # CONFIG_JFS_FS is not set | ||
690 | |||
691 | # | ||
692 | # XFS support | ||
693 | # | ||
694 | # CONFIG_XFS_FS is not set | ||
695 | CONFIG_MINIX_FS=y | ||
696 | CONFIG_ROMFS_FS=y | ||
697 | # CONFIG_QUOTA is not set | ||
698 | CONFIG_DNOTIFY=y | ||
699 | # CONFIG_AUTOFS_FS is not set | ||
700 | # CONFIG_AUTOFS4_FS is not set | ||
701 | |||
702 | # | ||
703 | # CD-ROM/DVD Filesystems | ||
704 | # | ||
705 | # CONFIG_ISO9660_FS is not set | ||
706 | # CONFIG_UDF_FS is not set | ||
707 | |||
708 | # | ||
709 | # DOS/FAT/NT Filesystems | ||
710 | # | ||
711 | # CONFIG_MSDOS_FS is not set | ||
712 | # CONFIG_VFAT_FS is not set | ||
713 | # CONFIG_NTFS_FS is not set | ||
714 | |||
715 | # | ||
716 | # Pseudo filesystems | ||
717 | # | ||
718 | CONFIG_PROC_FS=y | ||
719 | CONFIG_PROC_KCORE=y | ||
720 | CONFIG_SYSFS=y | ||
721 | # CONFIG_DEVFS_FS is not set | ||
722 | # CONFIG_DEVPTS_FS_XATTR is not set | ||
723 | CONFIG_TMPFS=y | ||
724 | # CONFIG_TMPFS_XATTR is not set | ||
725 | CONFIG_HUGETLBFS=y | ||
726 | CONFIG_HUGETLB_PAGE=y | ||
727 | CONFIG_RAMFS=y | ||
728 | |||
729 | # | ||
730 | # Miscellaneous filesystems | ||
731 | # | ||
732 | # CONFIG_ADFS_FS is not set | ||
733 | # CONFIG_AFFS_FS is not set | ||
734 | # CONFIG_HFS_FS is not set | ||
735 | # CONFIG_HFSPLUS_FS is not set | ||
736 | # CONFIG_BEFS_FS is not set | ||
737 | # CONFIG_BFS_FS is not set | ||
738 | # CONFIG_EFS_FS is not set | ||
739 | # CONFIG_CRAMFS is not set | ||
740 | # CONFIG_VXFS_FS is not set | ||
741 | # CONFIG_HPFS_FS is not set | ||
742 | # CONFIG_QNX4FS_FS is not set | ||
743 | # CONFIG_SYSV_FS is not set | ||
744 | # CONFIG_UFS_FS is not set | ||
745 | |||
746 | # | ||
747 | # Network File Systems | ||
748 | # | ||
749 | CONFIG_NFS_FS=y | ||
750 | CONFIG_NFS_V3=y | ||
751 | # CONFIG_NFS_V4 is not set | ||
752 | # CONFIG_NFS_DIRECTIO is not set | ||
753 | # CONFIG_NFSD is not set | ||
754 | CONFIG_ROOT_NFS=y | ||
755 | CONFIG_LOCKD=y | ||
756 | CONFIG_LOCKD_V4=y | ||
757 | CONFIG_SUNRPC=y | ||
758 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
759 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
760 | # CONFIG_SMB_FS is not set | ||
761 | # CONFIG_CIFS is not set | ||
762 | # CONFIG_NCP_FS is not set | ||
763 | # CONFIG_CODA_FS is not set | ||
764 | # CONFIG_AFS_FS is not set | ||
765 | |||
766 | # | ||
767 | # Partition Types | ||
768 | # | ||
769 | CONFIG_PARTITION_ADVANCED=y | ||
770 | # CONFIG_ACORN_PARTITION is not set | ||
771 | # CONFIG_OSF_PARTITION is not set | ||
772 | # CONFIG_AMIGA_PARTITION is not set | ||
773 | # CONFIG_ATARI_PARTITION is not set | ||
774 | # CONFIG_MAC_PARTITION is not set | ||
775 | CONFIG_MSDOS_PARTITION=y | ||
776 | # CONFIG_BSD_DISKLABEL is not set | ||
777 | # CONFIG_MINIX_SUBPARTITION is not set | ||
778 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
779 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
780 | # CONFIG_LDM_PARTITION is not set | ||
781 | # CONFIG_SGI_PARTITION is not set | ||
782 | # CONFIG_ULTRIX_PARTITION is not set | ||
783 | # CONFIG_SUN_PARTITION is not set | ||
784 | # CONFIG_EFI_PARTITION is not set | ||
785 | |||
786 | # | ||
787 | # Native Language Support | ||
788 | # | ||
789 | # CONFIG_NLS is not set | ||
790 | |||
791 | # | ||
792 | # Profiling support | ||
793 | # | ||
794 | # CONFIG_PROFILING is not set | ||
795 | |||
796 | # | ||
797 | # Kernel hacking | ||
798 | # | ||
799 | CONFIG_DEBUG_KERNEL=y | ||
800 | CONFIG_MAGIC_SYSRQ=y | ||
801 | CONFIG_SCHEDSTATS=y | ||
802 | # CONFIG_DEBUG_SPINLOCK is not set | ||
803 | # CONFIG_DEBUG_KOBJECT is not set | ||
804 | CONFIG_DEBUG_FS=y | ||
805 | CONFIG_FRAME_POINTER=y | ||
806 | # CONFIG_EARLY_PRINTK is not set | ||
807 | # CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set | ||
808 | CONFIG_SH64_PROC_TLB=y | ||
809 | CONFIG_SH64_PROC_ASIDS=y | ||
810 | CONFIG_SH64_SR_WATCH=y | ||
811 | # CONFIG_POOR_MANS_STRACE is not set | ||
812 | # CONFIG_SH_ALPHANUMERIC is not set | ||
813 | # CONFIG_SH_NO_BSS_INIT is not set | ||
814 | |||
815 | # | ||
816 | # Security options | ||
817 | # | ||
818 | # CONFIG_KEYS is not set | ||
819 | # CONFIG_SECURITY is not set | ||
820 | |||
821 | # | ||
822 | # Cryptographic options | ||
823 | # | ||
824 | # CONFIG_CRYPTO is not set | ||
825 | |||
826 | # | ||
827 | # Hardware crypto devices | ||
828 | # | ||
829 | |||
830 | # | ||
831 | # Library routines | ||
832 | # | ||
833 | # CONFIG_CRC_CCITT is not set | ||
834 | CONFIG_CRC32=y | ||
835 | # CONFIG_LIBCRC32C is not set | ||
836 | CONFIG_GENERIC_HARDIRQS=y | ||
837 | CONFIG_GENERIC_IRQ_PROBE=y | ||
diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile new file mode 100644 index 000000000000..5816657c079c --- /dev/null +++ b/arch/sh64/kernel/Makefile | |||
@@ -0,0 +1,36 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2000, 2001 Paolo Alberelli | ||
7 | # Copyright (C) 2003 Paul Mundt | ||
8 | # | ||
9 | # Makefile for the Linux sh64 kernel. | ||
10 | # | ||
11 | # Note! Dependencies are done automagically by 'make dep', which also | ||
12 | # removes any old dependencies. DON'T put your own dependencies here | ||
13 | # unless it's something special (ie not a .c file). | ||
14 | # | ||
15 | |||
16 | extra-y := head.o init_task.o vmlinux.lds | ||
17 | |||
18 | obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \ | ||
19 | ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \ | ||
20 | switchto.o syscalls.o | ||
21 | |||
22 | obj-$(CONFIG_HEARTBEAT) += led.o | ||
23 | obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o | ||
24 | obj-$(CONFIG_SH_DMA) += dma.o | ||
25 | obj-$(CONFIG_SH_FPU) += fpu.o | ||
26 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
27 | obj-$(CONFIG_KALLSYMS) += unwind.o | ||
28 | obj-$(CONFIG_PCI) += pci-dma.o pcibios.o | ||
29 | obj-$(CONFIG_MODULES) += module.o | ||
30 | |||
31 | ifeq ($(CONFIG_PCI),y) | ||
32 | obj-$(CONFIG_CPU_SH5) += pci_sh5.o | ||
33 | endif | ||
34 | |||
35 | USE_STANDARD_AS_RULE := true | ||
36 | |||
diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c new file mode 100644 index 000000000000..56d6f9f71524 --- /dev/null +++ b/arch/sh64/kernel/alphanum.c | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/alpanum.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com> | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * Machine-independent functions for handling 8-digit alphanumeric display | ||
10 | * (e.g. Agilent HDSP-253x) | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/sched.h> | ||
15 | |||
16 | void mach_alphanum(int pos, unsigned char val); | ||
17 | void mach_led(int pos, int val); | ||
18 | |||
19 | void print_seg(char *file, int line) | ||
20 | { | ||
21 | int i; | ||
22 | unsigned int nibble; | ||
23 | |||
24 | for (i = 0; i < 5; i++) { | ||
25 | mach_alphanum(i, file[i]); | ||
26 | } | ||
27 | |||
28 | for (i = 0; i < 3; i++) { | ||
29 | nibble = ((line >> (i * 4)) & 0xf); | ||
30 | mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48)); | ||
31 | } | ||
32 | } | ||
33 | |||
34 | void print_seg_num(unsigned num) | ||
35 | { | ||
36 | int i; | ||
37 | unsigned int nibble; | ||
38 | |||
39 | for (i = 0; i < 8; i++) { | ||
40 | nibble = ((num >> (i * 4)) & 0xf); | ||
41 | |||
42 | mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48)); | ||
43 | } | ||
44 | } | ||
45 | |||
diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c new file mode 100644 index 000000000000..ca76537c16c0 --- /dev/null +++ b/arch/sh64/kernel/asm-offsets.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This program is used to generate definitions needed by | ||
3 | * assembly language modules. | ||
4 | * | ||
5 | * We use the technique used in the OSF Mach kernel code: | ||
6 | * generate asm statements containing #defines, | ||
7 | * compile this file to assembler, and then extract the | ||
8 | * #defines from the assembly-language output. | ||
9 | */ | ||
10 | |||
11 | #include <linux/stddef.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | |||
16 | #define DEFINE(sym, val) \ | ||
17 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
18 | |||
19 | #define BLANK() asm volatile("\n->" : : ) | ||
20 | |||
21 | int main(void) | ||
22 | { | ||
23 | /* offsets into the thread_info struct */ | ||
24 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | ||
25 | DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); | ||
26 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | ||
27 | DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); | ||
28 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | ||
29 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); | ||
30 | DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); | ||
31 | |||
32 | return 0; | ||
33 | } | ||
diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c new file mode 100644 index 000000000000..09cd9f4670b5 --- /dev/null +++ b/arch/sh64/kernel/dma.c | |||
@@ -0,0 +1,297 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/dma.c | ||
3 | * | ||
4 | * DMA routines for the SH-5 DMAC. | ||
5 | * | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/irq.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <asm/hardware.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/signal.h> | ||
22 | #include <asm/errno.h> | ||
23 | #include <asm/io.h> | ||
24 | |||
25 | typedef struct { | ||
26 | unsigned long dev_addr; | ||
27 | unsigned long mem_addr; | ||
28 | |||
29 | unsigned int mode; | ||
30 | unsigned int count; | ||
31 | } dma_info_t; | ||
32 | |||
33 | static dma_info_t dma_info[MAX_DMA_CHANNELS]; | ||
34 | static DEFINE_SPINLOCK(dma_spin_lock); | ||
35 | |||
36 | /* arch/sh64/kernel/irq_intc.c */ | ||
37 | extern void make_intc_irq(unsigned int irq); | ||
38 | |||
39 | /* DMAC Interrupts */ | ||
40 | #define DMA_IRQ_DMTE0 18 | ||
41 | #define DMA_IRQ_DERR 22 | ||
42 | |||
43 | #define DMAC_COMMON_BASE (dmac_base + 0x08) | ||
44 | #define DMAC_SAR_BASE (dmac_base + 0x10) | ||
45 | #define DMAC_DAR_BASE (dmac_base + 0x18) | ||
46 | #define DMAC_COUNT_BASE (dmac_base + 0x20) | ||
47 | #define DMAC_CTRL_BASE (dmac_base + 0x28) | ||
48 | #define DMAC_STATUS_BASE (dmac_base + 0x30) | ||
49 | |||
50 | #define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28)) | ||
51 | #define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28)) | ||
52 | #define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28)) | ||
53 | #define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28)) | ||
54 | #define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28)) | ||
55 | |||
56 | /* DMAC.COMMON Bit Definitions */ | ||
57 | #define DMAC_COMMON_PR 0x00000001 /* Priority */ | ||
58 | /* Bits 1-2 Reserved */ | ||
59 | #define DMAC_COMMON_ME 0x00000008 /* Master Enable */ | ||
60 | #define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */ | ||
61 | /* Bits 5-6 Reserved */ | ||
62 | #define DMAC_COMMON_ER 0x00000780 /* Error Response */ | ||
63 | #define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */ | ||
64 | /* Bits 15-63 Reserved */ | ||
65 | |||
66 | /* DMAC.SAR Bit Definitions */ | ||
67 | #define DMAC_SAR_ADDR 0xffffffff /* Source Address */ | ||
68 | |||
69 | /* DMAC.DAR Bit Definitions */ | ||
70 | #define DMAC_DAR_ADDR 0xffffffff /* Destination Address */ | ||
71 | |||
72 | /* DMAC.COUNT Bit Definitions */ | ||
73 | #define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */ | ||
74 | |||
75 | /* DMAC.CTRL Bit Definitions */ | ||
76 | #define DMAC_CTRL_TS 0x00000007 /* Transfer Size */ | ||
77 | #define DMAC_CTRL_SI 0x00000018 /* Source Increment */ | ||
78 | #define DMAC_CTRL_DI 0x00000060 /* Destination Increment */ | ||
79 | #define DMAC_CTRL_RS 0x00000780 /* Resource Select */ | ||
80 | #define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */ | ||
81 | #define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */ | ||
82 | /* Bits 15-63 Reserved */ | ||
83 | |||
84 | /* DMAC.STATUS Bit Definitions */ | ||
85 | #define DMAC_STATUS_TE 0x00000001 /* Transfer End */ | ||
86 | #define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */ | ||
87 | /* Bits 2-63 Reserved */ | ||
88 | |||
89 | static unsigned long dmac_base; | ||
90 | |||
91 | void set_dma_count(unsigned int chan, unsigned int count); | ||
92 | void set_dma_addr(unsigned int chan, unsigned int addr); | ||
93 | |||
94 | static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs) | ||
95 | { | ||
96 | unsigned int chan = irq - DMA_IRQ_DMTE0; | ||
97 | dma_info_t *info = dma_info + chan; | ||
98 | u64 status; | ||
99 | |||
100 | if (info->mode & DMA_MODE_WRITE) { | ||
101 | sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan)); | ||
102 | } else { | ||
103 | sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan)); | ||
104 | } | ||
105 | |||
106 | set_dma_count(chan, info->count); | ||
107 | |||
108 | /* Clear the TE bit */ | ||
109 | status = sh64_in64(DMAC_STATUS(chan)); | ||
110 | status &= ~DMAC_STATUS_TE; | ||
111 | sh64_out64(status, DMAC_STATUS(chan)); | ||
112 | |||
113 | return IRQ_HANDLED; | ||
114 | } | ||
115 | |||
/* Shared irqaction registered (via setup_irq in setup_dma) for every
 * channel's transfer-end interrupt; dma_mte() derives the channel
 * number from the IRQ number. */
static struct irqaction irq_dmte = {
	.handler = dma_mte,
	.flags = SA_INTERRUPT,
	.name = "DMA MTE",
};
121 | |||
122 | static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs) | ||
123 | { | ||
124 | u64 tmp; | ||
125 | u8 chan; | ||
126 | |||
127 | printk(KERN_NOTICE "DMAC: Got a DMA Error!\n"); | ||
128 | |||
129 | tmp = sh64_in64(DMAC_COMMON_BASE); | ||
130 | |||
131 | /* Check for the type of error */ | ||
132 | if ((chan = tmp & DMAC_COMMON_AAE)) { | ||
133 | /* It's an address alignment error.. */ | ||
134 | printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan); | ||
135 | |||
136 | printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n", | ||
137 | (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR), | ||
138 | (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR), | ||
139 | (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT)); | ||
140 | |||
141 | } else if ((chan = tmp & DMAC_COMMON_ER)) { | ||
142 | /* Something else went wrong.. */ | ||
143 | printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan); | ||
144 | } | ||
145 | |||
146 | /* Reset the ME bit to clear the interrupt */ | ||
147 | tmp |= DMAC_COMMON_ME; | ||
148 | sh64_out64(tmp, DMAC_COMMON_BASE); | ||
149 | |||
150 | return IRQ_HANDLED; | ||
151 | } | ||
152 | |||
/* irqaction for the single DMAC error interrupt, registered in
 * init_dma() and released in exit_dma(). */
static struct irqaction irq_derr = {
	.handler = dma_err,
	.flags = SA_INTERRUPT,
	.name = "DMA Error",
};
158 | |||
/*
 * Return the log2 scale factor between bytes and DMAC transfer units
 * for 'chan', read from the low bits of the channel's CTRL register.
 *
 * NOTE(review): only two bits are read here although DMAC_CTRL_TS
 * above is a 3-bit field (0x7) -- confirm against the DMAC manual
 * whether the top TS bit can legitimately be set.
 */
static inline unsigned long calc_xmit_shift(unsigned int chan)
{
	return sh64_in64(DMAC_CTRL(chan)) & 0x03;
}
163 | |||
164 | void setup_dma(unsigned int chan, dma_info_t *info) | ||
165 | { | ||
166 | unsigned int irq = DMA_IRQ_DMTE0 + chan; | ||
167 | dma_info_t *dma = dma_info + chan; | ||
168 | |||
169 | make_intc_irq(irq); | ||
170 | setup_irq(irq, &irq_dmte); | ||
171 | dma = info; | ||
172 | } | ||
173 | |||
174 | void enable_dma(unsigned int chan) | ||
175 | { | ||
176 | u64 ctrl; | ||
177 | |||
178 | ctrl = sh64_in64(DMAC_CTRL(chan)); | ||
179 | ctrl |= DMAC_CTRL_TE; | ||
180 | sh64_out64(ctrl, DMAC_CTRL(chan)); | ||
181 | } | ||
182 | |||
183 | void disable_dma(unsigned int chan) | ||
184 | { | ||
185 | u64 ctrl; | ||
186 | |||
187 | ctrl = sh64_in64(DMAC_CTRL(chan)); | ||
188 | ctrl &= ~DMAC_CTRL_TE; | ||
189 | sh64_out64(ctrl, DMAC_CTRL(chan)); | ||
190 | } | ||
191 | |||
192 | void set_dma_mode(unsigned int chan, char mode) | ||
193 | { | ||
194 | dma_info_t *info = dma_info + chan; | ||
195 | |||
196 | info->mode = mode; | ||
197 | |||
198 | set_dma_addr(chan, info->mem_addr); | ||
199 | set_dma_count(chan, info->count); | ||
200 | } | ||
201 | |||
202 | void set_dma_addr(unsigned int chan, unsigned int addr) | ||
203 | { | ||
204 | dma_info_t *info = dma_info + chan; | ||
205 | unsigned long sar, dar; | ||
206 | |||
207 | info->mem_addr = addr; | ||
208 | sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr; | ||
209 | dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr; | ||
210 | |||
211 | sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan)); | ||
212 | sh64_out64(dar & DMAC_SAR_ADDR, DMAC_DAR(chan)); | ||
213 | } | ||
214 | |||
215 | void set_dma_count(unsigned int chan, unsigned int count) | ||
216 | { | ||
217 | dma_info_t *info = dma_info + chan; | ||
218 | u64 tmp; | ||
219 | |||
220 | info->count = count; | ||
221 | |||
222 | tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT; | ||
223 | |||
224 | sh64_out64(tmp, DMAC_COUNT(chan)); | ||
225 | } | ||
226 | |||
/*
 * Serialize access to the DMA bookkeeping: take the global DMA
 * spinlock with local interrupts disabled and return the saved IRQ
 * flags for the matching release_dma_lock() call.
 */
unsigned long claim_dma_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dma_spin_lock, flags);

	return flags;
}
235 | |||
/* Drop the global DMA spinlock and restore the IRQ flags previously
 * returned by claim_dma_lock(). */
void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
240 | |||
241 | int get_dma_residue(unsigned int chan) | ||
242 | { | ||
243 | return sh64_in64(DMAC_COUNT(chan) << calc_xmit_shift(chan)); | ||
244 | } | ||
245 | |||
/*
 * Map the DMAC register block, report the module version, enable the
 * controller (ME bit) and hook up the error interrupt.
 *
 * Returns 0 on success, -ENOMEM if the register window could not be
 * remapped.
 */
int __init init_dma(void)
{
	struct vcr_info vcr;
	u64 tmp;

	/* Remap the DMAC */
	dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
	if (!dmac_base) {
		printk(KERN_ERR "Unable to remap DMAC\n");
		return -ENOMEM;
	}

	/* Report DMAC.VCR Info */
	vcr = sh64_get_vcr_info(dmac_base);
	printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
	       vcr.mod_id, vcr.mod_vers);

	/* Set the ME bit */
	tmp = sh64_in64(DMAC_COMMON_BASE);
	tmp |= DMAC_COMMON_ME;
	sh64_out64(tmp, DMAC_COMMON_BASE);

	/* Enable the DMAC Error Interrupt */
	make_intc_irq(DMA_IRQ_DERR);
	setup_irq(DMA_IRQ_DERR, &irq_derr);

	return 0;
}
274 | |||
275 | static void __exit exit_dma(void) | ||
276 | { | ||
277 | onchip_unmap(dmac_base); | ||
278 | free_irq(DMA_IRQ_DERR, 0); | ||
279 | } | ||
280 | |||
281 | module_init(init_dma); | ||
282 | module_exit(exit_dma); | ||
283 | |||
284 | MODULE_AUTHOR("Paul Mundt"); | ||
285 | MODULE_DESCRIPTION("DMA API for SH-5 DMAC"); | ||
286 | MODULE_LICENSE("GPL"); | ||
287 | |||
288 | EXPORT_SYMBOL(setup_dma); | ||
289 | EXPORT_SYMBOL(claim_dma_lock); | ||
290 | EXPORT_SYMBOL(release_dma_lock); | ||
291 | EXPORT_SYMBOL(enable_dma); | ||
292 | EXPORT_SYMBOL(disable_dma); | ||
293 | EXPORT_SYMBOL(set_dma_mode); | ||
294 | EXPORT_SYMBOL(set_dma_addr); | ||
295 | EXPORT_SYMBOL(set_dma_count); | ||
296 | EXPORT_SYMBOL(get_dma_residue); | ||
297 | |||
diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c new file mode 100644 index 000000000000..8c8a76e180aa --- /dev/null +++ b/arch/sh64/kernel/early_printk.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/early_printk.c | ||
3 | * | ||
4 | * SH-5 Early SCIF console (cloned and hacked from sh implementation) | ||
5 | * | ||
6 | * Copyright (C) 2003, 2004 Paul Mundt <lethal@linux-sh.org> | ||
7 | * Copyright (C) 2002 M. R. Brown <mrbrown@0xd6.org> | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #include <linux/console.h> | ||
14 | #include <linux/tty.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <asm/hardware.h> | ||
18 | |||
19 | #define SCIF_BASE_ADDR 0x01030000 | ||
20 | #define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR | ||
21 | |||
22 | /* | ||
23 | * Fixed virtual address where SCIF is mapped (should already be done | ||
24 | * in arch/sh64/kernel/head.S!). | ||
25 | */ | ||
26 | #define SCIF_REG 0xfa030000 | ||
27 | |||
28 | enum { | ||
29 | SCIF_SCSMR2 = SCIF_REG + 0x00, | ||
30 | SCIF_SCBRR2 = SCIF_REG + 0x04, | ||
31 | SCIF_SCSCR2 = SCIF_REG + 0x08, | ||
32 | SCIF_SCFTDR2 = SCIF_REG + 0x0c, | ||
33 | SCIF_SCFSR2 = SCIF_REG + 0x10, | ||
34 | SCIF_SCFRDR2 = SCIF_REG + 0x14, | ||
35 | SCIF_SCFCR2 = SCIF_REG + 0x18, | ||
36 | SCIF_SCFDR2 = SCIF_REG + 0x1c, | ||
37 | SCIF_SCSPTR2 = SCIF_REG + 0x20, | ||
38 | SCIF_SCLSR2 = SCIF_REG + 0x24, | ||
39 | }; | ||
40 | |||
41 | static void sh_console_putc(int c) | ||
42 | { | ||
43 | while (!(ctrl_inw(SCIF_SCFSR2) & 0x20)) | ||
44 | cpu_relax(); | ||
45 | |||
46 | ctrl_outb(c, SCIF_SCFTDR2); | ||
47 | ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2); | ||
48 | |||
49 | if (c == '\n') | ||
50 | sh_console_putc('\r'); | ||
51 | } | ||
52 | |||
/*
 * Busy-wait until the transmitter has fully drained.
 * (The 0x40 status bit is presumably TEND/transmit-end and 0xbf the
 * mask that clears it -- TODO confirm against the SCIF datasheet.)
 */
static void sh_console_flush(void)
{
	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);

	while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
		cpu_relax();

	ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
}
62 | |||
/*
 * console->write hook: emit 'count' characters through the polled
 * SCIF transmitter, then wait for the FIFO to drain.
 */
static void sh_console_write(struct console *con, const char *s, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++)
		sh_console_putc(s[i]);

	sh_console_flush();
}
70 | |||
/*
 * console->setup hook.  'options' is ignored: the line settings are
 * fixed at 19200 8N1, matching the hardware programming done in
 * enable_early_printk().  Always succeeds.
 */
static int __init sh_console_setup(struct console *con, char *options)
{
	con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;

	return 0;
}
77 | |||
/* Early boot console backed by the on-chip SCIF.  index -1 lets the
 * console core pick the first free minor; CON_PRINTBUFFER replays the
 * log buffer accumulated before registration. */
static struct console sh_console = {
	.name = "scifcon",
	.write = sh_console_write,
	.setup = sh_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};
85 | |||
/*
 * Program the SCIF for polled 19200-baud output and register the
 * early console.  The writes are ordered: baud rate, FIFO reset and
 * trigger level, then status/line registers cleared before enabling.
 */
void __init enable_early_printk(void)
{
	ctrl_outb(0x2a, SCIF_SCBRR2);	/* 19200bps */

	ctrl_outw(0x04, SCIF_SCFCR2);	/* Reset TFRST */
	ctrl_outw(0x10, SCIF_SCFCR2);	/* TTRG0=1 */

	ctrl_outw(0, SCIF_SCSPTR2);
	ctrl_outw(0x60, SCIF_SCFSR2);
	ctrl_outw(0, SCIF_SCLSR2);
	ctrl_outw(0x30, SCIF_SCSCR2);	/* presumably TE/RE enable -- confirm vs datasheet */

	register_console(&sh_console);
}
100 | |||
/* Unhook the early console once the real console driver takes over. */
void disable_early_printk(void)
{
	unregister_console(&sh_console);
}
105 | |||
diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S new file mode 100644 index 000000000000..2e2cfe20b426 --- /dev/null +++ b/arch/sh64/kernel/entry.S | |||
@@ -0,0 +1,2103 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/entry.S | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2004, 2005 Paul Mundt | ||
10 | * Copyright (C) 2003, 2004 Richard Curnow | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/sys.h> | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <asm/registers.h> | ||
20 | #include <asm/unistd.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/asm-offsets.h> | ||
23 | |||
24 | /* | ||
25 | * SR fields. | ||
26 | */ | ||
27 | #define SR_ASID_MASK 0x00ff0000 | ||
28 | #define SR_FD_MASK 0x00008000 | ||
29 | #define SR_SS 0x08000000 | ||
30 | #define SR_BL 0x10000000 | ||
31 | #define SR_MD 0x40000000 | ||
32 | |||
33 | /* | ||
34 | * Event code. | ||
35 | */ | ||
36 | #define EVENT_INTERRUPT 0 | ||
37 | #define EVENT_FAULT_TLB 1 | ||
38 | #define EVENT_FAULT_NOT_TLB 2 | ||
39 | #define EVENT_DEBUG 3 | ||
40 | |||
41 | /* EXPEVT values */ | ||
42 | #define RESET_CAUSE 0x20 | ||
43 | #define DEBUGSS_CAUSE 0x980 | ||
44 | |||
45 | /* | ||
46 | * Frame layout. Quad index. | ||
47 | */ | ||
48 | #define FRAME_T(x) FRAME_TBASE+(x*8) | ||
49 | #define FRAME_R(x) FRAME_RBASE+(x*8) | ||
50 | #define FRAME_S(x) FRAME_SBASE+(x*8) | ||
51 | #define FSPC 0 | ||
52 | #define FSSR 1 | ||
53 | #define FSYSCALL_ID 2 | ||
54 | |||
55 | /* Arrange the save frame to be a multiple of 32 bytes long */ | ||
56 | #define FRAME_SBASE 0 | ||
57 | #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */ | ||
58 | #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */ | ||
59 | #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */ | ||
60 | #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */ | ||
61 | |||
62 | #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */ | ||
63 | #define FP_FRAME_BASE 0 | ||
64 | |||
65 | #define SAVED_R2 0*8 | ||
66 | #define SAVED_R3 1*8 | ||
67 | #define SAVED_R4 2*8 | ||
68 | #define SAVED_R5 3*8 | ||
69 | #define SAVED_R18 4*8 | ||
70 | #define SAVED_R6 5*8 | ||
71 | #define SAVED_TR0 6*8 | ||
72 | |||
73 | /* These are the registers saved in the TLB path that aren't saved in the first | ||
74 | level of the normal one. */ | ||
75 | #define TLB_SAVED_R25 7*8 | ||
76 | #define TLB_SAVED_TR1 8*8 | ||
77 | #define TLB_SAVED_TR2 9*8 | ||
78 | #define TLB_SAVED_TR3 10*8 | ||
79 | #define TLB_SAVED_TR4 11*8 | ||
80 | /* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing | ||
81 | breakage otherwise. */ | ||
82 | #define TLB_SAVED_R0 12*8 | ||
83 | #define TLB_SAVED_R1 13*8 | ||
84 | |||
85 | #define CLI() \ | ||
86 | getcon SR, r6; \ | ||
87 | ori r6, 0xf0, r6; \ | ||
88 | putcon r6, SR; | ||
89 | |||
90 | #define STI() \ | ||
91 | getcon SR, r6; \ | ||
92 | andi r6, ~0xf0, r6; \ | ||
93 | putcon r6, SR; | ||
94 | |||
95 | #ifdef CONFIG_PREEMPT | ||
96 | # define preempt_stop() CLI() | ||
97 | #else | ||
98 | # define preempt_stop() | ||
99 | # define resume_kernel restore_all | ||
100 | #endif | ||
101 | |||
102 | .section .data, "aw" | ||
103 | |||
104 | #define FAST_TLBMISS_STACK_CACHELINES 4 | ||
105 | #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES) | ||
106 | |||
107 | /* Register back-up area for all exceptions */ | ||
108 | .balign 32 | ||
109 | /* Allow for 16 quadwords to be pushed by fast tlbmiss handling | ||
110 | * register saves etc. */ | ||
111 | .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0 | ||
112 | /* This is 32 byte aligned by construction */ | ||
113 | /* Register back-up area for all exceptions */ | ||
114 | reg_save_area: | ||
115 | .quad 0 | ||
116 | .quad 0 | ||
117 | .quad 0 | ||
118 | .quad 0 | ||
119 | |||
120 | .quad 0 | ||
121 | .quad 0 | ||
122 | .quad 0 | ||
123 | .quad 0 | ||
124 | |||
125 | .quad 0 | ||
126 | .quad 0 | ||
127 | .quad 0 | ||
128 | .quad 0 | ||
129 | |||
130 | .quad 0 | ||
131 | .quad 0 | ||
132 | |||
133 | /* Save area for RESVEC exceptions. We cannot use reg_save_area because of | ||
134 | * reentrancy. Note this area may be accessed via physical address. | ||
135 | * Align so this fits a whole single cache line, for ease of purging. | ||
136 | */ | ||
137 | .balign 32,0,32 | ||
138 | resvec_save_area: | ||
139 | .quad 0 | ||
140 | .quad 0 | ||
141 | .quad 0 | ||
142 | .quad 0 | ||
143 | .quad 0 | ||
144 | .balign 32,0,32 | ||
145 | |||
146 | /* Jump table of 3rd level handlers */ | ||
147 | trap_jtable: | ||
148 | .long do_exception_error /* 0x000 */ | ||
149 | .long do_exception_error /* 0x020 */ | ||
150 | .long tlb_miss_load /* 0x040 */ | ||
151 | .long tlb_miss_store /* 0x060 */ | ||
152 | ! ARTIFICIAL pseudo-EXPEVT setting | ||
153 | .long do_debug_interrupt /* 0x080 */ | ||
154 | .long tlb_miss_load /* 0x0A0 */ | ||
155 | .long tlb_miss_store /* 0x0C0 */ | ||
156 | .long do_address_error_load /* 0x0E0 */ | ||
157 | .long do_address_error_store /* 0x100 */ | ||
158 | #ifdef CONFIG_SH_FPU | ||
159 | .long do_fpu_error /* 0x120 */ | ||
160 | #else | ||
161 | .long do_exception_error /* 0x120 */ | ||
162 | #endif | ||
163 | .long do_exception_error /* 0x140 */ | ||
164 | .long system_call /* 0x160 */ | ||
165 | .long do_reserved_inst /* 0x180 */ | ||
166 | .long do_illegal_slot_inst /* 0x1A0 */ | ||
167 | .long do_NMI /* 0x1C0 */ | ||
168 | .long do_exception_error /* 0x1E0 */ | ||
169 | .rept 15 | ||
170 | .long do_IRQ /* 0x200 - 0x3C0 */ | ||
171 | .endr | ||
172 | .long do_exception_error /* 0x3E0 */ | ||
173 | .rept 32 | ||
174 | .long do_IRQ /* 0x400 - 0x7E0 */ | ||
175 | .endr | ||
176 | .long fpu_error_or_IRQA /* 0x800 */ | ||
177 | .long fpu_error_or_IRQB /* 0x820 */ | ||
178 | .long do_IRQ /* 0x840 */ | ||
179 | .long do_IRQ /* 0x860 */ | ||
180 | .rept 6 | ||
181 | .long do_exception_error /* 0x880 - 0x920 */ | ||
182 | .endr | ||
183 | .long do_software_break_point /* 0x940 */ | ||
184 | .long do_exception_error /* 0x960 */ | ||
185 | .long do_single_step /* 0x980 */ | ||
186 | |||
187 | .rept 3 | ||
188 | .long do_exception_error /* 0x9A0 - 0x9E0 */ | ||
189 | .endr | ||
190 | .long do_IRQ /* 0xA00 */ | ||
191 | .long do_IRQ /* 0xA20 */ | ||
192 | .long itlb_miss_or_IRQ /* 0xA40 */ | ||
193 | .long do_IRQ /* 0xA60 */ | ||
194 | .long do_IRQ /* 0xA80 */ | ||
195 | .long itlb_miss_or_IRQ /* 0xAA0 */ | ||
196 | .long do_exception_error /* 0xAC0 */ | ||
197 | .long do_address_error_exec /* 0xAE0 */ | ||
198 | .rept 8 | ||
199 | .long do_exception_error /* 0xB00 - 0xBE0 */ | ||
200 | .endr | ||
201 | .rept 18 | ||
202 | .long do_IRQ /* 0xC00 - 0xE20 */ | ||
203 | .endr | ||
204 | |||
205 | .section .text64, "ax" | ||
206 | |||
207 | /* | ||
208 | * --- Exception/Interrupt/Event Handling Section | ||
209 | */ | ||
210 | |||
211 | /* | ||
212 | * VBR and RESVEC blocks. | ||
213 | * | ||
214 | * First level handler for VBR-based exceptions. | ||
215 | * | ||
216 | * To avoid waste of space, align to the maximum text block size. | ||
217 | * This is assumed to be at most 128 bytes or 32 instructions. | ||
218 | * DO NOT EXCEED 32 instructions on the first level handlers ! | ||
219 | * | ||
220 | * Also note that RESVEC is contained within the VBR block | ||
221 | * where the room left (1KB - TEXT_SIZE) allows placing | ||
222 | * the RESVEC block (at most 512B + TEXT_SIZE). | ||
223 | * | ||
224 | * So first (and only) level handler for RESVEC-based exceptions. | ||
225 | * | ||
226 | * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss | ||
227 | * and interrupt) we are a lot tight with register space until | ||
228 | * saving onto the stack frame, which is done in handle_exception(). | ||
229 | * | ||
230 | */ | ||
231 | |||
232 | #define TEXT_SIZE 128 | ||
233 | #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */ | ||
234 | |||
235 | .balign TEXT_SIZE | ||
236 | LVBR_block: | ||
237 | .space 256, 0 /* Power-on class handler, */ | ||
238 | /* not required here */ | ||
/* First-level handler for general (non-TLB-miss) exceptions: stash
 * the scratch registers in reg_save_area, build the argument set and
 * hand off to handle_exception. */
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3	/* low bit set: SHmedia return address */
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP	/* restore the original stack pointer */
	pta	handle_exception, tr0
	blink	tr0, ZERO
264 | |||
265 | .balign 256 | ||
266 | ! VBR+0x200 | ||
267 | nop | ||
268 | .balign 256 | ||
269 | ! VBR+0x300 | ||
270 | nop | ||
271 | .balign 256 | ||
272 | /* | ||
273 | * Instead of the natural .balign 1024 place RESVEC here | ||
274 | * respecting the final 1KB alignment. | ||
275 | */ | ||
276 | .balign TEXT_SIZE | ||
277 | /* | ||
278 | * Instead of '.space 1024-TEXT_SIZE' place the RESVEC | ||
279 | * block making sure the final alignment is correct. | ||
280 | */ | ||
/* First-level TLB-miss handler: saves an extended register set (the
 * fast path in mm may clobber more than the generic first level),
 * tries do_fast_page_fault, and either restores and returns straight
 * to the faulting context or falls back to handle_exception. */
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0 , r0
	st.q	SP, TLB_SAVED_R1 , r1
	st.q	SP, SAVED_R2 , r2
	st.q	SP, SAVED_R3 , r3
	st.q	SP, SAVED_R4 , r4
	st.q	SP, SAVED_R5 , r5
	st.q	SP, SAVED_R6 , r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0 , r2
	st.q	SP, TLB_SAVED_TR1 , r3
	st.q	SP, TLB_SAVED_TR2 , r4
	st.q	SP, TLB_SAVED_TR3 , r5
	st.q	SP, TLB_SAVED_TR4 , r18

	/* Fast-path call arguments: r2 = SSR.MD, r3 = EXPEVT, r4 = TEA. */
	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	 */
	beqi/u	r2, 0, tr1

fast_tlb_miss_restore:
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4

	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */

fixup_to_invoke_general_handler:

	/* OK, new method. Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
	   r25,tr1-4 and save r6 to get into the right state. */

	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
384 | |||
385 | /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE | ||
386 | DOES END UP AT VBR+0x600 */ | ||
387 | nop | ||
388 | nop | ||
389 | nop | ||
390 | nop | ||
391 | nop | ||
392 | nop | ||
393 | |||
394 | .balign 256 | ||
395 | /* VBR + 0x600 */ | ||
396 | |||
/* First-level external interrupt handler at VBR+0x600: same register
 * save dance as not_a_tlb_miss, but the event code comes from INTEVT
 * and control returns through ret_from_irq. */
interrupt:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for interrupt class handler */
	getcon	INTEVT, r2	/* INTEVT (not EXPEVT) identifies the IRQ source */
	movi	ret_from_irq, r3
	ori	r3, 1, r3
	movi	EVENT_INTERRUPT, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
	.balign	TEXT_SIZE		/* let's waste the bare minimum */
423 | |||
424 | LVBR_block_end: /* Marker. Used for total checking */ | ||
425 | |||
426 | .balign 256 | ||
427 | LRESVEC_block: | ||
428 | /* Panic handler. Called with MMU off. Possible causes/actions: | ||
429 | * - Reset: Jump to program start. | ||
430 | * - Single Step: Turn off Single Step & return. | ||
431 | * - Others: Call panic handler, passing PC as arg. | ||
432 | * (this may need to be extended...) | ||
433 | */ | ||
/* RESVEC entry, reached with the MMU off (see the block comment
 * above): dispatch on EXPEVT -- jump to _stext on reset, resume via
 * single_step_panic on a single-step trap, otherwise fall into the
 * panic register-stash path.  SP is parked in DCR throughout. */
reset_or_panic:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, DCR
	/* First save r0-1 and tr0, as we need to use these */
	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	tr0, r0
	st.q	SP, 32, r0

	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	movi	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0
	ori	r0, 1, r0
	ptabs	r0, tr0
	beqi	r1, 0, tr0		/* Jump to start address if reset */

	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	pta	single_step_panic, tr0
	beqi	r1, 0, tr0		/* jump if single step */

	/* Now jump to where we save the registers. */
	movi	panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
	ptabs	r1, tr0
	blink	tr0, r63

single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0
	ptabs	r0, tr0
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP
	synco
	rte
484 | |||
485 | |||
486 | .balign 256 | ||
/* Fix: the continuation label was misspelled 'debug_exeception_2' at
 * both its definition and its single (in-block) use; spelled
 * consistently as 'debug_exception_2' to match the comment above it.
 * No functional change. */
debug_exception:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP

	/* With the MMU off, we are bypassing the cache, so purge any
	 * data that will be made stale by the following stores.
	 */
	ocbp	SP, 0
	synco

	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0

	/* Enable MMU, block exceptions, set priv mode, disable single step */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exception_2 when rte is executed */
	movi	debug_exception_2, r0
	ori	r0, 1, r0	/* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exception_2:
	/* Restore saved regs */
	putcon	SP, KCR1
	movi	resvec_save_area, SP
	ld.q	SP, 24, r0
	putcon	r0, SSR
	ld.q	SP, 16, r0
	putcon	r0, SPC
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for debug class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_DEBUG, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
555 | |||
556 | .balign 256 | ||
557 | debug_interrupt: | ||
558 | /* !!! WE COME HERE IN REAL MODE !!! */ | ||
559 | /* Hook-up debug interrupt to allow various debugging options to be | ||
560 | * hooked into its handler. */ | ||
561 | /* Save original stack pointer into KCR1 */ | ||
562 | synco | ||
563 | putcon SP, KCR1 | ||
564 | movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | ||
565 | ocbp SP, 0 | ||
566 | ocbp SP, 32 | ||
567 | synco | ||
568 | |||
569 | /* Save other original registers into reg_save_area thru real addresses */ | ||
570 | st.q SP, SAVED_R2, r2 | ||
571 | st.q SP, SAVED_R3, r3 | ||
572 | st.q SP, SAVED_R4, r4 | ||
573 | st.q SP, SAVED_R5, r5 | ||
574 | st.q SP, SAVED_R6, r6 | ||
575 | st.q SP, SAVED_R18, r18 | ||
576 | gettr tr0, r3 | ||
577 | st.q SP, SAVED_TR0, r3 | ||
578 | |||
579 | /* move (spc,ssr)->(pspc,pssr). The rte will shift | ||
580 | them back again, so that they look like the originals | ||
581 | as far as the real handler code is concerned. */ | ||
582 | getcon spc, r6 | ||
583 | putcon r6, pspc | ||
584 | getcon ssr, r6 | ||
585 | putcon r6, pssr | ||
586 | |||
587 | ! construct useful SR for handle_exception | ||
588 | movi 3, r6 | ||
589 | shlli r6, 30, r6 | ||
590 | getcon sr, r18 | ||
591 | or r18, r6, r6 | ||
592 | putcon r6, ssr | ||
593 | |||
594 | ! SSR is now the current SR with the MD and MMU bits set | ||
595 | ! i.e. the rte will switch back to priv mode and put | ||
596 | ! the mmu back on | ||
597 | |||
598 | ! construct spc | ||
599 | movi handle_exception, r18 | ||
600 | ori r18, 1, r18 ! for safety (do we need this?) | ||
601 | putcon r18, spc | ||
602 | |||
603 | /* Set args for Non-debug, Not a TLB miss class handler */ | ||
604 | |||
605 | ! EXPEVT==0x80 is unused, so 'steal' this value to put the | ||
606 | ! debug interrupt handler in the vectoring table | ||
607 | movi 0x80, r2 | ||
608 | movi ret_from_exception, r3 | ||
609 | ori r3, 1, r3 | ||
610 | movi EVENT_FAULT_NOT_TLB, r4 | ||
611 | |||
612 | or SP, ZERO, r5 | ||
613 | movi CONFIG_CACHED_MEMORY_OFFSET, r6 | ||
614 | add r6, r5, r5 | ||
615 | getcon KCR1, SP | ||
616 | |||
617 | synco ! for safety | ||
618 | rte ! -> handle_exception, switch back to priv mode again | ||
619 | |||
620 | LRESVEC_block_end: /* Marker. Unused. */ | ||
621 | |||
622 | .balign TEXT_SIZE | ||
623 | |||
624 | /* | ||
625 | * Second level handler for VBR-based exceptions. Pre-handler. | ||
626 | * In common to all stack-frame sensitive handlers. | ||
627 | * | ||
628 | * Inputs: | ||
629 | * (KCR0) Current [current task union] | ||
630 | * (KCR1) Original SP | ||
631 | * (r2) INTEVT/EXPEVT | ||
632 | * (r3) appropriate return address | ||
633 | * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug) | ||
634 | * (r5) Pointer to reg_save_area | ||
635 | * (SP) Original SP | ||
636 | * | ||
637 | * Available registers: | ||
638 | * (r6) | ||
639 | * (r18) | ||
640 | * (tr0) | ||
641 | * | ||
642 | */ | ||
643 | handle_exception: | ||
644 | /* Common 2nd level handler. */ | ||
645 | |||
646 | /* First thing we need an appropriate stack pointer */ | ||
647 | getcon SSR, r6 | ||
648 | shlri r6, 30, r6 | ||
649 | andi r6, 1, r6 | ||
650 | pta stack_ok, tr0 | ||
651 | bne r6, ZERO, tr0 /* Original stack pointer is fine */ | ||
652 | |||
653 | /* Set stack pointer for user fault */ | ||
654 | getcon KCR0, SP | ||
655 | movi THREAD_SIZE, r6 /* Point to the end */ | ||
656 | add SP, r6, SP | ||
657 | |||
658 | stack_ok: | ||
659 | |||
660 | /* DEBUG : check for underflow/overflow of the kernel stack */ | ||
661 | pta no_underflow, tr0 | ||
662 | getcon KCR0, r6 | ||
663 | movi 1024, r18 | ||
664 | add r6, r18, r6 | ||
665 | bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone | ||
666 | |||
667 | /* Just panic to cause a crash. */ | ||
668 | bad_sp: | ||
669 | ld.b r63, 0, r6 | ||
670 | nop | ||
671 | |||
672 | no_underflow: | ||
673 | pta bad_sp, tr0 | ||
674 | getcon kcr0, r6 | ||
675 | movi THREAD_SIZE, r18 | ||
676 | add r18, r6, r6 | ||
677 | bgt SP, r6, tr0 ! sp above the stack | ||
678 | |||
679 | /* Make some room for the BASIC frame. */ | ||
680 | movi -(FRAME_SIZE), r6 | ||
681 | add SP, r6, SP | ||
682 | |||
683 | /* Could do this with no stalling if we had another spare register, but the | ||
684 | code below will be OK. */ | ||
685 | ld.q r5, SAVED_R2, r6 | ||
686 | ld.q r5, SAVED_R3, r18 | ||
687 | st.q SP, FRAME_R(2), r6 | ||
688 | ld.q r5, SAVED_R4, r6 | ||
689 | st.q SP, FRAME_R(3), r18 | ||
690 | ld.q r5, SAVED_R5, r18 | ||
691 | st.q SP, FRAME_R(4), r6 | ||
692 | ld.q r5, SAVED_R6, r6 | ||
693 | st.q SP, FRAME_R(5), r18 | ||
694 | ld.q r5, SAVED_R18, r18 | ||
695 | st.q SP, FRAME_R(6), r6 | ||
696 | ld.q r5, SAVED_TR0, r6 | ||
697 | st.q SP, FRAME_R(18), r18 | ||
698 | st.q SP, FRAME_T(0), r6 | ||
699 | |||
700 | /* Keep old SP around */ | ||
701 | getcon KCR1, r6 | ||
702 | |||
703 | /* Save the rest of the general purpose registers */ | ||
704 | st.q SP, FRAME_R(0), r0 | ||
705 | st.q SP, FRAME_R(1), r1 | ||
706 | st.q SP, FRAME_R(7), r7 | ||
707 | st.q SP, FRAME_R(8), r8 | ||
708 | st.q SP, FRAME_R(9), r9 | ||
709 | st.q SP, FRAME_R(10), r10 | ||
710 | st.q SP, FRAME_R(11), r11 | ||
711 | st.q SP, FRAME_R(12), r12 | ||
712 | st.q SP, FRAME_R(13), r13 | ||
713 | st.q SP, FRAME_R(14), r14 | ||
714 | |||
715 | /* SP is somewhere else */ | ||
716 | st.q SP, FRAME_R(15), r6 | ||
717 | |||
718 | st.q SP, FRAME_R(16), r16 | ||
719 | st.q SP, FRAME_R(17), r17 | ||
720 | /* r18 is saved earlier. */ | ||
721 | st.q SP, FRAME_R(19), r19 | ||
722 | st.q SP, FRAME_R(20), r20 | ||
723 | st.q SP, FRAME_R(21), r21 | ||
724 | st.q SP, FRAME_R(22), r22 | ||
725 | st.q SP, FRAME_R(23), r23 | ||
726 | st.q SP, FRAME_R(24), r24 | ||
727 | st.q SP, FRAME_R(25), r25 | ||
728 | st.q SP, FRAME_R(26), r26 | ||
729 | st.q SP, FRAME_R(27), r27 | ||
730 | st.q SP, FRAME_R(28), r28 | ||
731 | st.q SP, FRAME_R(29), r29 | ||
732 | st.q SP, FRAME_R(30), r30 | ||
733 | st.q SP, FRAME_R(31), r31 | ||
734 | st.q SP, FRAME_R(32), r32 | ||
735 | st.q SP, FRAME_R(33), r33 | ||
736 | st.q SP, FRAME_R(34), r34 | ||
737 | st.q SP, FRAME_R(35), r35 | ||
738 | st.q SP, FRAME_R(36), r36 | ||
739 | st.q SP, FRAME_R(37), r37 | ||
740 | st.q SP, FRAME_R(38), r38 | ||
741 | st.q SP, FRAME_R(39), r39 | ||
742 | st.q SP, FRAME_R(40), r40 | ||
743 | st.q SP, FRAME_R(41), r41 | ||
744 | st.q SP, FRAME_R(42), r42 | ||
745 | st.q SP, FRAME_R(43), r43 | ||
746 | st.q SP, FRAME_R(44), r44 | ||
747 | st.q SP, FRAME_R(45), r45 | ||
748 | st.q SP, FRAME_R(46), r46 | ||
749 | st.q SP, FRAME_R(47), r47 | ||
750 | st.q SP, FRAME_R(48), r48 | ||
751 | st.q SP, FRAME_R(49), r49 | ||
752 | st.q SP, FRAME_R(50), r50 | ||
753 | st.q SP, FRAME_R(51), r51 | ||
754 | st.q SP, FRAME_R(52), r52 | ||
755 | st.q SP, FRAME_R(53), r53 | ||
756 | st.q SP, FRAME_R(54), r54 | ||
757 | st.q SP, FRAME_R(55), r55 | ||
758 | st.q SP, FRAME_R(56), r56 | ||
759 | st.q SP, FRAME_R(57), r57 | ||
760 | st.q SP, FRAME_R(58), r58 | ||
761 | st.q SP, FRAME_R(59), r59 | ||
762 | st.q SP, FRAME_R(60), r60 | ||
763 | st.q SP, FRAME_R(61), r61 | ||
764 | st.q SP, FRAME_R(62), r62 | ||
765 | |||
766 | /* | ||
767 | * Save the S* registers. | ||
768 | */ | ||
769 | getcon SSR, r61 | ||
770 | st.q SP, FRAME_S(FSSR), r61 | ||
771 | getcon SPC, r62 | ||
772 | st.q SP, FRAME_S(FSPC), r62 | ||
773 | movi -1, r62 /* Reset syscall_nr */ | ||
774 | st.q SP, FRAME_S(FSYSCALL_ID), r62 | ||
775 | |||
776 | /* Save the rest of the target registers */ | ||
777 | gettr tr1, r6 | ||
778 | st.q SP, FRAME_T(1), r6 | ||
779 | gettr tr2, r6 | ||
780 | st.q SP, FRAME_T(2), r6 | ||
781 | gettr tr3, r6 | ||
782 | st.q SP, FRAME_T(3), r6 | ||
783 | gettr tr4, r6 | ||
784 | st.q SP, FRAME_T(4), r6 | ||
785 | gettr tr5, r6 | ||
786 | st.q SP, FRAME_T(5), r6 | ||
787 | gettr tr6, r6 | ||
788 | st.q SP, FRAME_T(6), r6 | ||
789 | gettr tr7, r6 | ||
790 | st.q SP, FRAME_T(7), r6 | ||
791 | |||
792 | ! setup FP so that unwinder can wind back through nested kernel mode | ||
793 | ! exceptions | ||
794 | add SP, ZERO, r14 | ||
795 | |||
796 | #ifdef CONFIG_POOR_MANS_STRACE | ||
797 | /* We've pushed all the registers now, so only r2-r4 hold anything | ||
798 | * useful. Move them into callee save registers */ | ||
799 | or r2, ZERO, r28 | ||
800 | or r3, ZERO, r29 | ||
801 | or r4, ZERO, r30 | ||
802 | |||
803 | /* Preserve r2 as the event code */ | ||
804 | movi evt_debug, r3 | ||
805 | ori r3, 1, r3 | ||
806 | ptabs r3, tr0 | ||
807 | |||
808 | or SP, ZERO, r6 | ||
809 | getcon TRA, r5 | ||
810 | blink tr0, LINK | ||
811 | |||
812 | or r28, ZERO, r2 | ||
813 | or r29, ZERO, r3 | ||
814 | or r30, ZERO, r4 | ||
815 | #endif | ||
816 | |||
817 | /* For syscall and debug race condition, get TRA now */ | ||
818 | getcon TRA, r5 | ||
819 | |||
820 | /* We are in a safe position to turn SR.BL off, but set IMASK=0xf | ||
821 | * Also set FD, to catch FPU usage in the kernel. | ||
822 | * | ||
823 | * benedict.gaster@superh.com 29/07/2002 | ||
824 | * | ||
825 | * On all SH5-101 revisions it is unsafe to raise the IMASK and at the | ||
826 | * same time change BL from 1->0, as any pending interrupt of a level | ||
827 | * higher than he previous value of IMASK will leak through and be | ||
828 | * taken unexpectedly. | ||
829 | * | ||
830 | * To avoid this we raise the IMASK and then issue another PUTCON to | ||
831 | * enable interrupts. | ||
832 | */ | ||
833 | getcon SR, r6 | ||
834 | movi SR_IMASK | SR_FD, r7 | ||
835 | or r6, r7, r6 | ||
836 | putcon r6, SR | ||
837 | movi SR_UNBLOCK_EXC, r7 | ||
838 | and r6, r7, r6 | ||
839 | putcon r6, SR | ||
840 | |||
841 | |||
842 | /* Now call the appropriate 3rd level handler */ | ||
843 | or r3, ZERO, LINK | ||
844 | movi trap_jtable, r3 | ||
845 | shlri r2, 3, r2 | ||
846 | ldx.l r2, r3, r3 | ||
847 | shlri r2, 2, r2 | ||
848 | ptabs r3, tr0 | ||
849 | or SP, ZERO, r3 | ||
850 | blink tr0, ZERO | ||
851 | |||
852 | /* | ||
853 | * Second level handler for VBR-based exceptions. Post-handlers. | ||
854 | * | ||
855 | * Post-handlers for interrupts (ret_from_irq), exceptions | ||
856 | * (ret_from_exception) and common reentrance doors (restore_all | ||
857 | * to get back to the original context, ret_from_syscall loop to | ||
858 | * check kernel exiting). | ||
859 | * | ||
860 | * ret_with_reschedule and work_notifysig are an inner lables of | ||
861 | * the ret_from_syscall loop. | ||
862 | * | ||
863 | * In common to all stack-frame sensitive handlers. | ||
864 | * | ||
865 | * Inputs: | ||
866 | * (SP) struct pt_regs *, original register's frame pointer (basic) | ||
867 | * | ||
868 | */ | ||
869 | .global ret_from_irq | ||
870 | ret_from_irq: | ||
871 | #ifdef CONFIG_POOR_MANS_STRACE | ||
872 | pta evt_debug_ret_from_irq, tr0 | ||
873 | ori SP, 0, r2 | ||
874 | blink tr0, LINK | ||
875 | #endif | ||
876 | ld.q SP, FRAME_S(FSSR), r6 | ||
877 | shlri r6, 30, r6 | ||
878 | andi r6, 1, r6 | ||
879 | pta resume_kernel, tr0 | ||
880 | bne r6, ZERO, tr0 /* no further checks */ | ||
881 | STI() | ||
882 | pta ret_with_reschedule, tr0 | ||
883 | blink tr0, ZERO /* Do not check softirqs */ | ||
884 | |||
885 | .global ret_from_exception | ||
886 | ret_from_exception: | ||
887 | preempt_stop() | ||
888 | |||
889 | #ifdef CONFIG_POOR_MANS_STRACE | ||
890 | pta evt_debug_ret_from_exc, tr0 | ||
891 | ori SP, 0, r2 | ||
892 | blink tr0, LINK | ||
893 | #endif | ||
894 | |||
895 | ld.q SP, FRAME_S(FSSR), r6 | ||
896 | shlri r6, 30, r6 | ||
897 | andi r6, 1, r6 | ||
898 | pta resume_kernel, tr0 | ||
899 | bne r6, ZERO, tr0 /* no further checks */ | ||
900 | |||
901 | /* Check softirqs */ | ||
902 | |||
903 | #ifdef CONFIG_PREEMPT | ||
904 | pta ret_from_syscall, tr0 | ||
905 | blink tr0, ZERO | ||
906 | |||
907 | resume_kernel: | ||
908 | pta restore_all, tr0 | ||
909 | |||
910 | getcon KCR0, r6 | ||
911 | ld.l r6, TI_PRE_COUNT, r7 | ||
912 | beq/u r7, ZERO, tr0 | ||
913 | |||
914 | need_resched: | ||
915 | ld.l r6, TI_FLAGS, r7 | ||
916 | movi (1 << TIF_NEED_RESCHED), r8 | ||
917 | and r8, r7, r8 | ||
918 | bne r8, ZERO, tr0 | ||
919 | |||
920 | getcon SR, r7 | ||
921 | andi r7, 0xf0, r7 | ||
922 | bne r7, ZERO, tr0 | ||
923 | |||
924 | movi ((PREEMPT_ACTIVE >> 16) & 65535), r8 | ||
925 | shori (PREEMPT_ACTIVE & 65535), r8 | ||
926 | st.l r6, TI_PRE_COUNT, r8 | ||
927 | |||
928 | STI() | ||
929 | movi schedule, r7 | ||
930 | ori r7, 1, r7 | ||
931 | ptabs r7, tr1 | ||
932 | blink tr1, LINK | ||
933 | |||
934 | st.l r6, TI_PRE_COUNT, ZERO | ||
935 | CLI() | ||
936 | |||
937 | pta need_resched, tr1 | ||
938 | blink tr1, ZERO | ||
939 | #endif | ||
940 | |||
941 | .global ret_from_syscall | ||
942 | ret_from_syscall: | ||
943 | |||
944 | ret_with_reschedule: | ||
945 | getcon KCR0, r6 ! r6 contains current_thread_info | ||
946 | ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags | ||
947 | |||
948 | ! FIXME:!!! | ||
949 | ! no handling of TIF_SYSCALL_TRACE yet!! | ||
950 | |||
951 | movi (1 << TIF_NEED_RESCHED), r8 | ||
952 | and r8, r7, r8 | ||
953 | pta work_resched, tr0 | ||
954 | bne r8, ZERO, tr0 | ||
955 | |||
956 | pta restore_all, tr1 | ||
957 | |||
958 | movi (1 << TIF_SIGPENDING), r8 | ||
959 | and r8, r7, r8 | ||
960 | pta work_notifysig, tr0 | ||
961 | bne r8, ZERO, tr0 | ||
962 | |||
963 | blink tr1, ZERO | ||
964 | |||
965 | work_resched: | ||
966 | pta ret_from_syscall, tr0 | ||
967 | gettr tr0, LINK | ||
968 | movi schedule, r6 | ||
969 | ptabs r6, tr0 | ||
970 | blink tr0, ZERO /* Call schedule(), return on top */ | ||
971 | |||
972 | work_notifysig: | ||
973 | gettr tr1, LINK | ||
974 | |||
975 | movi do_signal, r6 | ||
976 | ptabs r6, tr0 | ||
977 | or SP, ZERO, r2 | ||
978 | or ZERO, ZERO, r3 | ||
979 | blink tr0, LINK /* Call do_signal(regs, 0), return here */ | ||
980 | |||
981 | restore_all: | ||
982 | /* Do prefetches */ | ||
983 | |||
984 | ld.q SP, FRAME_T(0), r6 | ||
985 | ld.q SP, FRAME_T(1), r7 | ||
986 | ld.q SP, FRAME_T(2), r8 | ||
987 | ld.q SP, FRAME_T(3), r9 | ||
988 | ptabs r6, tr0 | ||
989 | ptabs r7, tr1 | ||
990 | ptabs r8, tr2 | ||
991 | ptabs r9, tr3 | ||
992 | ld.q SP, FRAME_T(4), r6 | ||
993 | ld.q SP, FRAME_T(5), r7 | ||
994 | ld.q SP, FRAME_T(6), r8 | ||
995 | ld.q SP, FRAME_T(7), r9 | ||
996 | ptabs r6, tr4 | ||
997 | ptabs r7, tr5 | ||
998 | ptabs r8, tr6 | ||
999 | ptabs r9, tr7 | ||
1000 | |||
1001 | ld.q SP, FRAME_R(0), r0 | ||
1002 | ld.q SP, FRAME_R(1), r1 | ||
1003 | ld.q SP, FRAME_R(2), r2 | ||
1004 | ld.q SP, FRAME_R(3), r3 | ||
1005 | ld.q SP, FRAME_R(4), r4 | ||
1006 | ld.q SP, FRAME_R(5), r5 | ||
1007 | ld.q SP, FRAME_R(6), r6 | ||
1008 | ld.q SP, FRAME_R(7), r7 | ||
1009 | ld.q SP, FRAME_R(8), r8 | ||
1010 | ld.q SP, FRAME_R(9), r9 | ||
1011 | ld.q SP, FRAME_R(10), r10 | ||
1012 | ld.q SP, FRAME_R(11), r11 | ||
1013 | ld.q SP, FRAME_R(12), r12 | ||
1014 | ld.q SP, FRAME_R(13), r13 | ||
1015 | ld.q SP, FRAME_R(14), r14 | ||
1016 | |||
1017 | ld.q SP, FRAME_R(16), r16 | ||
1018 | ld.q SP, FRAME_R(17), r17 | ||
1019 | ld.q SP, FRAME_R(18), r18 | ||
1020 | ld.q SP, FRAME_R(19), r19 | ||
1021 | ld.q SP, FRAME_R(20), r20 | ||
1022 | ld.q SP, FRAME_R(21), r21 | ||
1023 | ld.q SP, FRAME_R(22), r22 | ||
1024 | ld.q SP, FRAME_R(23), r23 | ||
1025 | ld.q SP, FRAME_R(24), r24 | ||
1026 | ld.q SP, FRAME_R(25), r25 | ||
1027 | ld.q SP, FRAME_R(26), r26 | ||
1028 | ld.q SP, FRAME_R(27), r27 | ||
1029 | ld.q SP, FRAME_R(28), r28 | ||
1030 | ld.q SP, FRAME_R(29), r29 | ||
1031 | ld.q SP, FRAME_R(30), r30 | ||
1032 | ld.q SP, FRAME_R(31), r31 | ||
1033 | ld.q SP, FRAME_R(32), r32 | ||
1034 | ld.q SP, FRAME_R(33), r33 | ||
1035 | ld.q SP, FRAME_R(34), r34 | ||
1036 | ld.q SP, FRAME_R(35), r35 | ||
1037 | ld.q SP, FRAME_R(36), r36 | ||
1038 | ld.q SP, FRAME_R(37), r37 | ||
1039 | ld.q SP, FRAME_R(38), r38 | ||
1040 | ld.q SP, FRAME_R(39), r39 | ||
1041 | ld.q SP, FRAME_R(40), r40 | ||
1042 | ld.q SP, FRAME_R(41), r41 | ||
1043 | ld.q SP, FRAME_R(42), r42 | ||
1044 | ld.q SP, FRAME_R(43), r43 | ||
1045 | ld.q SP, FRAME_R(44), r44 | ||
1046 | ld.q SP, FRAME_R(45), r45 | ||
1047 | ld.q SP, FRAME_R(46), r46 | ||
1048 | ld.q SP, FRAME_R(47), r47 | ||
1049 | ld.q SP, FRAME_R(48), r48 | ||
1050 | ld.q SP, FRAME_R(49), r49 | ||
1051 | ld.q SP, FRAME_R(50), r50 | ||
1052 | ld.q SP, FRAME_R(51), r51 | ||
1053 | ld.q SP, FRAME_R(52), r52 | ||
1054 | ld.q SP, FRAME_R(53), r53 | ||
1055 | ld.q SP, FRAME_R(54), r54 | ||
1056 | ld.q SP, FRAME_R(55), r55 | ||
1057 | ld.q SP, FRAME_R(56), r56 | ||
1058 | ld.q SP, FRAME_R(57), r57 | ||
1059 | ld.q SP, FRAME_R(58), r58 | ||
1060 | |||
1061 | getcon SR, r59 | ||
1062 | movi SR_BLOCK_EXC, r60 | ||
1063 | or r59, r60, r59 | ||
1064 | putcon r59, SR /* SR.BL = 1, keep nesting out */ | ||
1065 | ld.q SP, FRAME_S(FSSR), r61 | ||
1066 | ld.q SP, FRAME_S(FSPC), r62 | ||
1067 | movi SR_ASID_MASK, r60 | ||
1068 | and r59, r60, r59 | ||
1069 | andc r61, r60, r61 /* Clear out older ASID */ | ||
1070 | or r59, r61, r61 /* Retain current ASID */ | ||
1071 | putcon r61, SSR | ||
1072 | putcon r62, SPC | ||
1073 | |||
1074 | /* Ignore FSYSCALL_ID */ | ||
1075 | |||
1076 | ld.q SP, FRAME_R(59), r59 | ||
1077 | ld.q SP, FRAME_R(60), r60 | ||
1078 | ld.q SP, FRAME_R(61), r61 | ||
1079 | ld.q SP, FRAME_R(62), r62 | ||
1080 | |||
1081 | /* Last touch */ | ||
1082 | ld.q SP, FRAME_R(15), SP | ||
1083 | rte | ||
1084 | nop | ||
1085 | |||
1086 | /* | ||
1087 | * Third level handlers for VBR-based exceptions. Adapting args to | ||
1088 | * and/or deflecting to fourth level handlers. | ||
1089 | * | ||
1090 | * Fourth level handlers interface. | ||
1091 | * Most are C-coded handlers directly pointed by the trap_jtable. | ||
1092 | * (Third = Fourth level) | ||
1093 | * Inputs: | ||
1094 | * (r2) fault/interrupt code, entry number (e.g. NMI = 14, | ||
1095 | * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...) | ||
1096 | * (r3) struct pt_regs *, original register's frame pointer | ||
1097 | * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault) | ||
1098 | * (r5) TRA control register (for syscall/debug benefit only) | ||
1099 | * (LINK) return address | ||
1100 | * (SP) = r3 | ||
1101 | * | ||
1102 | * Kernel TLB fault handlers will get a slightly different interface. | ||
1103 | * (r2) struct pt_regs *, original register's frame pointer | ||
1104 | * (r3) writeaccess, whether it's a store fault as opposed to load fault | ||
1105 | * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault | ||
1106 | * (r5) Effective Address of fault | ||
1107 | * (LINK) return address | ||
1108 | * (SP) = r2 | ||
1109 | * | ||
1110 | * fpu_error_or_IRQ? is a helper to deflect to the right cause. | ||
1111 | * | ||
1112 | */ | ||
1113 | tlb_miss_load: | ||
1114 | or SP, ZERO, r2 | ||
1115 | or ZERO, ZERO, r3 /* Read */ | ||
1116 | or ZERO, ZERO, r4 /* Data */ | ||
1117 | getcon TEA, r5 | ||
1118 | pta call_do_page_fault, tr0 | ||
1119 | beq ZERO, ZERO, tr0 | ||
1120 | |||
1121 | tlb_miss_store: | ||
1122 | or SP, ZERO, r2 | ||
1123 | movi 1, r3 /* Write */ | ||
1124 | or ZERO, ZERO, r4 /* Data */ | ||
1125 | getcon TEA, r5 | ||
1126 | pta call_do_page_fault, tr0 | ||
1127 | beq ZERO, ZERO, tr0 | ||
1128 | |||
1129 | itlb_miss_or_IRQ: | ||
1130 | pta its_IRQ, tr0 | ||
1131 | beqi/u r4, EVENT_INTERRUPT, tr0 | ||
1132 | or SP, ZERO, r2 | ||
1133 | or ZERO, ZERO, r3 /* Read */ | ||
1134 | movi 1, r4 /* Text */ | ||
1135 | getcon TEA, r5 | ||
1136 | /* Fall through */ | ||
1137 | |||
1138 | call_do_page_fault: | ||
1139 | movi do_page_fault, r6 | ||
1140 | ptabs r6, tr0 | ||
1141 | blink tr0, ZERO | ||
1142 | |||
1143 | fpu_error_or_IRQA: | ||
1144 | pta its_IRQ, tr0 | ||
1145 | beqi/l r4, EVENT_INTERRUPT, tr0 | ||
1146 | #ifdef CONFIG_SH_FPU | ||
1147 | movi do_fpu_state_restore, r6 | ||
1148 | #else | ||
1149 | movi do_exception_error, r6 | ||
1150 | #endif | ||
1151 | ptabs r6, tr0 | ||
1152 | blink tr0, ZERO | ||
1153 | |||
1154 | fpu_error_or_IRQB: | ||
1155 | pta its_IRQ, tr0 | ||
1156 | beqi/l r4, EVENT_INTERRUPT, tr0 | ||
1157 | #ifdef CONFIG_SH_FPU | ||
1158 | movi do_fpu_state_restore, r6 | ||
1159 | #else | ||
1160 | movi do_exception_error, r6 | ||
1161 | #endif | ||
1162 | ptabs r6, tr0 | ||
1163 | blink tr0, ZERO | ||
1164 | |||
1165 | its_IRQ: | ||
1166 | movi do_IRQ, r6 | ||
1167 | ptabs r6, tr0 | ||
1168 | blink tr0, ZERO | ||
1169 | |||
1170 | /* | ||
1171 | * system_call/unknown_trap third level handler: | ||
1172 | * | ||
1173 | * Inputs: | ||
1174 | * (r2) fault/interrupt code, entry number (TRAP = 11) | ||
1175 | * (r3) struct pt_regs *, original register's frame pointer | ||
1176 | * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault) | ||
1177 | * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr) | ||
1178 | * (SP) = r3 | ||
1179 | * (LINK) return address: ret_from_exception | ||
1180 | * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7) | ||
1181 | * | ||
1182 | * Outputs: | ||
1183 | * (*r3) Syscall reply (Saved r2) | ||
1184 | * (LINK) In case of syscall only it can be scrapped. | ||
1185 | * Common second level post handler will be ret_from_syscall. | ||
1186 | * Common (non-trace) exit point to that is syscall_ret (saving | ||
1187 | * result to r2). Common bad exit point is syscall_bad (returning | ||
1188 | * ENOSYS then saved to r2). | ||
1189 | * | ||
1190 | */ | ||
1191 | |||
1192 | unknown_trap: | ||
1193 | /* Unknown Trap or User Trace */ | ||
1194 | movi do_unknown_trapa, r6 | ||
1195 | ptabs r6, tr0 | ||
1196 | ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */ | ||
1197 | andi r2, 0x1ff, r2 /* r2 = syscall # */ | ||
1198 | blink tr0, LINK | ||
1199 | |||
1200 | pta syscall_ret, tr0 | ||
1201 | blink tr0, ZERO | ||
1202 | |||
1203 | /* New syscall implementation*/ | ||
1204 | system_call: | ||
1205 | pta unknown_trap, tr0 | ||
1206 | or r5, ZERO, r4 /* TRA (=r5) -> r4 */ | ||
1207 | shlri r4, 20, r4 | ||
1208 | bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */ | ||
1209 | |||
1210 | /* It's a system call */ | ||
1211 | st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */ | ||
1212 | andi r5, 0x1ff, r5 /* syscall # -> r5 */ | ||
1213 | |||
1214 | STI() | ||
1215 | |||
1216 | pta syscall_allowed, tr0 | ||
1217 | movi NR_syscalls - 1, r4 /* Last valid */ | ||
1218 | bgeu/l r4, r5, tr0 | ||
1219 | |||
1220 | syscall_bad: | ||
1221 | /* Return ENOSYS ! */ | ||
1222 | movi -(ENOSYS), r2 /* Fall-through */ | ||
1223 | |||
1224 | .global syscall_ret | ||
1225 | syscall_ret: | ||
1226 | st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */ | ||
1227 | |||
1228 | #ifdef CONFIG_POOR_MANS_STRACE | ||
1229 | /* nothing useful in registers at this point */ | ||
1230 | |||
1231 | movi evt_debug2, r5 | ||
1232 | ori r5, 1, r5 | ||
1233 | ptabs r5, tr0 | ||
1234 | ld.q SP, FRAME_R(9), r2 | ||
1235 | or SP, ZERO, r3 | ||
1236 | blink tr0, LINK | ||
1237 | #endif | ||
1238 | |||
1239 | ld.q SP, FRAME_S(FSPC), r2 | ||
1240 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1241 | st.q SP, FRAME_S(FSPC), r2 | ||
1242 | pta ret_from_syscall, tr0 | ||
1243 | blink tr0, ZERO | ||
1244 | |||
1245 | |||
1246 | /* A different return path for ret_from_fork, because we now need | ||
1247 | * to call schedule_tail with the later kernels. Because prev is | ||
1248 | * loaded into r2 by switch_to() means we can just call it straight away | ||
1249 | */ | ||
1250 | |||
1251 | .global ret_from_fork | ||
1252 | ret_from_fork: | ||
1253 | |||
1254 | movi schedule_tail,r5 | ||
1255 | ori r5, 1, r5 | ||
1256 | ptabs r5, tr0 | ||
1257 | blink tr0, LINK | ||
1258 | |||
1259 | #ifdef CONFIG_POOR_MANS_STRACE | ||
1260 | /* nothing useful in registers at this point */ | ||
1261 | |||
1262 | movi evt_debug2, r5 | ||
1263 | ori r5, 1, r5 | ||
1264 | ptabs r5, tr0 | ||
1265 | ld.q SP, FRAME_R(9), r2 | ||
1266 | or SP, ZERO, r3 | ||
1267 | blink tr0, LINK | ||
1268 | #endif | ||
1269 | |||
1270 | ld.q SP, FRAME_S(FSPC), r2 | ||
1271 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1272 | st.q SP, FRAME_S(FSPC), r2 | ||
1273 | pta ret_from_syscall, tr0 | ||
1274 | blink tr0, ZERO | ||
1275 | |||
1276 | |||
1277 | |||
1278 | syscall_allowed: | ||
1279 | /* Use LINK to deflect the exit point, default is syscall_ret */ | ||
1280 | pta syscall_ret, tr0 | ||
1281 | gettr tr0, LINK | ||
1282 | pta syscall_notrace, tr0 | ||
1283 | |||
1284 | getcon KCR0, r2 | ||
1285 | ld.l r2, TI_FLAGS, r4 | ||
1286 | movi (1 << TIF_SYSCALL_TRACE), r6 | ||
1287 | and r6, r4, r6 | ||
1288 | beq/l r6, ZERO, tr0 | ||
1289 | |||
1290 | /* Trace it by calling syscall_trace before and after */ | ||
1291 | movi syscall_trace, r4 | ||
1292 | ptabs r4, tr0 | ||
1293 | blink tr0, LINK | ||
1294 | /* Reload syscall number as r5 is trashed by syscall_trace */ | ||
1295 | ld.q SP, FRAME_S(FSYSCALL_ID), r5 | ||
1296 | andi r5, 0x1ff, r5 | ||
1297 | |||
1298 | pta syscall_ret_trace, tr0 | ||
1299 | gettr tr0, LINK | ||
1300 | |||
1301 | syscall_notrace: | ||
1302 | /* Now point to the appropriate 4th level syscall handler */ | ||
1303 | movi sys_call_table, r4 | ||
1304 | shlli r5, 2, r5 | ||
1305 | ldx.l r4, r5, r5 | ||
1306 | ptabs r5, tr0 | ||
1307 | |||
1308 | /* Prepare original args */ | ||
1309 | ld.q SP, FRAME_R(2), r2 | ||
1310 | ld.q SP, FRAME_R(3), r3 | ||
1311 | ld.q SP, FRAME_R(4), r4 | ||
1312 | ld.q SP, FRAME_R(5), r5 | ||
1313 | ld.q SP, FRAME_R(6), r6 | ||
1314 | ld.q SP, FRAME_R(7), r7 | ||
1315 | |||
1316 | /* And now the trick for those syscalls requiring regs * ! */ | ||
1317 | or SP, ZERO, r8 | ||
1318 | |||
1319 | /* Call it */ | ||
1320 | blink tr0, ZERO /* LINK is already properly set */ | ||
1321 | |||
1322 | syscall_ret_trace: | ||
1323 | /* We get back here only if under trace */ | ||
1324 | st.q SP, FRAME_R(9), r2 /* Save return value */ | ||
1325 | |||
1326 | movi syscall_trace, LINK | ||
1327 | ptabs LINK, tr0 | ||
1328 | blink tr0, LINK | ||
1329 | |||
1330 | /* This needs to be done after any syscall tracing */ | ||
1331 | ld.q SP, FRAME_S(FSPC), r2 | ||
1332 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1333 | st.q SP, FRAME_S(FSPC), r2 | ||
1334 | |||
1335 | pta ret_from_syscall, tr0 | ||
1336 | blink tr0, ZERO /* Resume normal return sequence */ | ||
1337 | |||
1338 | /* | ||
1339 | * --- Switch to running under a particular ASID and return the previous ASID value | ||
1340 | * --- The caller is assumed to have done a cli before calling this. | ||
1341 | * | ||
1342 | * Input r2 : new ASID | ||
1343 | * Output r2 : old ASID | ||
1344 | */ | ||
1345 | |||
1346 | .global switch_and_save_asid | ||
1347 | switch_and_save_asid: | ||
1348 | getcon sr, r0 | ||
1349 | movi 255, r4 | ||
1350 | shlli r4, 16, r4 /* r4 = mask to select ASID */ | ||
1351 | and r0, r4, r3 /* r3 = shifted old ASID */ | ||
1352 | andi r2, 255, r2 /* mask down new ASID */ | ||
1353 | shlli r2, 16, r2 /* align new ASID against SR.ASID */ | ||
1354 | andc r0, r4, r0 /* efface old ASID from SR */ | ||
1355 | or r0, r2, r0 /* insert the new ASID */ | ||
1356 | putcon r0, ssr | ||
1357 | movi 1f, r0 | ||
1358 | putcon r0, spc | ||
1359 | rte | ||
1360 | nop | ||
1361 | 1: | ||
1362 | ptabs LINK, tr0 | ||
1363 | shlri r3, 16, r2 /* r2 = old ASID */ | ||
1364 | blink tr0, r63 | ||
1365 | |||
1366 | .global route_to_panic_handler | ||
1367 | route_to_panic_handler: | ||
1368 | /* Switch to real mode, goto panic_handler, don't return. Useful for | ||
1369 | last-chance debugging, e.g. if no output wants to go to the console. | ||
1370 | */ | ||
1371 | |||
1372 | movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1 | ||
1373 | ptabs r1, tr0 | ||
1374 | pta 1f, tr1 | ||
1375 | gettr tr1, r0 | ||
1376 | putcon r0, spc | ||
1377 | getcon sr, r0 | ||
1378 | movi 1, r1 | ||
1379 | shlli r1, 31, r1 | ||
1380 | andc r0, r1, r0 | ||
1381 | putcon r0, ssr | ||
1382 | rte | ||
1383 | nop | ||
1384 | 1: /* Now in real mode */ | ||
1385 | blink tr0, r63 | ||
1386 | nop | ||
1387 | |||
1388 | .global peek_real_address_q | ||
1389 | peek_real_address_q: | ||
1390 | /* Two args: | ||
1391 | r2 : real mode address to peek | ||
1392 | r2(out) : result quadword | ||
1393 | |||
1394 | This is provided as a cheapskate way of manipulating device | ||
1395 | registers for debugging (to avoid the need to onchip_remap the debug | ||
1396 | module, and to avoid the need to onchip_remap the watchpoint | ||
1397 | controller in a way that identity maps sufficient bits to avoid the | ||
1398 | SH5-101 cut2 silicon defect). | ||
1399 | |||
1400 | This code is not performance critical | ||
1401 | */ | ||
1402 | |||
1403 | add.l r2, r63, r2 /* sign extend address */ | ||
1404 | getcon sr, r0 /* r0 = saved original SR */ | ||
1405 | movi 1, r1 | ||
1406 | shlli r1, 28, r1 | ||
1407 | or r0, r1, r1 /* r0 with block bit set */ | ||
1408 | putcon r1, sr /* now in critical section */ | ||
1409 | movi 1, r36 | ||
1410 | shlli r36, 31, r36 | ||
1411 | andc r1, r36, r1 /* turn sr.mmu off in real mode section */ | ||
1412 | |||
1413 | putcon r1, ssr | ||
1414 | movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */ | ||
1415 | movi 1f, r37 /* virtual mode return addr */ | ||
1416 | putcon r36, spc | ||
1417 | |||
1418 | synco | ||
1419 | rte | ||
1420 | nop | ||
1421 | |||
1422 | .peek0: /* come here in real mode, don't touch caches!! | ||
1423 | still in critical section (sr.bl==1) */ | ||
1424 | putcon r0, ssr | ||
1425 | putcon r37, spc | ||
1426 | /* Here's the actual peek. If the address is bad, all bets are now off | ||
1427 | * what will happen (handlers invoked in real-mode = bad news) */ | ||
1428 | ld.q r2, 0, r2 | ||
1429 | synco | ||
1430 | rte /* Back to virtual mode */ | ||
1431 | nop | ||
1432 | |||
1433 | 1: | ||
1434 | ptabs LINK, tr0 | ||
1435 | blink tr0, r63 | ||
1436 | |||
	.global	poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   Writes a quadword to a physical (real-mode) address.  Mechanism:
	   block exceptions (SR.BL=1), clear SR.MMU in the saved status word,
	   then RTE into the identity-mapped copy of .poke0 to do the store
	   with the MMU off, and RTE back to virtual mode at label 1:.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* bit 28 = SR.BL (exception block) */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* bit 31 = SR.MMU */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.poke0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* original SR: restored by the RTE below */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return to caller */
1485 | |||
1486 | /* | ||
1487 | * --- User Access Handling Section | ||
1488 | */ | ||
1489 | |||
1490 | /* | ||
1491 | * User Access support. It all moved to non inlined Assembler | ||
1492 | * functions in here. | ||
1493 | * | ||
1494 | * __kernel_size_t __copy_user(void *__to, const void *__from, | ||
1495 | * __kernel_size_t __n) | ||
1496 | * | ||
1497 | * Inputs: | ||
1498 | * (r2) target address | ||
1499 | * (r3) source address | ||
1500 | * (r4) size in bytes | ||
1501 | * | ||
1502 | * Outputs: | ||
1503 | * (*r2) target data | ||
1504 | * (r2) non-copied bytes | ||
1505 | * | ||
1506 | * If a fault occurs on the user pointer, bail out early and return the | ||
1507 | * number of bytes not copied in r2. | ||
1508 | * Strategy : for large blocks, call a real memcpy function which can | ||
1509 | * move >1 byte at a time using unaligned ld/st instructions, and can | ||
1510 | * manipulate the cache using prefetch + alloco to improve the speed | ||
1511 | * further. If a fault occurs in that function, just revert to the | ||
1512 | * byte-by-byte approach used for small blocks; this is rare so the | ||
1513 | * performance hit for that case does not matter. | ||
1514 | * | ||
1515 | * For small blocks it's not worth the overhead of setting up and calling | ||
1516 | * the memcpy routine; do the copy a byte at a time. | ||
1517 | * | ||
1518 | */ | ||
	.global	__copy_user
__copy_user:
	/* __kernel_size_t __copy_user(void *to /*r2*/, const void *from /*r3*/,
	   __kernel_size_t n /*r4*/)
	   Returns in r2 the number of bytes NOT copied (0 on success).
	   Blocks of >= 16 bytes go through copy_user_memcpy; if that faults,
	   the exception table sends us to __copy_user_fixup, which restores
	   the saved arguments and retries with the byte-by-byte loop whose
	   faulting instructions (___copy_user1/2) are covered by __ex_table. */
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global	__copy_user_fixup
__copy_user_fixup:
	/* Entered via __ex_table when copy_user_memcpy faults: rebuild the
	   original register state and fall into the byte-by-byte copy. */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP, 8, r3
	ld.q	SP, 0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

	/* The slow byte-by-byte method is used if the fast copy traps due to a bad
	   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0	/* r0 = dest - src - 1, so that stx.b below */
	addi	r0, -1, r0	/* addresses dest even after src is bumped */

___copy_user1:
	ld.b	r3, 0, r5	/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4	/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5	/* Fault address 2 */
	bne	r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2	/* result: bytes remaining (0 = full copy) */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1580 | |||
1581 | /* | ||
1582 | * __kernel_size_t __clear_user(void *addr, __kernel_size_t size) | ||
1583 | * | ||
1584 | * Inputs: | ||
1585 | * (r2) target address | ||
1586 | * (r3) size in bytes | ||
1587 | * | ||
1588 | * Outputs: | ||
1589 | * (*r2) zero-ed target data | ||
1590 | * (r2) non-zero-ed bytes | ||
1591 | */ | ||
	.global	__clear_user
__clear_user:
	/* __kernel_size_t __clear_user(void *addr /*r2*/, __kernel_size_t size /*r3*/)
	   Zeroes 'size' bytes at user address r2 one byte at a time.
	   Returns in r2 the number of bytes NOT cleared (0 on success);
	   a fault on the store exits early through the __ex_table entry. */
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1	/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO	/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3	/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2	/* result: bytes left un-zeroed */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1608 | |||
1609 | |||
1610 | /* | ||
1611 | * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | ||
1612 | * int __count) | ||
1613 | * | ||
1614 | * Inputs: | ||
1615 | * (r2) target address | ||
1616 | * (r3) source address | ||
1617 | * (r4) maximum size in bytes | ||
1618 | * | ||
1619 | * Outputs: | ||
1620 | * (*r2) copied data | ||
1621 | * (r2) -EFAULT (in case of faulting) | ||
1622 | * copied data (otherwise) | ||
1623 | */ | ||
	.global	__strncpy_from_user
__strncpy_from_user:
	/* int __strncpy_from_user(dest /*r2*/, src /*r3*/, count /*r4*/)
	   Copies up to 'count' bytes from user space, stopping after a '\0'
	   has been copied.  Returns in r2 the number of bytes copied, or
	   -EFAULT if the user-space load faults (the __ex_table entry for
	   ___strncpy_from_user1 jumps to the exit with r6 still -EFAULT). */
	pta	___strncpy_from_user1, tr0
	pta	___strncpy_from_user_done, tr1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, tr1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7		/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, tr1		/* stop once the NUL has been copied */
	addi	r4, -1, r4		/* return real number of copied bytes */
	bne/l	ZERO, r4, tr0

___strncpy_from_user_done:
	sub	r5, r4, r6		/* If done, return copied = count - left */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1649 | |||
1650 | /* | ||
1651 | * extern long __strnlen_user(const char *__s, long __n) | ||
1652 | * | ||
1653 | * Inputs: | ||
1654 | * (r2) source address | ||
1655 | * (r3) source size in bytes | ||
1656 | * | ||
1657 | * Outputs: | ||
1658 | * (r2) -EFAULT (in case of faulting) | ||
1659 | * string length (otherwise) | ||
1660 | */ | ||
	.global	__strnlen_user
__strnlen_user:
	/* long __strnlen_user(const char *s /*r2*/, long n /*r3*/)
	   Returns in r2 the string length — counting the terminating '\0'
	   when one is found within n bytes (r5 is bumped before the NUL
	   test below) — or -EFAULT if reading user space faults. */
	pta	___strnlen_user_set_reply, tr0
	pta	___strnlen_user1, tr1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, tr0		/* n == 0: reply stays -EFAULT */

___strnlen_user1:
	ldx.b	r2, r5, r7		/* Fault address: only in reading */
	addi	r3, -1, r3		/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, tr0		/* budget exhausted */
	bne	r7, ZERO, tr1		/* not NUL yet: keep scanning */
	! The line below used to be active.  It led to a junk byte lying between
	! each pair of entries in the argv & envp structures in memory.  Whilst
	! the program saw the right data via the argv and envp arguments to
	! main, the 'flat' representation visible through /proc/$pid/cmdline
	! was corrupt, causing trouble with ps, for example.
	! addi	r5, 1, r5		/* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6	/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1689 | |||
1690 | /* | ||
1691 | * extern long __get_user_asm_?(void *val, long addr) | ||
1692 | * | ||
1693 | * Inputs: | ||
1694 | * (r2) dest address | ||
1695 | * (r3) source address (in User Space) | ||
1696 | * | ||
1697 | * Outputs: | ||
1698 | * (r2) -EFAULT (faulting) | ||
1699 | * 0 (not faulting) | ||
1700 | */ | ||
	.global	__get_user_asm_b
__get_user_asm_b:
	/* long __get_user_asm_b(void *val /*r2*/, long addr /*r3*/)
	   Load one byte from user address r3, store it at kernel address r2.
	   Returns 0 in r2, or -EFAULT if the user load faults (the
	   __ex_table entry skips straight to the exit, leaving -EFAULT). */
	or	r2, ZERO, r4	/* r4 = dest; free r2 for the reply */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5	/* r5 = data; faulting instruction */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1714 | |||
1715 | |||
	.global	__get_user_asm_w
__get_user_asm_w:
	/* long __get_user_asm_w(void *val /*r2*/, long addr /*r3*/)
	   16-bit variant of __get_user_asm_b: load a word from user space,
	   store it at the kernel address.  r2 = 0 on success, -EFAULT on
	   fault (via __ex_table). */
	or	r2, ZERO, r4	/* r4 = dest; free r2 for the reply */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5	/* r5 = data; faulting instruction */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1729 | |||
1730 | |||
	.global	__get_user_asm_l
__get_user_asm_l:
	/* long __get_user_asm_l(void *val /*r2*/, long addr /*r3*/)
	   32-bit variant of __get_user_asm_b: load a longword from user
	   space, store it at the kernel address.  r2 = 0 on success,
	   -EFAULT on fault (via __ex_table). */
	or	r2, ZERO, r4	/* r4 = dest; free r2 for the reply */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5	/* r5 = data; faulting instruction */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1744 | |||
1745 | |||
	.global	__get_user_asm_q
__get_user_asm_q:
	/* long __get_user_asm_q(void *val /*r2*/, long addr /*r3*/)
	   64-bit variant of __get_user_asm_b: load a quadword from user
	   space, store it at the kernel address.  r2 = 0 on success,
	   -EFAULT on fault (via __ex_table). */
	or	r2, ZERO, r4	/* r4 = dest; free r2 for the reply */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5	/* r5 = data; faulting instruction */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1759 | |||
1760 | /* | ||
1761 | * extern long __put_user_asm_?(void *pval, long addr) | ||
1762 | * | ||
1763 | * Inputs: | ||
1764 | * (r2) kernel pointer to value | ||
1765 | * (r3) dest address (in User Space) | ||
1766 | * | ||
1767 | * Outputs: | ||
1768 | * (r2) -EFAULT (faulting) | ||
1769 | * 0 (not faulting) | ||
1770 | */ | ||
	.global	__put_user_asm_b
__put_user_asm_b:
	/* long __put_user_asm_b(void *pval /*r2*/, long addr /*r3*/)
	   Read one byte from kernel address r2 and store it at user address
	   r3.  Returns 0 in r2, or -EFAULT if the user store faults (the
	   __ex_table entry skips to the exit with the reply still -EFAULT). */
	ld.b	r2, 0, r4	/* r4 = data (kernel side; cannot fault here) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4	/* faulting instruction */
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1783 | |||
1784 | |||
	.global	__put_user_asm_w
__put_user_asm_w:
	/* long __put_user_asm_w(void *pval /*r2*/, long addr /*r3*/)
	   16-bit variant of __put_user_asm_b: copy a word from kernel to
	   user space.  r2 = 0 on success, -EFAULT on fault (via __ex_table). */
	ld.w	r2, 0, r4	/* r4 = data (kernel side; cannot fault here) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4	/* faulting instruction */
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1797 | |||
1798 | |||
	.global	__put_user_asm_l
__put_user_asm_l:
	/* long __put_user_asm_l(void *pval /*r2*/, long addr /*r3*/)
	   32-bit variant of __put_user_asm_b: copy a longword from kernel to
	   user space.  r2 = 0 on success, -EFAULT on fault (via __ex_table). */
	ld.l	r2, 0, r4	/* r4 = data (kernel side; cannot fault here) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4	/* faulting instruction */
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1811 | |||
1812 | |||
	.global	__put_user_asm_q
__put_user_asm_q:
	/* long __put_user_asm_q(void *pval /*r2*/, long addr /*r3*/)
	   64-bit variant of __put_user_asm_b: copy a quadword from kernel to
	   user space.  r2 = 0 on success, -EFAULT on fault (via __ex_table). */
	ld.q	r2, 0, r4	/* r4 = data (kernel side; cannot fault here) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4	/* faulting instruction */
	or	ZERO, ZERO, r2	/* success: clear the -EFAULT reply */

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1825 | |||
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0 is at resvec_saved_area + 0
	   former r1 is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* Dump layout: 64 GPR slots of 8 bytes each at +0x000..+0x1f8,
	   then tr0..tr7 at +0x200..+0x238, then control registers at
	   +0x240 onwards. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0 (stashed by the entry code)
	st.q	r0, 0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0, 0x008, r1
	st.q	r0, 0x010, r2
	st.q	r0, 0x018, r3
	st.q	r0, 0x020, r4
	st.q	r0, 0x028, r5
	st.q	r0, 0x030, r6
	st.q	r0, 0x038, r7
	st.q	r0, 0x040, r8
	st.q	r0, 0x048, r9
	st.q	r0, 0x050, r10
	st.q	r0, 0x058, r11
	st.q	r0, 0x060, r12
	st.q	r0, 0x068, r13
	st.q	r0, 0x070, r14	! r14's real value saved before it is clobbered
	getcon	dcr, r14	! DCR holds the former r15 (SP)
	st.q	r0, 0x078, r14	! so r15's slot gets the former SP
	st.q	r0, 0x080, r16
	st.q	r0, 0x088, r17
	st.q	r0, 0x090, r18
	st.q	r0, 0x098, r19
	st.q	r0, 0x0a0, r20
	st.q	r0, 0x0a8, r21
	st.q	r0, 0x0b0, r22
	st.q	r0, 0x0b8, r23
	st.q	r0, 0x0c0, r24
	st.q	r0, 0x0c8, r25
	st.q	r0, 0x0d0, r26
	st.q	r0, 0x0d8, r27
	st.q	r0, 0x0e0, r28
	st.q	r0, 0x0e8, r29
	st.q	r0, 0x0f0, r30
	st.q	r0, 0x0f8, r31
	st.q	r0, 0x100, r32
	st.q	r0, 0x108, r33
	st.q	r0, 0x110, r34
	st.q	r0, 0x118, r35
	st.q	r0, 0x120, r36
	st.q	r0, 0x128, r37
	st.q	r0, 0x130, r38
	st.q	r0, 0x138, r39
	st.q	r0, 0x140, r40
	st.q	r0, 0x148, r41
	st.q	r0, 0x150, r42
	st.q	r0, 0x158, r43
	st.q	r0, 0x160, r44
	st.q	r0, 0x168, r45
	st.q	r0, 0x170, r46
	st.q	r0, 0x178, r47
	st.q	r0, 0x180, r48
	st.q	r0, 0x188, r49
	st.q	r0, 0x190, r50
	st.q	r0, 0x198, r51
	st.q	r0, 0x1a0, r52
	st.q	r0, 0x1a8, r53
	st.q	r0, 0x1b0, r54
	st.q	r0, 0x1b8, r55
	st.q	r0, 0x1c0, r56
	st.q	r0, 0x1c8, r57
	st.q	r0, 0x1d0, r58
	st.q	r0, 0x1d8, r59
	st.q	r0, 0x1e0, r60
	st.q	r0, 0x1e8, r61
	st.q	r0, 0x1f0, r62
	st.q	r0, 0x1f8, r63	! bogus, but for consistency's sake...

	/* Target registers tr0..tr7; tr0 was stashed on entry. */
	ld.q	SP, 0x020, r1	! former tr0
	st.q	r0, 0x200, r1
	gettr	tr1, r1
	st.q	r0, 0x208, r1
	gettr	tr2, r1
	st.q	r0, 0x210, r1
	gettr	tr3, r1
	st.q	r0, 0x218, r1
	gettr	tr4, r1
	st.q	r0, 0x220, r1
	gettr	tr5, r1
	st.q	r0, 0x228, r1
	gettr	tr6, r1
	st.q	r0, 0x230, r1
	gettr	tr7, r1
	st.q	r0, 0x238, r1

	/* Control registers, read in one batch then stored. */
	getcon	sr, r1
	getcon	ssr, r2
	getcon	pssr, r3
	getcon	spc, r4
	getcon	pspc, r5
	getcon	intevt, r6
	getcon	expevt, r7
	getcon	pexpevt, r8
	getcon	tra, r9
	getcon	tea, r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr, r13
	getcon	resvec, r14

	st.q	r0, 0x240, r1
	st.q	r0, 0x248, r2
	st.q	r0, 0x250, r3
	st.q	r0, 0x258, r4
	st.q	r0, 0x260, r5
	st.q	r0, 0x268, r6
	st.q	r0, 0x270, r7
	st.q	r0, 0x278, r8
	st.q	r0, 0x280, r9
	st.q	r0, 0x288, r10
	st.q	r0, 0x290, r11
	st.q	r0, 0x298, r12
	st.q	r0, 0x2a0, r13
	st.q	r0, 0x2a8, r14

	/* Hand off to the C panic handler with SPC/SSR/EXPEVT as args. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
	ori	r1, 1, r1	/* LSB set in the ptabs target */
	ptabs	r1, tr0
	getcon	DCR, SP	/* recover the former SP for the C call */
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
1969 | |||
1970 | |||
1971 | |||
1972 | |||
1973 | /* | ||
1974 | * --- Signal Handling Section | ||
1975 | */ | ||
1976 | |||
1977 | /* | ||
1978 | * extern long long _sa_default_rt_restorer | ||
1979 | * extern long long _sa_default_restorer | ||
1980 | * | ||
1981 | * or, better, | ||
1982 | * | ||
1983 | * extern void _sa_default_rt_restorer(void) | ||
1984 | * extern void _sa_default_restorer(void) | ||
1985 | * | ||
1986 | * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn() | ||
1987 | * from user space. Copied into user space by signal management. | ||
1988 | * Both must be quad aligned and 2 quad long (4 instructions). | ||
1989 | * | ||
1990 | */ | ||
	.balign	8
	.global	sa_default_rt_restorer
sa_default_rt_restorer:
	/* User-space trampoline issuing sys_rt_sigreturn via trapa.
	   Copied into user space by signal management; must stay quad
	   aligned and exactly 2 quads (4 instructions) long. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
1998 | |||
	.balign	8
	.global	sa_default_restorer
sa_default_restorer:
	/* User-space trampoline issuing sys_sigreturn via trapa.
	   Copied into user space by signal management; must stay quad
	   aligned and exactly 2 quads (4 instructions) long. */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
2006 | |||
2007 | /* | ||
2008 | * --- __ex_table Section | ||
2009 | */ | ||
2010 | |||
2011 | /* | ||
2012 | * User Access Exception Table. | ||
2013 | */ | ||
	.section	__ex_table, "a"

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

	/* Each entry is (faulting address, fixup address): a page fault at
	   the first address resumes execution at the second, which returns
	   the error indication already staged in a register. */
	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
	.long	___strncpy_from_user1, ___strncpy_from_user_exit
	.long	___strnlen_user1, ___strnlen_user_exit
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
2035 | |||
2036 | |||
2037 | |||
2038 | |||
2039 | /* | ||
2040 | * --- .text.init Section | ||
2041 | */ | ||
2042 | |||
2043 | .section .text.init, "ax" | ||
2044 | |||
2045 | /* | ||
2046 | * void trap_init (void) | ||
2047 | * | ||
2048 | */ | ||
	.global	trap_init
trap_init:
	/* void trap_init(void)
	   Install the exception vectors: VBR gets the cached (virtual)
	   LVBR_block, RESVEC gets the physical address of LRESVEC_block
	   with the MMUOFF bit set.  Then sanity-check that the vector block
	   is exactly BLOCK_SIZE (loop forever if not) and unblock
	   exceptions by clearing SR.BL. */
	addi	SP, -24, SP	/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19	/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
	andi	r20, -4, r20	/* reset reserved */
	ori	r20, 1, r20	/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29	/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19	/* r19 = start + expected size */

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28	/* r28 = trap_init_loop */
	sub	r21, r30, r30	/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed.  Very bad.  Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1	/* size mismatch: spin here forever */

	/* Now that exception vectors are set up reset SR.BL */
	getcon	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
2103 | |||
diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c new file mode 100644 index 000000000000..8ad4ed6a6c9b --- /dev/null +++ b/arch/sh64/kernel/fpu.c | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/fpu.c | ||
7 | * | ||
8 | * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli | ||
9 | * Copyright (C) 2002 STMicroelectronics Limited | ||
10 | * Author : Stuart Menefy | ||
11 | * | ||
12 | * Started from SH4 version: | ||
13 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/signal.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/user.h> | ||
21 | #include <asm/io.h> | ||
22 | |||
23 | /* | ||
24 | * Initially load the FPU with signalling NANS. This bit pattern | ||
25 | * has the property that no matter whether considered as single or as | ||
26 | * double precision, it still represents a signalling NAN. | ||
27 | */ | ||
28 | #define sNAN64 0xFFFFFFFFFFFFFFFFULL | ||
29 | #define sNAN32 0xFFFFFFFFUL | ||
30 | |||
/* Template FPU context handed to first-time FPU users: every register
 * filled with the NaN pattern above, FPSCR at its initial value. */
static union sh_fpu_union init_fpuregs = {
	.hard = {
		.fp_regs = { [0 ... 63] = sNAN32 },
		.fpscr = FPSCR_INIT
	}
};
37 | |||
/*
 * Save the entire FPU register file (the 32 double-pairs fp0..fp62)
 * plus FPSCR into *fpregs.  fr63 is used as scratch to read FPSCR
 * (fgetscr); that is safe because fr63's own value has already been
 * captured by the fp62 pair store above it.
 */
inline void fpsave(struct sh_fpu_hard_struct *fpregs)
{
	asm volatile("fst.p     %0, (0*8), fp0\n\t"
		     "fst.p     %0, (1*8), fp2\n\t"
		     "fst.p     %0, (2*8), fp4\n\t"
		     "fst.p     %0, (3*8), fp6\n\t"
		     "fst.p     %0, (4*8), fp8\n\t"
		     "fst.p     %0, (5*8), fp10\n\t"
		     "fst.p     %0, (6*8), fp12\n\t"
		     "fst.p     %0, (7*8), fp14\n\t"
		     "fst.p     %0, (8*8), fp16\n\t"
		     "fst.p     %0, (9*8), fp18\n\t"
		     "fst.p     %0, (10*8), fp20\n\t"
		     "fst.p     %0, (11*8), fp22\n\t"
		     "fst.p     %0, (12*8), fp24\n\t"
		     "fst.p     %0, (13*8), fp26\n\t"
		     "fst.p     %0, (14*8), fp28\n\t"
		     "fst.p     %0, (15*8), fp30\n\t"
		     "fst.p     %0, (16*8), fp32\n\t"
		     "fst.p     %0, (17*8), fp34\n\t"
		     "fst.p     %0, (18*8), fp36\n\t"
		     "fst.p     %0, (19*8), fp38\n\t"
		     "fst.p     %0, (20*8), fp40\n\t"
		     "fst.p     %0, (21*8), fp42\n\t"
		     "fst.p     %0, (22*8), fp44\n\t"
		     "fst.p     %0, (23*8), fp46\n\t"
		     "fst.p     %0, (24*8), fp48\n\t"
		     "fst.p     %0, (25*8), fp50\n\t"
		     "fst.p     %0, (26*8), fp52\n\t"
		     "fst.p     %0, (27*8), fp54\n\t"
		     "fst.p     %0, (28*8), fp56\n\t"
		     "fst.p     %0, (29*8), fp58\n\t"
		     "fst.p     %0, (30*8), fp60\n\t"
		     "fst.p     %0, (31*8), fp62\n\t"

		     /* FPSCR last, via fr63 as scratch (already saved above). */
		     "fgetscr   fr63\n\t"
		     "fst.s     %0, (32*8), fr63\n\t"
		     : /* no output */
		     : "r" (fpregs)
		     : "memory");
}
79 | |||
80 | |||
/*
 * Reload the entire FPU register file from *fpregs.  FPSCR is restored
 * through fr63 (fld.s + fputscr); the fp62 pair (which contains fr63)
 * is deliberately loaded last so the scratch use of fr63 does not
 * corrupt the restored state.
 */
static inline void
fpload(struct sh_fpu_hard_struct *fpregs)
{
	asm volatile("fld.p     %0, (0*8), fp0\n\t"
		     "fld.p     %0, (1*8), fp2\n\t"
		     "fld.p     %0, (2*8), fp4\n\t"
		     "fld.p     %0, (3*8), fp6\n\t"
		     "fld.p     %0, (4*8), fp8\n\t"
		     "fld.p     %0, (5*8), fp10\n\t"
		     "fld.p     %0, (6*8), fp12\n\t"
		     "fld.p     %0, (7*8), fp14\n\t"
		     "fld.p     %0, (8*8), fp16\n\t"
		     "fld.p     %0, (9*8), fp18\n\t"
		     "fld.p     %0, (10*8), fp20\n\t"
		     "fld.p     %0, (11*8), fp22\n\t"
		     "fld.p     %0, (12*8), fp24\n\t"
		     "fld.p     %0, (13*8), fp26\n\t"
		     "fld.p     %0, (14*8), fp28\n\t"
		     "fld.p     %0, (15*8), fp30\n\t"
		     "fld.p     %0, (16*8), fp32\n\t"
		     "fld.p     %0, (17*8), fp34\n\t"
		     "fld.p     %0, (18*8), fp36\n\t"
		     "fld.p     %0, (19*8), fp38\n\t"
		     "fld.p     %0, (20*8), fp40\n\t"
		     "fld.p     %0, (21*8), fp42\n\t"
		     "fld.p     %0, (22*8), fp44\n\t"
		     "fld.p     %0, (23*8), fp46\n\t"
		     "fld.p     %0, (24*8), fp48\n\t"
		     "fld.p     %0, (25*8), fp50\n\t"
		     "fld.p     %0, (26*8), fp52\n\t"
		     "fld.p     %0, (27*8), fp54\n\t"
		     "fld.p     %0, (28*8), fp56\n\t"
		     "fld.p     %0, (29*8), fp58\n\t"
		     "fld.p     %0, (30*8), fp60\n\t"

		     /* FPSCR via fr63 as scratch... */
		     "fld.s     %0, (32*8), fr63\n\t"
		     "fputscr   fr63\n\t"

		     /* ...then reload the fp62 pair to fix up fr63. */
		     "fld.p     %0, (31*8), fp62\n\t"
		     : /* no output */
		     : "r" (fpregs) );
}
123 | |||
/* Reset an FPU context to the pristine template (NaN-filled registers,
 * initial FPSCR). */
void fpinit(struct sh_fpu_hard_struct *fpregs)
{
	*fpregs = init_fpuregs.hard;
}
128 | |||
/*
 * FPU exception handler: skip past the faulting (4-byte) FPU
 * instruction so it is not re-executed, record the trap number, and
 * deliver SIGFPE to the current task.
 */
asmlinkage void
do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	regs->pc += 4;	/* step over the offending instruction */

	tsk->thread.trap_no = 11;
	tsk->thread.error_code = 0;
	force_sig(SIGFPE, tsk);
}
140 | |||
141 | |||
/*
 * Lazy FPU context switch, invoked on the "FPU disabled" trap.
 * Re-enables the FPU for the faulting user context, saves the previous
 * owner's registers if another task holds them, and loads either the
 * current task's saved state or the pristine init_fpuregs template for
 * a first-time FPU user.
 */
asmlinkage void
do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
{
	void die(const char *str, struct pt_regs *regs, long err);

	/* The kernel itself must never trip the FPU-disabled trap. */
	if (! user_mode(regs))
		die("FPU used in kernel", regs, ex);

	regs->sr &= ~SR_FD;	/* clear SR.FD: FPU enabled on return */

	/* Fast path: this task already owns the FPU register contents. */
	if (last_task_used_math == current)
		return;

	grab_fpu();
	if (last_task_used_math != NULL) {
		/* Other processes fpu state, save away */
		fpsave(&last_task_used_math->thread.fpu.hard);
	}
	last_task_used_math = current;
	if (used_math()) {
		fpload(&current->thread.fpu.hard);
	} else {
		/* First time FPU user. */
		fpload(&init_fpuregs.hard);
		set_used_math();
	}
	release_fpu();
}
170 | |||
diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S new file mode 100644 index 000000000000..cc0b628a9ea7 --- /dev/null +++ b/arch/sh64/kernel/head.S | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/head.S | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * | ||
11 | * | ||
12 | * benedict.gaster@superh.com: 2nd May 2002 | ||
13 | * Moved definition of empty_zero_page to its own section allowing | ||
14 | * it to be placed at an absolute address known at load time. | ||
15 | * | ||
16 | * lethal@linux-sh.org: 9th May 2003 | ||
17 | * Kill off GLOBAL_NAME() usage. | ||
18 | * | ||
19 | * lethal@linux-sh.org: 8th May 2004 | ||
20 | * Add early SCIF console DTLB mapping. | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | |||
25 | #include <asm/page.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | #include <asm/cache.h> | ||
28 | #include <asm/tlb.h> | ||
29 | #include <asm/processor.h> | ||
30 | #include <asm/registers.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | |||
/*
 * MMU defines: TLB boundaries.
 */

#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP	TLB_STEP

/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
#endif

/*
 * MMU defines: Fixed TLBs.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK		(0xffffffffe0000000)
#define ALIGNED_EFFECTIVE	((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL	(CONFIG_MEMORY_START & ALIGN_512M_MASK)

#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

/* NOTE(review): unlike MMUIR_TEXT_* above, the two expansions below are
   not parenthesized.  Harmless for the movi uses later in this file,
   but inconsistent and fragile if reused in an expression. */
#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */

#ifdef CONFIG_ICACHE_DISABLED
#define ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
#endif
#define ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */

#if defined (CONFIG_DCACHE_DISABLED)
#define OCCR0_INIT_VAL	OCCR0_OFF			/* D-cache: off */
#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	/* D-cache: on, */
							/* WT, invalidate */
#elif defined (CONFIG_DCACHE_WRITE_BACK)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* D-cache: on, */
							/* WB, invalidate */
#else
#error preprocessor flag CONFIG_DCACHE_... not recognized!
#endif

#define OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */
90 | |||
	/*
	 * empty_zero_page doubles as the boot parameter page: its first
	 * words are pre-seeded with boot settings (see field comments),
	 * placed in a dedicated section so the loader can find it at a
	 * fixed address.
	 */
	.section .empty_zero_page, "aw"
	.global empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
	.long	0

	.text
	.balign 4096,0,4096

	.section .data, "aw"
	.balign PAGE_SIZE

	/* NOTE(review): the two directives below duplicate the two just
	   above.  Re-entering .data and re-aligning is a no-op, so this
	   is harmless, but the duplicate pair is a candidate for removal. */
	.section .data, "aw"
	.balign PAGE_SIZE

	/* Page-sized, zero-filled kernel objects referenced from C code. */
	.global swapper_pg_dir
swapper_pg_dir:
	.space PAGE_SIZE, 0

	.global empty_bad_page
empty_bad_page:
	.space PAGE_SIZE, 0

	.global empty_bad_pte_table
empty_bad_pte_table:
	.space PAGE_SIZE, 0

	/* Set below in _stext to record whether an FPU was detected. */
	.global fpu_in_use
fpu_in_use:	.quad	0
126 | |||
127 | |||
	.section .text, "ax"
	.balign L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . Reset state:
 *	. SR.FD    = 1		(FPU disabled)
 *	. SR.BL    = 1		(Exceptions disabled)
 *	. SR.MD    = 1		(Privileged Mode)
 *	. SR.MMU   = 0		(MMU Disabled)
 *	. SR.CD    = 0		(CTC User Visible)
 *	. SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all to later Linux initialization steps.
 * . initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared to hopeless start_kernel() returns.
 *
 */
	.global _stext
_stext:
	/*
	 * Prevent speculative fetch on device memory due to
	 * uninitialized target registers.
	 * All eight branch-target registers are pointed at zero before
	 * the instruction stream is synchronized.
	 */
	ptabs/u	ZERO, tr0
	ptabs/u	ZERO, tr1
	ptabs/u	ZERO, tr2
	ptabs/u	ZERO, tr3
	ptabs/u	ZERO, tr4
	ptabs/u	ZERO, tr5
	ptabs/u	ZERO, tr6
	ptabs/u	ZERO, tr7
	synci

	/*
	 * Read/Set CPU state. After this block:
	 * r29 = Initial SR
	 */
	getcon	SR, r29
	movi	SR_HARMLESS, r20
	putcon	r20, SR

	/*
	 * Initialize EMI/LMI. To Be Done.
	 */

	/*
	 * CPU detection and/or endianness settings (?). To Be Done.
	 * Pure PIC code here, please ! Just save state into r30.
	 * After this block:
	 * r30 = CPU type/Platform Endianness
	 */

	/*
	 * Set initial TLB entries for cached and uncached regions.
	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
	 */
	/* Clear ITLBs */
	pta	clear_ITLB, tr1
	movi	MMUIR_FIRST, r21
	movi	MMUIR_END, r22
clear_ITLB:
	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
	addi	r21, MMUIR_STEP, r21
	bne	r21, r22, tr1

	/* Clear DTLBs */
	pta	clear_DTLB, tr1
	movi	MMUDR_FIRST, r21
	movi	MMUDR_END, r22
clear_DTLB:
	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
	addi	r21, MMUDR_STEP, r21
	bne	r21, r22, tr1

	/* Map one big (512Mb) page for ITLB */
	movi	MMUIR_FIRST, r21
	movi	MMUIR_TEXT_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
	movi	MMUIR_TEXT_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */

	/* Map one big CACHED (512Mb) page for DTLB */
	movi	MMUDR_FIRST, r21
	movi	MMUDR_CACHED_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
	movi	MMUDR_CACHED_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */

#ifdef CONFIG_EARLY_PRINTK
	/*
	 * Setup a DTLB translation for SCIF phys.
	 */
	addi	r21, MMUDR_STEP, r21
	movi	0x0a03, r22	/* SCIF phys */
	shori	0x0148, r22
	putcfg	r21, 1, r22	/* PTEL first */
	movi	0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
	shori	0x0003, r22
	putcfg	r21, 0, r22	/* PTEH last */
#endif

	/*
	 * Set cache behaviours.
	 * The *_INIT_VAL constants are chosen at build time from the
	 * CONFIG_?CACHE_* options near the top of this file.
	 */
	/* ICache */
	movi	ICCR_BASE, r21
	movi	ICCR0_INIT_VAL, r22
	movi	ICCR1_INIT_VAL, r23
	putcfg	r21, ICCR_REG0, r22
	putcfg	r21, ICCR_REG1, r23

	/* OCache */
	movi	OCCR_BASE, r21
	movi	OCCR0_INIT_VAL, r22
	movi	OCCR1_INIT_VAL, r23
	putcfg	r21, OCCR_REG0, r22
	putcfg	r21, OCCR_REG1, r23


	/*
	 * Enable Caches and MMU. Do the first non-PIC jump.
	 * Now head.S global variables, constants and externs
	 * can be used.
	 * The SSR/SPC pair is consumed by the rte below: the "return"
	 * lands on hyperspace with the MMU-enabled SR in force.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_MMU, r22
	or	r21, r22, r21
	putcon	r21, SSR
	movi	hyperspace, r22
	ori	r22, 1, r22		/* Make it SHmedia, not required but..*/
	putcon	r22, SPC
	synco
	rte				/* And now go into the hyperspace ... */
hyperspace:				/* ... that's the next instruction ! */

	/*
	 * Set CPU to a consistent state.
	 * r31 = FPU support flag
	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
	 */
	movi	start_kernel, r32
	ori	r32, 1, r32

	ptabs	r32, tr0		/* r32 = _start_kernel address */
	pta/u	hopeless, tr1
	pta/u	hopeless, tr2
	pta/u	hopeless, tr3
	pta/u	hopeless, tr4
	pta/u	hopeless, tr5
	pta/u	hopeless, tr6
	pta/u	hopeless, tr7
	gettr	tr1, r28		/* r28 = hopeless address */

	/* Set initial stack pointer */
	movi	init_thread_union, SP
	putcon	SP, KCR0		/* Set current to init_task */
	movi	THREAD_SIZE, r22	/* Point to the end */
	add	SP, r22, SP

	/*
	 * Initialize FPU.
	 * Keep FPU flag in r31. After this block:
	 * r31 = FPU flag
	 */
	movi fpu_in_use, r31	/* Temporary */

#ifdef CONFIG_SH_FPU
	/* Probe for an FPU by attempting to clear SR.FD and checking
	   whether the write sticks.  NOTE(review): the shift of 15
	   presumably isolates the FD bit of SR -- verify against the
	   SR layout. */
	getcon	SR, r21
	movi	SR_ENABLE_FPU, r22
	and	r21, r22, r22
	putcon	r22, SR			/* Try to enable */
	getcon	SR, r22
	xor	r21, r22, r21
	shlri	r21, 15, r21		/* Supposedly 0/1 */
	st.q	r31, 0 , r21		/* Set fpu_in_use */
#else
	movi	0, r21
	st.q	r31, 0 , r21		/* Set fpu_in_use */
#endif
	or	r21, ZERO, r31		/* Set FPU flag at last */

#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc.  For these the memory can be
   guaranteed to be all zero on boot anyway. */
	/*
	 * Clear bss
	 */
	pta	clear_quad, tr1
	movi	__bss_start, r22
	movi	_end, r23
clear_quad:
	st.q	r22, 0, ZERO
	addi	r22, 8, r22
	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
#endif
	pta/u	hopeless, tr1		/* Re-arm tr1, clobbered by the loop above */

	/* Say bye to head.S but be prepared to wrongly get back ... */
	blink	tr0, LINK

	/* If we ever get back here through LINK/tr1-tr7 */
	pta/u	hopeless, tr7

hopeless:
	/*
	 * Something's badly wrong here. Loop endlessly,
	 * there's nothing more we can do about it.
	 *
	 * Note on hopeless: it can be jumped into invariably
	 * before or after jumping into hyperspace. The only
	 * requirement is to be PIC called (PTA) before and
	 * any way (PTA/PTABS) after. According to Virtual
	 * to Physical mapping a simulator/emulator can easily
	 * tell where we came here from just looking at hopeless
	 * (PC) address.
	 *
	 * For debugging purposes:
	 * (r28) hopeless/loop address
	 * (r29) Original SR
	 * (r30) CPU type/Platform endianness
	 * (r31) FPU Support
	 * (r32) _start_kernel address
	 */
	blink	tr7, ZERO
372 | |||
373 | |||
diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c new file mode 100644 index 000000000000..de2d07db1f88 --- /dev/null +++ b/arch/sh64/kernel/init_task.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/init_task.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/rwsem.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/init_task.h> | ||
16 | #include <linux/mqueue.h> | ||
17 | |||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | |||
/* Statically-allocated bookkeeping structures for the boot (init) task,
 * built from the INIT_* macros since no allocator exists this early. */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

/* Placeholder register frame for the boot idle thread. */
struct pt_regs fake_swapper_regs;

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE-byte aligned due
 * to the way process stacks are handled. This is done by having a
 * special "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
46 | |||
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c new file mode 100644 index 000000000000..9fc2b71dbd84 --- /dev/null +++ b/arch/sh64/kernel/irq.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/irq.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * IRQs are in fact implemented a bit like signal handlers for the kernel. | ||
15 | * Naturally it's not a 1:1 relation, but there are similarities. | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/rwsem.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/timex.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/random.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/smp_lock.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/bitops.h> | ||
34 | #include <asm/system.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/smp.h> | ||
37 | #include <asm/pgalloc.h> | ||
38 | #include <asm/delay.h> | ||
39 | #include <asm/irq.h> | ||
40 | #include <linux/irq.h> | ||
41 | |||
/*
 * Called by the generic IRQ layer when an interrupt arrives on a line
 * that has no handler registered; just log it.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at irq %02x\n", irq);
}
46 | |||
#if defined(CONFIG_PROC_FS)
/*
 * seq_file show callback backing /proc/interrupts: one header row of
 * online CPUs, then one row per registered IRQ with its count,
 * controller name and action name(s).
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;	/* i = IRQ number for this row */
	struct irqaction * action;
	unsigned long flags;

	/* First row: column headers, one per online CPU. */
	if (i == 0) {
		seq_puts(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		/* Lock the descriptor so the action list cannot change
		 * underneath us while we walk it. */
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;	/* unregistered line: print nothing */
		seq_printf(p, "%3d: ",i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
#endif
81 | |||
/*
 * do_NMI handles all Non-Maskable Interrupts.
 * Nothing is done beyond logging which mode the CPU was in.
 */
asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
{
	/* NOTE(review): bit 30 of SR is presumably the MD (privileged
	 * mode) bit -- verify against the SR layout. */
	if (regs->sr & 0x40000000)
		printk("unexpected NMI trap in system mode\n");
	else
		printk("unexpected NMI trap in user mode\n");

	/* No statistics */
}
94 | |||
95 | /* | ||
96 | * do_IRQ handles all normal device IRQ's. | ||
97 | */ | ||
98 | asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs) | ||
99 | { | ||
100 | int irq; | ||
101 | |||
102 | irq_enter(); | ||
103 | |||
104 | irq = irq_demux(vector_num); | ||
105 | |||
106 | if (irq >= 0) { | ||
107 | __do_IRQ(irq, regs); | ||
108 | } else { | ||
109 | printk("unexpected IRQ trap at vector %03lx\n", vector_num); | ||
110 | } | ||
111 | |||
112 | irq_exit(); | ||
113 | |||
114 | return 1; | ||
115 | } | ||
116 | |||
diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c new file mode 100644 index 000000000000..43f88f3a78b0 --- /dev/null +++ b/arch/sh64/kernel/irq_intc.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/irq_intc.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * | ||
11 | * Interrupt Controller support for SH5 INTC. | ||
12 | * Per-interrupt selective. IRLM=0 (Fixed priority) is not | ||
13 | * supported being useless without a cascaded interrupt | ||
14 | * controller. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/bitops.h> /* this includes also <asm/registers.h */ | ||
24 | /* which is required to remap register */ | ||
25 | /* names used into __asm__ blocks... */ | ||
26 | |||
27 | #include <asm/hardware.h> | ||
28 | #include <asm/platform.h> | ||
29 | #include <asm/page.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/irq.h> | ||
32 | |||
/*
 * Maybe the generic Peripheral block could move to a more
 * generic include file. INTC Block will be defined here
 * and only here to make INTC self-contained in a single
 * file.
 */
#define INTC_BLOCK_OFFSET	0x01000000

/* Base */
#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
				INTC_BLOCK_OFFSET

/* Address */
/* Register offsets from the remapped base (intc_virt, set in init_IRQ). */
#define INTC_ICR_SET		(intc_virt + 0x0)
#define INTC_ICR_CLEAR		(intc_virt + 0x8)
#define INTC_INTPRI_0		(intc_virt + 0x10)
#define INTC_INTSRC_0		(intc_virt + 0x50)
#define INTC_INTSRC_1		(intc_virt + 0x58)
#define INTC_INTREQ_0		(intc_virt + 0x60)
#define INTC_INTREQ_1		(intc_virt + 0x68)
#define INTC_INTENB_0		(intc_virt + 0x70)
#define INTC_INTENB_1		(intc_virt + 0x78)
#define INTC_INTDSB_0		(intc_virt + 0x80)
#define INTC_INTDSB_1		(intc_virt + 0x88)

#define INTC_ICR_IRLM		0x1
#define INTC_INTPRI_PREGS	8	/* 8 Priority Registers */
#define INTC_INTPRI_PPREG	8	/* 8 Priorities per Register */
61 | |||
62 | |||
/*
 * Mapper between the vector ordinal and the IRQ number
 * passed to kernel/device drivers.
 * One entry per 0x20 of event-vector space (hence the (0xE20/0x20)+1
 * bound); -1 marks a vector with no IRQ assigned.
 */
int intc_evt_to_irq[(0xE20/0x20)+1] = {
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
	-1, -1				/* 0xE00 - 0xE20 */
};
84 | |||
/*
 * Opposite mapper.
 * IRQ number -> vector ordinal (multiply by 0x20 for the event code,
 * as done in intc_irq_describe); -1 marks an unused IRQ slot.
 */
static int IRQ_to_vectorN[NR_INTC_IRQS] = {
	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53, /*  8-15 */
	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
	0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */

};
99 | |||
100 | static unsigned long intc_virt; | ||
101 | |||
102 | static unsigned int startup_intc_irq(unsigned int irq); | ||
103 | static void shutdown_intc_irq(unsigned int irq); | ||
104 | static void enable_intc_irq(unsigned int irq); | ||
105 | static void disable_intc_irq(unsigned int irq); | ||
106 | static void mask_and_ack_intc(unsigned int); | ||
107 | static void end_intc_irq(unsigned int irq); | ||
108 | |||
109 | static struct hw_interrupt_type intc_irq_type = { | ||
110 | "INTC", | ||
111 | startup_intc_irq, | ||
112 | shutdown_intc_irq, | ||
113 | enable_intc_irq, | ||
114 | disable_intc_irq, | ||
115 | mask_and_ack_intc, | ||
116 | end_intc_irq | ||
117 | }; | ||
118 | |||
119 | static int irlm; /* IRL mode */ | ||
120 | |||
/* Generic-IRQ startup hook: just unmask the line. */
static unsigned int startup_intc_irq(unsigned int irq)
{
	enable_intc_irq(irq);
	return 0; /* never anything pending */
}
126 | |||
/* Generic-IRQ shutdown hook: just mask the line. */
static void shutdown_intc_irq(unsigned int irq)
{
	disable_intc_irq(irq);
}
131 | |||
132 | static void enable_intc_irq(unsigned int irq) | ||
133 | { | ||
134 | unsigned long reg; | ||
135 | unsigned long bitmask; | ||
136 | |||
137 | if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY)) | ||
138 | printk("Trying to use straight IRL0-3 with an encoding platform.\n"); | ||
139 | |||
140 | if (irq < 32) { | ||
141 | reg = INTC_INTENB_0; | ||
142 | bitmask = 1 << irq; | ||
143 | } else { | ||
144 | reg = INTC_INTENB_1; | ||
145 | bitmask = 1 << (irq - 32); | ||
146 | } | ||
147 | |||
148 | ctrl_outl(bitmask, reg); | ||
149 | } | ||
150 | |||
151 | static void disable_intc_irq(unsigned int irq) | ||
152 | { | ||
153 | unsigned long reg; | ||
154 | unsigned long bitmask; | ||
155 | |||
156 | if (irq < 32) { | ||
157 | reg = INTC_INTDSB_0; | ||
158 | bitmask = 1 << irq; | ||
159 | } else { | ||
160 | reg = INTC_INTDSB_1; | ||
161 | bitmask = 1 << (irq - 32); | ||
162 | } | ||
163 | |||
164 | ctrl_outl(bitmask, reg); | ||
165 | } | ||
166 | |||
/* Ack hook: the INTC has no explicit ack, so masking suffices. */
static void mask_and_ack_intc(unsigned int irq)
{
	disable_intc_irq(irq);
}
171 | |||
/* End-of-interrupt hook: re-enable the line masked in mask_and_ack. */
static void end_intc_irq(unsigned int irq)
{
	enable_intc_irq(irq);
}
176 | |||
/* For future use, if we ever support IRLM=0 */
/* Route an IRQ line to this controller and leave it masked. */
void make_intc_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	irq_desc[irq].handler = &intc_irq_type;
	disable_intc_irq(irq);
}
184 | |||
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
/*
 * Append "(0xVEC)" -- the hardware event code -- for a valid INTC IRQ;
 * contribute nothing (return 0 chars) for anything else.
 */
int intc_irq_describe(char* p, int irq)
{
	if (irq >= NR_INTC_IRQS)
		return 0;

	return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq] * 0x20);
}
#endif
194 | |||
/*
 * Board-independent INTC bring-up: remap the register block, attach
 * the INTC handler to every prioritised line, quiesce the hardware,
 * select IRL mode, program priorities, then open up interrupts in SR.
 */
void __init init_IRQ(void)
{
	/* __dummy1 is the mask ANDed into SR at the end; NOTE(review):
	 * ~0x00000000100000f0 presumably clears BL and the IMASK
	 * field -- verify against the SR layout. */
	unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
	unsigned long reg;
	unsigned long data;
	int i;

	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
	if (!intc_virt) {
		panic("Unable to remap INTC\n");
	}


	/* Set default: per-line enable/disable, priority driven ack/eoi */
	for (i = 0; i < NR_INTC_IRQS; i++) {
		if (platform_int_priority[i] != NO_PRIORITY) {
			irq_desc[i].handler = &intc_irq_type;
		}
	}


	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
	ctrl_outl(-1, INTC_INTDSB_0);
	ctrl_outl(-1, INTC_INTDSB_1);

	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
		ctrl_outl( NO_PRIORITY, reg);


	/* Set IRLM */
	/* If all the priorities are set to 'no priority', then
	 * assume we are using encoded mode.
	 */
	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];

	if (irlm == NO_PRIORITY) {
		/* IRLM = 0 */
		reg = INTC_ICR_CLEAR;
		i = IRQ_INTA;
		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
	} else {
		/* IRLM = 1 */
		reg = INTC_ICR_SET;
		i = IRQ_IRL0;
	}
	ctrl_outl(INTC_ICR_IRLM, reg);

	/* Set interrupt priorities according to platform description.
	 * Priorities are packed 4 bits per IRQ, 8 IRQs per register;
	 * each register is flushed once its 8 nibbles are accumulated. */
	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
			/* Upon the 7th, set Priority Register */
			ctrl_outl(data, reg);
			data = 0;
			reg += 8;
		}
	}

#ifdef CONFIG_SH_CAYMAN
	{
		extern void init_cayman_irq(void);

		init_cayman_irq();
	}
#endif

	/*
	 * And now let interrupts come in.
	 * sti() is not enough, we need to
	 * lower priority, too.
	 */
	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
			     "and	%0, %1, %0\n\t"
			     "putcon	%0, " __SR "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}
diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c new file mode 100644 index 000000000000..cf993c4a9fdc --- /dev/null +++ b/arch/sh64/kernel/led.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/led.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com> | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * Flash the LEDs | ||
10 | */ | ||
11 | #include <linux/config.h> | ||
12 | #include <linux/stddef.h> | ||
13 | #include <linux/sched.h> | ||
14 | |||
15 | void mach_led(int pos, int val); | ||
16 | |||
17 | /* acts like an actual heart beat -- ie thump-thump-pause... */ | ||
18 | void heartbeat(void) | ||
19 | { | ||
20 | static unsigned int cnt = 0, period = 0, dist = 0; | ||
21 | |||
22 | if (cnt == 0 || cnt == dist) { | ||
23 | mach_led(-1, 1); | ||
24 | } else if (cnt == 7 || cnt == dist + 7) { | ||
25 | mach_led(-1, 0); | ||
26 | } | ||
27 | |||
28 | if (++cnt > period) { | ||
29 | cnt = 0; | ||
30 | |||
31 | /* | ||
32 | * The hyperbolic function below modifies the heartbeat period | ||
33 | * length in dependency of the current (5min) load. It goes | ||
34 | * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30. | ||
35 | */ | ||
36 | period = ((672 << FSHIFT) / (5 * avenrun[0] + | ||
37 | (7 << FSHIFT))) + 30; | ||
38 | dist = period / 4; | ||
39 | } | ||
40 | } | ||
41 | |||
diff --git a/arch/sh64/kernel/module.c b/arch/sh64/kernel/module.c new file mode 100644 index 000000000000..2598f6b88b44 --- /dev/null +++ b/arch/sh64/kernel/module.c | |||
@@ -0,0 +1,161 @@ | |||
1 | /* Kernel module help for sh64. | ||
2 | |||
3 | This program is free software; you can redistribute it and/or modify | ||
4 | it under the terms of the GNU General Public License as published by | ||
5 | the Free Software Foundation; either version 2 of the License, or | ||
6 | (at your option) any later version. | ||
7 | |||
8 | This program is distributed in the hope that it will be useful, | ||
9 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | GNU General Public License for more details. | ||
12 | |||
13 | You should have received a copy of the GNU General Public License | ||
14 | along with this program; if not, write to the Free Software | ||
15 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | |||
17 | Copyright 2004 SuperH (UK) Ltd | ||
18 | Author: Richard Curnow | ||
19 | |||
20 | Based on the sh version, and on code from the sh64-specific parts of | ||
21 | modutils, originally written by Richard Curnow and Ben Gaster. | ||
22 | |||
23 | */ | ||
24 | #include <linux/moduleloader.h> | ||
25 | #include <linux/elf.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/kernel.h> | ||
30 | |||
31 | #if 0 | ||
32 | #define DEBUGP printk | ||
33 | #else | ||
34 | #define DEBUGP(fmt...) | ||
35 | #endif | ||
36 | |||
37 | void *module_alloc(unsigned long size) | ||
38 | { | ||
39 | if (size == 0) | ||
40 | return NULL; | ||
41 | return vmalloc(size); | ||
42 | } | ||
43 | |||
44 | |||
45 | /* Free memory returned from module_alloc */ | ||
46 | void module_free(struct module *mod, void *module_region) | ||
47 | { | ||
48 | vfree(module_region); | ||
49 | /* FIXME: If module_region == mod->init_region, trim exception | ||
50 | table entries. */ | ||
51 | } | ||
52 | |||
53 | /* We don't need anything special. */ | ||
54 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
55 | Elf_Shdr *sechdrs, | ||
56 | char *secstrings, | ||
57 | struct module *mod) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | int apply_relocate_add(Elf32_Shdr *sechdrs, | ||
63 | const char *strtab, | ||
64 | unsigned int symindex, | ||
65 | unsigned int relsec, | ||
66 | struct module *me) | ||
67 | { | ||
68 | unsigned int i; | ||
69 | Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
70 | Elf32_Sym *sym; | ||
71 | Elf32_Addr relocation; | ||
72 | uint32_t *location; | ||
73 | int align; | ||
74 | int is_shmedia; | ||
75 | |||
76 | DEBUGP("Applying relocate section %u to %u\n", relsec, | ||
77 | sechdrs[relsec].sh_info); | ||
78 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
79 | /* This is where to make the change */ | ||
80 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
81 | + rel[i].r_offset; | ||
82 | /* This is the symbol it is referring to. Note that all | ||
83 | undefined symbols have been resolved. */ | ||
84 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
85 | + ELF32_R_SYM(rel[i].r_info); | ||
86 | relocation = sym->st_value + rel[i].r_addend; | ||
87 | align = (int)location & 3; | ||
88 | |||
89 | /* For text addresses, bit2 of the st_other field indicates | ||
90 | * whether the symbol is SHmedia (1) or SHcompact (0). If | ||
91 | * SHmedia, the LSB of the symbol needs to be asserted | ||
92 | * for the CPU to be in SHmedia mode when it starts executing | ||
93 | * the branch target. */ | ||
94 | is_shmedia = (sym->st_other & 4) ? 1 : 0; | ||
95 | if (is_shmedia) { | ||
96 | relocation |= 1; | ||
97 | } | ||
98 | |||
99 | switch (ELF32_R_TYPE(rel[i].r_info)) { | ||
100 | case R_SH_DIR32: | ||
101 | DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
102 | *location += relocation; | ||
103 | break; | ||
104 | case R_SH_REL32: | ||
105 | DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
106 | relocation -= (Elf32_Addr) location; | ||
107 | *location += relocation; | ||
108 | break; | ||
109 | case R_SH_IMM_LOW16: | ||
110 | DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
111 | *location = (*location & ~0x3fffc00) | | ||
112 | ((relocation & 0xffff) << 10); | ||
113 | break; | ||
114 | case R_SH_IMM_MEDLOW16: | ||
115 | DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
116 | *location = (*location & ~0x3fffc00) | | ||
117 | (((relocation >> 16) & 0xffff) << 10); | ||
118 | break; | ||
119 | case R_SH_IMM_LOW16_PCREL: | ||
120 | DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
121 | relocation -= (Elf32_Addr) location; | ||
122 | *location = (*location & ~0x3fffc00) | | ||
123 | ((relocation & 0xffff) << 10); | ||
124 | break; | ||
125 | case R_SH_IMM_MEDLOW16_PCREL: | ||
126 | DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation); | ||
127 | relocation -= (Elf32_Addr) location; | ||
128 | *location = (*location & ~0x3fffc00) | | ||
129 | (((relocation >> 16) & 0xffff) << 10); | ||
130 | break; | ||
131 | default: | ||
132 | printk(KERN_ERR "module %s: Unknown relocation: %u\n", | ||
133 | me->name, ELF32_R_TYPE(rel[i].r_info)); | ||
134 | return -ENOEXEC; | ||
135 | } | ||
136 | } | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | int apply_relocate(Elf32_Shdr *sechdrs, | ||
141 | const char *strtab, | ||
142 | unsigned int symindex, | ||
143 | unsigned int relsec, | ||
144 | struct module *me) | ||
145 | { | ||
146 | printk(KERN_ERR "module %s: REL RELOCATION unsupported\n", | ||
147 | me->name); | ||
148 | return -ENOEXEC; | ||
149 | } | ||
150 | |||
151 | int module_finalize(const Elf_Ehdr *hdr, | ||
152 | const Elf_Shdr *sechdrs, | ||
153 | struct module *me) | ||
154 | { | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | void module_arch_cleanup(struct module *mod) | ||
159 | { | ||
160 | } | ||
161 | |||
diff --git a/arch/sh64/kernel/pci-dma.c b/arch/sh64/kernel/pci-dma.c new file mode 100644 index 000000000000..a36c3d71a3fe --- /dev/null +++ b/arch/sh64/kernel/pci-dma.c | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | ||
3 | * Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org) | ||
4 | * | ||
5 | * May be copied or modified under the terms of the GNU General Public | ||
6 | * License. See linux/COPYING for more information. | ||
7 | * | ||
8 | * Dynamic DMA mapping support. | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <asm/io.h> | ||
15 | |||
16 | void *consistent_alloc(struct pci_dev *hwdev, size_t size, | ||
17 | dma_addr_t *dma_handle) | ||
18 | { | ||
19 | void *ret; | ||
20 | int gfp = GFP_ATOMIC; | ||
21 | void *vp; | ||
22 | |||
23 | if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) | ||
24 | gfp |= GFP_DMA; | ||
25 | |||
26 | ret = (void *)__get_free_pages(gfp, get_order(size)); | ||
27 | |||
28 | /* now call our friend ioremap_nocache to give us an uncached area */ | ||
29 | vp = ioremap_nocache(virt_to_phys(ret), size); | ||
30 | |||
31 | if (vp != NULL) { | ||
32 | memset(vp, 0, size); | ||
33 | *dma_handle = virt_to_bus(ret); | ||
34 | dma_cache_wback_inv((unsigned long)ret, size); | ||
35 | } | ||
36 | |||
37 | return vp; | ||
38 | } | ||
39 | |||
40 | void consistent_free(struct pci_dev *hwdev, size_t size, | ||
41 | void *vaddr, dma_addr_t dma_handle) | ||
42 | { | ||
43 | void *alloc; | ||
44 | |||
45 | alloc = bus_to_virt((unsigned long)dma_handle); | ||
46 | free_pages((unsigned long)alloc, get_order(size)); | ||
47 | |||
48 | iounmap(vaddr); | ||
49 | } | ||
50 | |||
diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c new file mode 100644 index 000000000000..6197879e8578 --- /dev/null +++ b/arch/sh64/kernel/pci_sh5.c | |||
@@ -0,0 +1,541 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | ||
3 | * Copyright (C) 2003, 2004 Paul Mundt | ||
4 | * Copyright (C) 2004 Richard Curnow | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * Support functions for the SH5 PCI hardware. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/rwsem.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <asm/pci.h> | ||
24 | #include <linux/irq.h> | ||
25 | |||
26 | #include <asm/io.h> | ||
27 | #include <asm/hardware.h> | ||
28 | #include "pci_sh5.h" | ||
29 | |||
30 | static unsigned long pcicr_virt; | ||
31 | unsigned long pciio_virt; | ||
32 | |||
33 | static void __init pci_fixup_ide_bases(struct pci_dev *d) | ||
34 | { | ||
35 | int i; | ||
36 | |||
37 | /* | ||
38 | * PCI IDE controllers use non-standard I/O port decoding, respect it. | ||
39 | */ | ||
40 | if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) | ||
41 | return; | ||
42 | printk("PCI: IDE base address fixup for %s\n", pci_name(d)); | ||
43 | for(i=0; i<4; i++) { | ||
44 | struct resource *r = &d->resource[i]; | ||
45 | if ((r->start & ~0x80) == 0x374) { | ||
46 | r->start |= 2; | ||
47 | r->end = r->start; | ||
48 | } | ||
49 | } | ||
50 | } | ||
51 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); | ||
52 | |||
53 | char * __init pcibios_setup(char *str) | ||
54 | { | ||
55 | return str; | ||
56 | } | ||
57 | |||
58 | /* Rounds a number UP to the nearest power of two. Used for | ||
59 | * sizing the PCI window. | ||
60 | */ | ||
61 | static u32 __init r2p2(u32 num) | ||
62 | { | ||
63 | int i = 31; | ||
64 | u32 tmp = num; | ||
65 | |||
66 | if (num == 0) | ||
67 | return 0; | ||
68 | |||
69 | do { | ||
70 | if (tmp & (1 << 31)) | ||
71 | break; | ||
72 | i--; | ||
73 | tmp <<= 1; | ||
74 | } while (i >= 0); | ||
75 | |||
76 | tmp = 1 << i; | ||
77 | /* If the original number isn't a power of 2, round it up */ | ||
78 | if (tmp != num) | ||
79 | tmp <<= 1; | ||
80 | |||
81 | return tmp; | ||
82 | } | ||
83 | |||
84 | extern unsigned long long memory_start, memory_end; | ||
85 | |||
86 | int __init sh5pci_init(unsigned memStart, unsigned memSize) | ||
87 | { | ||
88 | u32 lsr0; | ||
89 | u32 uval; | ||
90 | |||
91 | pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR"); | ||
92 | if (!pcicr_virt) { | ||
93 | panic("Unable to remap PCICR\n"); | ||
94 | } | ||
95 | |||
96 | pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO"); | ||
97 | if (!pciio_virt) { | ||
98 | panic("Unable to remap PCIIO\n"); | ||
99 | } | ||
100 | |||
101 | pr_debug("Register base addres is 0x%08lx\n", pcicr_virt); | ||
102 | |||
103 | /* Clear snoop registers */ | ||
104 | SH5PCI_WRITE(CSCR0, 0); | ||
105 | SH5PCI_WRITE(CSCR1, 0); | ||
106 | |||
107 | pr_debug("Wrote to reg\n"); | ||
108 | |||
109 | /* Switch off interrupts */ | ||
110 | SH5PCI_WRITE(INTM, 0); | ||
111 | SH5PCI_WRITE(AINTM, 0); | ||
112 | SH5PCI_WRITE(PINTM, 0); | ||
113 | |||
114 | /* Set bus active, take it out of reset */ | ||
115 | uval = SH5PCI_READ(CR); | ||
116 | |||
117 | /* Set command Register */ | ||
118 | SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM); | ||
119 | |||
120 | uval=SH5PCI_READ(CR); | ||
121 | pr_debug("CR is actually 0x%08x\n",uval); | ||
122 | |||
123 | /* Allow it to be a master */ | ||
124 | /* NB - WE DISABLE I/O ACCESS to stop overlap */ | ||
125 | /* set WAIT bit to enable stepping, an attempt to improve stability */ | ||
126 | SH5PCI_WRITE_SHORT(CSR_CMD, | ||
127 | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT); | ||
128 | |||
129 | /* | ||
130 | ** Set translation mapping memory in order to convert the address | ||
131 | ** used for the main bus, to the PCI internal address. | ||
132 | */ | ||
133 | SH5PCI_WRITE(MBR,0x40000000); | ||
134 | |||
135 | /* Always set the max size 512M */ | ||
136 | SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024)); | ||
137 | |||
138 | /* | ||
139 | ** I/O addresses are mapped at internal PCI specific address | ||
140 | ** as is described into the configuration bridge table. | ||
141 | ** These are changed to 0, to allow cards that have legacy | ||
142 | ** io such as vga to function correctly. We set the SH5 IOBAR to | ||
143 | ** 256K, which is a bit big as we can only have 64K of address space | ||
144 | */ | ||
145 | |||
146 | SH5PCI_WRITE(IOBR,0x0); | ||
147 | |||
148 | pr_debug("PCI:Writing 0x%08x to IOBR\n",0); | ||
149 | |||
150 | /* Set up a 256K window. Totally pointless waste of address space */ | ||
151 | SH5PCI_WRITE(IOBMR,0); | ||
152 | pr_debug("PCI:Writing 0x%08x to IOBMR\n",0); | ||
153 | |||
154 | /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally, | ||
155 | * we would want to map the I/O region somewhere, but it is so big this is not | ||
156 | * that easy! | ||
157 | */ | ||
158 | SH5PCI_WRITE(CSR_IBAR0,~0); | ||
159 | /* Set memory size value */ | ||
160 | memSize = memory_end - memory_start; | ||
161 | |||
162 | /* Now we set up the mbars so the PCI bus can see the memory of the machine */ | ||
163 | if (memSize < (1024 * 1024)) { | ||
164 | printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize); | ||
165 | return -EINVAL; | ||
166 | } | ||
167 | |||
168 | /* Set LSR 0 */ | ||
169 | lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1); | ||
170 | SH5PCI_WRITE(LSR0, lsr0); | ||
171 | |||
172 | pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0); | ||
173 | |||
174 | /* Set MBAR 0 */ | ||
175 | SH5PCI_WRITE(CSR_MBAR0, memory_start); | ||
176 | SH5PCI_WRITE(LAR0, memory_start); | ||
177 | |||
178 | SH5PCI_WRITE(CSR_MBAR1,0); | ||
179 | SH5PCI_WRITE(LAR1,0); | ||
180 | SH5PCI_WRITE(LSR1,0); | ||
181 | |||
182 | pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start); | ||
183 | pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start); | ||
184 | |||
185 | /* Enable the PCI interrupts on the device */ | ||
186 | SH5PCI_WRITE(INTM, ~0); | ||
187 | SH5PCI_WRITE(AINTM, ~0); | ||
188 | SH5PCI_WRITE(PINTM, ~0); | ||
189 | |||
190 | pr_debug("Switching on all error interrupts\n"); | ||
191 | |||
192 | return(0); | ||
193 | } | ||
194 | |||
195 | static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where, | ||
196 | int size, u32 *val) | ||
197 | { | ||
198 | SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where)); | ||
199 | |||
200 | switch (size) { | ||
201 | case 1: | ||
202 | *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3)); | ||
203 | break; | ||
204 | case 2: | ||
205 | *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2)); | ||
206 | break; | ||
207 | case 4: | ||
208 | *val = SH5PCI_READ(PDR); | ||
209 | break; | ||
210 | } | ||
211 | |||
212 | return PCIBIOS_SUCCESSFUL; | ||
213 | } | ||
214 | |||
215 | static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where, | ||
216 | int size, u32 val) | ||
217 | { | ||
218 | SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where)); | ||
219 | |||
220 | switch (size) { | ||
221 | case 1: | ||
222 | SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val); | ||
223 | break; | ||
224 | case 2: | ||
225 | SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val); | ||
226 | break; | ||
227 | case 4: | ||
228 | SH5PCI_WRITE(PDR, val); | ||
229 | break; | ||
230 | } | ||
231 | |||
232 | return PCIBIOS_SUCCESSFUL; | ||
233 | } | ||
234 | |||
235 | static struct pci_ops pci_config_ops = { | ||
236 | .read = sh5pci_read, | ||
237 | .write = sh5pci_write, | ||
238 | }; | ||
239 | |||
240 | /* Everything hangs off this */ | ||
241 | static struct pci_bus *pci_root_bus; | ||
242 | |||
243 | |||
244 | static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin) | ||
245 | { | ||
246 | pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n", | ||
247 | dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin); | ||
248 | return PCI_SLOT(dev->devfn); | ||
249 | } | ||
250 | |||
251 | static inline u8 bridge_swizzle(u8 pin, u8 slot) | ||
252 | { | ||
253 | return (((pin-1) + slot) % 4) + 1; | ||
254 | } | ||
255 | |||
256 | u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp) | ||
257 | { | ||
258 | if (dev->bus->number != 0) { | ||
259 | u8 pin = *pinp; | ||
260 | do { | ||
261 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
262 | /* Move up the chain of bridges. */ | ||
263 | dev = dev->bus->self; | ||
264 | } while (dev->bus->self); | ||
265 | *pinp = pin; | ||
266 | |||
267 | /* The slot is the slot of the last bridge. */ | ||
268 | } | ||
269 | |||
270 | return PCI_SLOT(dev->devfn); | ||
271 | } | ||
272 | |||
273 | /* This needs to be shunted out of here into the board specific bit */ | ||
274 | |||
275 | static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
276 | { | ||
277 | int result = -1; | ||
278 | |||
279 | /* The complication here is that the PCI IRQ lines from the Cayman's 2 | ||
280 | 5V slots get into the CPU via a different path from the IRQ lines | ||
281 | from the 3 3.3V slots. Thus, we have to detect whether the card's | ||
282 | interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' | ||
283 | at the point where we cross from 5V to 3.3V is not the normal case. | ||
284 | |||
285 | The added complication is that we don't know that the 5V slots are | ||
286 | always bus 2, because a card containing a PCI-PCI bridge may be | ||
287 | plugged into a 3.3V slot, and this changes the bus numbering. | ||
288 | |||
289 | Also, the Cayman has an intermediate PCI bus that goes a custom | ||
290 | expansion board header (and to the secondary bridge). This bus has | ||
291 | never been used in practice. | ||
292 | |||
293 | The 1ary onboard PCI-PCI bridge is device 3 on bus 0 | ||
294 | The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge. | ||
295 | */ | ||
296 | |||
297 | struct slot_pin { | ||
298 | int slot; | ||
299 | int pin; | ||
300 | } path[4]; | ||
301 | int i=0; | ||
302 | |||
303 | while (dev->bus->number > 0) { | ||
304 | |||
305 | slot = path[i].slot = PCI_SLOT(dev->devfn); | ||
306 | pin = path[i].pin = bridge_swizzle(pin, slot); | ||
307 | dev = dev->bus->self; | ||
308 | i++; | ||
309 | if (i > 3) panic("PCI path to root bus too long!\n"); | ||
310 | } | ||
311 | |||
312 | slot = PCI_SLOT(dev->devfn); | ||
313 | /* This is the slot on bus 0 through which the device is eventually | ||
314 | reachable. */ | ||
315 | |||
316 | /* Now work back up. */ | ||
317 | if ((slot < 3) || (i == 0)) { | ||
318 | /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final | ||
319 | swizzle now. */ | ||
320 | result = IRQ_INTA + bridge_swizzle(pin, slot) - 1; | ||
321 | } else { | ||
322 | i--; | ||
323 | slot = path[i].slot; | ||
324 | pin = path[i].pin; | ||
325 | if (slot > 0) { | ||
326 | panic("PCI expansion bus device found - not handled!\n"); | ||
327 | } else { | ||
328 | if (i > 0) { | ||
329 | /* 5V slots */ | ||
330 | i--; | ||
331 | slot = path[i].slot; | ||
332 | pin = path[i].pin; | ||
333 | /* 'pin' was swizzled earlier wrt slot, don't do it again. */ | ||
334 | result = IRQ_P2INTA + (pin - 1); | ||
335 | } else { | ||
336 | /* IRQ for 2ary PCI-PCI bridge : unused */ | ||
337 | result = -1; | ||
338 | } | ||
339 | } | ||
340 | } | ||
341 | |||
342 | return result; | ||
343 | } | ||
344 | |||
345 | irqreturn_t pcish5_err_irq(int irq, void *dev_id, struct pt_regs *regs) | ||
346 | { | ||
347 | unsigned pci_int, pci_air, pci_cir, pci_aint; | ||
348 | |||
349 | pci_int = SH5PCI_READ(INT); | ||
350 | pci_cir = SH5PCI_READ(CIR); | ||
351 | pci_air = SH5PCI_READ(AIR); | ||
352 | |||
353 | if (pci_int) { | ||
354 | printk("PCI INTERRUPT (at %08llx)!\n", regs->pc); | ||
355 | printk("PCI INT -> 0x%x\n", pci_int & 0xffff); | ||
356 | printk("PCI AIR -> 0x%x\n", pci_air); | ||
357 | printk("PCI CIR -> 0x%x\n", pci_cir); | ||
358 | SH5PCI_WRITE(INT, ~0); | ||
359 | } | ||
360 | |||
361 | pci_aint = SH5PCI_READ(AINT); | ||
362 | if (pci_aint) { | ||
363 | printk("PCI ARB INTERRUPT!\n"); | ||
364 | printk("PCI AINT -> 0x%x\n", pci_aint); | ||
365 | printk("PCI AIR -> 0x%x\n", pci_air); | ||
366 | printk("PCI CIR -> 0x%x\n", pci_cir); | ||
367 | SH5PCI_WRITE(AINT, ~0); | ||
368 | } | ||
369 | |||
370 | return IRQ_HANDLED; | ||
371 | } | ||
372 | |||
373 | irqreturn_t pcish5_serr_irq(int irq, void *dev_id, struct pt_regs *regs) | ||
374 | { | ||
375 | printk("SERR IRQ\n"); | ||
376 | |||
377 | return IRQ_NONE; | ||
378 | } | ||
379 | |||
380 | #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1)) | ||
381 | |||
382 | static void __init | ||
383 | pcibios_size_bridge(struct pci_bus *bus, struct resource *ior, | ||
384 | struct resource *memr) | ||
385 | { | ||
386 | struct resource io_res, mem_res; | ||
387 | struct pci_dev *dev; | ||
388 | struct pci_dev *bridge = bus->self; | ||
389 | struct list_head *ln; | ||
390 | |||
391 | if (!bridge) | ||
392 | return; /* host bridge, nothing to do */ | ||
393 | |||
394 | /* set reasonable default locations for pcibios_align_resource */ | ||
395 | io_res.start = PCIBIOS_MIN_IO; | ||
396 | mem_res.start = PCIBIOS_MIN_MEM; | ||
397 | |||
398 | io_res.end = io_res.start; | ||
399 | mem_res.end = mem_res.start; | ||
400 | |||
 401 | /* Collect information about how our direct children are laid out. */ | ||
402 | for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) { | ||
403 | int i; | ||
404 | dev = pci_dev_b(ln); | ||
405 | |||
406 | /* Skip bridges for now */ | ||
407 | if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | ||
408 | continue; | ||
409 | |||
410 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
411 | struct resource res; | ||
412 | unsigned long size; | ||
413 | |||
414 | memcpy(&res, &dev->resource[i], sizeof(res)); | ||
415 | size = res.end - res.start + 1; | ||
416 | |||
417 | if (res.flags & IORESOURCE_IO) { | ||
418 | res.start = io_res.end; | ||
419 | pcibios_align_resource(dev, &res, size, 0); | ||
420 | io_res.end = res.start + size; | ||
421 | } else if (res.flags & IORESOURCE_MEM) { | ||
422 | res.start = mem_res.end; | ||
423 | pcibios_align_resource(dev, &res, size, 0); | ||
424 | mem_res.end = res.start + size; | ||
425 | } | ||
426 | } | ||
427 | } | ||
428 | |||
429 | /* And for all of the subordinate busses. */ | ||
430 | for (ln=bus->children.next; ln != &bus->children; ln=ln->next) | ||
431 | pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res); | ||
432 | |||
433 | /* turn the ending locations into sizes (subtract start) */ | ||
434 | io_res.end -= io_res.start; | ||
435 | mem_res.end -= mem_res.start; | ||
436 | |||
437 | /* Align the sizes up by bridge rules */ | ||
438 | io_res.end = ROUND_UP(io_res.end, 4*1024) - 1; | ||
439 | mem_res.end = ROUND_UP(mem_res.end, 1*1024*1024) - 1; | ||
440 | |||
441 | /* Adjust the bridge's allocation requirements */ | ||
442 | bridge->resource[0].end = bridge->resource[0].start + io_res.end; | ||
443 | bridge->resource[1].end = bridge->resource[1].start + mem_res.end; | ||
444 | |||
445 | bridge->resource[PCI_BRIDGE_RESOURCES].end = | ||
446 | bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end; | ||
447 | bridge->resource[PCI_BRIDGE_RESOURCES+1].end = | ||
448 | bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end; | ||
449 | |||
450 | /* adjust parent's resource requirements */ | ||
451 | if (ior) { | ||
452 | ior->end = ROUND_UP(ior->end, 4*1024); | ||
453 | ior->end += io_res.end; | ||
454 | } | ||
455 | |||
456 | if (memr) { | ||
457 | memr->end = ROUND_UP(memr->end, 1*1024*1024); | ||
458 | memr->end += mem_res.end; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | #undef ROUND_UP | ||
463 | |||
464 | static void __init pcibios_size_bridges(void) | ||
465 | { | ||
466 | struct resource io_res, mem_res; | ||
467 | |||
468 | memset(&io_res, 0, sizeof(io_res)); | ||
469 | memset(&mem_res, 0, sizeof(mem_res)); | ||
470 | |||
471 | pcibios_size_bridge(pci_root_bus, &io_res, &mem_res); | ||
472 | } | ||
473 | |||
474 | static int __init pcibios_init(void) | ||
475 | { | ||
476 | if (request_irq(IRQ_ERR, pcish5_err_irq, | ||
477 | SA_INTERRUPT, "PCI Error",NULL) < 0) { | ||
478 | printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n"); | ||
479 | return -EINVAL; | ||
480 | } | ||
481 | |||
482 | if (request_irq(IRQ_SERR, pcish5_serr_irq, | ||
483 | SA_INTERRUPT, "PCI SERR interrupt", NULL) < 0) { | ||
484 | printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
 488 | /* The pci subsystem needs to know where memory is and how much | ||
489 | * of it there is. I've simply made these globals. A better mechanism | ||
490 | * is probably needed. | ||
491 | */ | ||
492 | sh5pci_init(__pa(memory_start), | ||
493 | __pa(memory_end) - __pa(memory_start)); | ||
494 | |||
495 | pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL); | ||
496 | pcibios_size_bridges(); | ||
497 | pci_assign_unassigned_resources(); | ||
498 | pci_fixup_irqs(no_swizzle, map_cayman_irq); | ||
499 | |||
500 | return 0; | ||
501 | } | ||
502 | |||
503 | subsys_initcall(pcibios_init); | ||
504 | |||
505 | void __init pcibios_fixup_bus(struct pci_bus *bus) | ||
506 | { | ||
507 | struct pci_dev *dev = bus->self; | ||
508 | int i; | ||
509 | |||
510 | #if 1 | ||
511 | if(dev) { | ||
512 | for(i=0; i<3; i++) { | ||
513 | bus->resource[i] = | ||
514 | &dev->resource[PCI_BRIDGE_RESOURCES+i]; | ||
515 | bus->resource[i]->name = bus->name; | ||
516 | } | ||
517 | bus->resource[0]->flags |= IORESOURCE_IO; | ||
518 | bus->resource[1]->flags |= IORESOURCE_MEM; | ||
519 | |||
520 | /* For now, propagate host limits to the bus; | ||
521 | * we'll adjust them later. */ | ||
522 | |||
523 | #if 1 | ||
524 | bus->resource[0]->end = 64*1024 - 1 ; | ||
525 | bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1; | ||
526 | bus->resource[0]->start = PCIBIOS_MIN_IO; | ||
527 | bus->resource[1]->start = PCIBIOS_MIN_MEM; | ||
528 | #else | ||
529 | bus->resource[0]->end = 0 | ||
530 | bus->resource[1]->end = 0 | ||
531 | bus->resource[0]->start =0 | ||
532 | bus->resource[1]->start = 0; | ||
533 | #endif | ||
534 | /* Turn off downstream PF memory address range by default */ | ||
535 | bus->resource[2]->start = 1024*1024; | ||
536 | bus->resource[2]->end = bus->resource[2]->start - 1; | ||
537 | } | ||
538 | #endif | ||
539 | |||
540 | } | ||
541 | |||
diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h new file mode 100644 index 000000000000..8f21f5d2aa3e --- /dev/null +++ b/arch/sh64/kernel/pci_sh5.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) | ||
3 | * | ||
4 | * May be copied or modified under the terms of the GNU General Public | ||
5 | * License. See linux/COPYING for more information. | ||
6 | * | ||
 7 | * Definitions for the SH5 PCI hardware. | ||
8 | */ | ||
9 | |||
10 | /* Product ID */ | ||
11 | #define PCISH5_PID 0x350d | ||
12 | |||
13 | /* vendor ID */ | ||
14 | #define PCISH5_VID 0x1054 | ||
15 | |||
16 | /* Configuration types */ | ||
17 | #define ST_TYPE0 0x00 /* Configuration cycle type 0 */ | ||
18 | #define ST_TYPE1 0x01 /* Configuration cycle type 1 */ | ||
19 | |||
20 | /* VCR data */ | ||
21 | #define PCISH5_VCR_STATUS 0x00 | ||
22 | #define PCISH5_VCR_VERSION 0x08 | ||
23 | |||
24 | /* | ||
25 | ** ICR register offsets and bits | ||
26 | */ | ||
27 | #define PCISH5_ICR_CR 0x100 /* PCI control register values */ | ||
28 | #define CR_PBAM (1<<12) | ||
29 | #define CR_PFCS (1<<11) | ||
30 | #define CR_FTO (1<<10) | ||
31 | #define CR_PFE (1<<9) | ||
32 | #define CR_TBS (1<<8) | ||
33 | #define CR_SPUE (1<<7) | ||
34 | #define CR_BMAM (1<<6) | ||
35 | #define CR_HOST (1<<5) | ||
36 | #define CR_CLKEN (1<<4) | ||
37 | #define CR_SOCS (1<<3) | ||
38 | #define CR_IOCS (1<<2) | ||
39 | #define CR_RSTCTL (1<<1) | ||
40 | #define CR_CFINT (1<<0) | ||
41 | #define CR_LOCK_MASK 0xa5000000 | ||
42 | |||
 43 | #define PCISH5_ICR_INT 0x114 /* Interrupt register values */ | ||
44 | #define INT_MADIM (1<<2) | ||
45 | |||
46 | #define PCISH5_ICR_LSR0 0X104 /* Local space register values */ | ||
47 | #define PCISH5_ICR_LSR1 0X108 /* Local space register values */ | ||
48 | #define PCISH5_ICR_LAR0 0x10c /* Local address register values */ | ||
49 | #define PCISH5_ICR_LAR1 0x110 /* Local address register values */ | ||
50 | #define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */ | ||
51 | #define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */ | ||
52 | #define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */ | ||
53 | #define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */ | ||
54 | #define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */ | ||
55 | #define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */ | ||
56 | #define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */ | ||
57 | #define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */ | ||
58 | #define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */ | ||
59 | #define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */ | ||
60 | #define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */ | ||
61 | #define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */ | ||
62 | #define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */ | ||
63 | #define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */ | ||
64 | #define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */ | ||
65 | #define PCISH5_ICR_PDR 0x220 /* Pio data register values */ | ||
66 | |||
 67 | /* These are config space registers */ | ||
68 | #define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */ | ||
69 | #define PCISH5_ICR_CSR_DID 0x002 /* Device id */ | ||
70 | #define PCISH5_ICR_CSR_CMD 0x004 /* Command register */ | ||
 71 | #define PCISH5_ICR_CSR_STATUS 0x006 /* Status */ | ||
72 | #define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */ | ||
73 | #define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */ | ||
74 | #define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */ | ||
75 | |||
76 | |||
77 | |||
78 | /* Base address of registers */ | ||
79 | #define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000) | ||
80 | #define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000) | ||
81 | /* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */ | ||
82 | |||
83 | /* Register selection macro */ | ||
84 | #define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x)) | ||
85 | /* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */ | ||
86 | |||
87 | /* Write I/O functions */ | ||
88 | #define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg)) | ||
89 | #define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg)) | ||
90 | #define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg)) | ||
91 | |||
92 | /* Read I/O functions */ | ||
93 | #define SH5PCI_READ(reg) ctrl_inl(PCISH5_ICR_REG(reg)) | ||
94 | #define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg)) | ||
95 | #define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg)) | ||
96 | |||
97 | /* Set PCI config bits */ | ||
98 | #define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000) | ||
99 | |||
100 | /* Set PCI command register */ | ||
101 | #define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where) | ||
102 | |||
103 | /* Size converters */ | ||
104 | #define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18) | ||
105 | #define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18) | ||
106 | |||
107 | |||
diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c new file mode 100644 index 000000000000..50c61dcb9fae --- /dev/null +++ b/arch/sh64/kernel/pcibios.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $ | ||
3 | * | ||
4 | * arch/sh/kernel/pcibios.c | ||
5 | * | ||
6 | * Copyright (C) 2002 STMicroelectronics Limited | ||
7 | * Author : David J. McKay | ||
8 | * | ||
9 | * Copyright (C) 2004 Richard Curnow, SuperH UK Limited | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | * This is GPL'd. | ||
15 | * | ||
16 | * Provided here are generic versions of: | ||
17 | * pcibios_update_resource() | ||
18 | * pcibios_align_resource() | ||
19 | * pcibios_enable_device() | ||
20 | * pcibios_set_master() | ||
21 | * pcibios_update_irq() | ||
22 | * | ||
23 | * These functions are collected here to reduce duplication of common | ||
24 | * code amongst the many platform-specific PCI support code files. | ||
25 | * | ||
26 | * Platform-specific files are expected to provide: | ||
27 | * pcibios_fixup_bus() | ||
28 | * pcibios_init() | ||
29 | * pcibios_setup() | ||
30 | * pcibios_fixup_pbus_ranges() | ||
31 | */ | ||
32 | |||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/init.h> | ||
36 | |||
37 | void | ||
38 | pcibios_update_resource(struct pci_dev *dev, struct resource *root, | ||
39 | struct resource *res, int resource) | ||
40 | { | ||
41 | u32 new, check; | ||
42 | int reg; | ||
43 | |||
44 | new = res->start | (res->flags & PCI_REGION_FLAG_MASK); | ||
45 | if (resource < 6) { | ||
46 | reg = PCI_BASE_ADDRESS_0 + 4*resource; | ||
47 | } else if (resource == PCI_ROM_RESOURCE) { | ||
48 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
49 | new |= PCI_ROM_ADDRESS_ENABLE; | ||
50 | reg = dev->rom_base_reg; | ||
51 | } else { | ||
52 | /* Somebody might have asked allocation of a non-standard resource */ | ||
53 | return; | ||
54 | } | ||
55 | |||
56 | pci_write_config_dword(dev, reg, new); | ||
57 | pci_read_config_dword(dev, reg, &check); | ||
58 | if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) { | ||
59 | printk(KERN_ERR "PCI: Error while updating region " | ||
60 | "%s/%d (%08x != %08x)\n", pci_name(dev), resource, | ||
61 | new, check); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * We need to avoid collisions with `mirrored' VGA ports | ||
67 | * and other strange ISA hardware, so we always want the | ||
68 | * addresses to be allocated in the 0x000-0x0ff region | ||
69 | * modulo 0x400. | ||
70 | */ | ||
71 | void pcibios_align_resource(void *data, struct resource *res, | ||
72 | unsigned long size, unsigned long align) | ||
73 | { | ||
74 | if (res->flags & IORESOURCE_IO) { | ||
75 | unsigned long start = res->start; | ||
76 | |||
77 | if (start & 0x300) { | ||
78 | start = (start + 0x3ff) & ~0x3ff; | ||
79 | res->start = start; | ||
80 | } | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static void pcibios_enable_bridge(struct pci_dev *dev) | ||
85 | { | ||
86 | struct pci_bus *bus = dev->subordinate; | ||
87 | u16 cmd, old_cmd; | ||
88 | |||
89 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
90 | old_cmd = cmd; | ||
91 | |||
92 | if (bus->resource[0]->flags & IORESOURCE_IO) { | ||
93 | cmd |= PCI_COMMAND_IO; | ||
94 | } | ||
95 | if ((bus->resource[1]->flags & IORESOURCE_MEM) || | ||
96 | (bus->resource[2]->flags & IORESOURCE_PREFETCH)) { | ||
97 | cmd |= PCI_COMMAND_MEMORY; | ||
98 | } | ||
99 | |||
100 | if (cmd != old_cmd) { | ||
101 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
102 | } | ||
103 | |||
104 | printk("PCI bridge %s, command register -> %04x\n", | ||
105 | pci_name(dev), cmd); | ||
106 | |||
107 | } | ||
108 | |||
109 | |||
110 | |||
111 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
112 | { | ||
113 | u16 cmd, old_cmd; | ||
114 | int idx; | ||
115 | struct resource *r; | ||
116 | |||
117 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
118 | pcibios_enable_bridge(dev); | ||
119 | } | ||
120 | |||
121 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
122 | old_cmd = cmd; | ||
123 | for(idx=0; idx<6; idx++) { | ||
124 | if (!(mask & (1 << idx))) | ||
125 | continue; | ||
126 | r = &dev->resource[idx]; | ||
127 | if (!r->start && r->end) { | ||
128 | printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); | ||
129 | return -EINVAL; | ||
130 | } | ||
131 | if (r->flags & IORESOURCE_IO) | ||
132 | cmd |= PCI_COMMAND_IO; | ||
133 | if (r->flags & IORESOURCE_MEM) | ||
134 | cmd |= PCI_COMMAND_MEMORY; | ||
135 | } | ||
136 | if (dev->resource[PCI_ROM_RESOURCE].start) | ||
137 | cmd |= PCI_COMMAND_MEMORY; | ||
138 | if (cmd != old_cmd) { | ||
139 | printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); | ||
140 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
141 | } | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * If we set up a device for bus mastering, we need to check and set | ||
147 | * the latency timer as it may not be properly set. | ||
148 | */ | ||
149 | unsigned int pcibios_max_latency = 255; | ||
150 | |||
151 | void pcibios_set_master(struct pci_dev *dev) | ||
152 | { | ||
153 | u8 lat; | ||
154 | pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); | ||
155 | if (lat < 16) | ||
156 | lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; | ||
157 | else if (lat > pcibios_max_latency) | ||
158 | lat = pcibios_max_latency; | ||
159 | else | ||
160 | return; | ||
161 | printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat); | ||
162 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | ||
163 | } | ||
164 | |||
/* Record the routed IRQ in the device's config space so drivers can
 * later read it back through PCI_INTERRUPT_LINE. */
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c new file mode 100644 index 000000000000..efde41c0cd66 --- /dev/null +++ b/arch/sh64/kernel/process.c | |||
@@ -0,0 +1,962 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/process.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * Copyright (C) 2003, 2004 Richard Curnow | ||
11 | * | ||
12 | * Started from SH3/4 version: | ||
13 | * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima | ||
14 | * | ||
15 | * In turn started from i386 version: | ||
16 | * Copyright (C) 1995 Linus Torvalds | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * This file handles the architecture-dependent parts of process handling.. | ||
22 | */ | ||
23 | |||
24 | /* Temporary flags/tests. All to be removed/undefined. BEGIN */ | ||
25 | #define IDLE_TRACE | ||
26 | #define VM_SHOW_TABLES | ||
27 | #define VM_TEST_FAULT | ||
28 | #define VM_TEST_RTLBMISS | ||
29 | #define VM_TEST_WTLBMISS | ||
30 | |||
31 | #undef VM_SHOW_TABLES | ||
32 | #undef IDLE_TRACE | ||
33 | /* Temporary flags/tests. All to be removed/undefined. END */ | ||
34 | |||
35 | #define __KERNEL_SYSCALLS__ | ||
36 | #include <stdarg.h> | ||
37 | |||
38 | #include <linux/config.h> | ||
39 | #include <linux/kernel.h> | ||
40 | #include <linux/rwsem.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/smp.h> | ||
43 | #include <linux/smp_lock.h> | ||
44 | #include <linux/ptrace.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/vmalloc.h> | ||
47 | #include <linux/user.h> | ||
48 | #include <linux/a.out.h> | ||
49 | #include <linux/interrupt.h> | ||
50 | #include <linux/unistd.h> | ||
51 | #include <linux/delay.h> | ||
52 | #include <linux/reboot.h> | ||
53 | #include <linux/init.h> | ||
54 | |||
55 | #include <asm/uaccess.h> | ||
56 | #include <asm/pgtable.h> | ||
57 | #include <asm/system.h> | ||
58 | #include <asm/io.h> | ||
59 | #include <asm/processor.h> /* includes also <asm/registers.h> */ | ||
60 | #include <asm/mmu_context.h> | ||
61 | #include <asm/elf.h> | ||
62 | #include <asm/page.h> | ||
63 | |||
64 | #include <linux/irq.h> | ||
65 | |||
66 | struct task_struct *last_task_used_math = NULL; | ||
67 | |||
68 | #ifdef IDLE_TRACE | ||
69 | #ifdef VM_SHOW_TABLES | ||
/* For testing: hex-dump the 512 64-bit entries of one page table,
 * collapsing each run of empty slots into a single "(0s)" marker. */
static void print_PTE(long base)
{
	long long hi, lo;
	long long *entry = (long long *) base;
	int idx;
	int in_zero_run = 0;

	for (idx = 0; idx < 512; idx++, entry++) {
		if (*entry == 0) {
			if (!in_zero_run) {
				in_zero_run = 1;
				printk("(0s) ");
			}
			continue;
		}
		in_zero_run = 0;
		hi = (*entry) >> 32;
		lo = (*entry) & 0xffffffff;
		printk("%08Lx%08Lx ", hi, lo);
		/* Break the line after every fourth slot position. */
		if (!((idx + 1) & 0x3))
			printk("\n");
	}
}
91 | |||
/* For testing: hex-dump the 512 32-bit entries of one directory page,
 * collapsing each run of empty slots into a single "(0s)" marker. */
static void print_DIR(long base)
{
	long *entry = (long *) base;
	int idx;
	int in_zero_run = 0;

	for (idx = 0; idx < 512; idx++, entry++) {
		if (*entry == 0) {
			if (!in_zero_run) {
				in_zero_run = 1;
				printk("(0s) ");
			}
			continue;
		}
		in_zero_run = 0;
		printk("%08lx ", *entry);
		/* Break the line after every eighth slot position. */
		if (!((idx + 1) & 0x7))
			printk("\n");
	}
}
111 | |||
/* For testing: walk the page tables backing the start of the vmalloc
 * area (0xc0000000) by hand and dump PGD, PMD and the first PTE pages. */
static void print_vmalloc_first_tables(void)
{

#define PRESENT	0x800 /* Bit 11 */

	/*
	 * Do it really dirty by looking at raw addresses,
	 * raw offsets, no types. If we used pgtable/pgalloc
	 * macros/definitions we could hide potential bugs.
	 *
	 * Note that pointers are 32-bit for CDC.
	 */
	long pgdt, pmdt, ptet;

	pgdt = (long) &swapper_pg_dir;
	printk("-->PGD (0x%08lx):\n", pgdt);
	print_DIR(pgdt);
	printk("\n");

	/* VMALLOC pool is mapped at 0xc0000000, second (pointer) entry in PGD */
	pgdt += 4;
	pmdt = (long) (* (long *) pgdt);
	if (!(pmdt & PRESENT)) {
		printk("No PMD\n");
		return;
	} else pmdt &= 0xfffff000;

	printk("-->PMD (0x%08lx):\n", pmdt);
	print_DIR(pmdt);
	printk("\n");

	/* Get the pmdt displacement for 0xc0000000 */
	pmdt += 2048;

	/* just look at first two address ranges ... */
	/* ... 0xc0000000 ... */
	ptet = (long) (* (long *) pmdt);
	if (!(ptet & PRESENT)) {
		printk("No PTE0\n");
		return;
	} else ptet &= 0xfffff000;

	printk("-->PTE0 (0x%08lx):\n", ptet);
	print_PTE(ptet);
	printk("\n");

	/* ... 0xc0001000 ... */
	ptet += 4;
	/* NOTE(review): this tests PRESENT against the table ADDRESS
	 * held in ptet (now base+4), not against a re-read table entry,
	 * so for a page-aligned base the check always fails and
	 * "No PTE1" is printed unconditionally.  Presumably the intent
	 * was to advance to the next entry and re-load it -- confirm
	 * before relying on this dump. */
	if (!(ptet & PRESENT)) {
		printk("No PTE1\n");
		return;
	} else ptet &= 0xfffff000;
	printk("-->PTE1 (0x%08lx):\n", ptet);
	print_PTE(ptet);
	printk("\n");
}
169 | #else | ||
170 | #define print_vmalloc_first_tables() | ||
171 | #endif /* VM_SHOW_TABLES */ | ||
172 | |||
/* Smoke-test the vmalloc allocator: allocate three areas of assorted
 * sizes, optionally touch them to provoke the TLB-miss handlers, then
 * free them, dumping the vmalloc page tables after every step. */
static void test_VM(void)
{
	void *area_a, *area_b, *area_c;

#ifdef VM_SHOW_TABLES
	printk("Initial PGD/PMD/PTE\n");
#endif
	print_vmalloc_first_tables();

	printk("Allocating 2 bytes\n");
	area_a = vmalloc(2);
	print_vmalloc_first_tables();

	printk("Allocating 4100 bytes\n");
	area_b = vmalloc(4100);
	print_vmalloc_first_tables();

	printk("Allocating 20234 bytes\n");
	area_c = vmalloc(20234);
	print_vmalloc_first_tables();

#ifdef VM_TEST_FAULT
	/* Here you may want to fault ! */

#ifdef VM_TEST_RTLBMISS
	printk("Ready to fault upon read.\n");
	if (* (char *) area_a) {
		printk("RTLBMISSed on area a !\n");
	}
	printk("RTLBMISSed on area a !\n");
#endif

#ifdef VM_TEST_WTLBMISS
	printk("Ready to fault upon write.\n");
	*((char *) area_b) = 'L';
	printk("WTLBMISSed on area b !\n");
#endif

#endif /* VM_TEST_FAULT */

	printk("Deallocating the 4100 byte chunk\n");
	vfree(area_b);
	print_vmalloc_first_tables();

	printk("Deallocating the 2 byte chunk\n");
	vfree(area_a);
	print_vmalloc_first_tables();

	printk("Deallocating the last chunk\n");
	vfree(area_c);
	print_vmalloc_first_tables();
}
225 | |||
/* Timer-tick counter, maintained by the timer interrupt. */
extern unsigned long volatile jiffies;
int once = 0;			/* one-shot latch for the start-up banner/tests */
unsigned long old_jiffies;	/* jiffies value observed on the previous pass */
int pid = -1, pgid = -1;

/*
 * Debug-only instrumentation called from the idle loop (IDLE_TRACE).
 * First pass: run the vmalloc smoke test and print a banner.  Every
 * pass after that: print '!' marks for timer ticks elapsed since the
 * previous pass, then issue a getpgid() syscall and print a '.'.
 */
void idle_trace(void)
{

	/* Instantiate local syscall stubs; this runs in a context
	 * where no libc wrappers are available. */
	_syscall0(int, getpid)
	_syscall1(int, getpgid, int, pid)

	if (!once) {
		/* VM allocation/deallocation simple test */
		test_VM();
		pid = getpid();

		printk("Got all through to Idle !!\n");
		printk("I'm now going to loop forever ...\n");
		printk("Any ! below is a timer tick.\n");
		printk("Any . below is a getpgid system call from pid = %d.\n", pid);


		old_jiffies = jiffies;
		once++;
	}

	if (old_jiffies != jiffies) {
		/* Reuse old_jiffies as a scratch delta, then reset it. */
		old_jiffies = jiffies - old_jiffies;
		switch (old_jiffies) {
		case 1:
			printk("!");
			break;
		case 2:
			printk("!!");
			break;
		case 3:
			printk("!!!");
			break;
		case 4:
			printk("!!!!");
			break;
		default:
			printk("(%d!)", (int) old_jiffies);
		}
		old_jiffies = jiffies;
	}
	pgid = getpgid(pid);
	printk(".");
}
275 | #else | ||
276 | #define idle_trace() do { } while (0) | ||
277 | #endif /* IDLE_TRACE */ | ||
278 | |||
/* When non-zero the idle loop busy-waits instead of executing the
 * "sleep" instruction; starts at 1 (sleep disabled) until the "hlt"
 * boot parameter enables it. */
static int hlt_counter = 1;

#define HARD_IDLE_TIMEOUT (HZ / 3)

/* Veto use of the sleep instruction in the idle loop (nestable). */
void disable_hlt(void)
{
	hlt_counter++;
}

/* Withdraw one veto placed by disable_hlt(). */
void enable_hlt(void)
{
	hlt_counter--;
}

/* "nohlt" boot parameter: keep the CPU busy-waiting when idle. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}

/* "hlt" boot parameter: allow the idle loop to sleep the CPU. */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
307 | |||
/* Put the CPU to sleep until the next interrupt, unless sleeping has
 * been vetoed via hlt_counter. */
static inline void hlt(void)
{
	if (hlt_counter)
		return;

	__asm__ __volatile__ ("sleep" : : : "memory");
}
315 | |||
/*
 * The idle loop on a uniprocessor SH..
 */
void default_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			/* Sleeping vetoed: spin until a reschedule is due. */
			while (1)
				if (need_resched())
					break;
		} else {
			/* Interrupts are toggled around the need_resched()
			 * test; they are enabled again before each sleep. */
			local_irq_disable();
			while (!need_resched()) {
				local_irq_enable();
				idle_trace();
				hlt();
				local_irq_disable();
			}
			local_irq_enable();
		}
		schedule();
	}
}
340 | |||
/* Architecture entry point for the idle task: delegate to default_idle(). */
void cpu_idle(void)
{
	default_idle();
}
345 | |||
/* Reboot: jump back to the physical start of text, re-entering the
 * boot path as if from reset. */
void machine_restart(char * __unused)
{
	extern void phys_stext(void);

	phys_stext();
}

/* Halt: nothing to switch off here, just spin forever. */
void machine_halt(void)
{
	for (;;);
}

/* Power off: drop the CPU into its deep-standby state. */
void machine_power_off(void)
{
	extern void enter_deep_standby(void);

	enter_deep_standby();
}
364 | |||
365 | void show_regs(struct pt_regs * regs) | ||
366 | { | ||
367 | unsigned long long ah, al, bh, bl, ch, cl; | ||
368 | |||
369 | printk("\n"); | ||
370 | |||
371 | ah = (regs->pc) >> 32; | ||
372 | al = (regs->pc) & 0xffffffff; | ||
373 | bh = (regs->regs[18]) >> 32; | ||
374 | bl = (regs->regs[18]) & 0xffffffff; | ||
375 | ch = (regs->regs[15]) >> 32; | ||
376 | cl = (regs->regs[15]) & 0xffffffff; | ||
377 | printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", | ||
378 | ah, al, bh, bl, ch, cl); | ||
379 | |||
380 | ah = (regs->sr) >> 32; | ||
381 | al = (regs->sr) & 0xffffffff; | ||
382 | asm volatile ("getcon " __TEA ", %0" : "=r" (bh)); | ||
383 | asm volatile ("getcon " __TEA ", %0" : "=r" (bl)); | ||
384 | bh = (bh) >> 32; | ||
385 | bl = (bl) & 0xffffffff; | ||
386 | asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch)); | ||
387 | asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl)); | ||
388 | ch = (ch) >> 32; | ||
389 | cl = (cl) & 0xffffffff; | ||
390 | printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", | ||
391 | ah, al, bh, bl, ch, cl); | ||
392 | |||
393 | ah = (regs->regs[0]) >> 32; | ||
394 | al = (regs->regs[0]) & 0xffffffff; | ||
395 | bh = (regs->regs[1]) >> 32; | ||
396 | bl = (regs->regs[1]) & 0xffffffff; | ||
397 | ch = (regs->regs[2]) >> 32; | ||
398 | cl = (regs->regs[2]) & 0xffffffff; | ||
399 | printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n", | ||
400 | ah, al, bh, bl, ch, cl); | ||
401 | |||
402 | ah = (regs->regs[3]) >> 32; | ||
403 | al = (regs->regs[3]) & 0xffffffff; | ||
404 | bh = (regs->regs[4]) >> 32; | ||
405 | bl = (regs->regs[4]) & 0xffffffff; | ||
406 | ch = (regs->regs[5]) >> 32; | ||
407 | cl = (regs->regs[5]) & 0xffffffff; | ||
408 | printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", | ||
409 | ah, al, bh, bl, ch, cl); | ||
410 | |||
411 | ah = (regs->regs[6]) >> 32; | ||
412 | al = (regs->regs[6]) & 0xffffffff; | ||
413 | bh = (regs->regs[7]) >> 32; | ||
414 | bl = (regs->regs[7]) & 0xffffffff; | ||
415 | ch = (regs->regs[8]) >> 32; | ||
416 | cl = (regs->regs[8]) & 0xffffffff; | ||
417 | printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", | ||
418 | ah, al, bh, bl, ch, cl); | ||
419 | |||
420 | ah = (regs->regs[9]) >> 32; | ||
421 | al = (regs->regs[9]) & 0xffffffff; | ||
422 | bh = (regs->regs[10]) >> 32; | ||
423 | bl = (regs->regs[10]) & 0xffffffff; | ||
424 | ch = (regs->regs[11]) >> 32; | ||
425 | cl = (regs->regs[11]) & 0xffffffff; | ||
426 | printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", | ||
427 | ah, al, bh, bl, ch, cl); | ||
428 | |||
429 | ah = (regs->regs[12]) >> 32; | ||
430 | al = (regs->regs[12]) & 0xffffffff; | ||
431 | bh = (regs->regs[13]) >> 32; | ||
432 | bl = (regs->regs[13]) & 0xffffffff; | ||
433 | ch = (regs->regs[14]) >> 32; | ||
434 | cl = (regs->regs[14]) & 0xffffffff; | ||
435 | printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n", | ||
436 | ah, al, bh, bl, ch, cl); | ||
437 | |||
438 | ah = (regs->regs[16]) >> 32; | ||
439 | al = (regs->regs[16]) & 0xffffffff; | ||
440 | bh = (regs->regs[17]) >> 32; | ||
441 | bl = (regs->regs[17]) & 0xffffffff; | ||
442 | ch = (regs->regs[19]) >> 32; | ||
443 | cl = (regs->regs[19]) & 0xffffffff; | ||
444 | printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n", | ||
445 | ah, al, bh, bl, ch, cl); | ||
446 | |||
447 | ah = (regs->regs[20]) >> 32; | ||
448 | al = (regs->regs[20]) & 0xffffffff; | ||
449 | bh = (regs->regs[21]) >> 32; | ||
450 | bl = (regs->regs[21]) & 0xffffffff; | ||
451 | ch = (regs->regs[22]) >> 32; | ||
452 | cl = (regs->regs[22]) & 0xffffffff; | ||
453 | printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n", | ||
454 | ah, al, bh, bl, ch, cl); | ||
455 | |||
456 | ah = (regs->regs[23]) >> 32; | ||
457 | al = (regs->regs[23]) & 0xffffffff; | ||
458 | bh = (regs->regs[24]) >> 32; | ||
459 | bl = (regs->regs[24]) & 0xffffffff; | ||
460 | ch = (regs->regs[25]) >> 32; | ||
461 | cl = (regs->regs[25]) & 0xffffffff; | ||
462 | printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n", | ||
463 | ah, al, bh, bl, ch, cl); | ||
464 | |||
465 | ah = (regs->regs[26]) >> 32; | ||
466 | al = (regs->regs[26]) & 0xffffffff; | ||
467 | bh = (regs->regs[27]) >> 32; | ||
468 | bl = (regs->regs[27]) & 0xffffffff; | ||
469 | ch = (regs->regs[28]) >> 32; | ||
470 | cl = (regs->regs[28]) & 0xffffffff; | ||
471 | printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n", | ||
472 | ah, al, bh, bl, ch, cl); | ||
473 | |||
474 | ah = (regs->regs[29]) >> 32; | ||
475 | al = (regs->regs[29]) & 0xffffffff; | ||
476 | bh = (regs->regs[30]) >> 32; | ||
477 | bl = (regs->regs[30]) & 0xffffffff; | ||
478 | ch = (regs->regs[31]) >> 32; | ||
479 | cl = (regs->regs[31]) & 0xffffffff; | ||
480 | printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n", | ||
481 | ah, al, bh, bl, ch, cl); | ||
482 | |||
483 | ah = (regs->regs[32]) >> 32; | ||
484 | al = (regs->regs[32]) & 0xffffffff; | ||
485 | bh = (regs->regs[33]) >> 32; | ||
486 | bl = (regs->regs[33]) & 0xffffffff; | ||
487 | ch = (regs->regs[34]) >> 32; | ||
488 | cl = (regs->regs[34]) & 0xffffffff; | ||
489 | printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n", | ||
490 | ah, al, bh, bl, ch, cl); | ||
491 | |||
492 | ah = (regs->regs[35]) >> 32; | ||
493 | al = (regs->regs[35]) & 0xffffffff; | ||
494 | bh = (regs->regs[36]) >> 32; | ||
495 | bl = (regs->regs[36]) & 0xffffffff; | ||
496 | ch = (regs->regs[37]) >> 32; | ||
497 | cl = (regs->regs[37]) & 0xffffffff; | ||
498 | printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n", | ||
499 | ah, al, bh, bl, ch, cl); | ||
500 | |||
501 | ah = (regs->regs[38]) >> 32; | ||
502 | al = (regs->regs[38]) & 0xffffffff; | ||
503 | bh = (regs->regs[39]) >> 32; | ||
504 | bl = (regs->regs[39]) & 0xffffffff; | ||
505 | ch = (regs->regs[40]) >> 32; | ||
506 | cl = (regs->regs[40]) & 0xffffffff; | ||
507 | printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n", | ||
508 | ah, al, bh, bl, ch, cl); | ||
509 | |||
510 | ah = (regs->regs[41]) >> 32; | ||
511 | al = (regs->regs[41]) & 0xffffffff; | ||
512 | bh = (regs->regs[42]) >> 32; | ||
513 | bl = (regs->regs[42]) & 0xffffffff; | ||
514 | ch = (regs->regs[43]) >> 32; | ||
515 | cl = (regs->regs[43]) & 0xffffffff; | ||
516 | printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n", | ||
517 | ah, al, bh, bl, ch, cl); | ||
518 | |||
519 | ah = (regs->regs[44]) >> 32; | ||
520 | al = (regs->regs[44]) & 0xffffffff; | ||
521 | bh = (regs->regs[45]) >> 32; | ||
522 | bl = (regs->regs[45]) & 0xffffffff; | ||
523 | ch = (regs->regs[46]) >> 32; | ||
524 | cl = (regs->regs[46]) & 0xffffffff; | ||
525 | printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n", | ||
526 | ah, al, bh, bl, ch, cl); | ||
527 | |||
528 | ah = (regs->regs[47]) >> 32; | ||
529 | al = (regs->regs[47]) & 0xffffffff; | ||
530 | bh = (regs->regs[48]) >> 32; | ||
531 | bl = (regs->regs[48]) & 0xffffffff; | ||
532 | ch = (regs->regs[49]) >> 32; | ||
533 | cl = (regs->regs[49]) & 0xffffffff; | ||
534 | printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n", | ||
535 | ah, al, bh, bl, ch, cl); | ||
536 | |||
537 | ah = (regs->regs[50]) >> 32; | ||
538 | al = (regs->regs[50]) & 0xffffffff; | ||
539 | bh = (regs->regs[51]) >> 32; | ||
540 | bl = (regs->regs[51]) & 0xffffffff; | ||
541 | ch = (regs->regs[52]) >> 32; | ||
542 | cl = (regs->regs[52]) & 0xffffffff; | ||
543 | printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n", | ||
544 | ah, al, bh, bl, ch, cl); | ||
545 | |||
546 | ah = (regs->regs[53]) >> 32; | ||
547 | al = (regs->regs[53]) & 0xffffffff; | ||
548 | bh = (regs->regs[54]) >> 32; | ||
549 | bl = (regs->regs[54]) & 0xffffffff; | ||
550 | ch = (regs->regs[55]) >> 32; | ||
551 | cl = (regs->regs[55]) & 0xffffffff; | ||
552 | printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n", | ||
553 | ah, al, bh, bl, ch, cl); | ||
554 | |||
555 | ah = (regs->regs[56]) >> 32; | ||
556 | al = (regs->regs[56]) & 0xffffffff; | ||
557 | bh = (regs->regs[57]) >> 32; | ||
558 | bl = (regs->regs[57]) & 0xffffffff; | ||
559 | ch = (regs->regs[58]) >> 32; | ||
560 | cl = (regs->regs[58]) & 0xffffffff; | ||
561 | printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n", | ||
562 | ah, al, bh, bl, ch, cl); | ||
563 | |||
564 | ah = (regs->regs[59]) >> 32; | ||
565 | al = (regs->regs[59]) & 0xffffffff; | ||
566 | bh = (regs->regs[60]) >> 32; | ||
567 | bl = (regs->regs[60]) & 0xffffffff; | ||
568 | ch = (regs->regs[61]) >> 32; | ||
569 | cl = (regs->regs[61]) & 0xffffffff; | ||
570 | printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n", | ||
571 | ah, al, bh, bl, ch, cl); | ||
572 | |||
573 | ah = (regs->regs[62]) >> 32; | ||
574 | al = (regs->regs[62]) & 0xffffffff; | ||
575 | bh = (regs->tregs[0]) >> 32; | ||
576 | bl = (regs->tregs[0]) & 0xffffffff; | ||
577 | ch = (regs->tregs[1]) >> 32; | ||
578 | cl = (regs->tregs[1]) & 0xffffffff; | ||
579 | printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n", | ||
580 | ah, al, bh, bl, ch, cl); | ||
581 | |||
582 | ah = (regs->tregs[2]) >> 32; | ||
583 | al = (regs->tregs[2]) & 0xffffffff; | ||
584 | bh = (regs->tregs[3]) >> 32; | ||
585 | bl = (regs->tregs[3]) & 0xffffffff; | ||
586 | ch = (regs->tregs[4]) >> 32; | ||
587 | cl = (regs->tregs[4]) & 0xffffffff; | ||
588 | printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n", | ||
589 | ah, al, bh, bl, ch, cl); | ||
590 | |||
591 | ah = (regs->tregs[5]) >> 32; | ||
592 | al = (regs->tregs[5]) & 0xffffffff; | ||
593 | bh = (regs->tregs[6]) >> 32; | ||
594 | bl = (regs->tregs[6]) & 0xffffffff; | ||
595 | ch = (regs->tregs[7]) >> 32; | ||
596 | cl = (regs->tregs[7]) & 0xffffffff; | ||
597 | printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n", | ||
598 | ah, al, bh, bl, ch, cl); | ||
599 | |||
600 | /* | ||
601 | * If we're in kernel mode, dump the stack too.. | ||
602 | */ | ||
603 | if (!user_mode(regs)) { | ||
604 | void show_stack(struct task_struct *tsk, unsigned long *sp); | ||
605 | unsigned long sp = regs->regs[15] & 0xffffffff; | ||
606 | struct task_struct *tsk = get_current(); | ||
607 | |||
608 | tsk->thread.kregs = regs; | ||
609 | |||
610 | show_stack(tsk, (unsigned long *)sp); | ||
611 | } | ||
612 | } | ||
613 | |||
/* Allocate the page-order chunk (THREAD_SIZE bytes) that holds a
 * task descriptor plus its kernel stack. */
struct task_struct * alloc_task_struct(void)
{
	/* Get task descriptor pages */
	return (struct task_struct *)
		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
}
620 | |||
/* Release the pages obtained by alloc_task_struct(). */
void free_task_struct(struct task_struct *p)
{
	free_pages((unsigned long) p, get_order(THREAD_SIZE));
}
625 | |||
/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be free'd until both the parent and the child have exited.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	/* A bit less processor dependent than older sh ... */
	unsigned int reply;

	/* Local syscall stubs: clone the current context sharing the VM,
	 * and terminate the child once fn() returns. */
static __inline__ _syscall2(int,clone,unsigned long,flags,unsigned long,newsp)
static __inline__ _syscall1(int,exit,int,ret)

	reply = clone(flags | CLONE_VM, 0);
	if (!reply) {
		/* Child */
		reply = exit(fn(arg));
	}

	return reply;
}
654 | |||
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.

	   The SH-5 FPU save/restore approach relies on last_task_used_math
	   pointing to a live task_struct.  When another task tries to use the
	   FPU for the 1st time, the FPUDIS trap handling (see
	   arch/sh64/kernel/fpu.c) will save the existing FPU state to the
	   FP regs field within last_task_used_math before re-loading the new
	   task's FPU state (or initialising it if the FPU has been used
	   before).  So if last_task_used_math is stale, and its page has already been
	   re-allocated for another use, the consequences are rather grim. Unless we
	   null it here, there is no other path through which it would get safely
	   nulled. */

#ifdef CONFIG_SH_FPU
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
#endif
}
679 | |||
void flush_thread(void)
{

	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
	 * previously running executable. */
#ifdef CONFIG_SH_FPU
	/* Drop any live FPU state belonging to the old image. */
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
	/* Force FPU state to be reinitialised after exec */
	clear_used_math();
#endif

	/* if we are a kernel thread, about to change to user thread,
	 * update kreg
	 */
	if(current->thread.kregs==&fake_swapper_regs) {
		/* Point kregs at the pt_regs slot at the top of this
		 * task's stack pages instead of the fake swapper frame. */
		current->thread.kregs =
			((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
		current->thread.uregs = current->thread.kregs;
	}
}
702 | |||
/* Per-arch hook for reclaiming thread resources at exit; SH-64 keeps
 * nothing outside the task pages, so there is nothing to release. */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
707 | |||
708 | /* Fill in the fpu structure for a core dump.. */ | ||
709 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | ||
710 | { | ||
711 | #ifdef CONFIG_SH_FPU | ||
712 | int fpvalid; | ||
713 | struct task_struct *tsk = current; | ||
714 | |||
715 | fpvalid = !!tsk_used_math(tsk); | ||
716 | if (fpvalid) { | ||
717 | if (current == last_task_used_math) { | ||
718 | grab_fpu(); | ||
719 | fpsave(&tsk->thread.fpu.hard); | ||
720 | release_fpu(); | ||
721 | last_task_used_math = 0; | ||
722 | regs->sr |= SR_FD; | ||
723 | } | ||
724 | |||
725 | memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); | ||
726 | } | ||
727 | |||
728 | return fpvalid; | ||
729 | #else | ||
730 | return 0; /* Task didn't use the fpu at all. */ | ||
731 | #endif | ||
732 | } | ||
733 | |||
734 | asmlinkage void ret_from_fork(void); | ||
735 | |||
/* Set up the child's thread state for fork/clone: build its kernel
 * pt_regs frame, choose its stack pointer, zero its syscall return
 * value and arrange for it to resume at ret_from_fork. */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long long se;			/* Sign extension */

#ifdef CONFIG_SH_FPU
	/* Flush the parent's live FPU state to memory so the child's
	 * copy of the thread struct picks it up. */
	if(last_task_used_math == current) {
		grab_fpu();
		fpsave(&current->thread.fpu.hard);
		release_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}
#endif
	/* Copy from sh version */
	/* Child's pt_regs frame sits at the top of its stack pages. */
	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1;

	*childregs = *regs;

	if (user_mode(regs)) {
		/* User fork: child runs on the caller-supplied user stack. */
		childregs->regs[15] = usp;
		p->thread.uregs = childregs;
	} else {
		/* Kernel thread: stack starts at the top of the task pages. */
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}

	childregs->regs[9] = 0; /* Set return value for child */
	childregs->sr |= SR_FD; /* Invalidate FPU flag */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Sign extend the edited stack.
	 * Note that thread.sp and thread.pc will stay
	 * 32-bit wide and context switch must take care
	 * of NEFF sign extension.
	 */

	se = childregs->regs[15];
	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
	childregs->regs[15] = se;

	return 0;
}
783 | |||
/*
 * Fill in the user structure for an a.out core dump: text/data/stack
 * extents (in pages), the register snapshot, and the FPU state if the
 * task used the FPU.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	dump->magic = CMAGIC;
	dump->start_code = current->mm->start_code;
	dump->start_data  = current->mm->start_data;
	/* Stack base: round the current SP down to a page boundary. */
	dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
	dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
	dump->u_ssize = (current->mm->start_stack - dump->start_stack +
			 PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Debug registers will come here. */

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}
803 | |||
/*
 * fork() syscall entry: full copy, child reuses the parent's current
 * stack pointer (taken from the saved pt_regs).
 */
asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
			unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs *pregs)
{
	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}
811 | |||
/*
 * clone() syscall entry.  A zero newsp means "share the parent's
 * current stack pointer" (standard clone semantics).
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	if (!newsp)
		newsp = pregs->regs[15];
	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
}
821 | |||
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	/* CLONE_VM + CLONE_VFORK: share the address space and block the
	   parent until the child execs or exits. */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}
839 | |||
/*
 * sys_execve() executes a new program.
 *
 * Copies the filename in from userspace, hands off to do_execve()
 * with the saved register frame, and on success clears any pending
 * single-step (PT_DTRACE) state left over from a debugger.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r5,
			  unsigned long r6, unsigned long r7,
			  struct pt_regs *pregs)
{
	int error;
	char *filename;

	lock_kernel();
	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  pregs);
	if (error == 0) {
		/* A successful exec wipes the single-step flag so the
		   new image does not inherit delayed-trap state. */
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	unlock_kernel();
	return error;
}
871 | |||
872 | /* | ||
873 | * These bracket the sleeping functions.. | ||
874 | */ | ||
875 | extern void interruptible_sleep_on(wait_queue_head_t *q); | ||
876 | |||
877 | #define mid_sched ((unsigned long) interruptible_sleep_on) | ||
878 | |||
/*
 * Return non-zero if PC lies inside sh64_switch_to(), i.e. the task
 * was asleep mid-context-switch when its PC was saved.  Used by
 * get_wchan() below to decide whether to walk up the frames.
 */
static int in_sh64_switch_to(unsigned long pc)
{
	extern char __sh64_switch_to_end;
	/* For a sleeping task, the PC is somewhere in the middle of the function,
	   so we don't have to worry about masking the LSB off */
	return (pc >= (unsigned long) sh64_switch_to) &&
	       (pc < (unsigned long) &__sh64_switch_to_end);
}
887 | |||
/*
 * Report where a sleeping task is blocked ("wait channel", for
 * /proc/<pid>/wchan and ps).  Returns 0 for runnable/current tasks.
 * With frame pointers enabled, unwinds from sh64_switch_to through
 * schedule() to the caller of schedule(); otherwise just returns the
 * saved PC.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_fp;
	unsigned long sh64_switch_to_fp;
	unsigned long schedule_caller_pc;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
	if (in_sh64_switch_to(pc)) {
		sh64_switch_to_fp = (long) p->thread.sp;
		/* r14 is saved at offset 4 in the sh64_switch_to frame */
		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);

		/* and the caller of 'schedule' is (currently!) saved at offset 24
		   in the frame of schedule (from disasm) */
		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
		return schedule_caller_pc;
	}
#endif
	return pc;
}
917 | |||
918 | /* Provide a /proc/asids file that lists out the | ||
919 | ASIDs currently associated with the processes. (If the DM.PC register is | ||
920 | examined through the debug link, this shows ASID + PC. To make use of this, | ||
921 | the PID->ASID relationship needs to be known. This is primarily for | ||
922 | debugging.) | ||
923 | */ | ||
924 | |||
925 | #if defined(CONFIG_SH64_PROC_ASIDS) | ||
926 | #include <linux/init.h> | ||
927 | #include <linux/proc_fs.h> | ||
928 | |||
929 | static int | ||
930 | asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) | ||
931 | { | ||
932 | int len=0; | ||
933 | struct task_struct *p; | ||
934 | read_lock(&tasklist_lock); | ||
935 | for_each_process(p) { | ||
936 | int pid = p->pid; | ||
937 | struct mm_struct *mm; | ||
938 | if (!pid) continue; | ||
939 | mm = p->mm; | ||
940 | if (mm) { | ||
941 | unsigned long asid, context; | ||
942 | context = mm->context; | ||
943 | asid = (context & 0xff); | ||
944 | len += sprintf(buf+len, "%5d : %02lx\n", pid, asid); | ||
945 | } else { | ||
946 | len += sprintf(buf+len, "%5d : (none)\n", pid); | ||
947 | } | ||
948 | } | ||
949 | read_unlock(&tasklist_lock); | ||
950 | *eof = 1; | ||
951 | return len; | ||
952 | } | ||
953 | |||
/*
 * Create /proc/asids at boot.  Registered as a plain initcall; runs
 * once during kernel initialisation.
 */
static int __init register_proc_asids(void)
{
	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
	return 0;
}

__initcall(register_proc_asids);
961 | #endif | ||
962 | |||
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c new file mode 100644 index 000000000000..800288c1562b --- /dev/null +++ b/arch/sh64/kernel/ptrace.c | |||
@@ -0,0 +1,376 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/ptrace.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * | ||
11 | * Started from SH3/4 version: | ||
12 | * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | ||
13 | * | ||
14 | * Original x86 implementation: | ||
15 | * By Ross Biro 1/23/92 | ||
16 | * edited by Linus Torvalds | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/rwsem.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/user.h> | ||
30 | |||
31 | #include <asm/io.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include <asm/pgtable.h> | ||
34 | #include <asm/system.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | |||
38 | /* This mask defines the bits of the SR which the user is not allowed to | ||
39 | change, which are everything except S, Q, M, PR, SZ, FR. */ | ||
40 | #define SR_MASK (0xffff8cfd) | ||
41 | |||
42 | /* | ||
43 | * does not yet catch signals sent when the child dies. | ||
44 | * in exit.c or in signal.c. | ||
45 | */ | ||
46 | |||
47 | /* | ||
48 | * This routine will get a word from the user area in the process kernel stack. | ||
49 | */ | ||
50 | static inline int get_stack_long(struct task_struct *task, int offset) | ||
51 | { | ||
52 | unsigned char *stack; | ||
53 | |||
54 | stack = (unsigned char *)(task->thread.uregs); | ||
55 | stack += offset; | ||
56 | return (*((int *)stack)); | ||
57 | } | ||
58 | |||
59 | static inline unsigned long | ||
60 | get_fpu_long(struct task_struct *task, unsigned long addr) | ||
61 | { | ||
62 | unsigned long tmp; | ||
63 | struct pt_regs *regs; | ||
64 | regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; | ||
65 | |||
66 | if (!tsk_used_math(task)) { | ||
67 | if (addr == offsetof(struct user_fpu_struct, fpscr)) { | ||
68 | tmp = FPSCR_INIT; | ||
69 | } else { | ||
70 | tmp = 0xffffffffUL; /* matches initial value in fpu.c */ | ||
71 | } | ||
72 | return tmp; | ||
73 | } | ||
74 | |||
75 | if (last_task_used_math == task) { | ||
76 | grab_fpu(); | ||
77 | fpsave(&task->thread.fpu.hard); | ||
78 | release_fpu(); | ||
79 | last_task_used_math = 0; | ||
80 | regs->sr |= SR_FD; | ||
81 | } | ||
82 | |||
83 | tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)]; | ||
84 | return tmp; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * This routine will put a word into the user area in the process kernel stack. | ||
89 | */ | ||
90 | static inline int put_stack_long(struct task_struct *task, int offset, | ||
91 | unsigned long data) | ||
92 | { | ||
93 | unsigned char *stack; | ||
94 | |||
95 | stack = (unsigned char *)(task->thread.uregs); | ||
96 | stack += offset; | ||
97 | *(unsigned long *) stack = data; | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static inline int | ||
102 | put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data) | ||
103 | { | ||
104 | struct pt_regs *regs; | ||
105 | |||
106 | regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; | ||
107 | |||
108 | if (!tsk_used_math(task)) { | ||
109 | fpinit(&task->thread.fpu.hard); | ||
110 | set_stopped_child_used_math(task); | ||
111 | } else if (last_task_used_math == task) { | ||
112 | grab_fpu(); | ||
113 | fpsave(&task->thread.fpu.hard); | ||
114 | release_fpu(); | ||
115 | last_task_used_math = 0; | ||
116 | regs->sr |= SR_FD; | ||
117 | } | ||
118 | |||
119 | ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data; | ||
120 | return 0; | ||
121 | } | ||
122 | |||
/*
 * ptrace() syscall entry for sh64.
 *
 * Handles the architecture-specific requests (register/FPU peek and
 * poke, continue, single-step, kill) and defers everything else to
 * the generic ptrace_request().  The traced child's saved registers
 * live in its kernel-stack pt_regs, reached via get/put_stack_long.
 */
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
#define WPC_DBRMODE 0x0d104008
	static int first_call = 1;
	int ret;

	lock_kernel();

	if (first_call) {
		/* Set WPC.DBRMODE to 0. This makes all debug events get
		 * delivered through RESVEC, i.e. into the handlers in entry.S.
		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
		 * would normally be left set to 1, which makes debug events get
		 * delivered through DBRVEC, i.e. into the remote gdb's
		 * handlers.  This prevents ptrace getting them, and confuses
		 * the remote gdb.) */
		printk("DBRMODE set to 0 to permit native debugging\n");
		poke_real_address_q(WPC_DBRMODE, 0);
		first_call = 0;
	}

	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	/* Look the child up and take a reference so it cannot go away
	   while we operate on it; dropped at out_tsk. */
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	/* All remaining requests need the child stopped and attached
	   (PTRACE_KILL is allowed through regardless). */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long *) data);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* Word-aligned, non-negative offsets only. */
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if ((addr >= offsetof(struct user, fpu)) &&
			 (addr < offsetof(struct user, u_fpvalid))) {
			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			tmp = !!tsk_used_math(child);
		} else {
			break;
		}
		ret = put_user(tmp, (unsigned long *)data);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area. We must
		   disallow any changes to certain SR bits or u_fpvalid, since
		   this could crash the kernel or result in a security
		   loophole. */
		ret = -EIO;
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs)) {
			/* Ignore change of top 32 bits of SR */
			if (addr == offsetof (struct pt_regs, sr)+4)
			{
				ret = 0;
				break;
			}
			/* If lower 32 bits of SR, ignore non-user bits */
			if (addr == offsetof (struct pt_regs, sr))
			{
				long cursr = get_stack_long(child, addr);
				/* Keep the privileged SR_MASK bits from
				   the child's current SR. */
				data &= ~(SR_MASK);
				data |= (cursr & SR_MASK);
			}
			ret = put_stack_long(child, addr, data);
		}
		else if ((addr >= offsetof(struct user, fpu)) &&
			 (addr < offsetof(struct user, u_fpvalid))) {
			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
		}
		break;

	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: { /* restart after signal. */
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		/* data is the signal to deliver on resume (0 = none). */
		child->exit_code = data;
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL: {
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		wake_up_process(child);
		break;
	}

	case PTRACE_SINGLESTEP: { /* set the trap flag. */
		struct pt_regs *regs;

		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		if ((child->ptrace & PT_DTRACE) == 0) {
			/* Spurious delayed TF traps may occur */
			child->ptrace |= PT_DTRACE;
		}

		regs = child->thread.uregs;

		regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */

		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	}

	case PTRACE_DETACH: /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
321 | |||
/*
 * Called from the syscall entry/exit path when TIF_SYSCALL_TRACE is
 * set: notify the tracer via SIGTRAP (with bit 0x80 set when
 * PT_TRACESYSGOOD is in effect) and deliver any signal the tracer
 * queued in exit_code while we were stopped.
 */
asmlinkage void syscall_trace(void)
{
	struct task_struct *tsk = current;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(tsk->ptrace & PT_PTRACED))
		return;

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (tsk->exit_code) {
		send_sig(tsk->exit_code, tsk, 1);
		tsk->exit_code = 0;
	}
}
343 | |||
/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
	/* This is called after a single step exception (DEBUGSS).
	   There is no need to change the PC, as it is a post-execution
	   exception, as entry.S does not do anything to the PC for DEBUGSS.
	   We need to clear the Single Step setting in SR to avoid
	   continually stepping. */
	local_irq_enable();
	regs->sr &= ~SR_SSTEP;
	/* Report the step to the tracer/handler as a SIGTRAP. */
	force_sig(SIGTRAP, current);
}
356 | |||
/* Called with interrupts disabled */
asmlinkage void do_software_break_point(unsigned long long vec,
					struct pt_regs *regs)
{
	/* We need to forward step the PC, to counteract the backstep done
	   in signal.c. */
	local_irq_enable();
	/* Raise SIGTRAP for the breakpoint, then advance past the
	   trapping instruction. */
	force_sig(SIGTRAP, current);
	regs->pc += 4;
}
367 | |||
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 * (On sh64 nothing persistent needs clearing here; SR_SSTEP
 * auto-resets on the exception — see do_single_step above.)
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}
diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c new file mode 100644 index 000000000000..72c16533436e --- /dev/null +++ b/arch/sh64/kernel/semaphore.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Just taken from alpha implementation. | ||
3 | * This can't work well, perhaps. | ||
4 | */ | ||
5 | /* | ||
6 | * Generic semaphore code. Buyer beware. Do your own | ||
7 | * specific changes in <asm/semaphore-helper.h> | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <asm/semaphore.h> | ||
16 | #include <asm/semaphore-helper.h> | ||
17 | |||
18 | spinlock_t semaphore_wake_lock; | ||
19 | |||
20 | /* | ||
21 | * Semaphores are implemented using a two-way counter: | ||
22 | * The "count" variable is decremented for each process | ||
23 | * that tries to sleep, while the "waking" variable is | ||
24 | * incremented when the "up()" code goes to wake up waiting | ||
25 | * processes. | ||
26 | * | ||
27 | * Notably, the inline "up()" and "down()" functions can | ||
28 | * efficiently test if they need to do any extra work (up | ||
29 | * needs to do something only if count was negative before | ||
30 | * the increment operation. | ||
31 | * | ||
32 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
33 | * atomically. | ||
34 | * | ||
35 | * When __up() is called, the count was negative before | ||
36 | * incrementing it, and we need to wake up somebody. | ||
37 | * | ||
38 | * This routine adds one to the count of processes that need to | ||
39 | * wake up and exit. ALL waiting processes actually wake up but | ||
40 | * only the one that gets to the "waking" field first will gate | ||
41 | * through and acquire the semaphore. The others will go back | ||
42 | * to sleep. | ||
43 | * | ||
44 | * Note that these functions are only called when there is | ||
45 | * contention on the lock, and as such all this is the | ||
46 | * "non-critical" part of the whole semaphore business. The | ||
47 | * critical part is the inline stuff in <asm/semaphore.h> | ||
48 | * where we want to avoid any extra jumps and calls. | ||
49 | */ | ||
/*
 * Slow path of up(): record one pending wakeup and wake the queue.
 * All waiters wake, but only the first to consume the "waking" count
 * (in waking_non_zero) acquires the semaphore; the rest sleep again.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
55 | |||
56 | /* | ||
57 | * Perform the "down" function. Return zero for semaphore acquired, | ||
58 | * return negative for signalled out of the function. | ||
59 | * | ||
60 | * If called from __down, the return is ignored and the wait loop is | ||
61 | * not interruptible. This means that a task waiting on a semaphore | ||
62 | * using "down()" cannot be killed until someone does an "up()" on | ||
63 | * the semaphore. | ||
64 | * | ||
65 | * If called from __down_interruptible, the return value gets checked | ||
66 | * upon return. If the return value is negative then the task continues | ||
67 | * with the negative value in the return register (it can be tested by | ||
68 | * the caller). | ||
69 | * | ||
70 | * Either form may be used in conjunction with "up()". | ||
71 | * | ||
72 | */ | ||
73 | |||
/* Declare and initialise the wait-queue entry used by the DOWN_*
   macros; must appear first in the function body. */
#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

/* Enqueue ourselves on the semaphore's wait queue in the given sleep
   state and open the wait loop (closed by DOWN_TAIL). */
#define DOWN_HEAD(task_state)						\
									\
									\
	tsk->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

/* Close the wait loop opened by DOWN_HEAD, restore TASK_RUNNING and
   dequeue from the semaphore's wait queue. */
#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
107 | |||
/*
 * Slow path of down(): sleep uninterruptibly until a wakeup from
 * __up() is available (waking_non_zero consumes it atomically).
 * Note the DOWN_HEAD/DOWN_TAIL macros supply the surrounding loop.
 */
void __sched __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
117 | |||
/*
 * Slow path of down_interruptible(): like __down() but the sleep can
 * be broken by a signal.  Returns 0 when the semaphore was acquired,
 * negative (-EINTR) when interrupted.
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	/* 1 = got the semaphore, <0 = interrupted, 0 = keep waiting. */
	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
136 | |||
/*
 * Non-blocking down(): succeed only if a wakeup token is immediately
 * available.  Return value follows waking_non_zero_trylock's
 * convention (see asm/semaphore-helper.h).
 */
int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c new file mode 100644 index 000000000000..c7a7b816a30f --- /dev/null +++ b/arch/sh64/kernel/setup.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/setup.c | ||
7 | * | ||
8 | * sh64 Arch Support | ||
9 | * | ||
10 | * This file handles the architecture-dependent parts of initialization | ||
11 | * | ||
12 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
13 | * Copyright (C) 2003, 2004 Paul Mundt | ||
14 | * | ||
15 | * benedict.gaster@superh.com: 2nd May 2002 | ||
16 | * Modified to use the empty_zero_page to pass command line arguments. | ||
17 | * | ||
18 | * benedict.gaster@superh.com: 3rd May 2002 | ||
19 | * Added support for ramdisk, removing statically linked romfs at the same time. | ||
20 | * | ||
21 | * lethal@linux-sh.org: 15th May 2003 | ||
22 | * Added generic procfs cpuinfo reporting. Make boards just export their name. | ||
23 | * | ||
24 | * lethal@linux-sh.org: 25th May 2003 | ||
25 | * Added generic get_cpu_subtype() for subtype reporting from cpu_data->type. | ||
26 | * | ||
27 | */ | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/rwsem.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/stddef.h> | ||
34 | #include <linux/unistd.h> | ||
35 | #include <linux/ptrace.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/user.h> | ||
38 | #include <linux/a.out.h> | ||
39 | #include <linux/tty.h> | ||
40 | #include <linux/ioport.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/config.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/seq_file.h> | ||
45 | #include <linux/blkdev.h> | ||
46 | #include <linux/bootmem.h> | ||
47 | #include <linux/console.h> | ||
48 | #include <linux/root_dev.h> | ||
49 | #include <linux/cpu.h> | ||
50 | #include <linux/initrd.h> | ||
51 | #include <asm/processor.h> | ||
52 | #include <asm/page.h> | ||
53 | #include <asm/pgtable.h> | ||
54 | #include <asm/platform.h> | ||
55 | #include <asm/uaccess.h> | ||
56 | #include <asm/system.h> | ||
57 | #include <asm/io.h> | ||
58 | #include <asm/sections.h> | ||
59 | #include <asm/setup.h> | ||
60 | #include <asm/smp.h> | ||
61 | |||
62 | #ifdef CONFIG_VT | ||
63 | #include <linux/console.h> | ||
64 | #endif | ||
65 | |||
66 | struct screen_info screen_info; | ||
67 | |||
68 | #ifdef CONFIG_BLK_DEV_RAM | ||
69 | extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ | ||
70 | extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */ | ||
71 | extern int rd_image_start; /* starting block # of image */ | ||
72 | #endif | ||
73 | |||
74 | extern int root_mountflags; | ||
75 | extern char *get_system_type(void); | ||
76 | extern void platform_setup(void); | ||
77 | extern void platform_monitor(void); | ||
78 | extern void platform_reserve(void); | ||
79 | extern int sh64_cache_init(void); | ||
80 | extern int sh64_tlb_init(void); | ||
81 | |||
/* Bit layout of the boot loader's RAMDISK_FLAGS word. */
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000

/* Parsed kernel command line (filled by parse_mem_cmdline()). */
static char command_line[COMMAND_LINE_SIZE] = { 0, };
/* Physical RAM window; memory_end may be shrunk by a "mem=" option. */
unsigned long long memory_start = CONFIG_MEMORY_START;
unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);

/* Per-CPU info for the boot processor (cache/TLB geometry, clocks, type). */
struct sh_cpuinfo boot_cpu_data;
91 | |||
/*
 * Scan the boot command line for "mem=XXX[kKmM]" options, adjusting
 * memory_end accordingly, and copy the remaining options (with any
 * "mem=" tokens stripped out) into command_line.  *cmdline_p is left
 * pointing at the stripped copy for the caller.
 */
static inline void parse_mem_cmdline (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	for (;;) {
		/*
		 * "mem=XXX[kKmM]" defines a size of memory.
		 */
		if (c == ' ' && !memcmp(from, "mem=", 4)) {
			/* Drop the space we already copied before "mem=",
			   so the stripped option leaves no double blank. */
			if (to != command_line)
				to--;
			{
				unsigned long mem_size;

				/* memparse() advances 'from' past the option,
				   so none of "mem=XXX" is copied to 'to'. */
				mem_size = memparse(from+4, &from);
				memory_end = memory_start + mem_size;
			}
		}
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';

	*cmdline_p = command_line;
}
126 | |||
/*
 * Identify the SH-5 variant by reading the Chip Identification Register
 * (CIR) at physical address 0x0d000008 and store the result in
 * boot_cpu_data.type (CPU_SH5_101, CPU_SH5_103, or CPU_SH_NONE).
 */
static void __init sh64_cpu_type_detect(void)
{
	extern unsigned long long peek_real_address_q(unsigned long long addr);
	unsigned long long cir;
	/* Do peeks in real mode to avoid having to set up a mapping for the
	   WPC registers. On SH5-101 cut2, such a mapping would be exposed to
	   an address translation erratum which would make it hard to set up
	   correctly. */
	cir = peek_real_address_q(0x0d000008);

	/* Low halfword 0x5103 identifies the SH5-103. */
	if ((cir & 0xffff) == 0x5103) {
		boot_cpu_data.type = CPU_SH5_103;
	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
		/* CPU.VCR aliased at CIR address on SH5-101 */
		boot_cpu_data.type = CPU_SH5_101;
	} else {
		boot_cpu_data.type = CPU_SH_NONE;
	}
}
146 | |||
/*
 * Architecture-dependent boot-time initialization for sh64: bring up
 * the early console, TLB and cache bookkeeping, detect the CPU,
 * process the command line, hand memory to the bootmem allocator
 * (reserving the kernel image, bitmap and optional initrd), claim
 * platform resources and finally initialize paging.  The statement
 * order below is significant - do not reorder casually.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size, i;
	unsigned long first_pfn, start_pfn, last_pfn, pages;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);

	/*
	 * Setup Early SCIF console
	 */
	enable_early_printk();
#endif

	/*
	 * Setup TLB mappings
	 */
	sh64_tlb_init();

	/*
	 * Caches are already initialized by the time we get here, so we just
	 * fill in cpu_data info for the caches.
	 */
	sh64_cache_init();

	platform_setup();
	platform_monitor();

	sh64_cpu_type_detect();

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	/* Decode the boot loader's ramdisk flags word. */
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = __pa(_text);
	code_resource.end = __pa(_etext)-1;
	data_resource.start = __pa(_etext);
	data_resource.end = __pa(_edata)-1;

	/* May shrink memory_end via a "mem=" option. */
	parse_mem_cmdline(cmdline_p);

	/*
	 * Find the lowest and highest page frame numbers we have available
	 */
	first_pfn = PFN_DOWN(memory_start);
	last_pfn = PFN_DOWN(memory_end);
	pages = last_pfn - first_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 first_pfn,
					 last_pfn);
	/*
	 * Round it up.
	 */
	bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));

	/*
	 * Register fully available RAM pages with the bootmem allocator.
	 */
	free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));

	/*
	 * Reserve all kernel sections + bootmem bitmap + a guard page.
	 */
	reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
		(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));

	/*
	 * Reserve platform dependent sections
	 */
	platform_reserve();

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		/* Only accept the initrd if it fits inside usable RAM. */
		if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);

			initrd_start =
				(long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;

			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       (long) INITRD_START + INITRD_SIZE,
			       PFN_PHYS(last_pfn));
			initrd_start = 0;
		}
	}
#endif

	/*
	 * Claim all RAM, ROM, and I/O resources.
	 */

	/* Kernel RAM */
	request_resource(&iomem_resource, &code_resource);
	request_resource(&iomem_resource, &data_resource);

	/* Other KRAM space */
	for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
		request_resource(&iomem_resource,
				 &platform_parms.kram_res_p[i]);

	/* XRAM space */
	for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
		request_resource(&iomem_resource,
				 &platform_parms.xram_res_p[i]);

	/* ROM space */
	for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
		request_resource(&iomem_resource,
				 &platform_parms.rom_res_p[i]);

	/* I/O space */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource,
				 &platform_parms.io_res_p[i]);


#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");

	paging_init();
}
301 | |||
/*
 * Link-time/run-time trap for xchg() used on an unsupported operand
 * size; referenced by the xchg() macro's default switch branch.
 */
void __xchg_called_with_bad_pointer(void)
{
	printk(KERN_EMERG "xchg() called with bad pointer !\n");
}
306 | |||
/* Single-CPU sysfs device node (sh64 is uniprocessor here). */
static struct cpu cpu[1];

/* Register the one CPU with the sysfs topology at subsys_initcall time. */
static int __init topology_init(void)
{
	return register_cpu(cpu, 0, NULL);
}

subsys_initcall(topology_init);
315 | |||
316 | /* | ||
317 | * Get CPU information | ||
318 | */ | ||
/* Human-readable names indexed by boot_cpu_data.type (set in
 * sh64_cpu_type_detect(), which only produces these three values). */
static const char *cpu_name[] = {
	[CPU_SH5_101]	= "SH5-101",
	[CPU_SH5_103]	= "SH5-103",
	[CPU_SH_NONE]	= "Unknown",
};

/* Return the detected CPU subtype name for /proc/cpuinfo et al. */
const char *get_cpu_subtype(void)
{
	return cpu_name[boot_cpu_data.type];
}
329 | |||
330 | #ifdef CONFIG_PROC_FS | ||
/*
 * seq_file "show" callback for /proc/cpuinfo: emit machine, CPU type,
 * cache/TLB geometry, clock frequencies and bogomips.
 */
static int show_cpuinfo(struct seq_file *m,void *v)
{
	unsigned int cpu = smp_processor_id();

	/* Print the board name once, ahead of the first CPU's entry. */
	if (!cpu)
		seq_printf(m, "machine\t\t: %s\n", get_system_type());

	seq_printf(m, "processor\t: %d\n", cpu);
	seq_printf(m, "cpu family\t: SH-5\n");
	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());

	/* Total cache size = ways * sets * line size, reported in KiB. */
	seq_printf(m, "icache size\t: %dK-bytes\n",
		   (boot_cpu_data.icache.ways *
		    boot_cpu_data.icache.sets *
		    boot_cpu_data.icache.linesz) >> 10);
	seq_printf(m, "dcache size\t: %dK-bytes\n",
		   (boot_cpu_data.dcache.ways *
		    boot_cpu_data.dcache.sets *
		    boot_cpu_data.dcache.linesz) >> 10);
	seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
	seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);

/* Format a Hz value as "MHz.fraction" (two fractional digits). */
#define PRINT_CLOCK(name, value) \
	seq_printf(m, name " clock\t: %d.%02dMHz\n", \
		   ((value) / 1000000), ((value) % 1000000)/10000)

	PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
	PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
	PRINT_CLOCK("module", boot_cpu_data.module_clock);

	seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
		   (loops_per_jiffy*HZ+2500)/500000,
		   ((loops_per_jiffy*HZ+2500)/5000) % 100);

	return 0;
}
367 | |||
368 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
369 | { | ||
370 | return (void*)(*pos == 0); | ||
371 | } | ||
/* seq_file iterator next: single-element sequence, so always done. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
/* seq_file iterator stop: nothing was allocated, nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* Iterator operations backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
385 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c new file mode 100644 index 000000000000..0b5497d70bd3 --- /dev/null +++ b/arch/sh64/kernel/sh_ksyms.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/sh_ksyms.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/rwsem.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/user.h> | ||
17 | #include <linux/elfcore.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/in6.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/tty.h> | ||
23 | |||
24 | #include <asm/semaphore.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/checksum.h> | ||
28 | #include <asm/io.h> | ||
29 | #include <asm/delay.h> | ||
30 | #include <asm/irq.h> | ||
31 | |||
/* Prototypes for symbols defined in assembly / other objects that are
 * exported below. */
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);

#if 0
/* Not yet - there's no declaration of drive_info anywhere. */
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
extern struct drive_info_struct drive_info;
EXPORT_SYMBOL(drive_info);
#endif
#endif

/* platform dependent support */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);

/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);

/* String helpers used by modules. */
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);

#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif

/* Semaphore slow paths and user-access helpers (called from asm stubs). */
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);

EXPORT_SYMBOL(flush_dcache_page);

/* For ext3 */
EXPORT_SYMBOL(sh64_page_clear);

/* Ugh. These come in from libgcc.a at link time. */

extern void __sdivsi3(void);
extern void __muldi3(void);
extern void __udivsi3(void);
extern char __div_table;
EXPORT_SYMBOL(__sdivsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__div_table);
89 | |||
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c new file mode 100644 index 000000000000..45ad1026dde7 --- /dev/null +++ b/arch/sh64/kernel/signal.c | |||
@@ -0,0 +1,727 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/signal.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Paul Mundt | ||
10 | * Copyright (C) 2004 Richard Curnow | ||
11 | * | ||
12 | * Started from sh version. | ||
13 | * | ||
14 | */ | ||
15 | #include <linux/rwsem.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/personality.h> | ||
25 | #include <linux/suspend.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/unistd.h> | ||
28 | #include <linux/stddef.h> | ||
29 | #include <linux/personality.h> | ||
30 | #include <asm/ucontext.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | |||
34 | |||
35 | #define REG_RET 9 | ||
36 | #define REG_ARG1 2 | ||
37 | #define REG_ARG2 3 | ||
38 | #define REG_ARG3 4 | ||
39 | #define REG_SP 15 | ||
40 | #define REG_PR 18 | ||
41 | #define REF_REG_RET regs->regs[REG_RET] | ||
42 | #define REF_REG_SP regs->regs[REG_SP] | ||
43 | #define DEREF_REG_PR regs->regs[REG_PR] | ||
44 | |||
45 | #define DEBUG_SIG 0 | ||
46 | |||
47 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
48 | |||
49 | asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); | ||
50 | |||
51 | /* | ||
52 | * Atomically swap in the new signal mask, and wait for a signal. | ||
53 | */ | ||
54 | |||
/*
 * sigsuspend(2): atomically install 'mask' as the blocked set and
 * sleep until a signal is delivered; the original mask is restored by
 * the signal-frame machinery.  Never returns normally - always -EINTR.
 * r3..r7 are the remaining SH-5 argument registers, unused here.
 */
asmlinkage int
sys_sigsuspend(old_sigset_t mask,
	       unsigned long r3, unsigned long r4, unsigned long r5,
	       unsigned long r6, unsigned long r7,
	       struct pt_regs * regs)
{
	sigset_t saveset;

	/* SIGKILL/SIGSTOP can never be blocked. */
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	REF_REG_RET = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		regs->pc += 4; /* because sys_sigreturn decrements the pc */
		if (do_signal(regs, &saveset)) {
			/* pc now points at signal handler. Need to decrement
			   it because entry.S will increment it. */
			regs->pc -= 4;
			return -EINTR;
		}
	}
}
83 | |||
/*
 * rt_sigsuspend(2): like sys_sigsuspend() but takes a full sigset_t
 * from userspace.  Validates the sigset size, swaps in the new blocked
 * set and sleeps until a signal arrives; always returns -EINTR.
 */
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
		  unsigned long r4, unsigned long r5, unsigned long r6,
		  unsigned long r7,
		  struct pt_regs * regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&newset, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	REF_REG_RET = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		regs->pc += 4; /* because sys_sigreturn decrements the pc */
		if (do_signal(regs, &saveset)) {
			/* pc now points at signal handler. Need to decrement
			   it because entry.S will increment it. */
			regs->pc -= 4;
			return -EINTR;
		}
	}
}
118 | |||
/*
 * Old-style sigaction(2): convert the userspace old_sigaction to/from
 * the kernel k_sigaction and defer to do_sigaction().  Returns 0 or
 * -EFAULT on a bad user pointer / do_sigaction()'s error code.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		/* old ABI carries only the first word of the mask */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
150 | |||
/*
 * sigaltstack(2): thin wrapper around do_sigaltstack(), supplying the
 * current user stack pointer so the core code can check whether we are
 * already running on the alternate stack.
 */
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		unsigned long r4, unsigned long r5, unsigned long r6,
		unsigned long r7,
		struct pt_regs * regs)
{
	return do_sigaltstack(uss, uoss, REF_REG_SP);
}
159 | |||
160 | |||
161 | /* | ||
162 | * Do a signal return; undo the signal stack. | ||
163 | */ | ||
164 | |||
/*
 * User-stack frame for a non-RT signal: saved machine context, the
 * remaining words of the blocked-signal mask (word 0 lives inside the
 * sigcontext as 'oldmask'), and the sigreturn trampoline code.
 */
struct sigframe
{
	struct sigcontext sc;
	unsigned long extramask[_NSIG_WORDS-1];
	long long retcode[2];
};
171 | |||
172 | struct rt_sigframe | ||
173 | { | ||
174 | struct siginfo __user *pinfo; | ||
175 | void *puc; | ||
176 | struct siginfo info; | ||
177 | struct ucontext uc; | ||
178 | long long retcode[2]; | ||
179 | }; | ||
180 | |||
#ifdef CONFIG_SH_FPU
/*
 * Reload FPU state from a user sigcontext.  If the context says the
 * FPU was in use, discard any live FPU ownership (forcing a lazy
 * reload via the SR.FD trap) and copy the 32 FP registers + FPSCR from
 * userspace into the thread struct.  Returns the accumulated
 * __get_user/__copy_from_user error state.
 */
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	err |= __get_user (fpvalid, &sc->sc_fpvalid);
	conditional_used_math(fpvalid);
	if (! fpvalid)
		return err;

	/* Drop lazy-FPU ownership: setting SR.FD makes the next FP use trap
	   and reload from the thread struct we are about to overwrite. */
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}

	/* 32 long long FP registers followed by one int (FPSCR). */
	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
				(sizeof(long long) * 32) + (sizeof(int) * 1));

	return err;
}

/*
 * Save FPU state into a user sigcontext.  If this task owns the FPU,
 * flush the live registers to the thread struct first, then copy the
 * 32 FP registers + FPSCR out to userspace and mark the math state
 * discarded.  Returns the accumulated user-access error state.
 */
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	fpvalid = !!used_math();
	err |= __put_user(fpvalid, &sc->sc_fpvalid);
	if (! fpvalid)
		return err;

	if (current == last_task_used_math) {
		grab_fpu();
		fpsave(&current->thread.fpu.hard);
		release_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}

	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
			      (sizeof(long long) * 32) + (sizeof(int) * 1));
	clear_used_math();

	return err;
}
229 | #else | ||
230 | static inline int | ||
231 | restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) | ||
232 | {} | ||
233 | static inline int | ||
234 | setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) | ||
235 | {} | ||
236 | #endif | ||
237 | |||
238 | static int | ||
239 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p) | ||
240 | { | ||
241 | unsigned int err = 0; | ||
242 | unsigned long long current_sr, new_sr; | ||
243 | #define SR_MASK 0xffff8cfd | ||
244 | |||
245 | #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) | ||
246 | |||
247 | COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]); | ||
248 | COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]); | ||
249 | COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]); | ||
250 | COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]); | ||
251 | COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]); | ||
252 | COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]); | ||
253 | COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]); | ||
254 | COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]); | ||
255 | COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]); | ||
256 | COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]); | ||
257 | COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]); | ||
258 | COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]); | ||
259 | COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]); | ||
260 | COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]); | ||
261 | COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]); | ||
262 | COPY(regs[60]); COPY(regs[61]); COPY(regs[62]); | ||
263 | COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]); | ||
264 | COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]); | ||
265 | |||
266 | /* Prevent the signal handler manipulating SR in a way that can | ||
267 | crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be | ||
268 | modified */ | ||
269 | current_sr = regs->sr; | ||
270 | err |= __get_user(new_sr, &sc->sc_sr); | ||
271 | regs->sr &= SR_MASK; | ||
272 | regs->sr |= (new_sr & ~SR_MASK); | ||
273 | |||
274 | COPY(pc); | ||
275 | |||
276 | #undef COPY | ||
277 | |||
278 | /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr | ||
279 | * has been restored above.) */ | ||
280 | err |= restore_sigcontext_fpu(regs, sc); | ||
281 | |||
282 | regs->syscall_nr = -1; /* disable syscall checks */ | ||
283 | err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]); | ||
284 | return err; | ||
285 | } | ||
286 | |||
/*
 * sigreturn(2): called by the trampoline when a non-RT signal handler
 * returns.  Locates the sigframe at the current stack pointer,
 * restores the blocked-signal mask and machine context, and returns
 * the interrupted syscall's r2 so the resumed context sees it.  A
 * corrupt frame gets SIGSEGV.
 */
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
			     unsigned long r4, unsigned long r5,
			     unsigned long r6, unsigned long r7,
			     struct pt_regs * regs)
{
	struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
	sigset_t set;
	long long ret;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	/* Mask word 0 lives in the sigcontext; the rest in extramask[]. */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc, &ret))
		goto badframe;
	/* entry.S will advance pc by 4 on the way out; compensate. */
	regs->pc -= 4;

	return (int) ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
322 | |||
/*
 * rt_sigreturn(2): counterpart of sys_sigreturn() for RT signal
 * frames.  Restores the full sigset from the ucontext, the machine
 * context, and the alternate signal stack settings.  A corrupt frame
 * gets SIGSEGV.
 */
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
				unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs * regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
	sigset_t set;
	/* NOTE(review): 'st' is a kernel-stack local, so the __user
	   qualifier on this object declaration looks misplaced - confirm
	   against sparse annotation conventions. */
	stack_t __user st;
	long long ret;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
		goto badframe;
	/* entry.S will advance pc by 4 on the way out; compensate. */
	regs->pc -= 4;

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	do_sigaltstack(&st, NULL, REF_REG_SP);

	return (int) ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
361 | |||
362 | /* | ||
363 | * Set up a signal frame. | ||
364 | */ | ||
365 | |||
/*
 * Save the current machine context into a user sigcontext: FPU state
 * first (so later SR writes are preserved), then all 63 general
 * registers, the 8 target registers, SR, PC and the old blocked mask.
 * Returns the accumulated user-access error state.
 */
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 unsigned long mask)
{
	int err = 0;

	/* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
	err |= setup_sigcontext_fpu(regs, sc);

#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)

	COPY(regs[0]);	COPY(regs[1]);	COPY(regs[2]);	COPY(regs[3]);
	COPY(regs[4]);	COPY(regs[5]);	COPY(regs[6]);	COPY(regs[7]);
	COPY(regs[8]);	COPY(regs[9]);	COPY(regs[10]);	COPY(regs[11]);
	COPY(regs[12]);	COPY(regs[13]);	COPY(regs[14]);	COPY(regs[15]);
	COPY(regs[16]);	COPY(regs[17]);	COPY(regs[18]);	COPY(regs[19]);
	COPY(regs[20]);	COPY(regs[21]);	COPY(regs[22]);	COPY(regs[23]);
	COPY(regs[24]);	COPY(regs[25]);	COPY(regs[26]);	COPY(regs[27]);
	COPY(regs[28]);	COPY(regs[29]);	COPY(regs[30]);	COPY(regs[31]);
	COPY(regs[32]);	COPY(regs[33]);	COPY(regs[34]);	COPY(regs[35]);
	COPY(regs[36]);	COPY(regs[37]);	COPY(regs[38]);	COPY(regs[39]);
	COPY(regs[40]);	COPY(regs[41]);	COPY(regs[42]);	COPY(regs[43]);
	COPY(regs[44]);	COPY(regs[45]);	COPY(regs[46]);	COPY(regs[47]);
	COPY(regs[48]);	COPY(regs[49]);	COPY(regs[50]);	COPY(regs[51]);
	COPY(regs[52]);	COPY(regs[53]);	COPY(regs[54]);	COPY(regs[55]);
	COPY(regs[56]);	COPY(regs[57]);	COPY(regs[58]);	COPY(regs[59]);
	COPY(regs[60]);	COPY(regs[61]);	COPY(regs[62]);
	COPY(tregs[0]);	COPY(tregs[1]);	COPY(tregs[2]);	COPY(tregs[3]);
	COPY(tregs[4]);	COPY(tregs[5]);	COPY(tregs[6]);	COPY(tregs[7]);
	COPY(sr);	COPY(pc);

#undef COPY

	err |= __put_user(mask, &sc->oldmask);

	return err;
}
403 | |||
404 | /* | ||
405 | * Determine which stack to use.. | ||
406 | */ | ||
407 | static inline void __user * | ||
408 | get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | ||
409 | { | ||
410 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) | ||
411 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
412 | |||
413 | return (void __user *)((sp - frame_size) & -8ul); | ||
414 | } | ||
415 | |||
416 | void sa_default_restorer(void); /* See comments below */ | ||
417 | void sa_default_rt_restorer(void); /* See comments below */ | ||
418 | |||
419 | static void setup_frame(int sig, struct k_sigaction *ka, | ||
420 | sigset_t *set, struct pt_regs *regs) | ||
421 | { | ||
422 | struct sigframe __user *frame; | ||
423 | int err = 0; | ||
424 | int signal; | ||
425 | |||
426 | frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); | ||
427 | |||
428 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
429 | goto give_sigsegv; | ||
430 | |||
431 | signal = current_thread_info()->exec_domain | ||
432 | && current_thread_info()->exec_domain->signal_invmap | ||
433 | && sig < 32 | ||
434 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
435 | : sig; | ||
436 | |||
437 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); | ||
438 | |||
439 | /* Give up earlier as i386, in case */ | ||
440 | if (err) | ||
441 | goto give_sigsegv; | ||
442 | |||
443 | if (_NSIG_WORDS > 1) { | ||
444 | err |= __copy_to_user(frame->extramask, &set->sig[1], | ||
445 | sizeof(frame->extramask)); } | ||
446 | |||
447 | /* Give up earlier as i386, in case */ | ||
448 | if (err) | ||
449 | goto give_sigsegv; | ||
450 | |||
451 | /* Set up to return from userspace. If provided, use a stub | ||
452 | already in userspace. */ | ||
453 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
454 | DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; | ||
455 | |||
456 | /* | ||
457 | * On SH5 all edited pointers are subject to NEFF | ||
458 | */ | ||
459 | DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? | ||
460 | (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; | ||
461 | } else { | ||
462 | /* | ||
463 | * Different approach on SH5. | ||
464 | * . Endianness independent asm code gets placed in entry.S . | ||
465 | * This is limited to four ASM instructions corresponding | ||
466 | * to two long longs in size. | ||
467 | * . err checking is done on the else branch only | ||
468 | * . flush_icache_range() is called upon __put_user() only | ||
469 | * . all edited pointers are subject to NEFF | ||
470 | * . being code, linker turns ShMedia bit on, always | ||
471 | * dereference index -1. | ||
472 | */ | ||
473 | DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; | ||
474 | DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? | ||
475 | (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; | ||
476 | |||
477 | if (__copy_to_user(frame->retcode, | ||
478 | (unsigned long long)sa_default_restorer & (~1), 16) != 0) | ||
479 | goto give_sigsegv; | ||
480 | |||
481 | /* Cohere the trampoline with the I-cache. */ | ||
482 | flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16); | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Set up registers for signal handler. | ||
487 | * All edited pointers are subject to NEFF. | ||
488 | */ | ||
489 | regs->regs[REG_SP] = (unsigned long) frame; | ||
490 | regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? | ||
491 | (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; | ||
492 | regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ | ||
493 | |||
494 | /* FIXME: | ||
495 | The glibc profiling support for SH-5 needs to be passed a sigcontext | ||
496 | so it can retrieve the PC. At some point during 2003 the glibc | ||
497 | support was changed to receive the sigcontext through the 2nd | ||
498 | argument, but there are still versions of libc.so in use that use | ||
499 | the 3rd argument. Until libc.so is stabilised, pass the sigcontext | ||
500 | through both 2nd and 3rd arguments. | ||
501 | */ | ||
502 | |||
503 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; | ||
504 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; | ||
505 | |||
506 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
507 | regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; | ||
508 | |||
509 | set_fs(USER_DS); | ||
510 | |||
511 | #if DEBUG_SIG | ||
512 | /* Broken %016Lx */ | ||
513 | printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", | ||
514 | signal, | ||
515 | current->comm, current->pid, frame, | ||
516 | regs->pc >> 32, regs->pc & 0xffffffff, | ||
517 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); | ||
518 | #endif | ||
519 | |||
520 | return; | ||
521 | |||
522 | give_sigsegv: | ||
523 | force_sigsegv(sig, current); | ||
524 | } | ||
525 | |||
526 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
527 | sigset_t *set, struct pt_regs *regs) | ||
528 | { | ||
529 | struct rt_sigframe __user *frame; | ||
530 | int err = 0; | ||
531 | int signal; | ||
532 | |||
533 | frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); | ||
534 | |||
535 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
536 | goto give_sigsegv; | ||
537 | |||
538 | signal = current_thread_info()->exec_domain | ||
539 | && current_thread_info()->exec_domain->signal_invmap | ||
540 | && sig < 32 | ||
541 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
542 | : sig; | ||
543 | |||
544 | err |= __put_user(&frame->info, &frame->pinfo); | ||
545 | err |= __put_user(&frame->uc, &frame->puc); | ||
546 | err |= copy_siginfo_to_user(&frame->info, info); | ||
547 | |||
548 | /* Give up earlier as i386, in case */ | ||
549 | if (err) | ||
550 | goto give_sigsegv; | ||
551 | |||
552 | /* Create the ucontext. */ | ||
553 | err |= __put_user(0, &frame->uc.uc_flags); | ||
554 | err |= __put_user(0, &frame->uc.uc_link); | ||
555 | err |= __put_user((void *)current->sas_ss_sp, | ||
556 | &frame->uc.uc_stack.ss_sp); | ||
557 | err |= __put_user(sas_ss_flags(regs->regs[REG_SP]), | ||
558 | &frame->uc.uc_stack.ss_flags); | ||
559 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
560 | err |= setup_sigcontext(&frame->uc.uc_mcontext, | ||
561 | regs, set->sig[0]); | ||
562 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
563 | |||
564 | /* Give up earlier as i386, in case */ | ||
565 | if (err) | ||
566 | goto give_sigsegv; | ||
567 | |||
568 | /* Set up to return from userspace. If provided, use a stub | ||
569 | already in userspace. */ | ||
570 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
571 | DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; | ||
572 | |||
573 | /* | ||
574 | * On SH5 all edited pointers are subject to NEFF | ||
575 | */ | ||
576 | DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? | ||
577 | (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; | ||
578 | } else { | ||
579 | /* | ||
580 | * Different approach on SH5. | ||
581 | * . Endianness independent asm code gets placed in entry.S . | ||
582 | * This is limited to four ASM instructions corresponding | ||
583 | * to two long longs in size. | ||
584 | * . err checking is done on the else branch only | ||
585 | * . flush_icache_range() is called upon __put_user() only | ||
586 | * . all edited pointers are subject to NEFF | ||
587 | * . being code, linker turns ShMedia bit on, always | ||
588 | * dereference index -1. | ||
589 | */ | ||
590 | |||
591 | DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; | ||
592 | DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? | ||
593 | (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; | ||
594 | |||
595 | if (__copy_to_user(frame->retcode, | ||
596 | (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0) | ||
597 | goto give_sigsegv; | ||
598 | |||
599 | flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); | ||
600 | } | ||
601 | |||
602 | /* | ||
603 | * Set up registers for signal handler. | ||
604 | * All edited pointers are subject to NEFF. | ||
605 | */ | ||
606 | regs->regs[REG_SP] = (unsigned long) frame; | ||
607 | regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? | ||
608 | (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; | ||
609 | regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ | ||
610 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; | ||
611 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; | ||
612 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
613 | regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; | ||
614 | |||
615 | set_fs(USER_DS); | ||
616 | |||
617 | #if DEBUG_SIG | ||
618 | /* Broken %016Lx */ | ||
619 | printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", | ||
620 | signal, | ||
621 | current->comm, current->pid, frame, | ||
622 | regs->pc >> 32, regs->pc & 0xffffffff, | ||
623 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); | ||
624 | #endif | ||
625 | |||
626 | return; | ||
627 | |||
628 | give_sigsegv: | ||
629 | force_sigsegv(sig, current); | ||
630 | } | ||
631 | |||
632 | /* | ||
633 | * OK, we're invoking a handler | ||
634 | */ | ||
635 | |||
636 | static void | ||
637 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | ||
638 | sigset_t *oldset, struct pt_regs * regs) | ||
639 | { | ||
640 | /* Are we from a system call? */ | ||
641 | if (regs->syscall_nr >= 0) { | ||
642 | /* If so, check system call restarting.. */ | ||
643 | switch (regs->regs[REG_RET]) { | ||
644 | case -ERESTARTNOHAND: | ||
645 | regs->regs[REG_RET] = -EINTR; | ||
646 | break; | ||
647 | |||
648 | case -ERESTARTSYS: | ||
649 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
650 | regs->regs[REG_RET] = -EINTR; | ||
651 | break; | ||
652 | } | ||
653 | /* fallthrough */ | ||
654 | case -ERESTARTNOINTR: | ||
655 | /* Decode syscall # */ | ||
656 | regs->regs[REG_RET] = regs->syscall_nr; | ||
657 | regs->pc -= 4; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | /* Set up the stack frame */ | ||
662 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
663 | setup_rt_frame(sig, ka, info, oldset, regs); | ||
664 | else | ||
665 | setup_frame(sig, ka, oldset, regs); | ||
666 | |||
667 | if (!(ka->sa.sa_flags & SA_NODEFER)) { | ||
668 | spin_lock_irq(¤t->sighand->siglock); | ||
669 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
670 | sigaddset(¤t->blocked,sig); | ||
671 | recalc_sigpending(); | ||
672 | spin_unlock_irq(¤t->sighand->siglock); | ||
673 | } | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
678 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
679 | * mistake. | ||
680 | * | ||
681 | * Note that we go through the signals twice: once to check the signals that | ||
682 | * the kernel can handle, and then we build all the user-level signal handling | ||
683 | * stack-frames in one go after that. | ||
684 | */ | ||
685 | int do_signal(struct pt_regs *regs, sigset_t *oldset) | ||
686 | { | ||
687 | siginfo_t info; | ||
688 | int signr; | ||
689 | struct k_sigaction ka; | ||
690 | |||
691 | /* | ||
692 | * We want the common case to go fast, which | ||
693 | * is why we may in certain cases get here from | ||
694 | * kernel mode. Just return without doing anything | ||
695 | * if so. | ||
696 | */ | ||
697 | if (!user_mode(regs)) | ||
698 | return 1; | ||
699 | |||
700 | if (try_to_freeze(0)) | ||
701 | goto no_signal; | ||
702 | |||
703 | if (!oldset) | ||
704 | oldset = ¤t->blocked; | ||
705 | |||
706 | signr = get_signal_to_deliver(&info, &ka, regs, 0); | ||
707 | |||
708 | if (signr > 0) { | ||
709 | /* Whee! Actually deliver the signal. */ | ||
710 | handle_signal(signr, &info, &ka, oldset, regs); | ||
711 | return 1; | ||
712 | } | ||
713 | |||
714 | no_signal: | ||
715 | /* Did we come from a system call? */ | ||
716 | if (regs->syscall_nr >= 0) { | ||
717 | /* Restart the system call - no handlers present */ | ||
718 | if (regs->regs[REG_RET] == -ERESTARTNOHAND || | ||
719 | regs->regs[REG_RET] == -ERESTARTSYS || | ||
720 | regs->regs[REG_RET] == -ERESTARTNOINTR) { | ||
721 | /* Decode Syscall # */ | ||
722 | regs->regs[REG_RET] = regs->syscall_nr; | ||
723 | regs->pc -= 4; | ||
724 | } | ||
725 | } | ||
726 | return 0; | ||
727 | } | ||
diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S new file mode 100644 index 000000000000..45b2d90eed7d --- /dev/null +++ b/arch/sh64/kernel/switchto.S | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/switchto.S | ||
3 | * | ||
4 | * sh64 context switch | ||
5 | * | ||
6 | * Copyright (C) 2004 Richard Curnow | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | .section .text..SHmedia32,"ax" | ||
14 | .little | ||
15 | |||
16 | .balign 32 | ||
17 | |||
18 | .type sh64_switch_to,@function | ||
19 | .global sh64_switch_to | ||
20 | .global __sh64_switch_to_end | ||
21 | sh64_switch_to: | ||
22 | |||
23 | /* Incoming args | ||
24 | r2 - prev | ||
25 | r3 - &prev->thread | ||
26 | r4 - next | ||
27 | r5 - &next->thread | ||
28 | |||
29 | Outgoing results | ||
30 | r2 - last (=prev) : this just stays in r2 throughout | ||
31 | |||
32 | Want to create a full (struct pt_regs) on the stack to allow backtracing | ||
33 | functions to work. However, we only need to populate the callee-save | ||
34 | register slots in this structure; since we're a function our ancestors must | ||
35 | have themselves preserved all caller saved state in the stack. This saves | ||
36 | some wasted effort since we won't need to look at the values. | ||
37 | |||
38 | In particular, all caller-save registers are immediately available for | ||
39 | scratch use. | ||
40 | |||
41 | */ | ||
42 | |||
43 | #define FRAME_SIZE (76*8 + 8) | ||
44 | |||
45 | movi FRAME_SIZE, r0 | ||
46 | sub.l r15, r0, r15 | ||
47 | ! Do normal-style register save to support backtrace | ||
48 | |||
49 | st.l r15, 0, r18 ! save link reg | ||
50 | st.l r15, 4, r14 ! save fp | ||
51 | add.l r15, r63, r14 ! setup frame pointer | ||
52 | |||
53 | ! hopefully this looks normal to the backtrace now. | ||
54 | |||
55 | addi.l r15, 8, r1 ! base of pt_regs | ||
56 | addi.l r1, 24, r0 ! base of pt_regs.regs | ||
57 | addi.l r0, (63*8), r8 ! base of pt_regs.trregs | ||
58 | |||
59 | /* Note : to be fixed? | ||
60 | struct pt_regs is really designed for holding the state on entry | ||
61 | to an exception, i.e. pc,sr,regs etc. However, for the context | ||
62 | switch state, some of this is not required. But the unwinder takes | ||
63 | struct pt_regs * as an arg so we have to build this structure | ||
64 | to allow unwinding switched tasks in show_state() */ | ||
65 | |||
66 | st.q r0, ( 9*8), r9 | ||
67 | st.q r0, (10*8), r10 | ||
68 | st.q r0, (11*8), r11 | ||
69 | st.q r0, (12*8), r12 | ||
70 | st.q r0, (13*8), r13 | ||
71 | st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at | ||
72 | ! the point where the process is left in suspended animation, i.e. current | ||
73 | ! fp here, not the saved one. | ||
74 | st.q r0, (16*8), r16 | ||
75 | |||
76 | st.q r0, (24*8), r24 | ||
77 | st.q r0, (25*8), r25 | ||
78 | st.q r0, (26*8), r26 | ||
79 | st.q r0, (27*8), r27 | ||
80 | st.q r0, (28*8), r28 | ||
81 | st.q r0, (29*8), r29 | ||
82 | st.q r0, (30*8), r30 | ||
83 | st.q r0, (31*8), r31 | ||
84 | st.q r0, (32*8), r32 | ||
85 | st.q r0, (33*8), r33 | ||
86 | st.q r0, (34*8), r34 | ||
87 | st.q r0, (35*8), r35 | ||
88 | |||
89 | st.q r0, (44*8), r44 | ||
90 | st.q r0, (45*8), r45 | ||
91 | st.q r0, (46*8), r46 | ||
92 | st.q r0, (47*8), r47 | ||
93 | st.q r0, (48*8), r48 | ||
94 | st.q r0, (49*8), r49 | ||
95 | st.q r0, (50*8), r50 | ||
96 | st.q r0, (51*8), r51 | ||
97 | st.q r0, (52*8), r52 | ||
98 | st.q r0, (53*8), r53 | ||
99 | st.q r0, (54*8), r54 | ||
100 | st.q r0, (55*8), r55 | ||
101 | st.q r0, (56*8), r56 | ||
102 | st.q r0, (57*8), r57 | ||
103 | st.q r0, (58*8), r58 | ||
104 | st.q r0, (59*8), r59 | ||
105 | |||
106 | ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency) | ||
107 | ! Use a local label to avoid creating a symbol that will confuse the ! | ||
108 | ! backtrace | ||
109 | pta .Lsave_pc, tr0 | ||
110 | |||
111 | gettr tr5, r45 | ||
112 | gettr tr6, r46 | ||
113 | gettr tr7, r47 | ||
114 | st.q r8, (5*8), r45 | ||
115 | st.q r8, (6*8), r46 | ||
116 | st.q r8, (7*8), r47 | ||
117 | |||
118 | ! Now switch context | ||
119 | gettr tr0, r9 | ||
120 | st.l r3, 0, r15 ! prev->thread.sp | ||
121 | st.l r3, 8, r1 ! prev->thread.kregs | ||
122 | st.l r3, 4, r9 ! prev->thread.pc | ||
123 | st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc | ||
124 | |||
125 | ! Load PC for next task (init value or save_pc later) | ||
126 | ld.l r5, 4, r18 ! next->thread.pc | ||
127 | ! Switch stacks | ||
128 | ld.l r5, 0, r15 ! next->thread.sp | ||
129 | ptabs r18, tr0 | ||
130 | |||
131 | ! Update current | ||
132 | ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct) | ||
133 | putcon r9, kcr0 ! current = next->thread_info | ||
134 | |||
135 | ! go to save_pc for a reschedule, or the initial thread.pc for a new process | ||
136 | blink tr0, r63 | ||
137 | |||
138 | ! Restore (when we come back to a previously saved task) | ||
139 | .Lsave_pc: | ||
140 | addi.l r15, 32, r0 ! r0 = next's regs | ||
141 | addi.l r0, (63*8), r8 ! r8 = next's tr_regs | ||
142 | |||
143 | ld.q r8, (5*8), r45 | ||
144 | ld.q r8, (6*8), r46 | ||
145 | ld.q r8, (7*8), r47 | ||
146 | ptabs r45, tr5 | ||
147 | ptabs r46, tr6 | ||
148 | ptabs r47, tr7 | ||
149 | |||
150 | ld.q r0, ( 9*8), r9 | ||
151 | ld.q r0, (10*8), r10 | ||
152 | ld.q r0, (11*8), r11 | ||
153 | ld.q r0, (12*8), r12 | ||
154 | ld.q r0, (13*8), r13 | ||
155 | ld.q r0, (14*8), r14 | ||
156 | ld.q r0, (16*8), r16 | ||
157 | |||
158 | ld.q r0, (24*8), r24 | ||
159 | ld.q r0, (25*8), r25 | ||
160 | ld.q r0, (26*8), r26 | ||
161 | ld.q r0, (27*8), r27 | ||
162 | ld.q r0, (28*8), r28 | ||
163 | ld.q r0, (29*8), r29 | ||
164 | ld.q r0, (30*8), r30 | ||
165 | ld.q r0, (31*8), r31 | ||
166 | ld.q r0, (32*8), r32 | ||
167 | ld.q r0, (33*8), r33 | ||
168 | ld.q r0, (34*8), r34 | ||
169 | ld.q r0, (35*8), r35 | ||
170 | |||
171 | ld.q r0, (44*8), r44 | ||
172 | ld.q r0, (45*8), r45 | ||
173 | ld.q r0, (46*8), r46 | ||
174 | ld.q r0, (47*8), r47 | ||
175 | ld.q r0, (48*8), r48 | ||
176 | ld.q r0, (49*8), r49 | ||
177 | ld.q r0, (50*8), r50 | ||
178 | ld.q r0, (51*8), r51 | ||
179 | ld.q r0, (52*8), r52 | ||
180 | ld.q r0, (53*8), r53 | ||
181 | ld.q r0, (54*8), r54 | ||
182 | ld.q r0, (55*8), r55 | ||
183 | ld.q r0, (56*8), r56 | ||
184 | ld.q r0, (57*8), r57 | ||
185 | ld.q r0, (58*8), r58 | ||
186 | ld.q r0, (59*8), r59 | ||
187 | |||
188 | ! epilogue | ||
189 | ld.l r15, 0, r18 | ||
190 | ld.l r15, 4, r14 | ||
191 | ptabs r18, tr0 | ||
192 | movi FRAME_SIZE, r0 | ||
193 | add r15, r0, r15 | ||
194 | blink tr0, r63 | ||
195 | __sh64_switch_to_end: | ||
196 | .LFE1: | ||
197 | .size sh64_switch_to,.LFE1-sh64_switch_to | ||
198 | |||
diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c new file mode 100644 index 000000000000..4546845b9caf --- /dev/null +++ b/arch/sh64/kernel/sys_sh64.c | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/sys_sh64.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * | ||
10 | * This file contains various random system calls that | ||
11 | * have a non-standard calling sequence on the Linux/SH5 | ||
12 | * platform. | ||
13 | * | ||
14 | * Mostly taken from i386 version. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/errno.h> | ||
19 | #include <linux/rwsem.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/sem.h> | ||
25 | #include <linux/msg.h> | ||
26 | #include <linux/shm.h> | ||
27 | #include <linux/stat.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <linux/file.h> | ||
30 | #include <linux/utsname.h> | ||
31 | #include <linux/syscalls.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include <asm/ipc.h> | ||
34 | #include <asm/ptrace.h> | ||
35 | |||
36 | #define REG_3 3 | ||
37 | |||
38 | /* | ||
39 | * sys_pipe() is the normal C calling standard for creating | ||
40 | * a pipe. It's not the way Unix traditionally does this, though. | ||
41 | */ | ||
42 | #ifdef NEW_PIPE_IMPLEMENTATION | ||
43 | asmlinkage int sys_pipe(unsigned long * fildes, | ||
44 | unsigned long dummy_r3, | ||
45 | unsigned long dummy_r4, | ||
46 | unsigned long dummy_r5, | ||
47 | unsigned long dummy_r6, | ||
48 | unsigned long dummy_r7, | ||
49 | struct pt_regs * regs) /* r8 = pt_regs forced by entry.S */ | ||
50 | { | ||
51 | int fd[2]; | ||
52 | int ret; | ||
53 | |||
54 | ret = do_pipe(fd); | ||
55 | if (ret == 0) | ||
56 | /* | ||
57 | *********************************************************************** | ||
58 | * To avoid the copy_to_user we prefer to break the ABIs convention, * | ||
59 | * packing the valid pair of file IDs into a single register (r3); * | ||
60 | * while r2 is the return code as defined by the sh5-ABIs. * | ||
61 | * BE CAREFUL: pipe stub, into glibc, must be aware of this solution * | ||
62 | *********************************************************************** | ||
63 | |||
64 | #ifdef __LITTLE_ENDIAN__ | ||
65 | regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]); | ||
66 | #else | ||
67 | regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]); | ||
68 | #endif | ||
69 | |||
70 | */ | ||
71 | /* although not very clever this is endianess independent */ | ||
72 | regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd); | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | #else | ||
78 | asmlinkage int sys_pipe(unsigned long * fildes) | ||
79 | { | ||
80 | int fd[2]; | ||
81 | int error; | ||
82 | |||
83 | error = do_pipe(fd); | ||
84 | if (!error) { | ||
85 | if (copy_to_user(fildes, fd, 2*sizeof(int))) | ||
86 | error = -EFAULT; | ||
87 | } | ||
88 | return error; | ||
89 | } | ||
90 | |||
91 | #endif | ||
92 | |||
93 | /* | ||
94 | * To avoid cache alias, we map the shard page with same color. | ||
95 | */ | ||
96 | #define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1)) | ||
97 | |||
98 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
99 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
100 | { | ||
101 | struct vm_area_struct *vma; | ||
102 | |||
103 | if (flags & MAP_FIXED) { | ||
104 | /* We do not accept a shared mapping if it would violate | ||
105 | * cache aliasing constraints. | ||
106 | */ | ||
107 | if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1))) | ||
108 | return -EINVAL; | ||
109 | return addr; | ||
110 | } | ||
111 | |||
112 | if (len > TASK_SIZE) | ||
113 | return -ENOMEM; | ||
114 | if (!addr) | ||
115 | addr = TASK_UNMAPPED_BASE; | ||
116 | |||
117 | if (flags & MAP_PRIVATE) | ||
118 | addr = PAGE_ALIGN(addr); | ||
119 | else | ||
120 | addr = COLOUR_ALIGN(addr); | ||
121 | |||
122 | for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { | ||
123 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
124 | if (TASK_SIZE - len < addr) | ||
125 | return -ENOMEM; | ||
126 | if (!vma || addr + len <= vma->vm_start) | ||
127 | return addr; | ||
128 | addr = vma->vm_end; | ||
129 | if (!(flags & MAP_PRIVATE)) | ||
130 | addr = COLOUR_ALIGN(addr); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | /* common code for old and new mmaps */ | ||
135 | static inline long do_mmap2( | ||
136 | unsigned long addr, unsigned long len, | ||
137 | unsigned long prot, unsigned long flags, | ||
138 | unsigned long fd, unsigned long pgoff) | ||
139 | { | ||
140 | int error = -EBADF; | ||
141 | struct file * file = NULL; | ||
142 | |||
143 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
144 | if (!(flags & MAP_ANONYMOUS)) { | ||
145 | file = fget(fd); | ||
146 | if (!file) | ||
147 | goto out; | ||
148 | } | ||
149 | |||
150 | down_write(¤t->mm->mmap_sem); | ||
151 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
152 | up_write(¤t->mm->mmap_sem); | ||
153 | |||
154 | if (file) | ||
155 | fput(file); | ||
156 | out: | ||
157 | return error; | ||
158 | } | ||
159 | |||
160 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
161 | unsigned long prot, unsigned long flags, | ||
162 | unsigned long fd, unsigned long pgoff) | ||
163 | { | ||
164 | return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
165 | } | ||
166 | |||
167 | asmlinkage int old_mmap(unsigned long addr, unsigned long len, | ||
168 | unsigned long prot, unsigned long flags, | ||
169 | int fd, unsigned long off) | ||
170 | { | ||
171 | if (off & ~PAGE_MASK) | ||
172 | return -EINVAL; | ||
173 | return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
178 | * | ||
179 | * This is really horribly ugly. | ||
180 | */ | ||
181 | asmlinkage int sys_ipc(uint call, int first, int second, | ||
182 | int third, void __user *ptr, long fifth) | ||
183 | { | ||
184 | int version, ret; | ||
185 | |||
186 | version = call >> 16; /* hack for backward compatibility */ | ||
187 | call &= 0xffff; | ||
188 | |||
189 | if (call <= SEMCTL) | ||
190 | switch (call) { | ||
191 | case SEMOP: | ||
192 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
193 | second, NULL); | ||
194 | case SEMTIMEDOP: | ||
195 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
196 | second, | ||
197 | (const struct timespec __user *)fifth); | ||
198 | case SEMGET: | ||
199 | return sys_semget (first, second, third); | ||
200 | case SEMCTL: { | ||
201 | union semun fourth; | ||
202 | if (!ptr) | ||
203 | return -EINVAL; | ||
204 | if (get_user(fourth.__pad, (void * __user *) ptr)) | ||
205 | return -EFAULT; | ||
206 | return sys_semctl (first, second, third, fourth); | ||
207 | } | ||
208 | default: | ||
209 | return -EINVAL; | ||
210 | } | ||
211 | |||
212 | if (call <= MSGCTL) | ||
213 | switch (call) { | ||
214 | case MSGSND: | ||
215 | return sys_msgsnd (first, (struct msgbuf __user *) ptr, | ||
216 | second, third); | ||
217 | case MSGRCV: | ||
218 | switch (version) { | ||
219 | case 0: { | ||
220 | struct ipc_kludge tmp; | ||
221 | if (!ptr) | ||
222 | return -EINVAL; | ||
223 | |||
224 | if (copy_from_user(&tmp, | ||
225 | (struct ipc_kludge __user *) ptr, | ||
226 | sizeof (tmp))) | ||
227 | return -EFAULT; | ||
228 | return sys_msgrcv (first, tmp.msgp, second, | ||
229 | tmp.msgtyp, third); | ||
230 | } | ||
231 | default: | ||
232 | return sys_msgrcv (first, | ||
233 | (struct msgbuf __user *) ptr, | ||
234 | second, fifth, third); | ||
235 | } | ||
236 | case MSGGET: | ||
237 | return sys_msgget ((key_t) first, second); | ||
238 | case MSGCTL: | ||
239 | return sys_msgctl (first, second, | ||
240 | (struct msqid_ds __user *) ptr); | ||
241 | default: | ||
242 | return -EINVAL; | ||
243 | } | ||
244 | if (call <= SHMCTL) | ||
245 | switch (call) { | ||
246 | case SHMAT: | ||
247 | switch (version) { | ||
248 | default: { | ||
249 | ulong raddr; | ||
250 | ret = do_shmat (first, (char __user *) ptr, | ||
251 | second, &raddr); | ||
252 | if (ret) | ||
253 | return ret; | ||
254 | return put_user (raddr, (ulong __user *) third); | ||
255 | } | ||
256 | case 1: /* iBCS2 emulator entry point */ | ||
257 | if (!segment_eq(get_fs(), get_ds())) | ||
258 | return -EINVAL; | ||
259 | return do_shmat (first, (char __user *) ptr, | ||
260 | second, (ulong *) third); | ||
261 | } | ||
262 | case SHMDT: | ||
263 | return sys_shmdt ((char __user *)ptr); | ||
264 | case SHMGET: | ||
265 | return sys_shmget (first, second, third); | ||
266 | case SHMCTL: | ||
267 | return sys_shmctl (first, second, | ||
268 | (struct shmid_ds __user *) ptr); | ||
269 | default: | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | |||
273 | return -EINVAL; | ||
274 | } | ||
275 | |||
276 | asmlinkage int sys_uname(struct old_utsname * name) | ||
277 | { | ||
278 | int err; | ||
279 | if (!name) | ||
280 | return -EFAULT; | ||
281 | down_read(&uts_sem); | ||
282 | err=copy_to_user(name, &system_utsname, sizeof (*name)); | ||
283 | up_read(&uts_sem); | ||
284 | return err?-EFAULT:0; | ||
285 | } | ||
286 | |||
287 | /* Copy from mips version */ | ||
288 | asmlinkage long sys_shmatcall(int shmid, char __user *shmaddr, | ||
289 | int shmflg) | ||
290 | { | ||
291 | unsigned long raddr; | ||
292 | int err; | ||
293 | |||
294 | err = do_shmat(shmid, shmaddr, shmflg, &raddr); | ||
295 | if (err) | ||
296 | return err; | ||
297 | |||
298 | err = raddr; | ||
299 | return err; | ||
300 | } | ||
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S new file mode 100644 index 000000000000..8ed417df3dc6 --- /dev/null +++ b/arch/sh64/kernel/syscalls.S | |||
@@ -0,0 +1,345 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/syscalls.S | ||
3 | * | ||
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
5 | * Copyright (C) 2004 Paul Mundt | ||
6 | * Copyright (C) 2003, 2004 Richard Curnow | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
#include <linux/sys.h>

	.section .data, "aw"
	.balign 32

/*
 * System calls jump table.
 *
 * Indexed directly by syscall number, so every entry's POSITION is
 * user-space ABI: never reorder or remove entries, only append new
 * ones at the end or replace a retired entry with sys_ni_syscall.
 * Unimplemented/reserved slots point at sys_ni_syscall, which
 * returns -ENOSYS.
 *
 * Numbering follows 2.4 i386 unistd.h up to 219, then the broken-out
 * socket and IPC families (220-247) kept compatible with the sh64 2.4
 * numbering, then the remaining 2.4 and new 2.6 syscalls.
 */
	.globl sys_call_table
sys_call_table:
	.long sys_ni_syscall	/* 0 - old "setup()" system call */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */ /* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys( */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* sys_olduname */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* sys_oldselect */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long old_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm */
	.long sys_socketcall	/* Obsolete implementation of socket syscall */
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall	/* 110 */ /* iopl */
	.long sys_vhangup
	.long sys_ni_syscall	/* idle */
	.long sys_ni_syscall	/* vm86old */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc		/* Obsolete ipc syscall implementation */
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* sys_modify_ldt */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* vm86 */
	.long sys_ni_syscall	/* old "query_module" */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
   numbering with 2.4) */
	.long sys_socket	/* 220 */
	.long sys_bind
	.long sys_connect
	.long sys_listen
	.long sys_accept
	.long sys_getsockname	/* 225 */
	.long sys_getpeername
	.long sys_socketpair
	.long sys_send
	.long sys_sendto
	.long sys_recv		/* 230 */
	.long sys_recvfrom
	.long sys_shutdown
	.long sys_setsockopt
	.long sys_getsockopt
	.long sys_sendmsg	/* 235 */
	.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
   numbering with 2.4) */
	.long sys_semop
	.long sys_semget
	.long sys_semctl
	.long sys_msgsnd	/* 240 */
	.long sys_msgrcv
	.long sys_msgget
	.long sys_msgctl
	.long sys_shmatcall
	.long sys_shmdt		/* 245 */
	.long sys_shmget
	.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
	.long sys_getdents64
	.long sys_fcntl64
	.long sys_ni_syscall	/* 250 reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead
	.long sys_setxattr
	.long sys_lsetxattr	/* 255 */
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr
	.long sys_fgetxattr
	.long sys_listxattr	/* 260 */
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr
	.long sys_lremovexattr
	.long sys_fremovexattr	/* 265 */
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity	/* 270 */
	.long sys_ni_syscall
	.long sys_ni_syscall
	.long sys_io_setup
	.long sys_io_destroy
	.long sys_io_getevents	/* 275 */
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64
	.long sys_ni_syscall
	.long sys_exit_group	/* 280 */
/* Rest of new 2.6 syscalls */
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl
	.long sys_epoll_wait
	.long sys_remap_file_pages	/* 285 */
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime
	.long sys_timer_gettime
	.long sys_timer_getoverrun	/* 290 */
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime
	.long sys_clock_getres
	.long sys_clock_nanosleep	/* 295 */
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill
	.long sys_utimes
	.long sys_fadvise64_64	/* 300 */
	.long sys_ni_syscall	/* Reserved for vserver */
	.long sys_ni_syscall	/* Reserved for mbind */
	.long sys_ni_syscall	/* get_mempolicy */
	.long sys_ni_syscall	/* set_mempolicy */
	.long sys_mq_open	/* 305 */
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive
	.long sys_mq_notify
	.long sys_mq_getsetattr	/* 310 */
	.long sys_ni_syscall	/* Reserved for kexec */
	.long sys_waitid
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl	/* 315 */

diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c new file mode 100644 index 000000000000..6c84da3efc73 --- /dev/null +++ b/arch/sh64/kernel/time.c | |||
@@ -0,0 +1,610 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/time.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * Copyright (C) 2003 Richard Curnow | ||
11 | * | ||
12 | * Original TMU/RTC code taken from sh version. | ||
13 | * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka | ||
14 | * Some code taken from i386 version. | ||
15 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/rwsem.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/param.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/time.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/profile.h> | ||
31 | #include <linux/smp.h> | ||
32 | |||
33 | #include <asm/registers.h> /* required by inline __asm__ stmt. */ | ||
34 | |||
35 | #include <asm/processor.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/io.h> | ||
38 | #include <asm/irq.h> | ||
39 | #include <asm/delay.h> | ||
40 | |||
41 | #include <linux/timex.h> | ||
42 | #include <linux/irq.h> | ||
43 | #include <asm/hardware.h> | ||
44 | |||
45 | #define TMU_TOCR_INIT 0x00 | ||
46 | #define TMU0_TCR_INIT 0x0020 | ||
47 | #define TMU_TSTR_INIT 1 | ||
48 | #define TMU_TSTR_OFF 0 | ||
49 | |||
50 | /* RCR1 Bits */ | ||
51 | #define RCR1_CF 0x80 /* Carry Flag */ | ||
52 | #define RCR1_CIE 0x10 /* Carry Interrupt Enable */ | ||
53 | #define RCR1_AIE 0x08 /* Alarm Interrupt Enable */ | ||
54 | #define RCR1_AF 0x01 /* Alarm Flag */ | ||
55 | |||
56 | /* RCR2 Bits */ | ||
57 | #define RCR2_PEF 0x80 /* PEriodic interrupt Flag */ | ||
58 | #define RCR2_PESMASK 0x70 /* Periodic interrupt Set */ | ||
59 | #define RCR2_RTCEN 0x08 /* ENable RTC */ | ||
60 | #define RCR2_ADJ 0x04 /* ADJustment (30-second) */ | ||
61 | #define RCR2_RESET 0x02 /* Reset bit */ | ||
62 | #define RCR2_START 0x01 /* Start bit */ | ||
63 | |||
64 | /* Clock, Power and Reset Controller */ | ||
65 | #define CPRC_BLOCK_OFF 0x01010000 | ||
66 | #define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF | ||
67 | |||
68 | #define FRQCR (cprc_base+0x0) | ||
69 | #define WTCSR (cprc_base+0x0018) | ||
70 | #define STBCR (cprc_base+0x0030) | ||
71 | |||
72 | /* Time Management Unit */ | ||
73 | #define TMU_BLOCK_OFF 0x01020000 | ||
74 | #define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF | ||
75 | #define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0) | ||
76 | #define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1) | ||
77 | #define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2) | ||
78 | |||
79 | #define TMU_TOCR tmu_base+0x0 /* Byte access */ | ||
80 | #define TMU_TSTR tmu_base+0x4 /* Byte access */ | ||
81 | |||
82 | #define TMU0_TCOR TMU0_BASE+0x0 /* Long access */ | ||
83 | #define TMU0_TCNT TMU0_BASE+0x4 /* Long access */ | ||
84 | #define TMU0_TCR TMU0_BASE+0x8 /* Word access */ | ||
85 | |||
86 | /* Real Time Clock */ | ||
87 | #define RTC_BLOCK_OFF 0x01040000 | ||
88 | #define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF | ||
89 | |||
90 | #define R64CNT rtc_base+0x00 | ||
91 | #define RSECCNT rtc_base+0x04 | ||
92 | #define RMINCNT rtc_base+0x08 | ||
93 | #define RHRCNT rtc_base+0x0c | ||
94 | #define RWKCNT rtc_base+0x10 | ||
95 | #define RDAYCNT rtc_base+0x14 | ||
96 | #define RMONCNT rtc_base+0x18 | ||
97 | #define RYRCNT rtc_base+0x1c /* 16bit */ | ||
98 | #define RSECAR rtc_base+0x20 | ||
99 | #define RMINAR rtc_base+0x24 | ||
100 | #define RHRAR rtc_base+0x28 | ||
101 | #define RWKAR rtc_base+0x2c | ||
102 | #define RDAYAR rtc_base+0x30 | ||
103 | #define RMONAR rtc_base+0x34 | ||
104 | #define RCR1 rtc_base+0x38 | ||
105 | #define RCR2 rtc_base+0x3c | ||
106 | |||
107 | #ifndef BCD_TO_BIN | ||
108 | #define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) | ||
109 | #endif | ||
110 | |||
111 | #ifndef BIN_TO_BCD | ||
112 | #define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) | ||
113 | #endif | ||
114 | |||
115 | #define TICK_SIZE (tick_nsec / 1000) | ||
116 | |||
117 | extern unsigned long wall_jiffies; | ||
118 | |||
119 | u64 jiffies_64 = INITIAL_JIFFIES; | ||
120 | |||
121 | static unsigned long tmu_base, rtc_base; | ||
122 | unsigned long cprc_base; | ||
123 | |||
124 | /* Variables to allow interpolation of time of day to resolution better than a | ||
125 | * jiffy. */ | ||
126 | |||
127 | /* This is effectively protected by xtime_lock */ | ||
128 | static unsigned long ctc_last_interrupt; | ||
129 | static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */ | ||
130 | |||
131 | #define CTC_JIFFY_SCALE_SHIFT 40 | ||
132 | |||
133 | /* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */ | ||
134 | static unsigned long long scaled_recip_ctc_ticks_per_jiffy; | ||
135 | |||
/* Estimate number of microseconds that have elapsed since the last timer tick,
   by scaling the delta that has occured in the CTC register.

   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
   the CPU clock rate. If the CPU sleeps, the CTC stops counting.  Bear this
   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
   sleeping, though will be coarser.

   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
   is running or if the freq or tick arguments of adjtimex are modified after
   we have calibrated the scaling factor?  This will result in either a jump at
   the end of a tick period, or a wrap backwards at the start of the next one,
   if the application is reading the time of day often enough.  I think we
   ought to do better than this.  For this reason, usecs_per_jiffy is left
   separated out in the calculation below.  This allows some future hook into
   the adjtime-related stuff in kernel/timer.c to remove this hazard.

*/

/*
 * Compute: (ctc_delta * usecs_per_jiffy * 2^40/ctc_ticks_per_jiffy) >> 40
 * using SH-5 'mulu.l' 32x32->64 multiplies, keeping full precision in
 * the 64-bit intermediates.
 */
static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32x32->64 multiplier */
	/* Stage 1: ctc_delta * usecs_per_jiffy -> 64-bit product. */
	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul1_out) :
	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	/* Stage 2: multiply low and high halves by the scaled reciprocal
	   of ctc_ticks_per_jiffy separately, then recombine below. */
	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul2_out_low) :
	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	/* High-half partial product; kept unconditional (the '#if 1' looks
	   like a leftover experiment — dropping it would lose precision
	   whenever stage 1 overflows 32 bits). */
	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul2_out_high) :
	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	/* Recombine and shift out the 2^40 scale factor. */
	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}
196 | |||
/*
 * Return the current wall-clock time in *tv, interpolated to better
 * than jiffy resolution via usecs_since_tick().
 *
 * The seqlock retry loop guarantees a snapshot of xtime that is
 * consistent with concurrent updates from the timer interrupt.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = usecs_since_tick();
		{
			unsigned long lost = jiffies - wall_jiffies;

			/* Account for ticks that have elapsed but have not
			 * yet been folded into xtime. */
			if (lost)
				usec += lost * (1000000 / HZ);
		}

		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Normalize: interpolation may have pushed usec past one second. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
225 | |||
/*
 * Set the wall-clock time from *tv.
 *
 * Adjusts wall_to_monotonic so the monotonic clock is unaffected, and
 * resets the NTP state since the externally supplied time is now
 * authoritative.  Returns 0 on success, -EINVAL for an out-of-range
 * nanoseconds field.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * (usecs_since_tick() +
				(jiffies - wall_jiffies) * (1000000 / HZ));

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Invalidate NTP state: the clock was stepped externally. */
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
259 | |||
/*
 * Write the minutes and seconds of 'nowtime' (seconds since epoch) back
 * to the hardware RTC.  Only minutes/seconds are updated so unknown
 * time-zone offsets in the hour field are preserved; this requires the
 * RTC to be within 15 minutes of the correct time.
 *
 * Returns 0 on success, -1 if the RTC has drifted too far to adjust
 * safely.  Caller must hold xtime_lock (called from the timer tick).
 */
static int set_rtc_time(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;

	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
	} else {
		printk(KERN_WARNING
		       "set_rtc_time: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */

	return retval;
}
298 | |||
/* last time the RTC clock got updated */
static long last_rtc_update = 0;

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 *
 * Caller (timer_interrupt) holds xtime_lock with irqs disabled.
 */
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Snapshot the CTC so usecs_since_tick() can interpolate from
	 * this tick onwards. */
	unsigned long long current_ctc;
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
	profile_tick(CPU_PROFILING, regs);

#ifdef CONFIG_HEARTBEAT
	{
		extern void heartbeat(void);

		heartbeat();
	}
#endif

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* 660 - 600 = 60: retry in about a minute */
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
}
341 | |||
342 | /* | ||
343 | * This is the same as the above, except we _also_ save the current | ||
344 | * Time Stamp Counter value at the time of the timer interrupt, so that | ||
345 | * we later on can estimate the time of day more exactly. | ||
346 | */ | ||
347 | static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
348 | { | ||
349 | unsigned long timer_status; | ||
350 | |||
351 | /* Clear UNF bit */ | ||
352 | timer_status = ctrl_inw(TMU0_TCR); | ||
353 | timer_status &= ~0x100; | ||
354 | ctrl_outw(timer_status, TMU0_TCR); | ||
355 | |||
356 | /* | ||
357 | * Here we are in the timer irq handler. We just have irqs locally | ||
358 | * disabled but we don't know if the timer_bh is running on the other | ||
359 | * CPU. We need to avoid to SMP race with it. NOTE: we don' t need | ||
360 | * the irq version of write_lock because as just said we have irq | ||
361 | * locally disabled. -arca | ||
362 | */ | ||
363 | write_lock(&xtime_lock); | ||
364 | do_timer_interrupt(irq, NULL, regs); | ||
365 | write_unlock(&xtime_lock); | ||
366 | |||
367 | return IRQ_HANDLED; | ||
368 | } | ||
369 | |||
/*
 * Read the current time from the hardware RTC and convert it to
 * seconds since the epoch.
 *
 * The registers are re-read until the carry flag stays clear, which
 * guarantees no counter rolled over mid-read.  If the RTC contents are
 * nonsense, the chip is reset to 1 Jan 2000 and the read is retried.
 */
static unsigned long get_rtc_time(void)
{
	unsigned int sec, min, hr, wk, day, mon, yr, yr100;

 again:
	do {
		ctrl_outb(0, RCR1);  /* Clear CF-bit */
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr  = ctrl_inb(RHRCNT);
		wk  = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
		yr  = ctrl_inw(RYRCNT);
		yr100 = (yr >> 8);	/* century stored in the high byte */
		yr &= 0xff;
	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);

	BCD_TO_BIN(yr100);
	BCD_TO_BIN(yr);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(day);
	BCD_TO_BIN(hr);
	BCD_TO_BIN(min);
	BCD_TO_BIN(sec);

	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		printk(KERN_ERR
		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);		/* Saturday */
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
		ctrl_outw(0x2000, RYRCNT);	/* BCD: century 20, year 00 */
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
		goto again;
	}

	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
}
414 | |||
/*
 * Measure the CPU clock frequency in Hz by counting CTC decrements
 * across one RTC carry interval.
 *
 * The CTC control register is loaded with 0xffffffff and a tight spin
 * loop runs until rtc_interrupt() fires and sets register r3 non-zero;
 * the number of CTC ticks consumed over that interval gives the clock
 * rate (the CTC decrements once per CPU cycle).
 */
static __init unsigned int get_cpu_hz(void)
{
	unsigned int count;
	unsigned long __dummy;
	unsigned long ctc_val_init, ctc_val;

	/*
	** Regardless the toolchain, force the compiler to use the
	** arbitrary register r3 as a clock tick counter.
	** NOTE: r3 must be in accordance with rtc_interrupt()
	*/
	register unsigned long long __rtc_irq_flag __asm__ ("r3");

	local_irq_enable();
	/* Wait for the start of a fresh R64CNT period before timing. */
	do {} while (ctrl_inb(R64CNT) != 0);
	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */

	/*
	 * r3 is arbitrary. CDC does not support "=z".
	 */
	ctc_val_init = 0xffffffff;
	ctc_val = ctc_val_init;

	/* Spin until rtc_interrupt() writes r3, then read back the CTC.
	 * __CTC is presumably defined in <asm/registers.h> — the control
	 * register named cr62 elsewhere in this file; confirm there. */
	asm volatile("gettr	tr0, %1\n\t"
		     "putcon	%0, " __CTC "\n\t"
		     "and	%2, r63, %2\n\t"
		     "pta	$+4, tr0\n\t"
		     "beq/l	%2, r63, tr0\n\t"
		     "ptabs	%1, tr0\n\t"
		     "getcon	" __CTC ", %0\n\t"
		: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
		: "0" (0));
	local_irq_disable();
	/*
	 * SH-3:
	 * CPU clock = 4 stages * loop
	 * tst    rm,rm      if id ex
	 * bt/s   1b            if id ex
	 *  add    #1,rd        if id ex
	 *            (if) pipe line stole
	 * tst    rm,rm          if id ex
	 * ....
	 *
	 *
	 * SH-4:
	 * CPU clock = 6 stages * loop
	 * I don't know why.
	 * ....
	 *
	 * SH-5:
	 * Use CTC register to count.  This approach returns the right value
	 * even if the I-cache is disabled (e.g. whilst debugging.)
	 *
	 */

	count = ctc_val_init - ctc_val; /* CTC counts down */

#if defined (CONFIG_SH_SIMULATOR)
	/*
	 * Let's pretend we are a 5MHz SH-5 to avoid a too
	 * little timer interval. Also to keep delay
	 * calibration within a reasonable time.
	 */
	return 5000000;
#else
	/*
	 * This really is count by the number of clock cycles
         * by the ratio between a complete R64CNT
         * wrap-around (128) and CUI interrupt being raised (64).
	 */
	return count*2;
#endif
}
488 | |||
/*
 * RTC carry-interrupt handler, only used during the get_cpu_hz()
 * calibration: it disables further carry interrupts and flags
 * completion by writing r3 in the interrupted context, which the
 * spin loop in get_cpu_hz() is polling.
 */
static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
	regs->regs[3] = 1;	/* Using r3 */

	return IRQ_HANDLED;
}
496 | |||
497 | static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL}; | ||
498 | static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL}; | ||
499 | |||
/*
 * Boot-time clock setup: remap the TMU/RTC/CPRC peripheral blocks,
 * seed xtime from the RTC, register the timer and RTC interrupt
 * handlers, calibrate the CPU clock, derive the master/bus/module
 * clocks from the FRQCR divider fields, and finally start TMU0 as the
 * periodic tick source at 4*HZ input rate (interval = module_clock/(HZ*4)).
 */
void __init time_init(void)
{
	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
	unsigned long interval;
	unsigned long frqcr, ifc, pfc;
	/* FRQCR 3-bit divider field -> divisor lookup; the same table is
	 * used for the instruction, bus and peripheral clock ratios. */
	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table	/* Same */
#define pfc_table ifc_table	/* Same */

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (!tmu_base) {
		panic("Unable to remap TMU\n");
	}

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (!rtc_base) {
		panic("Unable to remap RTC\n");
	}

	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
	if (!cprc_base) {
		panic("Unable to remap CPRC\n");
	}

	xtime.tv_sec = get_rtc_time();
	xtime.tv_nsec = 0;

	setup_irq(TIMER_IRQ, &irq0);
	setup_irq(RTC_IRQ, &irq1);	/* needed by get_cpu_hz() below */

	/* Check how fast it is.. */
	cpu_clock = get_cpu_hz();

	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));

	/* The RTC carry interrupt was only needed for calibration. */
	disable_irq(RTC_IRQ);

	printk("CPU clock: %d.%02dMHz\n",
	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
	{
		unsigned short bfc;
		frqcr = ctrl_inl(FRQCR);
		ifc  = ifc_table[(frqcr>> 6) & 0x0007];
		bfc  = bfc_table[(frqcr>> 3) & 0x0007];
		pfc  = pfc_table[(frqcr>> 12) & 0x0007];
		master_clock = cpu_clock * ifc;
		bus_clock = master_clock/bfc;
	}

	printk("Bus clock: %d.%02dMHz\n",
	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
	module_clock = master_clock/pfc;
	printk("Module clock: %d.%02dMHz\n",
	       (module_clock/1000000), (module_clock % 1000000)/10000);
	interval = (module_clock/(HZ*4));

	printk("Interval = %ld\n", interval);

	current_cpu_data.cpu_clock    = cpu_clock;
	current_cpu_data.master_clock = master_clock;
	current_cpu_data.bus_clock    = bus_clock;
	current_cpu_data.module_clock = module_clock;

	/* Start TMU0: stop, select clock, program period, load counter, go. */
	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
572 | |||
/*
 * Put the CPU into deep standby; does not return.
 *
 * The watchdog is disarmed first so it cannot reset the sleeping CPU,
 * then STBCR is set for deep standby and 'sleep' is executed.  The
 * trailing synci/nop padding guards against the pipeline running past
 * the sleep; reaching the panic() means an unexpected wakeup occurred.
 */
void enter_deep_standby(void)
{
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);	/* 0xa5 is the WTCSR write-enable key */
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_ALPHANUMERIC
	{
		extern void mach_alphanum(int position, unsigned char value);
		extern void mach_alphanum_brightness(int setting);
		char halted[] = "Halted. ";
		int i;
		mach_alphanum_brightness(6); /* dimmest setting above off */
		for (i=0; i<8; i++) {
			mach_alphanum(i, halted[i]);
		}
		asm __volatile__ ("synco");	/* drain pending stores first */
	}
#endif

	asm __volatile__ ("sleep");
	asm __volatile__ ("synci");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	panic("Unexpected wakeup!\n");
}
602 | |||
603 | /* | ||
604 | * Scheduler clock - returns current time in nanosec units. | ||
605 | */ | ||
606 | unsigned long long sched_clock(void) | ||
607 | { | ||
608 | return (unsigned long long)jiffies * (1000000000 / HZ); | ||
609 | } | ||
610 | |||
diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c new file mode 100644 index 000000000000..224b7f5b9224 --- /dev/null +++ b/arch/sh64/kernel/traps.c | |||
@@ -0,0 +1,961 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/traps.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * Copyright (C) 2003, 2004 Richard Curnow | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * 'Traps.c' handles hardware traps and faults after we have saved some | ||
16 | * state in 'entry.S'. | ||
17 | */ | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/timer.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/kallsyms.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/sysctl.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | #include <asm/system.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/io.h> | ||
38 | #include <asm/atomic.h> | ||
39 | #include <asm/processor.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | |||
42 | #undef DEBUG_EXCEPTION | ||
43 | #ifdef DEBUG_EXCEPTION | ||
44 | /* implemented in ../lib/dbg.c */ | ||
45 | extern void show_excp_regs(char *fname, int trapnr, int signr, | ||
46 | struct pt_regs *regs); | ||
47 | #else | ||
48 | #define show_excp_regs(a, b, c, d) | ||
49 | #endif | ||
50 | |||
51 | static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, | ||
52 | unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk); | ||
53 | |||
54 | #define DO_ERROR(trapnr, signr, str, name, tsk) \ | ||
55 | asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ | ||
56 | { \ | ||
57 | do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ | ||
58 | } | ||
59 | |||
60 | spinlock_t die_lock; | ||
61 | |||
62 | void die(const char * str, struct pt_regs * regs, long err) | ||
63 | { | ||
64 | console_verbose(); | ||
65 | spin_lock_irq(&die_lock); | ||
66 | printk("%s: %lx\n", str, (err & 0xffffff)); | ||
67 | show_regs(regs); | ||
68 | spin_unlock_irq(&die_lock); | ||
69 | do_exit(SIGSEGV); | ||
70 | } | ||
71 | |||
/* die() only when the trap originated in kernel mode; user-mode faults
   are left to the caller's signal handling. */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
77 | |||
78 | static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | ||
79 | { | ||
80 | if (!user_mode(regs)) { | ||
81 | const struct exception_table_entry *fixup; | ||
82 | fixup = search_exception_tables(regs->pc); | ||
83 | if (fixup) { | ||
84 | regs->pc = fixup->fixup; | ||
85 | return; | ||
86 | } | ||
87 | die(str, regs, err); | ||
88 | } | ||
89 | } | ||
90 | |||
/* Trap 13: illegal instruction executed in a delay slot -> SIGILL. */
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
/* Trap 87: instruction-fetch (exec) address error -> SIGSEGV. */
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
93 | |||
94 | |||
95 | /* Implement misaligned load/store handling for kernel (and optionally for user | ||
96 | mode too). Limitation : only SHmedia mode code is handled - there is no | ||
97 | handling at all for misaligned accesses occurring in SHcompact code yet. */ | ||
98 | |||
99 | static int misaligned_fixup(struct pt_regs *regs); | ||
100 | |||
101 | asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs) | ||
102 | { | ||
103 | if (misaligned_fixup(regs) < 0) { | ||
104 | do_unhandled_exception(7, SIGSEGV, "address error(load)", | ||
105 | "do_address_error_load", | ||
106 | error_code, regs, current); | ||
107 | } | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs) | ||
112 | { | ||
113 | if (misaligned_fixup(regs) < 0) { | ||
114 | do_unhandled_exception(8, SIGSEGV, "address error(store)", | ||
115 | "do_address_error_store", | ||
116 | error_code, regs, current); | ||
117 | } | ||
118 | return; | ||
119 | } | ||
120 | |||
#if defined(CONFIG_SH64_ID2815_WORKAROUND)

/* Per-opcode classification used by the defect-#2815 check below. */
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
145 | |||
/*
 * Reserved-instruction trap handler with the SH5-101 cut2 defect #2815
 * workaround: decide whether the RESINST was genuine or a false positive
 * caused by an inter-mode branch, and either restart the instruction or
 * deliver the appropriate signal.
 */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	/* Workaround SH5-101 cut2 silicon defect #2815 :
	   in some situations, inter-mode branches from SHcompact -> SHmedia
	   which should take ITLBMISS or EXECPROT exceptions at the target
	   falsely take RESINST at the target instead. */

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	/* Low two bits of pc encode the ISA: value 1 => SHmedia. */
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				/* Look up the 2-bit validity class for this opcode. */
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to the instruction will now be from an RTE
					   not from SHcompact so the silicon defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only ever get here if a module has
						   SHcompact code inside it.  If so, the same fix up is needed. */
						return; /* same reason */
					}
					/* Otherwise, user mode trying to execute a privileged instruction -
					   fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs)) return;
					/* In user mode ... */
					if (combined == 0x9f) { /* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;
						/* Control registers 62/63 are user-accessible. */
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else if (combined == 0x1bf) { /* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else {
						/* Trap */
					}
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}
237 | |||
#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction : trap 12, SIGILL. */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */
245 | |||
246 | |||
/* NOTE(review): <asm/system.h> is already included at the top of this
   file; this second include is redundant (harmless, guarded header). */
#include <asm/system.h>
248 | |||
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	/* PLS(): sh64 debug/trace hook -- presumably a no-op unless debug
	   tracing is compiled in.  TODO confirm against the debug macros. */
	PLS();
	show_excp_regs(__FUNCTION__, -1, -1, regs);
	/* A stray exception in kernel mode is fatal; user mode is ignored. */
	die_if_kernel("exception", regs, ex);
}
256 | |||
257 | int do_unknown_trapa(unsigned long scId, struct pt_regs *regs) | ||
258 | { | ||
259 | /* Syscall debug */ | ||
260 | printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId); | ||
261 | |||
262 | die_if_kernel("unknown trapa", regs, scId); | ||
263 | |||
264 | return -ENOSYS; | ||
265 | } | ||
266 | |||
/*
 * Dump a backtrace for @tsk (NULL means "unwind from here").  @sp is
 * accepted for interface compatibility but not used on sh64; unwinding
 * works from the saved pt_regs via the frame-pointer walker instead.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	/* NULL regs => unwind the current context; presumably handled
	   inside sh64_unwind() -- see kernel/unwind.c. */
	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}
280 | |||
/* Backtrace the current task; @sp is forwarded but unused on sh64. */
void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}
285 | |||
/* Generic dump_stack() entry point: backtrace the current context. */
void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);
292 | |||
/*
 * Common tail for unrecoverable traps: record the trap details on the
 * task, signal the task if the fault came from user mode, and for kernel
 * mode apply an exception-table fixup or die().
 */
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	/* Recorded for the benefit of signal delivery / debuggers. */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	/* No-op for user mode (checked inside); kernel mode either gets a
	   fixed-up pc from the exception table or dies here. */
	die_if_no_fixup(str, regs, error_code);
}
305 | |||
/*
 * Fetch the 32-bit instruction word at @pc into *result_opcode.
 * The ISA is encoded in the low bits of pc: (pc & 3) == 1 => SHmedia.
 * Returns 0 on success, negative (-EFAULT) on failure; *result_opcode is
 * only valid on success.
 */
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(unsigned long *) aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}
341 | |||
342 | static int address_is_sign_extended(__u64 a) | ||
343 | { | ||
344 | __u64 b; | ||
345 | #if (NEFF == 32) | ||
346 | b = (__u64)(__s64)(__s32)(a & 0xffffffffUL); | ||
347 | return (b == a) ? 1 : 0; | ||
348 | #else | ||
349 | #error "Sign extend check only works for NEFF==32" | ||
350 | #endif | ||
351 | } | ||
352 | |||
/*
 * Reconstruct the effective address of a misaligned load/store from its
 * opcode and the trap-time registers.
 *
 * displacement_not_indexed: non-zero => base register + sign-extended
 * 10-bit immediate scaled by the access width; zero => base + index reg.
 * On success *address is set and 0 returned; -1 on an invalid address.
 */
static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}
399 | |||
/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* How many user-mode fixups to report via printk (limits syslog DoS). */
static int user_mode_unaligned_fixup_count = 10;
/* Runtime enable for user-mode fixups; also exposed via sysctl below. */
static int user_mode_unaligned_fixup_enable = 1;
#endif

/* How many kernel-mode fixups to report via printk. */
static int kernel_mode_unaligned_fixup_count = 32;
407 | |||
408 | static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) | ||
409 | { | ||
410 | unsigned short x; | ||
411 | unsigned char *p, *q; | ||
412 | p = (unsigned char *) (int) address; | ||
413 | q = (unsigned char *) &x; | ||
414 | q[0] = p[0]; | ||
415 | q[1] = p[1]; | ||
416 | |||
417 | if (do_sign_extend) { | ||
418 | *result = (__u64)(__s64) *(short *) &x; | ||
419 | } else { | ||
420 | *result = (__u64) x; | ||
421 | } | ||
422 | } | ||
423 | |||
424 | static void misaligned_kernel_word_store(__u64 address, __u64 value) | ||
425 | { | ||
426 | unsigned short x; | ||
427 | unsigned char *p, *q; | ||
428 | p = (unsigned char *) (int) address; | ||
429 | q = (unsigned char *) &x; | ||
430 | |||
431 | x = (__u16) value; | ||
432 | p[0] = q[0]; | ||
433 | p[1] = q[1]; | ||
434 | } | ||
435 | |||
/*
 * Emulate a misaligned SHmedia integer load into regs->regs[dest].
 * width_shift: log2 of the access width (1=W, 2=L, 3=Q).
 * Returns 0 on success, negative on fault.
 */
static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* Widen the copied bytes into the 64-bit destination slot. */
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			/* ldlo/ldhi pair assembles the value from the two
			   aligned words straddling the address. */
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;

}
515 | |||
/*
 * Emulate a misaligned SHmedia integer store of regs->regs[src].
 * width_shift: log2 of the access width (1=W, 2=L, 3=Q).
 * Returns 0 on success, negative on fault.
 */
static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* Narrow the register value into a staging buffer first. */
		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			/* stlo/sthi pair writes the two aligned words
			   straddling the address. */
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;

}
589 | |||
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
/*
 * Emulate a misaligned FPU load on behalf of user code.  The live FPU
 * state is spilled to current->thread.fpu first so the destination FP
 * register can be written by index.  width_shift: 2=single, 3=double/pair;
 * do_paired_load: non-zero for FLD.P/FLDX.P (two consecutive singles).
 * Returns 0 on success, -1 on fault; dies if called from kernel mode.
 */
static int misaligned_fpu_load(struct pt_regs *regs,
			       __u32 opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
				/* Double: word order within the FP register
				   pair depends on endianness. */
#if defined(CONFIG_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}


}
667 | |||
668 | static int misaligned_fpu_store(struct pt_regs *regs, | ||
669 | __u32 opcode, | ||
670 | int displacement_not_indexed, | ||
671 | int width_shift, | ||
672 | int do_paired_load) | ||
673 | { | ||
674 | /* Return -1 for a fault, 0 for OK */ | ||
675 | int error; | ||
676 | int srcreg; | ||
677 | __u64 address; | ||
678 | |||
679 | error = generate_and_check_address(regs, opcode, | ||
680 | displacement_not_indexed, width_shift, &address); | ||
681 | if (error < 0) { | ||
682 | return error; | ||
683 | } | ||
684 | |||
685 | srcreg = (opcode >> 4) & 0x3f; | ||
686 | if (user_mode(regs)) { | ||
687 | __u64 buffer; | ||
688 | /* Initialise these to NaNs. */ | ||
689 | __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; | ||
690 | |||
691 | if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { | ||
692 | return -1; | ||
693 | } | ||
694 | |||
695 | /* 'current' may be the current owner of the FPU state, so | ||
696 | context switch the registers into memory so they can be | ||
697 | indexed by register number. */ | ||
698 | if (last_task_used_math == current) { | ||
699 | grab_fpu(); | ||
700 | fpsave(¤t->thread.fpu.hard); | ||
701 | release_fpu(); | ||
702 | last_task_used_math = NULL; | ||
703 | regs->sr |= SR_FD; | ||
704 | } | ||
705 | |||
706 | switch (width_shift) { | ||
707 | case 2: | ||
708 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | ||
709 | break; | ||
710 | case 3: | ||
711 | if (do_paired_load) { | ||
712 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | ||
713 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | ||
714 | } else { | ||
715 | #if defined(CONFIG_LITTLE_ENDIAN) | ||
716 | bufhi = current->thread.fpu.hard.fp_regs[srcreg]; | ||
717 | buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; | ||
718 | #else | ||
719 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | ||
720 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | ||
721 | #endif | ||
722 | } | ||
723 | break; | ||
724 | default: | ||
725 | printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", | ||
726 | width_shift, (unsigned long) regs->pc); | ||
727 | break; | ||
728 | } | ||
729 | |||
730 | *(__u32*) &buffer = buflo; | ||
731 | *(1 + (__u32*) &buffer) = bufhi; | ||
732 | if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { | ||
733 | return -1; /* fault */ | ||
734 | } | ||
735 | return 0; | ||
736 | } else { | ||
737 | die ("Misaligned FPU load inside kernel", regs, 0); | ||
738 | return -1; | ||
739 | } | ||
740 | } | ||
741 | #endif | ||
742 | |||
/*
 * Decode the faulting instruction at regs->pc and dispatch to the matching
 * misaligned-access emulation routine.  Returns 0 if the access was
 * emulated (regs->pc is advanced past the instruction), negative if the
 * caller should deliver the original address-error signal instead.
 *
 * NOTE(review): with CONFIG_SH64_USER_MISALIGNED_FIXUP disabled this
 * returns -1 unconditionally, which also disables *kernel*-mode fixups --
 * confirm that is intended.
 */
static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup user mode misaligned accesses without this option enabled. */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable) return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	/* major/minor opcode fields select the instruction class below. */
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, current->pid, (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, current->pid, (__u32)regs->pc, opcode);
		}
	}


	switch (major) {
		case (0x84>>2): /* LD.W */
			error = misaligned_load(regs, opcode, 1, 1, 1);
			break;
		case (0xb0>>2): /* LD.UW */
			error = misaligned_load(regs, opcode, 1, 1, 0);
			break;
		case (0x88>>2): /* LD.L */
			error = misaligned_load(regs, opcode, 1, 2, 1);
			break;
		case (0x8c>>2): /* LD.Q */
			error = misaligned_load(regs, opcode, 1, 3, 0);
			break;

		case (0xa4>>2): /* ST.W */
			error = misaligned_store(regs, opcode, 1, 1);
			break;
		case (0xa8>>2): /* ST.L */
			error = misaligned_store(regs, opcode, 1, 2);
			break;
		case (0xac>>2): /* ST.Q */
			error = misaligned_store(regs, opcode, 1, 3);
			break;

		case (0x40>>2): /* indexed loads */
			switch (minor) {
				case 0x1: /* LDX.W */
					error = misaligned_load(regs, opcode, 0, 1, 1);
					break;
				case 0x5: /* LDX.UW */
					error = misaligned_load(regs, opcode, 0, 1, 0);
					break;
				case 0x2: /* LDX.L */
					error = misaligned_load(regs, opcode, 0, 2, 1);
					break;
				case 0x3: /* LDX.Q */
					error = misaligned_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;

		case (0x60>>2): /* indexed stores */
			switch (minor) {
				case 0x1: /* STX.W */
					error = misaligned_store(regs, opcode, 0, 1);
					break;
				case 0x2: /* STX.L */
					error = misaligned_store(regs, opcode, 0, 2);
					break;
				case 0x3: /* STX.Q */
					error = misaligned_store(regs, opcode, 0, 3);
					break;
				default:
					error = -1;
					break;
			}
			break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
		case (0x94>>2): /* FLD.S */
			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
			break;
		case (0x98>>2): /* FLD.P */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
			break;
		case (0x9c>>2): /* FLD.D */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
			break;
		case (0x1c>>2): /* floating indexed loads */
			switch (minor) {
				case 0x8: /* FLDX.S */
					error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
					break;
				case 0xd: /* FLDX.P */
					error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
					break;
				case 0x9: /* FLDX.D */
					error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;
		case (0xb4>>2): /* FST.S (comment previously said FLD.S) */
			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
			break;
		case (0xb8>>2): /* FST.P (comment previously said FLD.P) */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
			break;
		case (0xbc>>2): /* FST.D (comment previously said FLD.D) */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
			break;
		case (0x3c>>2): /* floating indexed stores */
			switch (minor) {
				case 0x8: /* FSTX.S */
					error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
					break;
				case 0xd: /* FSTX.P */
					error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
					break;
				case 0x9: /* FSTX.D */
					error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;
#endif

		default:
			/* Fault */
			error = -1;
			break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}

}
911 | |||
/* sysctl knobs under /proc/sys/sh64/unaligned_fixup/ controlling the
   report counters and the user-mode fixup enable defined above. */
static ctl_table unaligned_table[] = {
	{1, "kernel_reports", &kernel_mode_unaligned_fixup_count,
		sizeof(int), 0644, NULL, &proc_dointvec},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{2, "user_reports", &user_mode_unaligned_fixup_count,
		sizeof(int), 0644, NULL, &proc_dointvec},
	{3, "user_enable", &user_mode_unaligned_fixup_enable,
		sizeof(int), 0644, NULL, &proc_dointvec},
#endif
	{0}
};

static ctl_table unaligned_root[] = {
	{1, "unaligned_fixup", NULL, 0, 0555, unaligned_table},
	{0}
};

static ctl_table sh64_root[] = {
	{1, "sh64", NULL, 0, 0555, unaligned_root},
	{0}
};

/* Handle kept so the table could be unregistered (it never is). */
static struct ctl_table_header *sysctl_header;

/* Register the sh64 sysctl tree at boot. */
static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root, 0);
	return 0;
}

__initcall(init_sysctl);
941 | |||
942 | |||
/*
 * DEBUGINT handler: report unexpected debug-module exception causes, dump
 * the system state, and acknowledge the interrupt by clearing the cause
 * register.
 */
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	/* Physical address of the debug module's EXP_CAUSE register. */
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	/* Bit 2 is the expected cause; anything else is reported. */
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
961 | |||
diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c new file mode 100644 index 000000000000..f934f97f9f9c --- /dev/null +++ b/arch/sh64/kernel/unwind.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/unwind.c | ||
3 | * | ||
4 | * Copyright (C) 2004 Paul Mundt | ||
5 | * Copyright (C) 2004 Richard Curnow | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/kallsyms.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/io.h> | ||
19 | |||
20 | static u8 regcache[63]; | ||
21 | |||
22 | /* | ||
23 | * Finding the previous stack frame isn't horribly straightforward as it is | ||
24 | * on some other platforms. In the sh64 case, we don't have "linked" stack | ||
25 | * frames, so we need to do a bit of work to determine the previous frame, | ||
26 | * and in turn, the previous r14/r18 pair. | ||
27 | * | ||
28 | * There are generally a few cases which determine where we can find out | ||
29 | * the r14/r18 values. In the general case, this can be determined by poking | ||
30 | * around the prologue of the symbol PC is in (note that we absolutely must | ||
31 | * have frame pointer support as well as the kernel symbol table mapped, | ||
32 | * otherwise we can't even get this far). | ||
33 | * | ||
34 | * In other cases, such as the interrupt/exception path, we can poke around | ||
35 | * the sp/fp. | ||
36 | * | ||
37 | * Notably, this entire approach is somewhat error prone, and in the event | ||
38 | * that the previous frame cannot be determined, that's all we can do. | ||
39 | * Either way, this still leaves us with a more correct backtrace then what | ||
40 | * we would be able to come up with by walking the stack (which is garbage | ||
41 | * for anything beyond the first frame). | ||
42 | * -- PFM. | ||
43 | */ | ||
/*
 * Scan the prologue of the function containing 'pc' to work out where it
 * saved the caller's frame pointer (r14) and return address (r18), then
 * fetch those values back from the stack.
 *
 * fp:        current frame pointer; must be an 8-byte-aligned address
 *            inside kernel RAM.
 * pc:        current program counter; used to locate the enclosing
 *            symbol via kallsyms.
 * pprev_fp:  out - caller's frame pointer.
 * pprev_pc:  in/out - caller's PC.  A non-zero incoming value (r18 of a
 *            possible leaf frame) is kept when no saved r18 is found.
 * regs:      unused.
 *
 * Returns 0 on success, -EINVAL when the previous frame cannot be
 * determined.
 */
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
		unsigned long *pprev_fp, unsigned long *pprev_pc,
		struct pt_regs *regs)
{
	const char *sym;
	char *modname, namebuf[128];
	unsigned long offset, size;
	unsigned long prologue = 0;
	unsigned long fp_displacement = 0;
	unsigned long fp_prev = 0;
	unsigned long offset_r14 = 0, offset_r18 = 0;
	int i, found_prologue_end = 0;

	sym = kallsyms_lookup(pc, &size, &offset, &modname, namebuf);
	if (!sym)
		return -EINVAL;

	/* Function entry point = pc minus its offset into the symbol. */
	prologue = pc - offset;
	if (!prologue)
		return -EINVAL;

	/* Validate fp, to avoid risk of dereferencing a bad pointer later.
	   Assume 128Mb since that's the amount of RAM on a Cayman.  Modify
	   when there is an SH-5 board with more. */
	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
	    ((fp & 7) != 0)) {
		return -EINVAL;
	}

	/*
	 * Depth to walk, depth is completely arbitrary.
	 */
	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
		unsigned long op;
		u8 major, minor;
		u8 src, dest, disp;

		/* Fetch the next 32-bit SHmedia instruction word and
		   decode its fixed fields. */
		op = *(unsigned long *)prologue;

		major = (op >> 26) & 0x3f;
		src = (op >> 20) & 0x3f;
		minor = (op >> 16) & 0xf;
		disp = (op >> 10) & 0x3f;
		dest = (op >> 4) & 0x3f;

		/*
		 * Stack frame creation happens in a number of ways.. in the
		 * general case when the stack frame is less than 511 bytes,
		 * it's generally created by an addi or addi.l:
		 *
		 *	addi/addi.l r15, -FRAME_SIZE, r15
		 *
		 * in the event that the frame size is bigger than this, it's
		 * typically created using a movi/sub pair as follows:
		 *
		 *	movi	FRAME_SIZE, rX
		 *	sub	r15, rX, r15
		 */

		switch (major) {
		case (0x00 >> 2):
			switch (minor) {
			case 0x8: /* add.l */
			case 0x9: /* add */
				/* Look for r15, r63, r14: the fp = sp move
				   that ends the prologue. */
				if (src == 15 && disp == 63 && dest == 14)
					found_prologue_end = 1;

				break;
			case 0xa: /* sub.l */
			case 0xb: /* sub */
				if (src != 15 || dest != 15)
					continue;

				/* The frame size was staged into register
				   'disp' by an earlier movi (see the
				   regcache handler below). */
				fp_displacement -= regcache[disp];
				fp_prev = fp - fp_displacement;
				break;
			}
			break;
		case (0xa8 >> 2): /* st.l */
			if (src != 15)
				continue;

			/* Record the stack offsets at which r14/r18 were
			   saved; only the first store of each counts. */
			switch (dest) {
			case 14:
				if (offset_r14 || fp_displacement == 0)
					continue;

				/* 10-bit signed displacement, scaled by
				   the word size. */
				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r14 *= sizeof(unsigned long);
				offset_r14 += fp_displacement;
				break;
			case 18:
				if (offset_r18 || fp_displacement == 0)
					continue;

				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r18 *= sizeof(unsigned long);
				offset_r18 += fp_displacement;
				break;
			}

			break;
		case (0xcc >> 2): /* movi */
			if (dest >= 63) {
				printk(KERN_NOTICE "%s: Invalid dest reg %d "
				       "specified in movi handler. Failed "
				       "opcode was 0x%lx: ", __FUNCTION__,
				       dest, op);

				continue;
			}

			/* Sign extend */
			regcache[dest] =
				((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
			break;
		case (0xd0 >> 2): /* addi */
		case (0xd4 >> 2): /* addi.l */
			/* Look for r15, -FRAME_SIZE, r15 */
			if (src != 15 || dest != 15)
				continue;

			/* Sign extended frame size.. */
			fp_displacement +=
				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
			fp_prev = fp - fp_displacement;
			break;
		}

		/* Stop as soon as everything this unwind step needs has
		   been discovered. */
		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
			break;
	}

	if (offset_r14 == 0 || fp_prev == 0) {
		if (!offset_r14)
			pr_debug("Unable to find r14 offset\n");
		if (!fp_prev)
			pr_debug("Unable to find previous fp\n");

		return -EINVAL;
	}

	/* For innermost leaf function, there might not be a offset_r18 */
	if (!*pprev_pc && (offset_r18 == 0))
		return -EINVAL;

	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);

	if (offset_r18)
		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);

	/* Strip the ISA-mode bit from the recovered PC. */
	*pprev_pc &= ~1;

	return 0;
}
201 | |||
202 | /* Don't put this on the stack since we'll want to call sh64_unwind | ||
203 | * when we're close to underflowing the stack anyway. */ | ||
204 | static struct pt_regs here_regs; | ||
205 | |||
206 | extern const char syscall_ret; | ||
207 | extern const char ret_from_syscall; | ||
208 | extern const char ret_from_exception; | ||
209 | extern const char ret_from_irq; | ||
210 | |||
211 | static void sh64_unwind_inner(struct pt_regs *regs); | ||
212 | |||
213 | static void unwind_nested (unsigned long pc, unsigned long fp) | ||
214 | { | ||
215 | if ((fp >= __MEMORY_START) && | ||
216 | ((fp & 7) == 0)) { | ||
217 | sh64_unwind_inner((struct pt_regs *) fp); | ||
218 | } | ||
219 | } | ||
220 | |||
/*
 * Walk and print the call chain starting from 'regs'.  Recognises the
 * assembly return labels for syscall/exception/IRQ entry and recurses
 * (via unwind_nested) across those boundaries.
 */
static void sh64_unwind_inner(struct pt_regs *regs)
{
	unsigned long pc, fp;
	int ofs = 0;
	int first_pass;

	/* Bit 0 of a PC encodes the ISA mode on SH-5; mask it off to get
	   the real instruction address. */
	pc = regs->pc & ~1;
	fp = regs->regs[14];

	first_pass = 1;
	for (;;) {
		int cond;
		unsigned long next_fp, next_pc;

		if (pc == ((unsigned long) &syscall_ret & ~1)) {
			printk("SYSCALL\n");
			unwind_nested(pc,fp);
			return;
		}

		if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
			printk("SYSCALL (PREEMPTED)\n");
			unwind_nested(pc,fp);
			return;
		}

		/* In this case, the PC is discovered by lookup_prev_stack_frame but
		   it has 4 taken off it to look like the 'caller' */
		if (pc == ((unsigned long) &ret_from_exception & ~1)) {
			printk("EXCEPTION\n");
			unwind_nested(pc,fp);
			return;
		}

		if (pc == ((unsigned long) &ret_from_irq & ~1)) {
			printk("IRQ\n");
			unwind_nested(pc,fp);
			return;
		}

		/* NOTE(review): 'cond' is computed but never tested --
		   looks like a leftover sanity check. */
		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
			((pc & 3) == 0) && ((fp & 7) == 0));

		/* After the first frame, back up over the call site so the
		   printed address lands inside the caller (ofs is 0 on the
		   first iteration). */
		pc -= ofs;

		printk("[<%08lx>] ", pc);
		print_symbol("%s\n", pc);

		if (first_pass) {
			/* If the innermost frame is a leaf function, it's
			 * possible that r18 is never saved out to the stack.
			 */
			next_pc = regs->regs[18];
		} else {
			next_pc = 0;
		}

		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
			ofs = sizeof(unsigned long);
			pc = next_pc & ~1;
			fp = next_fp;
		} else {
			printk("Unable to lookup previous stack frame\n");
			break;
		}
		first_pass = 0;
	}

	printk("\n");

}
292 | |||
/*
 * Print a kernel call trace.  When 'regs' is NULL, snapshot the live
 * register state (r14/r15/r18, the eight target registers, and a
 * just-captured PC) into the static here_regs and unwind from there.
 */
void sh64_unwind(struct pt_regs *regs)
{
	if (!regs) {
		/*
		 * Fetch current regs if we have no other saved state to back
		 * trace from.
		 */
		regs = &here_regs;

		/* 'ori rN, 0, dst' acts as a register move on SH-5. */
		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));

		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));

		/* Obtain the current PC by branching to the next
		   instruction and capturing the link value. */
		__asm__ __volatile__ (
			"pta 0f, tr0\n\t"
			"blink tr0, %0\n\t"
			"0: nop"
			: "=r" (regs->pc)
		);
	}

	printk("\nCall Trace:\n");
	sh64_unwind_inner(regs);
}
326 | |||
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..7d9f7a6339a0 --- /dev/null +++ b/arch/sh64/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
 * arch/sh64/kernel/vmlinux.lds.S
7 | * | ||
8 | * ld script to make ST50 Linux kernel | ||
9 | * | ||
10 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
11 | * | ||
12 | * benedict.gaster@superh.com: 2nd May 2002 | ||
13 | * Add definition of empty_zero_page to be the first page of kernel image. | ||
14 | * | ||
15 | * benedict.gaster@superh.com: 3rd May 2002 | ||
16 | * Added support for ramdisk, removing statically linked romfs at the same time. | ||
17 | * | ||
18 | * lethal@linux-sh.org: 9th May 2003 | ||
19 | * Kill off GLOBAL_NAME() usage and other CDC-isms. | ||
20 | * | ||
21 | * lethal@linux-sh.org: 19th May 2003 | ||
22 | * Remove support for ancient toolchains. | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/cache.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/thread_info.h> | ||
30 | |||
31 | #define LOAD_OFFSET CONFIG_CACHED_MEMORY_OFFSET | ||
32 | #include <asm-generic/vmlinux.lds.h> | ||
33 | |||
34 | #ifdef NOTDEF | ||
35 | #ifdef CONFIG_LITTLE_ENDIAN | ||
36 | OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") | ||
37 | #else | ||
38 | OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") | ||
39 | #endif | ||
40 | #endif | ||
41 | |||
42 | OUTPUT_ARCH(sh:sh5) | ||
43 | |||
44 | #define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET) | ||
45 | |||
46 | ENTRY(__start) | ||
47 | SECTIONS | ||
48 | { | ||
49 | . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE; | ||
50 | _text = .; /* Text and read-only data */ | ||
51 | text = .; /* Text and read-only data */ | ||
52 | |||
53 | .empty_zero_page : C_PHYS(.empty_zero_page) { | ||
54 | *(.empty_zero_page) | ||
55 | } = 0 | ||
56 | |||
57 | .text : C_PHYS(.text) { | ||
58 | *(.text) | ||
59 | *(.text64) | ||
60 | *(.text..SHmedia32) | ||
61 | SCHED_TEXT | ||
62 | LOCK_TEXT | ||
63 | *(.fixup) | ||
64 | *(.gnu.warning) | ||
65 | #ifdef CONFIG_LITTLE_ENDIAN | ||
66 | } = 0x6ff0fff0 | ||
67 | #else | ||
68 | } = 0xf0fff06f | ||
69 | #endif | ||
70 | |||
71 | /* We likely want __ex_table to be Cache Line aligned */ | ||
72 | . = ALIGN(L1_CACHE_BYTES); /* Exception table */ | ||
73 | __start___ex_table = .; | ||
74 | __ex_table : C_PHYS(__ex_table) { *(__ex_table) } | ||
75 | __stop___ex_table = .; | ||
76 | |||
77 | RODATA | ||
78 | |||
79 | _etext = .; /* End of text section */ | ||
80 | |||
81 | .data : C_PHYS(.data) { /* Data */ | ||
82 | *(.data) | ||
83 | CONSTRUCTORS | ||
84 | } | ||
85 | |||
86 | . = ALIGN(PAGE_SIZE); | ||
87 | .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) } | ||
88 | |||
89 | . = ALIGN(L1_CACHE_BYTES); | ||
90 | __per_cpu_start = .; | ||
91 | .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) } | ||
92 | __per_cpu_end = . ; | ||
93 | .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) } | ||
94 | |||
95 | _edata = .; /* End of data section */ | ||
96 | |||
97 | . = ALIGN(THREAD_SIZE); /* init_task: structure size aligned */ | ||
98 | .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) } | ||
99 | |||
100 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | ||
101 | __init_begin = .; | ||
102 | _sinittext = .; | ||
103 | .init.text : C_PHYS(.init.text) { *(.init.text) } | ||
104 | _einittext = .; | ||
105 | .init.data : C_PHYS(.init.data) { *(.init.data) } | ||
106 | . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */ | ||
107 | __setup_start = .; | ||
108 | .init.setup : C_PHYS(.init.setup) { *(.init.setup) } | ||
109 | __setup_end = .; | ||
110 | __initcall_start = .; | ||
111 | .initcall.init : C_PHYS(.initcall.init) { | ||
112 | *(.initcall1.init) | ||
113 | *(.initcall2.init) | ||
114 | *(.initcall3.init) | ||
115 | *(.initcall4.init) | ||
116 | *(.initcall5.init) | ||
117 | *(.initcall6.init) | ||
118 | *(.initcall7.init) | ||
119 | } | ||
120 | __initcall_end = .; | ||
121 | __con_initcall_start = .; | ||
122 | .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) } | ||
123 | __con_initcall_end = .; | ||
124 | SECURITY_INIT | ||
125 | __initramfs_start = .; | ||
126 | .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) } | ||
127 | __initramfs_end = .; | ||
128 | . = ALIGN(PAGE_SIZE); | ||
129 | __init_end = .; | ||
130 | |||
131 | /* Align to the biggest single data representation, head and tail */ | ||
132 | . = ALIGN(8); | ||
133 | __bss_start = .; /* BSS */ | ||
134 | .bss : C_PHYS(.bss) { | ||
135 | *(.bss) | ||
136 | } | ||
137 | . = ALIGN(8); | ||
138 | _end = . ; | ||
139 | |||
140 | /* Sections to be discarded */ | ||
141 | /DISCARD/ : { | ||
142 | *(.exit.text) | ||
143 | *(.exit.data) | ||
144 | *(.exitcall.exit) | ||
145 | } | ||
146 | |||
147 | /* Stabs debugging sections. */ | ||
148 | .stab 0 : C_PHYS(.stab) { *(.stab) } | ||
149 | .stabstr 0 : C_PHYS(.stabstr) { *(.stabstr) } | ||
150 | .stab.excl 0 : C_PHYS(.stab.excl) { *(.stab.excl) } | ||
151 | .stab.exclstr 0 : C_PHYS(.stab.exclstr) { *(.stab.exclstr) } | ||
152 | .stab.index 0 : C_PHYS(.stab.index) { *(.stab.index) } | ||
153 | .stab.indexstr 0 : C_PHYS(.stab.indexstr) { *(.stab.indexstr) } | ||
154 | .comment 0 : C_PHYS(.comment) { *(.comment) } | ||
155 | /* DWARF debug sections. | ||
156 | Symbols in the DWARF debugging section are relative to the beginning | ||
157 | of the section so we begin .debug at 0. */ | ||
158 | /* DWARF 1 */ | ||
159 | .debug 0 : C_PHYS(.debug) { *(.debug) } | ||
160 | .line 0 : C_PHYS(.line) { *(.line) } | ||
161 | /* GNU DWARF 1 extensions */ | ||
162 | .debug_srcinfo 0 : C_PHYS(.debug_srcinfo) { *(.debug_srcinfo) } | ||
163 | .debug_sfnames 0 : C_PHYS(.debug_sfnames) { *(.debug_sfnames) } | ||
164 | /* DWARF 1.1 and DWARF 2 */ | ||
165 | .debug_aranges 0 : C_PHYS(.debug_aranges) { *(.debug_aranges) } | ||
166 | .debug_pubnames 0 : C_PHYS(.debug_pubnames) { *(.debug_pubnames) } | ||
167 | /* DWARF 2 */ | ||
168 | .debug_info 0 : C_PHYS(.debug_info) { *(.debug_info) } | ||
169 | .debug_abbrev 0 : C_PHYS(.debug_abbrev) { *(.debug_abbrev) } | ||
170 | .debug_line 0 : C_PHYS(.debug_line) { *(.debug_line) } | ||
171 | .debug_frame 0 : C_PHYS(.debug_frame) { *(.debug_frame) } | ||
172 | .debug_str 0 : C_PHYS(.debug_str) { *(.debug_str) } | ||
173 | .debug_loc 0 : C_PHYS(.debug_loc) { *(.debug_loc) } | ||
174 | .debug_macinfo 0 : C_PHYS(.debug_macinfo) { *(.debug_macinfo) } | ||
175 | /* SGI/MIPS DWARF 2 extensions */ | ||
176 | .debug_weaknames 0 : C_PHYS(.debug_weaknames) { *(.debug_weaknames) } | ||
177 | .debug_funcnames 0 : C_PHYS(.debug_funcnames) { *(.debug_funcnames) } | ||
178 | .debug_typenames 0 : C_PHYS(.debug_typenames) { *(.debug_typenames) } | ||
179 | .debug_varnames 0 : C_PHYS(.debug_varnames) { *(.debug_varnames) } | ||
180 | /* These must appear regardless of . */ | ||
181 | } | ||
diff --git a/arch/sh64/lib/Makefile b/arch/sh64/lib/Makefile new file mode 100644 index 000000000000..6a4cc3f9c0b1 --- /dev/null +++ b/arch/sh64/lib/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2000, 2001 Paolo Alberelli | ||
# Copyright (C) 2003 Paul Mundt
8 | # | ||
9 | # Makefile for the SH-5 specific library files.. | ||
10 | # | ||
11 | # Note! Dependencies are done automagically by 'make dep', which also | ||
12 | # removes any old dependencies. DON'T put your own dependencies here | ||
13 | # unless it's something special (ie not a .c file). | ||
14 | # | ||
15 | |||
16 | # Panic should really be compiled as PIC | ||
17 | lib-y := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \ | ||
18 | page_copy.o page_clear.o iomap.o | ||
19 | |||
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c new file mode 100644 index 000000000000..a82d8f1a7a64 --- /dev/null +++ b/arch/sh64/lib/c-checksum.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
 * arch/sh64/lib/c-checksum.c
3 | * | ||
4 | * This file contains network checksum routines that are better done | ||
5 | * in an architecture-specific manner due to speed.. | ||
6 | */ | ||
7 | |||
8 | #undef DEBUG | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <asm/byteorder.h> | ||
15 | #include <asm/uaccess.h> | ||
16 | |||
/*
 * Reduce a 64-bit accumulated sum to a 16-bit ones-complement checksum
 * value.  Each step adds the high part of the running value back into
 * the low part so no carries are ever lost.
 */
static inline unsigned short from64to16(unsigned long long x)
{
	unsigned long long s;

	/* 64 -> 33 bits: sum the two 32-bit halves */
	s = (x & 0xffffffffULL) + (x >> 32);
	/* 33 -> 17+c bits */
	s = (s & 0xffff) + (s >> 16);
	/* 17+c -> 16+c bits */
	s = (s & 0xffff) + (s >> 16);
	/* absorb the final carry */
	s = (s & 0xffff) + (s >> 16);
	return s;
}
29 | |||
/*
 * Fold a 32-bit accumulated sum down to 16 bits, preserving carries in
 * ones-complement fashion (exactly two fold steps, matching the range
 * of values do_csum() can produce).
 */
static inline unsigned short foldto16(unsigned long x)
{
	unsigned long lo = x & 0xffff;
	unsigned long hi = x >> 16;

	/* 17-bit intermediate, then absorb the carry */
	x = lo + hi;
	x = (x & 0xffff) + (x >> 16);
	return x;
}
38 | |||
/*
 * Fold a 64-bit accumulated sum down to 16 bits.  First reduces to
 * 32 bits (so nothing is lost in the 32-bit network stack), then folds
 * the result to 16 bits with carry propagation.
 */
static inline unsigned short myfoldto16(unsigned long long x)
{
	unsigned long long s = x;

	/* 64 -> 33 bits, then 33 -> 32 bits */
	s = (s >> 32) + (s & 0xffffffffULL);
	s = (s >> 32) + (s & 0xffffffffULL);

	/* 32 -> 17 bits, then fold in the carry */
	s = (s >> 16) + (s & 0xffff);
	s = (s >> 16) + (s & 0xffff);
	return s;
}
54 | |||
55 | #define odd(x) ((x)&1) | ||
56 | #define U16(x) ntohs(x) | ||
57 | |||
/*
 * Sum 'len' bytes at 'buff' as 16-bit big-endian words and fold the
 * result to 16 bits (ones-complement internet checksum core).
 *
 * Alignment is handled incrementally: a leading odd byte, then a
 * leading 16-bit half, after which 'buff' is 4-byte aligned and the
 * bulk is summed a word at a time with manual carry tracking.  When the
 * buffer started on an odd address the two bytes of the result are
 * swapped at the end to compensate.
 *
 * Returns the folded 16-bit sum (<= 0xffff) as an unsigned long.
 */
static unsigned long do_csum(const unsigned char *buff, int len)
{
	/* Note: the local 'odd' is not expanded by the odd(x) macro above
	   since that macro is function-like. */
	int odd, count;
	unsigned long result = 0;

	pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
#ifdef DEBUG
	{
		/* Fix: 'i' was previously used here without being declared,
		   breaking the build whenever DEBUG was defined. */
		int i;

		for (i = 0; i < len; i++) {
			if ((i % 26) == 0)
				printk("\n");
			printk("%02X ", buff[i]);
		}
	}
#endif

	if (len <= 0)
		goto out;

	/* Leading odd byte: its value belongs in the high byte of the
	   first 16-bit word; the final swap undoes the shifted layout. */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		result = *buff << 8;
		len--;
		buff++;
	}
	count = len >> 1;	/* nr of 16-bit words.. */
	if (count) {
		/* Leading 16-bit half, to reach 4-byte alignment. */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			count--;
			len -= 2;
			buff += 2;
		}
		count >>= 1;	/* nr of 32-bit words.. */
		if (count) {
			/* Bulk loop: sum 32-bit words, tracking carry-out
			   manually (assumes 32-bit unsigned long, as on
			   the sh64 ABI this was written for). */
			unsigned long carry = 0;
			do {
				unsigned long w = *(unsigned long *) buff;
				buff += 4;
				count--;
				result += carry;
				result += w;
				carry = (w > result);
			} while (count);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		/* Trailing 16-bit half. */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* Trailing odd byte. */
	if (len & 1)
		result += *buff;
	result = foldto16(result);
	/* Undo the byte shift introduced by a leading odd byte. */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);

	/* Fix: 'result' is unsigned long, so use %lx (was %x). */
	pr_debug("\nCHECKSUM is 0x%lx\n", result);

out:
	return result;
}
119 | |||
/*
 * Checksum 'len' bytes at 'buff' and accumulate into the 32-bit partial
 * sum 'sum', folding any carry back into the low 32 bits.
 */
unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
{
	unsigned long long result;

	/* checksum the buffer, then add the caller's running sum */
	result = (unsigned long long) sum + do_csum(buff, len);
	/* 32+c bits -> 32 bits */
	result = (result & 0xffffffff) + (result >> 32);

	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
		buff, len, sum, result);

	return result;
}
136 | |||
/*
 * Checksum 'len' bytes of 'src' into 'sum' and copy them to 'dst';
 * otherwise identical to csum_partial().
 */
unsigned int
csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
{
	unsigned int new_sum;

	/* checksum first, then copy, matching the original ordering */
	new_sum = csum_partial(src, len, sum);
	memcpy(dst, src, len);

	return new_sum;
}
146 | |||
/* Copy from userspace and compute checksum.  If we catch an exception
   then zero the rest of the buffer.
   Note: the checksum still covers all 'len' bytes of dst, including any
   zeroed tail, and *err_ptr is only written on fault. */
unsigned int
csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, int len,
			    unsigned int sum, int *err_ptr)
{
	int missing;

	pr_debug
	    ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
	     src, dst, len, sum, err_ptr);
	/* copy_from_user returns the number of bytes that could NOT be
	   copied (0 on full success). */
	missing = copy_from_user(dst, src, len);
	pr_debug(" access_ok %d\n", __access_ok((unsigned long) src, len));
	pr_debug(" missing %d\n", missing);
	if (missing) {
		/* Zero the uncopied tail and flag the fault to the caller. */
		memset(dst + len - missing, 0, missing);
		*err_ptr = -EFAULT;
	}

	return csum_partial(dst, len, sum);
}
168 | |||
169 | /* Copy to userspace and compute checksum. */ | ||
170 | unsigned int | ||
171 | csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len, | ||
172 | unsigned int sum, int *err_ptr) | ||
173 | { | ||
174 | sum = csum_partial(src, len, sum); | ||
175 | |||
176 | if (copy_to_user(dst, src, len)) | ||
177 | *err_ptr = -EFAULT; | ||
178 | |||
179 | return sum; | ||
180 | } | ||
181 | |||
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  'ihl' is the header
 * length in 32-bit words.
 */
unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
	unsigned long folded;

	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);

	/* ones-complement of the folded sum over ihl*4 bytes */
	folded = do_csum(iph, ihl * 4);
	return ~folded;
}
192 | |||
/*
 * Build the unfolded 32-bit TCP/UDP pseudo-header sum from source and
 * destination addresses, segment length, protocol number and an
 * existing partial sum.  The 64-bit intermediate is folded back to
 * 32 bits before returning.
 */
unsigned int csum_tcpudp_nofold(unsigned long saddr,
				unsigned long daddr,
				unsigned short len,
				unsigned short proto, unsigned int sum)
{
	unsigned long long result;

	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));

	result = ((unsigned long long) saddr +
		  (unsigned long long) daddr +
		  (unsigned long long) sum +
		  ((unsigned long long) ntohs(len) << 16) +
		  ((unsigned long long) proto << 8));

	/* Fold down to 32-bits so we don't lose in the typedef-less
	   network stack. */
	/* 64 to 33 */
	result = (result & 0xffffffff) + (result >> 32);
	/* 33 to 32 */
	result = (result & 0xffffffff) + (result >> 32);

	/* Fix: saddr/daddr are unsigned long, so print with %lx (the old
	   format used %x for them). */
	pr_debug("%s saddr %lx daddr %lx len %x proto %x sum %x result %08Lx\n",
		 __FUNCTION__, saddr, daddr, len, proto, sum, result);

	return result;
}
221 | |||
// Post SIM:
/*
 * Checksum-and-copy with no userspace access checking; simply forwards
 * to csum_partial_copy().
 */
unsigned int
csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
{
	pr_debug("csum_partial_copy_nocheck src %p dst %p len %d\n", src, dst,
		 len);

	return csum_partial_copy(src, dst, len, sum);
}
diff --git a/arch/sh64/lib/copy_user_memcpy.S b/arch/sh64/lib/copy_user_memcpy.S new file mode 100644 index 000000000000..2a62816d2ddd --- /dev/null +++ b/arch/sh64/lib/copy_user_memcpy.S | |||
@@ -0,0 +1,217 @@ | |||
1 | ! | ||
2 | ! Fast SH memcpy | ||
3 | ! | ||
4 | ! by Toshiyasu Morita (tm@netcom.com) | ||
! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
6 | ! SH5 code Copyright 2002 SuperH Ltd. | ||
7 | ! | ||
8 | ! Entry: ARG0: destination pointer | ||
9 | ! ARG1: source pointer | ||
10 | ! ARG2: byte count | ||
11 | ! | ||
12 | ! Exit: RESULT: destination pointer | ||
13 | ! any other registers in the range r0-r7: trashed | ||
14 | ! | ||
15 | ! Notes: Usually one wants to do small reads and write a longword, but | ||
! unfortunately it is difficult in some cases to concatenate bytes
17 | ! into a longword on the SH, so this does a longword read and small | ||
18 | ! writes. | ||
19 | ! | ||
20 | ! This implementation makes two assumptions about how it is called: | ||
21 | ! | ||
22 | ! 1.: If the byte count is nonzero, the address of the last byte to be | ||
23 | ! copied is unsigned greater than the address of the first byte to | ||
24 | ! be copied. This could be easily swapped for a signed comparison, | ||
25 | ! but the algorithm used needs some comparison. | ||
26 | ! | ||
27 | ! 2.: When there are two or three bytes in the last word of an 11-or-more | ||
! bytes memory chunk to be copied, the rest of the word can be read
29 | ! without side effects. | ||
! This could be easily changed by increasing the minimum size of
31 | ! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2, | ||
! however, this would cost a few extra cycles on average.
33 | ! For SHmedia, the assumption is that any quadword can be read in its | ||
! entirety if at least one byte is included in the copy.
35 | |||
36 | /* Imported into Linux kernel by Richard Curnow. This is used to implement the | ||
37 | __copy_user function in the general case, so it has to be a distinct | ||
38 | function from intra-kernel memcpy to allow for exception fix-ups in the | ||
39 | event that the user pointer is bad somewhere in the copy (e.g. due to | ||
40 | running off the end of the vma). | ||
41 | |||
42 | Note, this algorithm will be slightly wasteful in the case where the source | ||
43 | and destination pointers are equally aligned, because the stlo/sthi pairs | ||
44 | could then be merged back into single stores. If there are a lot of cache | ||
45 | misses, this is probably offset by the stall lengths on the preloads. | ||
46 | |||
47 | */ | ||
48 | |||
49 | /* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020 | ||
50 | * erratum. The first two prefetches are nop-ed out to avoid upsetting the | ||
51 | * instruction counts used in the jump address calculation. | ||
52 | * */ | ||
53 | |||
54 | .section .text..SHmedia32,"ax" | ||
55 | .little | ||
56 | .balign 32 | ||
57 | .global copy_user_memcpy | ||
58 | .global copy_user_memcpy_end | ||
59 | copy_user_memcpy: | ||
60 | |||
61 | #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1 | ||
62 | #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1 | ||
63 | #define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1 | ||
64 | #define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1 | ||
65 | |||
66 | nop ! ld.b r3,0,r63 ! TAKum03020 | ||
67 | pta/l Large,tr0 | ||
68 | movi 25,r0 | ||
69 | bgeu/u r4,r0,tr0 | ||
70 | nsb r4,r0 | ||
71 | shlli r0,5,r0 | ||
72 | movi (L1-L0+63*32 + 1) & 0xffff,r1 | ||
73 | sub r1, r0, r0 | ||
74 | L0: ptrel r0,tr0 | ||
75 | add r2,r4,r5 | ||
76 | ptabs r18,tr1 | ||
77 | add r3,r4,r6 | ||
78 | blink tr0,r63 | ||
79 | |||
80 | /* Rearranged to make cut2 safe */ | ||
81 | .balign 8 | ||
82 | L4_7: /* 4..7 byte memcpy cntd. */ | ||
83 | stlo.l r2, 0, r0 | ||
84 | or r6, r7, r6 | ||
85 | sthi.l r5, -1, r6 | ||
86 | stlo.l r5, -4, r6 | ||
87 | blink tr1,r63 | ||
88 | |||
89 | .balign 8 | ||
90 | L1: /* 0 byte memcpy */ | ||
91 | nop | ||
92 | blink tr1,r63 | ||
93 | nop | ||
94 | nop | ||
95 | nop | ||
96 | nop | ||
97 | |||
98 | L2_3: /* 2 or 3 byte memcpy cntd. */ | ||
99 | st.b r5,-1,r6 | ||
100 | blink tr1,r63 | ||
101 | |||
102 | /* 1 byte memcpy */ | ||
103 | ld.b r3,0,r0 | ||
104 | st.b r2,0,r0 | ||
105 | blink tr1,r63 | ||
106 | |||
107 | L8_15: /* 8..15 byte memcpy cntd. */ | ||
108 | stlo.q r2, 0, r0 | ||
109 | or r6, r7, r6 | ||
110 | sthi.q r5, -1, r6 | ||
111 | stlo.q r5, -8, r6 | ||
112 | blink tr1,r63 | ||
113 | |||
114 | /* 2 or 3 byte memcpy */ | ||
115 | ld.b r3,0,r0 | ||
116 | nop ! ld.b r2,0,r63 ! TAKum03020 | ||
117 | ld.b r3,1,r1 | ||
118 | st.b r2,0,r0 | ||
119 | pta/l L2_3,tr0 | ||
120 | ld.b r6,-1,r6 | ||
121 | st.b r2,1,r1 | ||
122 | blink tr0, r63 | ||
123 | |||
124 | /* 4 .. 7 byte memcpy */ | ||
125 | LDUAL (r3, 0, r0, r1) | ||
126 | pta L4_7, tr0 | ||
127 | ldlo.l r6, -4, r7 | ||
128 | or r0, r1, r0 | ||
129 | sthi.l r2, 3, r0 | ||
130 | ldhi.l r6, -1, r6 | ||
131 | blink tr0, r63 | ||
132 | |||
133 | /* 8 .. 15 byte memcpy */ | ||
134 | LDUAQ (r3, 0, r0, r1) | ||
135 | pta L8_15, tr0 | ||
136 | ldlo.q r6, -8, r7 | ||
137 | or r0, r1, r0 | ||
138 | sthi.q r2, 7, r0 | ||
139 | ldhi.q r6, -1, r6 | ||
140 | blink tr0, r63 | ||
141 | |||
142 | /* 16 .. 24 byte memcpy */ | ||
143 | LDUAQ (r3, 0, r0, r1) | ||
144 | LDUAQ (r3, 8, r8, r9) | ||
145 | or r0, r1, r0 | ||
146 | sthi.q r2, 7, r0 | ||
147 | or r8, r9, r8 | ||
148 | sthi.q r2, 15, r8 | ||
149 | ldlo.q r6, -8, r7 | ||
150 | ldhi.q r6, -1, r6 | ||
151 | stlo.q r2, 8, r8 | ||
152 | stlo.q r2, 0, r0 | ||
153 | or r6, r7, r6 | ||
154 | sthi.q r5, -1, r6 | ||
155 | stlo.q r5, -8, r6 | ||
156 | blink tr1,r63 | ||
157 | |||
158 | Large: | ||
159 | ! ld.b r2, 0, r63 ! TAKum03020 | ||
160 | pta/l Loop_ua, tr1 | ||
161 | ori r3, -8, r7 | ||
162 | sub r2, r7, r22 | ||
163 | sub r3, r2, r6 | ||
164 | add r2, r4, r5 | ||
165 | ldlo.q r3, 0, r0 | ||
166 | addi r5, -16, r5 | ||
167 | movi 64+8, r27 ! could subtract r7 from that. | ||
168 | stlo.q r2, 0, r0 | ||
169 | sthi.q r2, 7, r0 | ||
170 | ldx.q r22, r6, r0 | ||
171 | bgtu/l r27, r4, tr1 | ||
172 | |||
173 | addi r5, -48, r27 | ||
174 | pta/l Loop_line, tr0 | ||
175 | addi r6, 64, r36 | ||
176 | addi r6, -24, r19 | ||
177 | addi r6, -16, r20 | ||
178 | addi r6, -8, r21 | ||
179 | |||
180 | Loop_line: | ||
181 | ! ldx.q r22, r36, r63 ! TAKum03020 | ||
182 | alloco r22, 32 | ||
183 | synco | ||
184 | addi r22, 32, r22 | ||
185 | ldx.q r22, r19, r23 | ||
186 | sthi.q r22, -25, r0 | ||
187 | ldx.q r22, r20, r24 | ||
188 | ldx.q r22, r21, r25 | ||
189 | stlo.q r22, -32, r0 | ||
190 | ldx.q r22, r6, r0 | ||
191 | sthi.q r22, -17, r23 | ||
192 | sthi.q r22, -9, r24 | ||
193 | sthi.q r22, -1, r25 | ||
194 | stlo.q r22, -24, r23 | ||
195 | stlo.q r22, -16, r24 | ||
196 | stlo.q r22, -8, r25 | ||
197 | bgeu r27, r22, tr0 | ||
198 | |||
199 | Loop_ua: | ||
200 | addi r22, 8, r22 | ||
201 | sthi.q r22, -1, r0 | ||
202 | stlo.q r22, -8, r0 | ||
203 | ldx.q r22, r6, r0 | ||
204 | bgtu/l r5, r22, tr1 | ||
205 | |||
206 | add r3, r4, r7 | ||
207 | ldlo.q r7, -8, r1 | ||
208 | sthi.q r22, 7, r0 | ||
209 | ldhi.q r7, -1, r7 | ||
210 | ptabs r18,tr1 | ||
211 | stlo.q r22, 0, r0 | ||
212 | or r1, r7, r1 | ||
213 | sthi.q r5, 15, r1 | ||
214 | stlo.q r5, 8, r1 | ||
215 | blink tr1, r63 | ||
216 | copy_user_memcpy_end: | ||
217 | nop | ||
diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c new file mode 100644 index 000000000000..526fedae6db8 --- /dev/null +++ b/arch/sh64/lib/dbg.c | |||
@@ -0,0 +1,430 @@ | |||
1 | /*-------------------------------------------------------------------------- | ||
2 | -- | ||
3 | -- Identity : Linux50 Debug Functions | ||
4 | -- | ||
5 | -- File : arch/sh64/lib/dbg.C | ||
6 | -- | ||
7 | -- Copyright 2000, 2001 STMicroelectronics Limited. | ||
8 | -- Copyright 2004 Richard Curnow (evt_debug etc) | ||
9 | -- | ||
10 | --------------------------------------------------------------------------*/ | ||
11 | #include <linux/config.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | |||
/* SH-5 configuration-space registers (TLB entries included) are 64 bits. */
typedef u64 regType_t;

/*
 * Read one 64-bit word from SH-5 configuration space via the getcfg
 * instruction.  Used below to inspect raw I/D-TLB entries.
 * The output is pinned to r2 by the register-asm declaration.
 */
static regType_t getConfigReg(u64 id)
{
	register u64 reg __asm__("r2");
	asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id));
	return (reg);
}
26 | |||
27 | /* ======================================================================= */ | ||
28 | |||
/* Page-size names indexed by the 2-bit size field of the PTE low word
 * (see GET_PAGE_SIZE below). */
static char *szTab[] = { "4k", "64k", "1M", "512M" };
/* Protection strings indexed by the 4-bit protection field of the PTE
 * low word: U = user-accessible, W = writable, X = executable,
 * R = readable (see GET_PROTECTION below). */
static char *protTab[] = { "----",
	"---R",
	"--X-",
	"--XR",
	"-W--",
	"-W-R",
	"-WX-",
	"-WXR",
	"U---",
	"U--R",
	"U-X-",
	"U-XR",
	"UW--",
	"UW-R",
	"UWX-",
	"UWXR"
};
/* Configuration-space base addresses of the instruction and data TLB
 * arrays, and the number of entries per side. */
#define ITLB_BASE	0x00000000
#define DTLB_BASE	0x00800000
#define MAX_TLBs	64
/* PTE High: field extractors for the first config word of an entry. */
#define GET_VALID(pte)        ((pte) & 0x1)
#define GET_SHARED(pte)       ((pte) & 0x2)
#define GET_ASID(pte)         ((pte >> 2) & 0x0ff)
#define GET_EPN(pte)          ((pte) & 0xfffff000)

/* PTE Low: field extractors for the second config word of an entry. */
#define GET_CBEHAVIOR(pte)    ((pte) & 0x3)
#define GET_PAGE_SIZE(pte)    szTab[((pte >> 3) & 0x3)]
#define GET_PROTECTION(pte)   protTab[((pte >> 6) & 0xf)]
#define GET_PPN(pte)          ((pte) & 0xfffff000)

/* Page-size mask encodings (values of the PTE size bits). */
#define PAGE_1K_MASK          0x00000000
#define PAGE_4K_MASK          0x00000010
#define PAGE_64K_MASK         0x00000080
#define MMU_PAGESIZE_MASK     (PAGE_64K_MASK | PAGE_4K_MASK)
#define PAGE_1MB_MASK         MMU_PAGESIZE_MASK
#define PAGE_1K               (1024)
#define PAGE_4K               (1024 * 4)
#define PAGE_64K              (1024 * 64)
#define PAGE_1MB              (1024 * 1024)

/* Column header matching the printk format in print_single_tlb(). */
#define HOW_TO_READ_TLB_CONTENT \
	"[ ID] PPN EPN ASID Share CB P.Size PROT.\n"
74 | |||
75 | void print_single_tlb(unsigned long tlb, int single_print) | ||
76 | { | ||
77 | regType_t pteH; | ||
78 | regType_t pteL; | ||
79 | unsigned int valid, shared, asid, epn, cb, ppn; | ||
80 | char *pSize; | ||
81 | char *pProt; | ||
82 | |||
83 | /* | ||
84 | ** in case of single print <single_print> is true, this implies: | ||
85 | ** 1) print the TLB in any case also if NOT VALID | ||
86 | ** 2) print out the header | ||
87 | */ | ||
88 | |||
89 | pteH = getConfigReg(tlb); | ||
90 | valid = GET_VALID(pteH); | ||
91 | if (single_print) | ||
92 | printk(HOW_TO_READ_TLB_CONTENT); | ||
93 | else if (!valid) | ||
94 | return; | ||
95 | |||
96 | pteL = getConfigReg(tlb + 1); | ||
97 | |||
98 | shared = GET_SHARED(pteH); | ||
99 | asid = GET_ASID(pteH); | ||
100 | epn = GET_EPN(pteH); | ||
101 | cb = GET_CBEHAVIOR(pteL); | ||
102 | pSize = GET_PAGE_SIZE(pteL); | ||
103 | pProt = GET_PROTECTION(pteL); | ||
104 | ppn = GET_PPN(pteL); | ||
105 | printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n", | ||
106 | ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP), | ||
107 | ppn, epn, asid, shared, cb, pSize, pProt); | ||
108 | } | ||
109 | |||
110 | void print_dtlb(void) | ||
111 | { | ||
112 | int count; | ||
113 | unsigned long tlb; | ||
114 | |||
115 | printk(" ================= SH-5 D-TLBs Status ===================\n"); | ||
116 | printk(HOW_TO_READ_TLB_CONTENT); | ||
117 | tlb = DTLB_BASE; | ||
118 | for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) | ||
119 | print_single_tlb(tlb, 0); | ||
120 | printk | ||
121 | (" =============================================================\n"); | ||
122 | } | ||
123 | |||
124 | void print_itlb(void) | ||
125 | { | ||
126 | int count; | ||
127 | unsigned long tlb; | ||
128 | |||
129 | printk(" ================= SH-5 I-TLBs Status ===================\n"); | ||
130 | printk(HOW_TO_READ_TLB_CONTENT); | ||
131 | tlb = ITLB_BASE; | ||
132 | for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) | ||
133 | print_single_tlb(tlb, 0); | ||
134 | printk | ||
135 | (" =============================================================\n"); | ||
136 | } | ||
137 | |||
138 | /* ======================================================================= */ | ||
139 | |||
140 | #ifdef CONFIG_POOR_MANS_STRACE | ||
141 | |||
142 | #include "syscalltab.h" | ||
143 | |||
/* One record in the last-16-events trace ring (see evt_debug below).
 * evt 0xffff / 0xfffe mark IRQ / exception returns respectively. */
struct ring_node {
	int evt;
	int ret_addr;
	int event;
	int tra;		/* TRA value; low byte is the syscall number */
	int pid;
	unsigned long sp;
	unsigned long pc;
};

static struct ring_node event_ring[16];
static int event_ptr = 0;	/* next slot to write; wraps mod 16 */

/* Deferred syscall-entry record: printing is unsafe at trap time
 * (SR.BL=1 — see comment in evt_debug), so entries are queued here and
 * drained later by drain_syscalls(). */
struct stored_syscall_data {
	int pid;
	int syscall_number;
};

#define N_STORED_SYSCALLS 16

static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
static int syscall_next=0;		/* next slot to write */
static int syscall_next_print=0;	/* next slot to print */
167 | |||
168 | void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs) | ||
169 | { | ||
170 | int syscallno = tra & 0xff; | ||
171 | unsigned long sp; | ||
172 | unsigned long stack_bottom; | ||
173 | int pid; | ||
174 | struct ring_node *rr; | ||
175 | |||
176 | pid = current->pid; | ||
177 | stack_bottom = (unsigned long) current->thread_info; | ||
178 | asm volatile("ori r15, 0, %0" : "=r" (sp)); | ||
179 | rr = event_ring + event_ptr; | ||
180 | rr->evt = evt; | ||
181 | rr->ret_addr = ret_addr; | ||
182 | rr->event = event; | ||
183 | rr->tra = tra; | ||
184 | rr->pid = pid; | ||
185 | rr->sp = sp; | ||
186 | rr->pc = regs->pc; | ||
187 | |||
188 | if (sp < stack_bottom + 3092) { | ||
189 | printk("evt_debug : stack underflow report\n"); | ||
190 | int i, j; | ||
191 | for (j=0, i = event_ptr; j<16; j++) { | ||
192 | rr = event_ring + i; | ||
193 | printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n", | ||
194 | rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc); | ||
195 | i--; | ||
196 | i &= 15; | ||
197 | } | ||
198 | panic("STACK UNDERFLOW\n"); | ||
199 | } | ||
200 | |||
201 | event_ptr = (event_ptr + 1) & 15; | ||
202 | |||
203 | if ((event == 2) && (evt == 0x160)) { | ||
204 | if (syscallno < NUM_SYSCALL_INFO_ENTRIES) { | ||
205 | /* Store the syscall information to print later. We | ||
206 | * can't print this now - currently we're running with | ||
207 | * SR.BL=1, so we can't take a tlbmiss (which could occur | ||
208 | * in the console drivers under printk). | ||
209 | * | ||
210 | * Just overwrite old entries on ring overflow - this | ||
211 | * is only for last-hope debugging. */ | ||
212 | stored_syscalls[syscall_next].pid = current->pid; | ||
213 | stored_syscalls[syscall_next].syscall_number = syscallno; | ||
214 | syscall_next++; | ||
215 | syscall_next &= (N_STORED_SYSCALLS - 1); | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void drain_syscalls(void) { | ||
221 | while (syscall_next_print != syscall_next) { | ||
222 | printk("Task %d: %s()\n", | ||
223 | stored_syscalls[syscall_next_print].pid, | ||
224 | syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name); | ||
225 | syscall_next_print++; | ||
226 | syscall_next_print &= (N_STORED_SYSCALLS - 1); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | void evt_debug2(unsigned int ret) | ||
231 | { | ||
232 | drain_syscalls(); | ||
233 | printk("Task %d: syscall returns %08x\n", current->pid, ret); | ||
234 | } | ||
235 | |||
236 | void evt_debug_ret_from_irq(struct pt_regs *regs) | ||
237 | { | ||
238 | int pid; | ||
239 | struct ring_node *rr; | ||
240 | |||
241 | pid = current->pid; | ||
242 | rr = event_ring + event_ptr; | ||
243 | rr->evt = 0xffff; | ||
244 | rr->ret_addr = 0; | ||
245 | rr->event = 0; | ||
246 | rr->tra = 0; | ||
247 | rr->pid = pid; | ||
248 | rr->pc = regs->pc; | ||
249 | event_ptr = (event_ptr + 1) & 15; | ||
250 | } | ||
251 | |||
252 | void evt_debug_ret_from_exc(struct pt_regs *regs) | ||
253 | { | ||
254 | int pid; | ||
255 | struct ring_node *rr; | ||
256 | |||
257 | pid = current->pid; | ||
258 | rr = event_ring + event_ptr; | ||
259 | rr->evt = 0xfffe; | ||
260 | rr->ret_addr = 0; | ||
261 | rr->event = 0; | ||
262 | rr->tra = 0; | ||
263 | rr->pid = pid; | ||
264 | rr->pc = regs->pc; | ||
265 | event_ptr = (event_ptr + 1) & 15; | ||
266 | } | ||
267 | |||
268 | #endif /* CONFIG_POOR_MANS_STRACE */ | ||
269 | |||
270 | /* ======================================================================= */ | ||
271 | |||
272 | void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs) | ||
273 | { | ||
274 | |||
275 | unsigned long long ah, al, bh, bl, ch, cl; | ||
276 | |||
277 | printk("\n"); | ||
278 | printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n", | ||
279 | ((from) ? from : "???"), current->pid, trapnr, signr); | ||
280 | |||
281 | asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah)); | ||
282 | asm volatile ("getcon " __EXPEVT ", %0":"=r"(al)); | ||
283 | ah = (ah) >> 32; | ||
284 | al = (al) & 0xffffffff; | ||
285 | asm volatile ("getcon " __KCR1 ", %0":"=r"(bh)); | ||
286 | asm volatile ("getcon " __KCR1 ", %0":"=r"(bl)); | ||
287 | bh = (bh) >> 32; | ||
288 | bl = (bl) & 0xffffffff; | ||
289 | asm volatile ("getcon " __INTEVT ", %0":"=r"(ch)); | ||
290 | asm volatile ("getcon " __INTEVT ", %0":"=r"(cl)); | ||
291 | ch = (ch) >> 32; | ||
292 | cl = (cl) & 0xffffffff; | ||
293 | printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n", | ||
294 | ah, al, bh, bl, ch, cl); | ||
295 | |||
296 | asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah)); | ||
297 | asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al)); | ||
298 | ah = (ah) >> 32; | ||
299 | al = (al) & 0xffffffff; | ||
300 | asm volatile ("getcon " __PSPC ", %0":"=r"(bh)); | ||
301 | asm volatile ("getcon " __PSPC ", %0":"=r"(bl)); | ||
302 | bh = (bh) >> 32; | ||
303 | bl = (bl) & 0xffffffff; | ||
304 | asm volatile ("getcon " __PSSR ", %0":"=r"(ch)); | ||
305 | asm volatile ("getcon " __PSSR ", %0":"=r"(cl)); | ||
306 | ch = (ch) >> 32; | ||
307 | cl = (cl) & 0xffffffff; | ||
308 | printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n", | ||
309 | ah, al, bh, bl, ch, cl); | ||
310 | |||
311 | ah = (regs->pc) >> 32; | ||
312 | al = (regs->pc) & 0xffffffff; | ||
313 | bh = (regs->regs[18]) >> 32; | ||
314 | bl = (regs->regs[18]) & 0xffffffff; | ||
315 | ch = (regs->regs[15]) >> 32; | ||
316 | cl = (regs->regs[15]) & 0xffffffff; | ||
317 | printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", | ||
318 | ah, al, bh, bl, ch, cl); | ||
319 | |||
320 | ah = (regs->sr) >> 32; | ||
321 | al = (regs->sr) & 0xffffffff; | ||
322 | asm volatile ("getcon " __TEA ", %0":"=r"(bh)); | ||
323 | asm volatile ("getcon " __TEA ", %0":"=r"(bl)); | ||
324 | bh = (bh) >> 32; | ||
325 | bl = (bl) & 0xffffffff; | ||
326 | asm volatile ("getcon " __KCR0 ", %0":"=r"(ch)); | ||
327 | asm volatile ("getcon " __KCR0 ", %0":"=r"(cl)); | ||
328 | ch = (ch) >> 32; | ||
329 | cl = (cl) & 0xffffffff; | ||
330 | printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", | ||
331 | ah, al, bh, bl, ch, cl); | ||
332 | |||
333 | ah = (regs->regs[0]) >> 32; | ||
334 | al = (regs->regs[0]) & 0xffffffff; | ||
335 | bh = (regs->regs[1]) >> 32; | ||
336 | bl = (regs->regs[1]) & 0xffffffff; | ||
337 | ch = (regs->regs[2]) >> 32; | ||
338 | cl = (regs->regs[2]) & 0xffffffff; | ||
339 | printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n", | ||
340 | ah, al, bh, bl, ch, cl); | ||
341 | |||
342 | ah = (regs->regs[3]) >> 32; | ||
343 | al = (regs->regs[3]) & 0xffffffff; | ||
344 | bh = (regs->regs[4]) >> 32; | ||
345 | bl = (regs->regs[4]) & 0xffffffff; | ||
346 | ch = (regs->regs[5]) >> 32; | ||
347 | cl = (regs->regs[5]) & 0xffffffff; | ||
348 | printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", | ||
349 | ah, al, bh, bl, ch, cl); | ||
350 | |||
351 | ah = (regs->regs[6]) >> 32; | ||
352 | al = (regs->regs[6]) & 0xffffffff; | ||
353 | bh = (regs->regs[7]) >> 32; | ||
354 | bl = (regs->regs[7]) & 0xffffffff; | ||
355 | ch = (regs->regs[8]) >> 32; | ||
356 | cl = (regs->regs[8]) & 0xffffffff; | ||
357 | printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", | ||
358 | ah, al, bh, bl, ch, cl); | ||
359 | |||
360 | ah = (regs->regs[9]) >> 32; | ||
361 | al = (regs->regs[9]) & 0xffffffff; | ||
362 | bh = (regs->regs[10]) >> 32; | ||
363 | bl = (regs->regs[10]) & 0xffffffff; | ||
364 | ch = (regs->regs[11]) >> 32; | ||
365 | cl = (regs->regs[11]) & 0xffffffff; | ||
366 | printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", | ||
367 | ah, al, bh, bl, ch, cl); | ||
368 | printk("....\n"); | ||
369 | |||
370 | ah = (regs->tregs[0]) >> 32; | ||
371 | al = (regs->tregs[0]) & 0xffffffff; | ||
372 | bh = (regs->tregs[1]) >> 32; | ||
373 | bl = (regs->tregs[1]) & 0xffffffff; | ||
374 | ch = (regs->tregs[2]) >> 32; | ||
375 | cl = (regs->tregs[2]) & 0xffffffff; | ||
376 | printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n", | ||
377 | ah, al, bh, bl, ch, cl); | ||
378 | printk("....\n"); | ||
379 | |||
380 | print_dtlb(); | ||
381 | print_itlb(); | ||
382 | } | ||
383 | |||
384 | /* ======================================================================= */ | ||
385 | |||
386 | /* | ||
387 | ** Depending on <base> scan the MMU, Data or Instrction side | ||
388 | ** looking for a valid mapping matching Eaddr & asid. | ||
389 | ** Return -1 if not found or the TLB id entry otherwise. | ||
390 | ** Note: it works only for 4k pages! | ||
391 | */ | ||
392 | static unsigned long | ||
393 | lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid) | ||
394 | { | ||
395 | regType_t pteH; | ||
396 | unsigned long epn; | ||
397 | int count; | ||
398 | |||
399 | epn = Eaddr & 0xfffff000; | ||
400 | |||
401 | for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) { | ||
402 | pteH = getConfigReg(base); | ||
403 | if (GET_VALID(pteH)) | ||
404 | if ((unsigned long) GET_EPN(pteH) == epn) | ||
405 | if ((unsigned long) GET_ASID(pteH) == asid) | ||
406 | break; | ||
407 | } | ||
408 | return ((unsigned long) ((count < MAX_TLBs) ? base : -1)); | ||
409 | } | ||
410 | |||
411 | unsigned long lookup_dtlb(unsigned long Eaddr) | ||
412 | { | ||
413 | unsigned long asid = get_asid(); | ||
414 | return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid)); | ||
415 | } | ||
416 | |||
417 | unsigned long lookup_itlb(unsigned long Eaddr) | ||
418 | { | ||
419 | unsigned long asid = get_asid(); | ||
420 | return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid)); | ||
421 | } | ||
422 | |||
423 | void print_page(struct page *page) | ||
424 | { | ||
425 | printk(" page[%p] -> index 0x%lx, count 0x%x, flags 0x%lx\n", | ||
426 | page, page->index, page_count(page), page->flags); | ||
427 | printk(" address_space = %p, pages =%ld\n", page->mapping, | ||
428 | page->mapping->nrpages); | ||
429 | |||
430 | } | ||
diff --git a/arch/sh64/lib/io.c b/arch/sh64/lib/io.c new file mode 100644 index 000000000000..277e11b10c2b --- /dev/null +++ b/arch/sh64/lib/io.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 David J. Mckay (david.mckay@st.com) | ||
3 | * | ||
4 | * May be copied or modified under the terms of the GNU General Public | ||
5 | * License. See linux/COPYING for more information. | ||
6 | * | ||
7 | * This file contains the I/O routines for use on the overdrive board | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <asm/system.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/io.h> | ||
18 | |||
19 | /* | ||
20 | * readX/writeX() are used to access memory mapped devices. On some | ||
21 | * architectures the memory mapped IO stuff needs to be accessed | ||
22 | * differently. On the SuperH architecture, we just read/write the | ||
23 | * memory location directly. | ||
24 | */ | ||
25 | |||
/* This is horrible at the moment - needs more work to do something sensible */
#define IO_DELAY()

/* Generate the "pausing" port accessors out{b,w,l}_p: plain access
 * followed by the (currently empty) IO_DELAY. */
#define OUT_DELAY(x,type) \
void out##x##_p(unsigned type value,unsigned long port){out##x(value,port);IO_DELAY();}

/* Likewise for in{b,w,l}_p. */
#define IN_DELAY(x,type) \
unsigned type in##x##_p(unsigned long port) {unsigned type tmp=in##x(port);IO_DELAY();return tmp;}

#if 1
OUT_DELAY(b, long) OUT_DELAY(w, long) OUT_DELAY(l, long)
IN_DELAY(b, long) IN_DELAY(w, long) IN_DELAY(l, long)
#endif
39 | /* Now for the string version of these functions */ | ||
/*
 * outsb - write <count> bytes from <addr> to I/O port <port>.
 *
 * Fix vs. original: the loop counter was a signed int compared against
 * the unsigned long count (signed/unsigned mismatch, truncation for
 * counts > INT_MAX); the const qualifier of <addr> was also cast away.
 */
void outsb(unsigned long port, const void *addr, unsigned long count)
{
	unsigned long i;
	const unsigned char *p = (const unsigned char *) addr;

	for (i = 0; i < count; i++, p++) {
		outb(*p, port);
	}
}
49 | |||
/*
 * insb - read <count> bytes from I/O port <port> into <addr>.
 *
 * Fix vs. original: loop counter was a signed int compared against the
 * unsigned long count (mismatch/truncation for counts > INT_MAX).
 */
void insb(unsigned long port, void *addr, unsigned long count)
{
	unsigned long i;
	unsigned char *p = (unsigned char *) addr;

	for (i = 0; i < count; i++, p++) {
		*p = inb(port);
	}
}
59 | |||
60 | /* For the 16 and 32 bit string functions, we have to worry about alignment. | ||
61 | * The SH does not do unaligned accesses, so we have to read as bytes and | ||
62 | * then write as a word or dword. | ||
63 | * This can be optimised a lot more, especially in the case where the data | ||
64 | * is aligned | ||
65 | */ | ||
66 | |||
/*
 * outsw - write <count> 16-bit words to <port>.  Each word is assembled
 * byte-by-byte (little-endian) because the SH cannot do unaligned loads
 * (see the comment above).
 *
 * Fix vs. original: signed int loop counter vs. unsigned long count;
 * const cast away from <addr>.
 */
void outsw(unsigned long port, const void *addr, unsigned long count)
{
	unsigned long i;
	unsigned short tmp;
	const unsigned char *p = (const unsigned char *) addr;

	for (i = 0; i < count; i++, p += 2) {
		tmp = (*p) | ((*(p + 1)) << 8);
		outw(tmp, port);
	}
}
78 | |||
/*
 * insw - read <count> 16-bit words from <port> into <addr>, storing
 * byte-by-byte (little-endian) to tolerate unaligned buffers.
 *
 * Fix vs. original: signed int loop counter vs. unsigned long count.
 */
void insw(unsigned long port, void *addr, unsigned long count)
{
	unsigned long i;
	unsigned short tmp;
	unsigned char *p = (unsigned char *) addr;

	for (i = 0; i < count; i++, p += 2) {
		tmp = inw(port);
		p[0] = tmp & 0xff;
		p[1] = (tmp >> 8) & 0xff;
	}
}
91 | |||
/*
 * outsl - write <count> 32-bit words to <port>, assembled byte-by-byte
 * (little-endian) to tolerate unaligned source buffers.
 *
 * Fix vs. original: signed int loop counter vs. unsigned long count;
 * const cast away from <addr>.
 */
void outsl(unsigned long port, const void *addr, unsigned long count)
{
	unsigned long i;
	unsigned tmp;
	const unsigned char *p = (const unsigned char *) addr;

	for (i = 0; i < count; i++, p += 4) {
		tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) |
		    ((*(p + 3)) << 24);
		outl(tmp, port);
	}
}
104 | |||
/*
 * insl - read <count> 32-bit words from <port> into <addr>, storing
 * byte-by-byte (little-endian) to tolerate unaligned buffers.
 *
 * Fix vs. original: signed int loop counter vs. unsigned long count.
 */
void insl(unsigned long port, void *addr, unsigned long count)
{
	unsigned long i;
	unsigned tmp;
	unsigned char *p = (unsigned char *) addr;

	for (i = 0; i < count; i++, p += 4) {
		tmp = inl(port);
		p[0] = tmp & 0xff;
		p[1] = (tmp >> 8) & 0xff;
		p[2] = (tmp >> 16) & 0xff;
		p[3] = (tmp >> 24) & 0xff;
	}
}
120 | |||
121 | void memcpy_toio(void __iomem *to, const void *from, long count) | ||
122 | { | ||
123 | unsigned char *p = (unsigned char *) from; | ||
124 | |||
125 | while (count) { | ||
126 | count--; | ||
127 | writeb(*p++, to++); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | void memcpy_fromio(void *to, void __iomem *from, long count) | ||
132 | { | ||
133 | int i; | ||
134 | unsigned char *p = (unsigned char *) to; | ||
135 | |||
136 | for (i = 0; i < count; i++) { | ||
137 | p[i] = readb(from); | ||
138 | from++; | ||
139 | } | ||
140 | } | ||
diff --git a/arch/sh64/lib/iomap.c b/arch/sh64/lib/iomap.c new file mode 100644 index 000000000000..83c5f0c04958 --- /dev/null +++ b/arch/sh64/lib/iomap.c | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * arch/sh64/lib/iomap.c | ||
3 | * | ||
4 | * Generic sh64 iomap interface | ||
5 | * | ||
6 | * Copyright (C) 2004 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <asm/io.h> | ||
15 | |||
/*
 * Generic port -> __iomem cookie mapping: the port number is simply
 * reinterpreted as a pointer.  Declared weak so a board can override it.
 */
void __iomem *__attribute__ ((weak))
ioport_map(unsigned long port, unsigned int len)
{
	return (void __iomem *)port;
}
21 | |||
/* ioport_map() allocates nothing, so there is nothing to release. */
void ioport_unmap(void __iomem *addr)
{
	/* Nothing .. */
}
26 | |||
/*
 * Map PCI BAR <bar> of <dev> for CPU access, at most <max> bytes
 * (0 = whole BAR).  I/O-space BARs are offset by pciio_virt and routed
 * through ioport_map(); memory-space BARs are returned as-is
 * (presumably the bus address is directly addressable here -- no
 * ioremap is done; confirm against the platform's PCI setup).
 * Returns NULL for an empty or unrecognised resource.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len)
		return NULL;
	if (max && len > max)
		len = max;
	if (flags & IORESOURCE_IO)
		return ioport_map(start + pciio_virt, len);
	if (flags & IORESOURCE_MEM)
		return (void __iomem *)start;

	/* Neither I/O nor memory resource: unmappable. */
	return NULL;
}
45 | |||
/* pci_iomap() allocates nothing, so there is nothing to release. */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Nothing .. */
}
50 | |||
51 | EXPORT_SYMBOL(ioport_map); | ||
52 | EXPORT_SYMBOL(ioport_unmap); | ||
53 | EXPORT_SYMBOL(pci_iomap); | ||
54 | EXPORT_SYMBOL(pci_iounmap); | ||
55 | |||
diff --git a/arch/sh64/lib/memcpy.c b/arch/sh64/lib/memcpy.c new file mode 100644 index 000000000000..c785d0aa194d --- /dev/null +++ b/arch/sh64/lib/memcpy.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com) | ||
3 | * | ||
4 | * May be copied or modified under the terms of the GNU General Public | ||
5 | * License. See linux/COPYING for more information. | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <asm/string.h> | ||
12 | |||
13 | // This is a simplistic optimization of memcpy to increase the | ||
14 | // granularity of access beyond one byte using aligned | ||
15 | // loads and stores. This is not an optimal implementation | ||
16 | // for SH-5 (especially with regard to prefetching and the cache), | ||
17 | // and a better version should be provided later ... | ||
18 | |||
// This is a simplistic optimization of memcpy to increase the
// granularity of access beyond one byte using aligned
// loads and stores. This is not an optimal implementation
// for SH-5 (especially with regard to prefetching and the cache),
// and a better version should be provided later ...
//
// Fix vs. original: memcpy must return the ORIGINAL destination
// pointer, but the old code returned the working pointer `d`, which
// had been advanced past the copied data.  Also made the source
// pointer const-correct.
void *memcpy(void *dest, const void *src, size_t count)
{
	char *d = (char *) dest;
	const char *s = (const char *) src;

	if (count >= 32) {
		// Advance d to an 8-byte boundary (at most 7 single-byte
		// copies; count >= 32 guarantees count cannot underflow).
		int i = 8 - (((unsigned long) d) & 0x7);

		if (i != 8)
			while (i-- && count--) {
				*d++ = *s++;
			}

		if (((((unsigned long) d) & 0x7) == 0) &&
		    ((((unsigned long) s) & 0x7) == 0)) {
			// Both 8-byte aligned: bulk-copy 32 bytes, then
			// 8 bytes, at a time.
			while (count >= 32) {
				unsigned long long t1, t2, t3, t4;
				t1 = *(const unsigned long long *) (s);
				t2 = *(const unsigned long long *) (s + 8);
				t3 = *(const unsigned long long *) (s + 16);
				t4 = *(const unsigned long long *) (s + 24);
				*(unsigned long long *) (d) = t1;
				*(unsigned long long *) (d + 8) = t2;
				*(unsigned long long *) (d + 16) = t3;
				*(unsigned long long *) (d + 24) = t4;
				d += 32;
				s += 32;
				count -= 32;
			}
			while (count >= 8) {
				*(unsigned long long *) d =
				    *(const unsigned long long *) s;
				d += 8;
				s += 8;
				count -= 8;
			}
		}

		// 4-byte-aligned tail.
		if (((((unsigned long) d) & 0x3) == 0) &&
		    ((((unsigned long) s) & 0x3) == 0)) {
			while (count >= 4) {
				*(unsigned long *) d = *(const unsigned long *) s;
				d += 4;
				s += 4;
				count -= 4;
			}
		}

		// 2-byte-aligned tail.
		if (((((unsigned long) d) & 0x1) == 0) &&
		    ((((unsigned long) s) & 0x1) == 0)) {
			while (count >= 2) {
				*(unsigned short *) d = *(const unsigned short *) s;
				d += 2;
				s += 2;
				count -= 2;
			}
		}
	}

	// Remaining bytes (also the whole copy when count < 32 or the
	// buffers are mutually misaligned).
	while (count--) {
		*d++ = *s++;
	}

	return dest;	/* C semantics: return the original destination */
}
diff --git a/arch/sh64/lib/page_clear.S b/arch/sh64/lib/page_clear.S new file mode 100644 index 000000000000..ac0111d669a3 --- /dev/null +++ b/arch/sh64/lib/page_clear.S | |||
@@ -0,0 +1,54 @@ | |||
/*
   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.

   This file is subject to the terms and conditions of the GNU General Public
   License. See the file "COPYING" in the main directory of this archive
   for more details.

   Tight version of memset for the case of just clearing a page. It turns out
   that having the alloco's spaced out slightly due to the increment/branch
   pair causes them to contend less for access to the cache. Similarly,
   keeping the stores apart from the allocos causes less contention. => Do two
   separate loops. Do multiple stores per loop to amortise the
   increment/branch cost a little.

   Parameters:
   r2 : source effective address (start of page)

   Always clears 4096 bytes.

   Note : alloco guarded by synco to avoid TAKum03020 erratum

*/

	.section .text..SHmedia32,"ax"
	.little

	.balign 8
	.global sh64_page_clear
sh64_page_clear:
	pta/l	1f, tr1
	pta/l	2f, tr2
	ptabs/l	r18, tr0	! return address

	movi	4096, r7
	add	r2, r7, r7	! r7 = one past the end of the page
	add	r2, r63, r6	! r6 = cursor (r63 reads as zero)
1:	! pass 1: pre-allocate each destination cache line
	alloco	r6, 0
	synco	! TAKum03020
	addi	r6, 32, r6
	bgt/l	r7, r6, tr1

	add	r2, r63, r6	! rewind cursor to start of page
2:	! pass 2: store zeroes, 32 bytes per iteration
	st.q	r6, 0, r63
	st.q	r6, 8, r63
	st.q	r6, 16, r63
	st.q	r6, 24, r63
	addi	r6, 32, r6
	bgt/l	r7, r6, tr2

	blink	tr0, r63	! return

diff --git a/arch/sh64/lib/page_copy.S b/arch/sh64/lib/page_copy.S new file mode 100644 index 000000000000..e159c3cd2582 --- /dev/null +++ b/arch/sh64/lib/page_copy.S | |||
@@ -0,0 +1,91 @@ | |||
/*
   Copyright 2003 Richard Curnow, SuperH (UK) Ltd.

   This file is subject to the terms and conditions of the GNU General Public
   License. See the file "COPYING" in the main directory of this archive
   for more details.

   Tight version of memcpy for the case of just copying a page.
   Prefetch strategy empirically optimised against RTL simulations
   of SH5-101 cut2 eval chip with Cayman board DDR memory.

   Parameters:
   r2 : source effective address (start of page)
   r3 : destination effective address (start of page)

   Always copies 4096 bytes.

   Points to review.
   * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
     It seems like the prefetch needs to be at at least 4 lines ahead to get
     the data into the cache in time, and the allocos contend with outstanding
     prefetches for the same cache set, so it's better to have the numbers
     different.
   */

	.section .text..SHmedia32,"ax"
	.little

	.balign 8
	.global sh64_page_copy
sh64_page_copy:

	/* Copy 4096 bytes worth of data from r2 to r3.
	   Do prefetches 4 lines ahead.
	   Do alloco 2 lines ahead */

	pta	1f, tr1
	pta	2f, tr2
	pta	3f, tr3
	ptabs	r18, tr0	! return address

#if 0
	/* TAKum03020: speculative-load prefetches disabled by erratum */
	ld.q	r2, 0x00, r63
	ld.q	r2, 0x20, r63
	ld.q	r2, 0x40, r63
	ld.q	r2, 0x60, r63
#endif
	alloco	r3, 0x00
	synco		! TAKum03020
	alloco	r3, 0x20
	synco		! TAKum03020

	movi	3968, r6	! r6/r7/r8 = loop-end thresholds
	add	r3, r6, r6
	addi	r6, 64, r7
	addi	r7, 64, r8
	sub	r2, r3, r60	! r60..r23 = src-dst offsets for ldx.q
	addi	r60, 8, r61
	addi	r61, 8, r62
	addi	r62, 8, r23
	addi	r60, 0x80, r22	! prefetch offset (4 lines ahead)

/* Minimal code size.  The extra branches inside the loop don't cost much
   because they overlap with the time spent waiting for prefetches to
   complete. */
1:
#if 0
	/* TAKum03020 */
	bge/u	r3, r6, tr2	! skip prefetch for last 4 lines
	ldx.q	r3, r22, r63	! prefetch 4 lines hence
#endif
2:
	bge/u	r3, r7, tr3	! skip alloco for last 2 lines
	alloco	r3, 0x40	! alloc destination line 2 lines ahead
	synco		! TAKum03020
3:
	ldx.q	r3, r60, r36	! copy one 32-byte line via r36-r39
	ldx.q	r3, r61, r37
	ldx.q	r3, r62, r38
	ldx.q	r3, r23, r39
	st.q	r3, 0, r36
	st.q	r3, 8, r37
	st.q	r3, 16, r38
	st.q	r3, 24, r39
	addi	r3, 32, r3
	bgt/l	r8, r3, tr1

	blink	tr0, r63	! return

diff --git a/arch/sh64/lib/panic.c b/arch/sh64/lib/panic.c new file mode 100644 index 000000000000..c9eb1cb50d97 --- /dev/null +++ b/arch/sh64/lib/panic.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 Richard Curnow, SuperH UK Limited | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/registers.h> | ||
12 | |||
13 | /* THIS IS A PHYSICAL ADDRESS */ | ||
14 | #define HDSP2534_ADDR (0x04002100) | ||
15 | |||
16 | #ifdef CONFIG_SH_CAYMAN | ||
17 | |||
/*
 * Busy-wait long enough for an LED display value to be readable.
 *
 * Fix vs. original: the loop variable was a plain int and the loop body
 * empty, so an optimizing compiler may delete the whole loop; volatile
 * forces the iterations to actually happen.
 */
static void poor_mans_delay(void)
{
	volatile int i;
	for (i = 0; i < 2500000; i++) {
	}			/* poor man's delay */
}
24 | |||
25 | static void show_value(unsigned long x) | ||
26 | { | ||
27 | int i; | ||
28 | unsigned nibble; | ||
29 | for (i = 0; i < 8; i++) { | ||
30 | nibble = ((x >> (i * 4)) & 0xf); | ||
31 | |||
32 | ctrl_outb(nibble + ((nibble > 9) ? 55 : 48), | ||
33 | HDSP2534_ADDR + 0xe0 + ((7 - i) << 2)); | ||
34 | } | ||
35 | } | ||
36 | |||
37 | #endif | ||
38 | |||
/*
 * Last-resort handler for an unrecoverable panic.  On Cayman boards the
 * panicking PC, SSR and EXPEVT values are cycled forever on the LED
 * display; on other boards it just spins.  Never returns.
 */
void
panic_handler(unsigned long panicPC, unsigned long panicSSR,
	      unsigned long panicEXPEVT)
{
#ifdef CONFIG_SH_CAYMAN
	while (1) {
		/* This piece of code displays the PC on the LED display */
		show_value(panicPC);
		poor_mans_delay();
		show_value(panicSSR);
		poor_mans_delay();
		show_value(panicEXPEVT);
		poor_mans_delay();
	}
#endif

	/* Never return from the panic handler */
	for (;;) ;

}
diff --git a/arch/sh64/lib/udelay.c b/arch/sh64/lib/udelay.c new file mode 100644 index 000000000000..dad2f254efee --- /dev/null +++ b/arch/sh64/lib/udelay.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * arch/sh64/lib/udelay.c | ||
3 | * | ||
4 | * Delay routines, using a pre-computed "loops_per_jiffy" value. | ||
5 | * | ||
6 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
7 | * Copyright (C) 2003, 2004 Paul Mundt | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <asm/param.h> | ||
16 | |||
17 | extern unsigned long loops_per_jiffy; | ||
18 | |||
19 | /* | ||
20 | * Use only for very small delays (< 1 msec). | ||
21 | * | ||
22 | * The active part of our cycle counter is only 32-bits wide, and | ||
23 | * we're treating the difference between two marks as signed. On | ||
24 | * a 1GHz box, that's about 2 seconds. | ||
25 | */ | ||
26 | |||
/*
 * Spin for 'loops' iterations of a minimal decrement-and-branch loop.
 *
 * The asm saves the caller's tr0 into 'dummy', points tr0 at the
 * decrement instruction ($+4), loops until the counter hits zero
 * (compare against r63, the always-zero register), then restores tr0.
 *
 * NOTE(review): a 'loops' value of 0 is decremented to -1 before the
 * first bne test, giving a near-2^32 iteration spin rather than none —
 * presumably callers always pass a positive count; confirm at call sites.
 */
void __delay(int loops)
{
	long long dummy;
	__asm__ __volatile__("gettr tr0, %1\n\t"
			     "pta $+4, tr0\n\t"
			     "addi %0, -1, %0\n\t"
			     "bne %0, r63, tr0\n\t"
			     "ptabs %1, tr0\n\t":"=r"(loops),
			     "=r"(dummy)
			     :"0"(loops));
}
38 | |||
/*
 * Delay for 'usecs' microseconds using the supplied loops-per-jiffy.
 *
 * ((HZ << 32) / 1000000) is a 32.32 fixed-point "jiffies per usec";
 * multiplying by usecs and lpj then taking the top 32 bits yields the
 * loop count for __delay() without a 64-bit division at run time.
 */
void __udelay(unsigned long long usecs, unsigned long lpj)
{
	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
	__delay((long long) usecs >> 32);
}
44 | |||
/*
 * Delay for 'nsecs' nanoseconds; same 32.32 fixed-point scheme as
 * __udelay() but scaled by 10^9.  NOTE(review): the integer division
 * (HZ << 32) / 1000000000 truncates heavily for small HZ, so very short
 * ndelay()s round down — acceptable for a minimum-delay guarantee only
 * if callers treat this as approximate; confirm against users.
 */
void __ndelay(unsigned long long nsecs, unsigned long lpj)
{
	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
	__delay((long long) nsecs >> 32);
}
50 | |||
/* Public microsecond busy-wait, scaled by the boot-calibrated
 * loops_per_jiffy.  Use only for very small delays (< 1 msec). */
void udelay(unsigned long usecs)
{
	__udelay(usecs, loops_per_jiffy);
}
55 | |||
/* Public nanosecond busy-wait, scaled by the boot-calibrated
 * loops_per_jiffy. */
void ndelay(unsigned long nsecs)
{
	__ndelay(nsecs, loops_per_jiffy);
}
60 | |||
diff --git a/arch/sh64/mach-cayman/Makefile b/arch/sh64/mach-cayman/Makefile new file mode 100644 index 000000000000..67a2258bf8c4 --- /dev/null +++ b/arch/sh64/mach-cayman/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # | ||
2 | # Makefile for the Hitachi Cayman specific parts of the kernel | ||
3 | # | ||
4 | # Note! Dependencies are done automagically by 'make dep', which also | ||
5 | # removes any old dependencies. DON'T put your own dependencies here | ||
6 | # unless it's something special (ie not a .c file). | ||
7 | # | ||
8 | |||
9 | obj-y := setup.o irq.o iomap.o | ||
10 | obj-$(CONFIG_HEARTBEAT) += led.o | ||
11 | |||
diff --git a/arch/sh64/mach-cayman/iomap.c b/arch/sh64/mach-cayman/iomap.c new file mode 100644 index 000000000000..d6a538c70709 --- /dev/null +++ b/arch/sh64/mach-cayman/iomap.c | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * arch/sh64/mach-cayman/iomap.c | ||
3 | * | ||
4 | * Cayman iomap interface | ||
5 | * | ||
6 | * Copyright (C) 2004 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/cayman.h> | ||
16 | |||
17 | void __iomem *ioport_map(unsigned long port, unsigned int len) | ||
18 | { | ||
19 | if (port < 0x400) | ||
20 | return (void __iomem *)((port << 2) | smsc_superio_virt); | ||
21 | |||
22 | return (void __iomem *)port; | ||
23 | } | ||
24 | |||
diff --git a/arch/sh64/mach-cayman/irq.c b/arch/sh64/mach-cayman/irq.c new file mode 100644 index 000000000000..f797c84bfdd1 --- /dev/null +++ b/arch/sh64/mach-cayman/irq.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/irq_cayman.c | ||
7 | * | ||
8 | * SH-5 Cayman Interrupt Support | ||
9 | * | ||
10 | * This file handles the board specific parts of the Cayman interrupt system | ||
11 | * | ||
12 | * Copyright (C) 2002 Stuart Menefy | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <asm/irq.h> | ||
17 | #include <asm/page.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <linux/irq.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <asm/cayman.h> | ||
23 | |||
24 | unsigned long epld_virt; | ||
25 | |||
26 | #define EPLD_BASE 0x04002000 | ||
27 | #define EPLD_STATUS_BASE (epld_virt + 0x10) | ||
28 | #define EPLD_MASK_BASE (epld_virt + 0x20) | ||
29 | |||
30 | /* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto | ||
31 | the same SH-5 interrupt */ | ||
32 | |||
/* Fallback handler for the muxed SMSC line: runs only when the demux in
 * cayman_irq_demux() found no pending sub-source, so log it as spurious
 * and let the core retire the interrupt. */
static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id, struct pt_regs *regs)
{
	printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
	return IRQ_NONE;
}
38 | |||
/* Fallback handler for the muxed PCI2 line; as above, only spurious
 * (undemuxable) events reach this handler. */
static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id, struct pt_regs *regs)
{
	printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
	return IRQ_NONE;
}
44 | |||
/* irqaction claiming the muxed SMSC line; SA_INTERRUPT keeps local irqs
 * disabled while the (trivial) spurious handler runs. */
static struct irqaction cayman_action_smsc = {
	.name		= "Cayman SMSC Mux",
	.handler	= cayman_interrupt_smsc,
	.flags		= SA_INTERRUPT,
};
50 | |||
/* irqaction claiming the muxed PCI2 line. */
static struct irqaction cayman_action_pci2 = {
	.name		= "Cayman PCI2 Mux",
	.handler	= cayman_interrupt_pci2,
	.flags		= SA_INTERRUPT,
};
56 | |||
/*
 * Unmask one external (EPLD-routed) interrupt.
 *
 * The EPLD has one mask register per group of eight external irqs,
 * spaced one word apart from EPLD_MASK_BASE.  The read-modify-write of
 * the mask register runs with local irqs disabled so a concurrent
 * interrupt cannot corrupt it.
 */
static void enable_cayman_irq(unsigned int irq)
{
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
	unsigned char bit;

	/* Convert to a 0-based external irq number. */
	irq -= START_EXT_IRQS;
	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
	bit = 1<<(irq % 8);
	local_irq_save(flags);
	mask = ctrl_inl(reg);
	mask |= bit;
	ctrl_outl(mask, reg);
	local_irq_restore(flags);
}
73 | |||
/*
 * Mask one external (EPLD-routed) interrupt; mirror image of
 * enable_cayman_irq() — clears the bit instead of setting it, under the
 * same local-irq protection.  Non-static: also used as the .disable hook
 * and callable from elsewhere.
 */
void disable_cayman_irq(unsigned int irq)
{
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
	unsigned char bit;

	/* Convert to a 0-based external irq number. */
	irq -= START_EXT_IRQS;
	reg = EPLD_MASK_BASE + ((irq / 8) << 2);
	bit = 1<<(irq % 8);
	local_irq_save(flags);
	mask = ctrl_inl(reg);
	mask &= ~bit;
	ctrl_outl(mask, reg);
	local_irq_restore(flags);
}
90 | |||
/* 'ack' hook: mask the irq at the EPLD so it cannot re-fire while being
 * serviced; end_cayman_irq() unmasks it again afterwards. */
static void ack_cayman_irq(unsigned int irq)
{
	disable_cayman_irq(irq);
}
95 | |||
/* 'end' hook: re-enable the irq after handling, unless it was disabled
 * in the meantime or another invocation is still in progress. */
static void end_cayman_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_cayman_irq(irq);
}
101 | |||
/* 'startup' hook: unmask the irq; the EPLD gives no way to report a
 * pending event, so always report none. */
static unsigned int startup_cayman_irq(unsigned int irq)
{
	enable_cayman_irq(irq);
	return 0; /* never anything pending */
}
107 | |||
/* 'shutdown' hook: just mask the irq at the EPLD. */
static void shutdown_cayman_irq(unsigned int irq)
{
	disable_cayman_irq(irq);
}
112 | |||
/* irq_desc handler ops installed for every EPLD-demuxed external irq
 * (see init_cayman_irq()). */
struct hw_interrupt_type cayman_irq_type = {
	.typename	= "Cayman-IRQ",
	.startup	= startup_cayman_irq,
	.shutdown	= shutdown_cayman_irq,
	.enable		= enable_cayman_irq,
	.disable	= disable_cayman_irq,
	.ack		= ack_cayman_irq,
	.end		= end_cayman_irq,
};
122 | |||
123 | int cayman_irq_demux(int evt) | ||
124 | { | ||
125 | int irq = intc_evt_to_irq[evt]; | ||
126 | |||
127 | if (irq == SMSC_IRQ) { | ||
128 | unsigned long status; | ||
129 | int i; | ||
130 | |||
131 | status = ctrl_inl(EPLD_STATUS_BASE) & | ||
132 | ctrl_inl(EPLD_MASK_BASE) & 0xff; | ||
133 | if (status == 0) { | ||
134 | irq = -1; | ||
135 | } else { | ||
136 | for (i=0; i<8; i++) { | ||
137 | if (status & (1<<i)) | ||
138 | break; | ||
139 | } | ||
140 | irq = START_EXT_IRQS + i; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | if (irq == PCI2_IRQ) { | ||
145 | unsigned long status; | ||
146 | int i; | ||
147 | |||
148 | status = ctrl_inl(EPLD_STATUS_BASE + 3 * sizeof(u32)) & | ||
149 | ctrl_inl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff; | ||
150 | if (status == 0) { | ||
151 | irq = -1; | ||
152 | } else { | ||
153 | for (i=0; i<8; i++) { | ||
154 | if (status & (1<<i)) | ||
155 | break; | ||
156 | } | ||
157 | irq = START_EXT_IRQS + (3 * 8) + i; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | return irq; | ||
162 | } | ||
163 | |||
164 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL) | ||
/*
 * Append a human-readable description of 'irq' to buffer 'p' for the
 * /proc interrupt listing; returns the number of characters written.
 * On-chip INTC irqs are delegated to intc_irq_describe(); external irqs
 * 0-7 are the SMSC mux sources and 24-31 the PCI2 mux sources.  Other
 * irqs get no annotation (returns 0).
 */
int cayman_irq_describe(char* p, int irq)
{
	if (irq < NR_INTC_IRQS) {
		return intc_irq_describe(p, irq);
	} else if (irq < NR_INTC_IRQS + 8) {
		return sprintf(p, "(SMSC %d)", irq - NR_INTC_IRQS);
	} else if ((irq >= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) {
		return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24));
	}

	return 0;
}
177 | #endif | ||
178 | |||
/*
 * Board irq init: map the EPLD register window, install the Cayman
 * handler ops on every external irq, and claim the two mux lines so
 * spurious (undemuxable) events are reported rather than lost.
 * On remap failure the external irqs are simply left uninitialised.
 */
void init_cayman_irq(void)
{
	int i;

	epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD");
	if (!epld_virt) {
		printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
		return;
	}

	for (i=0; i<NR_EXT_IRQS; i++) {
		irq_desc[START_EXT_IRQS + i].handler = &cayman_irq_type;
	}

	/* Setup the SMSC interrupt */
	setup_irq(SMSC_IRQ, &cayman_action_smsc);
	setup_irq(PCI2_IRQ, &cayman_action_pci2);
}
diff --git a/arch/sh64/mach-cayman/led.c b/arch/sh64/mach-cayman/led.c new file mode 100644 index 000000000000..8b3cc4c78870 --- /dev/null +++ b/arch/sh64/mach-cayman/led.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * arch/sh64/kernel/led_cayman.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com> | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * Flash the LEDs | ||
10 | */ | ||
11 | #include <asm/io.h> | ||
12 | |||
13 | /* | ||
14 | ** It is supposed these functions to be used for a low level | ||
15 | ** debugging (via Cayman LEDs), hence to be available as soon | ||
16 | ** as possible. | ||
17 | ** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped | ||
18 | ** (this happen when IRQ are initialized... quite late). | ||
19 | ** These triky dependencies should be removed. Temporary, it | ||
20 | ** may be enough to NOP until EPLD is mapped. | ||
21 | */ | ||
22 | |||
23 | extern unsigned long epld_virt; | ||
24 | |||
25 | #define LED_ADDR (epld_virt + 0x008) | ||
26 | #define HDSP2534_ADDR (epld_virt + 0x100) | ||
27 | |||
28 | void mach_led(int position, int value) | ||
29 | { | ||
30 | if (!epld_virt) | ||
31 | return; | ||
32 | |||
33 | if (value) | ||
34 | ctrl_outl(0, LED_ADDR); | ||
35 | else | ||
36 | ctrl_outl(1, LED_ADDR); | ||
37 | |||
38 | } | ||
39 | |||
/*
 * Write one character to the HDSP2534 alphanumeric display at the given
 * position; character registers are one word apart from offset 0xe0.
 * No-op until the EPLD window is mapped (see file header note).
 */
void mach_alphanum(int position, unsigned char value)
{
	if (!epld_virt)
		return;

	ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2));
}
47 | |||
48 | void mach_alphanum_brightness(int setting) | ||
49 | { | ||
50 | ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0); | ||
51 | } | ||
diff --git a/arch/sh64/mach-cayman/setup.c b/arch/sh64/mach-cayman/setup.c new file mode 100644 index 000000000000..c793245629ad --- /dev/null +++ b/arch/sh64/mach-cayman/setup.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mach-cayman/setup.c | ||
7 | * | ||
8 | * SH5 Cayman support | ||
9 | * | ||
10 | * This file handles the architecture-dependent parts of initialization | ||
11 | * | ||
12 | * Copyright David J. Mckay. | ||
13 | * Needs major work! | ||
14 | * | ||
15 | * benedict.gaster@superh.com: 3rd May 2002 | ||
16 | * Added support for ramdisk, removing statically linked romfs at the same time. | ||
17 | * | ||
18 | * lethal@linux-sh.org: 15th May 2003 | ||
19 | * Use the generic procfs cpuinfo interface, just return a valid board name. | ||
20 | */ | ||
21 | |||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/config.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/bootmem.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/seq_file.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/platform.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/irq.h> | ||
34 | #include <asm/page.h> | ||
35 | |||
36 | /* | ||
37 | * Platform Dependent Interrupt Priorities. | ||
38 | */ | ||
39 | |||
40 | /* Using defaults defined in irq.h */ | ||
41 | #define RES NO_PRIORITY /* Disabled */ | ||
42 | #define IR0 IRL0_PRIORITY /* IRLs */ | ||
43 | #define IR1 IRL1_PRIORITY | ||
44 | #define IR2 IRL2_PRIORITY | ||
45 | #define IR3 IRL3_PRIORITY | ||
46 | #define PCA INTA_PRIORITY /* PCI Ints */ | ||
47 | #define PCB INTB_PRIORITY | ||
48 | #define PCC INTC_PRIORITY | ||
49 | #define PCD INTD_PRIORITY | ||
50 | #define SER TOP_PRIORITY | ||
51 | #define ERR TOP_PRIORITY | ||
52 | #define PW0 TOP_PRIORITY | ||
53 | #define PW1 TOP_PRIORITY | ||
54 | #define PW2 TOP_PRIORITY | ||
55 | #define PW3 TOP_PRIORITY | ||
56 | #define DM0 NO_PRIORITY /* DMA Ints */ | ||
57 | #define DM1 NO_PRIORITY | ||
58 | #define DM2 NO_PRIORITY | ||
59 | #define DM3 NO_PRIORITY | ||
60 | #define DAE NO_PRIORITY | ||
61 | #define TU0 TIMER_PRIORITY /* TMU Ints */ | ||
62 | #define TU1 NO_PRIORITY | ||
63 | #define TU2 NO_PRIORITY | ||
64 | #define TI2 NO_PRIORITY | ||
65 | #define ATI NO_PRIORITY /* RTC Ints */ | ||
66 | #define PRI NO_PRIORITY | ||
67 | #define CUI RTC_PRIORITY | ||
68 | #define ERI SCIF_PRIORITY /* SCIF Ints */ | ||
69 | #define RXI SCIF_PRIORITY | ||
70 | #define BRI SCIF_PRIORITY | ||
71 | #define TXI SCIF_PRIORITY | ||
72 | #define ITI TOP_PRIORITY /* WDT Ints */ | ||
73 | |||
74 | /* Setup for the SMSC FDC37C935 */ | ||
75 | #define SMSC_SUPERIO_BASE 0x04000000 | ||
76 | #define SMSC_CONFIG_PORT_ADDR 0x3f0 | ||
77 | #define SMSC_INDEX_PORT_ADDR SMSC_CONFIG_PORT_ADDR | ||
78 | #define SMSC_DATA_PORT_ADDR 0x3f1 | ||
79 | |||
80 | #define SMSC_ENTER_CONFIG_KEY 0x55 | ||
81 | #define SMSC_EXIT_CONFIG_KEY 0xaa | ||
82 | |||
83 | #define SMCS_LOGICAL_DEV_INDEX 0x07 | ||
84 | #define SMSC_DEVICE_ID_INDEX 0x20 | ||
85 | #define SMSC_DEVICE_REV_INDEX 0x21 | ||
86 | #define SMSC_ACTIVATE_INDEX 0x30 | ||
87 | #define SMSC_PRIMARY_BASE_INDEX 0x60 | ||
88 | #define SMSC_SECONDARY_BASE_INDEX 0x62 | ||
89 | #define SMSC_PRIMARY_INT_INDEX 0x70 | ||
90 | #define SMSC_SECONDARY_INT_INDEX 0x72 | ||
91 | |||
92 | #define SMSC_IDE1_DEVICE 1 | ||
93 | #define SMSC_KEYBOARD_DEVICE 7 | ||
94 | #define SMSC_CONFIG_REGISTERS 8 | ||
95 | |||
96 | #define SMSC_SUPERIO_READ_INDEXED(index) ({ \ | ||
97 | outb((index), SMSC_INDEX_PORT_ADDR); \ | ||
98 | inb(SMSC_DATA_PORT_ADDR); }) | ||
99 | #define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \ | ||
100 | outb((index), SMSC_INDEX_PORT_ADDR); \ | ||
101 | outb((val), SMSC_DATA_PORT_ADDR); }) | ||
102 | |||
103 | #define IDE1_PRIMARY_BASE 0x01f0 | ||
104 | #define IDE1_SECONDARY_BASE 0x03f6 | ||
105 | |||
/* Virtual base of the remapped SMSC SuperIO window; set by
 * smsc_superio_setup() and used by the Cayman ioport_map(). */
unsigned long smsc_superio_virt;

/*
 * Platform dependent structures: maps and parms block.
 */
struct resource io_resources[] = {
	/* To be updated with external devices */
};

struct resource kram_resources[] = {
	{ "Kernel code", 0, 0 },	/* These must be last in the array */
	{ "Kernel data", 0, 0 }	/* These must be last in the array */
};

struct resource xram_resources[] = {
	/* To be updated with external devices */
};

struct resource rom_resources[] = {
	/* To be updated with external devices */
};

/* Boot parameter block consumed by the generic sh64 setup code;
 * initial_root_dev 0x0100 selects /dev/ram0. */
struct sh64_platform platform_parms = {
	.readonly_rootfs	= 1,
	.initial_root_dev	= 0x0100,
	.loader_type		= 1,
	.io_res_p		= io_resources,
	.io_res_count		= ARRAY_SIZE(io_resources),
	.kram_res_p		= kram_resources,
	.kram_res_count		= ARRAY_SIZE(kram_resources),
	.xram_res_p		= xram_resources,
	.xram_res_count		= ARRAY_SIZE(xram_resources),
	.rom_res_p		= rom_resources,
	.rom_res_count		= ARRAY_SIZE(rom_resources),
};

/* Per-irq priority table for the on-chip INTC, indexed by irq number;
 * the short names are the priority aliases #defined above. */
int platform_int_priority[NR_INTC_IRQS] = {
	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
};
152 | |||
/*
 * One-time init of the SMSC FDC37C935 SuperIO chip: remap its register
 * window, enter configuration mode, enable the keyboard (and, with
 * CONFIG_IDE, the IDE1) logical device, then return to run mode.
 * Register indices and the enter/exit key protocol are per the SMSC
 * datasheet; the ordering of the writes matters, so leave it alone.
 */
static int __init smsc_superio_setup(void)
{
	unsigned char devid, devrev;

	smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO");
	if (!smsc_superio_virt) {
		panic("Unable to remap SMSC SuperIO\n");
	}

	/* Initially the chip is in run state */
	/* Put it into configuration state */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
	/* NOTE(review): the key is written twice — presumably required by
	 * the chip's config-entry sequence; confirm against the datasheet. */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	/* Read device ID info */
	devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
	devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
	printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);

	/* Select the keyboard device */
	SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);

	/* enable it */
	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);

	/* Select the interrupts */
	/* On a PC keyboard is IRQ1, mouse is IRQ12 */
	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
	SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);

#ifdef CONFIG_IDE
	/*
	 * Only IDE1 exists on the Cayman
	 */

	/* Power it on */
	SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);

	SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);

	/* Program the standard PC primary-channel base addresses,
	 * high byte then low byte. */
	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
				   SMSC_PRIMARY_BASE_INDEX + 0);
	SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
				   SMSC_PRIMARY_BASE_INDEX + 1);

	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
				   SMSC_SECONDARY_BASE_INDEX + 0);
	SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
				   SMSC_SECONDARY_BASE_INDEX + 1);

	/* Route IDE1 to IRQ 14, the standard PC primary-IDE irq. */
	SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);

	/* Switch to the configuration-registers logical device to set up
	 * the GPIO pins used by IDE1. */
	SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
				   SMCS_LOGICAL_DEV_INDEX);

	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2);	/* GP42 = nIDE1_OE */
	SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5);	/* GP45 = IDE1_IRQ */
	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6);	/* GP46 = nIOROP */
	SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7);	/* GP47 = nIOWOP */
#endif

	/* Exit the configuraton state */
	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	return 0;
}
220 | |||
221 | /* This is grotty, but, because kernel is always referenced on the link line | ||
222 | * before any devices, this is safe. | ||
223 | */ | ||
224 | __initcall(smsc_superio_setup); | ||
225 | |||
/* Board setup hook: just record the FPU mode already decided by the
 * early boot code (head.S) in the platform parameter block. */
void __init platform_setup(void)
{
	/* Cayman platform leaves the decision to head.S, for now */
	platform_parms.fpu_flags = fpu_in_use;
}
231 | |||
/* Board monitor hook — placeholder, intentionally empty. */
void __init platform_monitor(void)
{
	/* Nothing yet .. */
}
236 | |||
/* Board memory-reservation hook — placeholder, intentionally empty. */
void __init platform_reserve(void)
{
	/* Nothing yet .. */
}
241 | |||
/* Board name for /proc/cpuinfo and friends. */
const char *get_system_type(void)
{
	return "Hitachi Cayman";
}
246 | |||
diff --git a/arch/sh64/mach-harp/Makefile b/arch/sh64/mach-harp/Makefile new file mode 100644 index 000000000000..63f065bad2f9 --- /dev/null +++ b/arch/sh64/mach-harp/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | # | ||
2 | # Makefile for the ST50 Harp specific parts of the kernel | ||
3 | # | ||
4 | # Note! Dependencies are done automagically by 'make dep', which also | ||
5 | # removes any old dependencies. DON'T put your own dependencies here | ||
6 | # unless it's something special (ie not a .c file). | ||
7 | # | ||
8 | |||
9 | O_TARGET := harp.o | ||
10 | |||
11 | obj-y := setup.o | ||
12 | |||
13 | include $(TOPDIR)/Rules.make | ||
14 | |||
diff --git a/arch/sh64/mach-harp/setup.c b/arch/sh64/mach-harp/setup.c new file mode 100644 index 000000000000..3938a65c4b25 --- /dev/null +++ b/arch/sh64/mach-harp/setup.c | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mach-harp/setup.c | ||
7 | * | ||
8 | * SH-5 Simulator Platform Support | ||
9 | * | ||
10 | * This file handles the architecture-dependent parts of initialization | ||
11 | * | ||
12 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
13 | * | ||
14 | * benedict.gaster@superh.com: 3rd May 2002 | ||
15 | * Added support for ramdisk, removing statically linked romfs at the same time. * | ||
16 | * | ||
17 | * lethal@linux-sh.org: 15th May 2003 | ||
18 | * Use the generic procfs cpuinfo interface, just return a valid board name. | ||
19 | */ | ||
20 | |||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/config.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/platform.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/page.h> | ||
33 | |||
34 | #define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) | ||
35 | |||
36 | /* | ||
37 | * Platform Dependent Interrupt Priorities. | ||
38 | */ | ||
39 | |||
40 | /* Using defaults defined in irq.h */ | ||
41 | #define RES NO_PRIORITY /* Disabled */ | ||
42 | #define IR0 IRL0_PRIORITY /* IRLs */ | ||
43 | #define IR1 IRL1_PRIORITY | ||
44 | #define IR2 IRL2_PRIORITY | ||
45 | #define IR3 IRL3_PRIORITY | ||
46 | #define PCA INTA_PRIORITY /* PCI Ints */ | ||
47 | #define PCB INTB_PRIORITY | ||
48 | #define PCC INTC_PRIORITY | ||
49 | #define PCD INTD_PRIORITY | ||
50 | #define SER TOP_PRIORITY | ||
51 | #define ERR TOP_PRIORITY | ||
52 | #define PW0 TOP_PRIORITY | ||
53 | #define PW1 TOP_PRIORITY | ||
54 | #define PW2 TOP_PRIORITY | ||
55 | #define PW3 TOP_PRIORITY | ||
56 | #define DM0 NO_PRIORITY /* DMA Ints */ | ||
57 | #define DM1 NO_PRIORITY | ||
58 | #define DM2 NO_PRIORITY | ||
59 | #define DM3 NO_PRIORITY | ||
60 | #define DAE NO_PRIORITY | ||
61 | #define TU0 TIMER_PRIORITY /* TMU Ints */ | ||
62 | #define TU1 NO_PRIORITY | ||
63 | #define TU2 NO_PRIORITY | ||
64 | #define TI2 NO_PRIORITY | ||
65 | #define ATI NO_PRIORITY /* RTC Ints */ | ||
66 | #define PRI NO_PRIORITY | ||
67 | #define CUI RTC_PRIORITY | ||
68 | #define ERI SCIF_PRIORITY /* SCIF Ints */ | ||
69 | #define RXI SCIF_PRIORITY | ||
70 | #define BRI SCIF_PRIORITY | ||
71 | #define TXI SCIF_PRIORITY | ||
72 | #define ITI TOP_PRIORITY /* WDT Ints */ | ||
73 | |||
74 | /* | ||
75 | * Platform dependent structures: maps and parms block. | ||
76 | */ | ||
struct resource io_resources[] = {
	/* To be updated with external devices */
};

struct resource kram_resources[] = {
	{ "Kernel code", 0, 0 },	/* These must be last in the array */
	{ "Kernel data", 0, 0 }	/* These must be last in the array */
};

struct resource xram_resources[] = {
	/* To be updated with external devices */
};

struct resource rom_resources[] = {
	/* To be updated with external devices */
};

/* Boot parameter block consumed by the generic sh64 setup code;
 * initial_root_dev 0x0100 selects /dev/ram0. */
struct sh64_platform platform_parms = {
	.readonly_rootfs	= 1,
	.initial_root_dev	= 0x0100,
	.loader_type		= 1,
	.io_res_p		= io_resources,
	.io_res_count		= RES_COUNT(io_resources),
	.kram_res_p		= kram_resources,
	.kram_res_count		= RES_COUNT(kram_resources),
	.xram_res_p		= xram_resources,
	.xram_res_count		= RES_COUNT(xram_resources),
	.rom_res_p		= rom_resources,
	.rom_res_count		= RES_COUNT(rom_resources),
};

/* Per-irq priority table for the on-chip INTC, indexed by irq number;
 * the short names are the priority aliases #defined above. */
int platform_int_priority[NR_INTC_IRQS] = {
	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
};
118 | |||
/* Board setup hook: just record the FPU mode already decided by the
 * early boot code (head.S) in the platform parameter block. */
void __init platform_setup(void)
{
	/* Harp platform leaves the decision to head.S, for now */
	platform_parms.fpu_flags = fpu_in_use;
}
124 | |||
/* Board monitor hook — placeholder, intentionally empty. */
void __init platform_monitor(void)
{
	/* Nothing yet .. */
}
129 | |||
/* Board memory-reservation hook — placeholder, intentionally empty. */
void __init platform_reserve(void)
{
	/* Nothing yet .. */
}
134 | |||
/* Board name for /proc/cpuinfo and friends. */
const char *get_system_type(void)
{
	return "ST50 Harp";
}
139 | |||
diff --git a/arch/sh64/mach-romram/Makefile b/arch/sh64/mach-romram/Makefile new file mode 100644 index 000000000000..02d05c05afa1 --- /dev/null +++ b/arch/sh64/mach-romram/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | # | ||
2 | # Makefile for the SH-5 ROM/RAM specific parts of the kernel | ||
3 | # | ||
4 | # Note! Dependencies are done automagically by 'make dep', which also | ||
5 | # removes any old dependencies. DON'T put your own dependencies here | ||
6 | # unless it's something special (ie not a .c file). | ||
7 | # | ||
8 | |||
9 | O_TARGET := romram.o | ||
10 | |||
11 | obj-y := setup.o | ||
12 | |||
13 | include $(TOPDIR)/Rules.make | ||
14 | |||
diff --git a/arch/sh64/mach-romram/setup.c b/arch/sh64/mach-romram/setup.c new file mode 100644 index 000000000000..a9ba03fc5bed --- /dev/null +++ b/arch/sh64/mach-romram/setup.c | |||
@@ -0,0 +1,142 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mach-romram/setup.c | ||
7 | * | ||
8 | * SH-5 ROM/RAM Platform Support | ||
9 | * | ||
10 | * This file handles the architecture-dependent parts of initialization | ||
11 | * | ||
12 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
13 | * | ||
14 | * benedict.gaster@superh.com: 3rd May 2002 | ||
15 | * Added support for ramdisk, removing statically linked romfs at the same time. * | ||
16 | * | ||
17 | * lethal@linux-sh.org: 15th May 2003 | ||
18 | * Use the generic procfs cpuinfo interface, just return a valid board name. | ||
19 | * | ||
20 | * Sean.McGoogan@superh.com 17th Feb 2004 | ||
21 | * copied from arch/sh64/mach-harp/setup.c | ||
22 | */ | ||
23 | |||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/config.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/bootmem.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/platform.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/page.h> | ||
36 | |||
37 | #define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) | ||
38 | |||
39 | /* | ||
40 | * Platform Dependent Interrupt Priorities. | ||
41 | */ | ||
42 | |||
43 | /* Using defaults defined in irq.h */ | ||
44 | #define RES NO_PRIORITY /* Disabled */ | ||
45 | #define IR0 IRL0_PRIORITY /* IRLs */ | ||
46 | #define IR1 IRL1_PRIORITY | ||
47 | #define IR2 IRL2_PRIORITY | ||
48 | #define IR3 IRL3_PRIORITY | ||
49 | #define PCA INTA_PRIORITY /* PCI Ints */ | ||
50 | #define PCB INTB_PRIORITY | ||
51 | #define PCC INTC_PRIORITY | ||
52 | #define PCD INTD_PRIORITY | ||
53 | #define SER TOP_PRIORITY | ||
54 | #define ERR TOP_PRIORITY | ||
55 | #define PW0 TOP_PRIORITY | ||
56 | #define PW1 TOP_PRIORITY | ||
57 | #define PW2 TOP_PRIORITY | ||
58 | #define PW3 TOP_PRIORITY | ||
59 | #define DM0 NO_PRIORITY /* DMA Ints */ | ||
60 | #define DM1 NO_PRIORITY | ||
61 | #define DM2 NO_PRIORITY | ||
62 | #define DM3 NO_PRIORITY | ||
63 | #define DAE NO_PRIORITY | ||
64 | #define TU0 TIMER_PRIORITY /* TMU Ints */ | ||
65 | #define TU1 NO_PRIORITY | ||
66 | #define TU2 NO_PRIORITY | ||
67 | #define TI2 NO_PRIORITY | ||
68 | #define ATI NO_PRIORITY /* RTC Ints */ | ||
69 | #define PRI NO_PRIORITY | ||
70 | #define CUI RTC_PRIORITY | ||
71 | #define ERI SCIF_PRIORITY /* SCIF Ints */ | ||
72 | #define RXI SCIF_PRIORITY | ||
73 | #define BRI SCIF_PRIORITY | ||
74 | #define TXI SCIF_PRIORITY | ||
75 | #define ITI TOP_PRIORITY /* WDT Ints */ | ||
76 | |||
77 | /* | ||
78 | * Platform dependent structures: maps and parms block. | ||
79 | */ | ||
struct resource io_resources[] = {
	/* To be updated with external devices */
};

struct resource kram_resources[] = {
	{ "Kernel code", 0, 0 },	/* These must be last in the array */
	{ "Kernel data", 0, 0 }	/* These must be last in the array */
};

struct resource xram_resources[] = {
	/* To be updated with external devices */
};

struct resource rom_resources[] = {
	/* To be updated with external devices */
};

/* Boot parameter block consumed by the generic sh64 setup code;
 * initial_root_dev 0x0100 selects /dev/ram0. */
struct sh64_platform platform_parms = {
	.readonly_rootfs	= 1,
	.initial_root_dev	= 0x0100,
	.loader_type		= 1,
	.io_res_p		= io_resources,
	.io_res_count		= RES_COUNT(io_resources),
	.kram_res_p		= kram_resources,
	.kram_res_count		= RES_COUNT(kram_resources),
	.xram_res_p		= xram_resources,
	.xram_res_count		= RES_COUNT(xram_resources),
	.rom_res_p		= rom_resources,
	.rom_res_count		= RES_COUNT(rom_resources),
};

/* Per-irq priority table for the on-chip INTC, indexed by irq number;
 * the short names are the priority aliases #defined above. */
int platform_int_priority[NR_INTC_IRQS] = {
	IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD,	/* IRQ  0- 7 */
	RES, RES, RES, RES, SER, ERR, PW3, PW2,	/* IRQ  8-15 */
	PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES,	/* IRQ 16-23 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 24-31 */
	TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI,	/* IRQ 32-39 */
	RXI, BRI, TXI, RES, RES, RES, RES, RES,	/* IRQ 40-47 */
	RES, RES, RES, RES, RES, RES, RES, RES,	/* IRQ 48-55 */
	RES, RES, RES, RES, RES, RES, RES, ITI,	/* IRQ 56-63 */
};
121 | |||
/* Board setup hook: just record the FPU mode already decided by the
 * early boot code (head.S) in the platform parameter block. */
void __init platform_setup(void)
{
	/* ROM/RAM platform leaves the decision to head.S, for now */
	platform_parms.fpu_flags = fpu_in_use;
}
127 | |||
/* Board monitor hook — placeholder, intentionally empty. */
void __init platform_monitor(void)
{
	/* Nothing yet .. */
}
132 | |||
/* Board memory-reservation hook — placeholder, intentionally empty. */
void __init platform_reserve(void)
{
	/* Nothing yet .. */
}
137 | |||
/* Board name for /proc/cpuinfo and friends. */
const char *get_system_type(void)
{
	return "ROM/RAM";
}
142 | |||
diff --git a/arch/sh64/mach-sim/Makefile b/arch/sh64/mach-sim/Makefile new file mode 100644 index 000000000000..819c4078fdc6 --- /dev/null +++ b/arch/sh64/mach-sim/Makefile | |||
@@ -0,0 +1,14 @@ | |||
#
# Makefile for the SH-5 Simulator specific parts of the kernel
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# NOTE(review): O_TARGET and Rules.make are 2.4-era kbuild constructs;
# confirm they are still honoured by this tree's build system.

O_TARGET := sim.o

obj-y := setup.o

include $(TOPDIR)/Rules.make
diff --git a/arch/sh64/mach-sim/setup.c b/arch/sh64/mach-sim/setup.c new file mode 100644 index 000000000000..a68639cb4e5a --- /dev/null +++ b/arch/sh64/mach-sim/setup.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mach-sim/setup.c | ||
7 | * | ||
8 | * ST50 Simulator Platform Support | ||
9 | * | ||
10 | * This file handles the architecture-dependent parts of initialization | ||
11 | * | ||
12 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
13 | * | ||
14 | * lethal@linux-sh.org: 15th May 2003 | ||
15 | * Use the generic procfs cpuinfo interface, just return a valid board name. | ||
16 | */ | ||
17 | |||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/config.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/bootmem.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <asm/addrspace.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/platform.h> | ||
28 | #include <asm/io.h> | ||
29 | #include <asm/irq.h> | ||
30 | #include <asm/page.h> | ||
31 | |||
32 | #ifdef CONFIG_BLK_DEV_INITRD | ||
33 | #include "../rootfs/rootfs.h" | ||
34 | #endif | ||
35 | |||
36 | static __init void platform_monitor(void); | ||
37 | static __init void platform_setup(void); | ||
38 | static __init void platform_reserve(void); | ||
39 | |||
40 | |||
/*
 * Physical memory size in bytes, derived from the configured size in MB.
 * Fully parenthesised so the macro expands safely inside any larger
 * expression (the original, unparenthesised expansion was an operator-
 * precedence hazard for users of the macro).
 */
#define PHYS_MEMORY	(CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024)

#if (PHYS_MEMORY < P1SEG_FOOTPRINT_RAM)
#error "Invalid kernel configuration. Physical memory below footprint requirements."
#endif

/* Ram-disk (initrd) placement: top of the 4MB P1SEG initrd block. */
#define RAM_DISK_START	(CONFIG_MEMORY_START + P1SEG_INITRD_BLOCK)
#ifdef PLATFORM_ROMFS_SIZE
/* A ROM rootfs image is supplied: size is its page-aligned length. */
#define RAM_DISK_SIZE	(PAGE_ALIGN(PLATFORM_ROMFS_SIZE))	/* Variable Top */
#if ((RAM_DISK_START + RAM_DISK_SIZE) > (CONFIG_MEMORY_START + PHYS_MEMORY))
#error "Invalid kernel configuration. ROM RootFS exceeding physical memory."
#endif
#else
#define RAM_DISK_SIZE	P1SEG_INITRD_BLOCK_SIZE			/* Top of 4MB */
#endif

/* Number of elements in a struct resource array. */
#define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource)))
58 | |||
/*
 * Platform Dependent Interrupt Priorities.
 *
 * Symbolic per-source priority levels, used below to build the
 * platform_int_priority[] table.
 */

/* Using defaults defined in irq.h */
#define RES NO_PRIORITY		/* Disabled */
#define IR0 IRL0_PRIORITY	/* IRLs */
#define IR1 IRL1_PRIORITY
#define IR2 IRL2_PRIORITY
#define IR3 IRL3_PRIORITY
#define PCA INTA_PRIORITY	/* PCI Ints */
#define PCB INTB_PRIORITY
#define PCC INTC_PRIORITY
#define PCD INTD_PRIORITY
#define SER TOP_PRIORITY
#define ERR TOP_PRIORITY
#define PW0 TOP_PRIORITY
#define PW1 TOP_PRIORITY
#define PW2 TOP_PRIORITY
#define PW3 TOP_PRIORITY
#define DM0 NO_PRIORITY		/* DMA Ints */
#define DM1 NO_PRIORITY
#define DM2 NO_PRIORITY
#define DM3 NO_PRIORITY
#define DAE NO_PRIORITY
#define TU0 TIMER_PRIORITY	/* TMU Ints */
#define TU1 NO_PRIORITY
#define TU2 NO_PRIORITY
#define TI2 NO_PRIORITY
#define ATI NO_PRIORITY		/* RTC Ints */
#define PRI NO_PRIORITY
#define CUI RTC_PRIORITY
#define ERI SCIF_PRIORITY	/* SCIF Ints */
#define RXI SCIF_PRIORITY
#define BRI SCIF_PRIORITY
#define TXI SCIF_PRIORITY
#define ITI TOP_PRIORITY	/* WDT Ints */
96 | |||
/*
 * Platform dependent structures: maps and parms block.
 */

/* Memory-mapped I/O resources -- none on the simulator. */
struct resource io_resources[] = {
	/* Nothing yet .. */
};

/* Kernel RAM resources; presumably patched with the real code/data
   addresses at boot by the generic setup code -- confirm against the
   consumer of kram_res_p. */
struct resource kram_resources[] = {
	{ "Kernel code", 0, 0 },	/* These must be last in the array */
	{ "Kernel data", 0, 0 }		/* These must be last in the array */
};

/* Extension RAM resources -- none on the simulator. */
struct resource xram_resources[] = {
	/* Nothing yet .. */
};

/* External ROM device resources -- none on the simulator. */
struct resource rom_resources[] = {
	/* Nothing yet .. */
};
116 | |||
/*
 * Board parameter block handed to the generic sh64 setup code.  The
 * resource table pointers/counts refer to the arrays defined above.
 */
struct sh64_platform platform_parms = {
	.readonly_rootfs = 1,		/* mount the root filesystem read-only */
	.initial_root_dev = 0x0100,	/* presumably MKDEV(1, 0), i.e. /dev/ram0 -- confirm */
	.loader_type = 1,
	.initrd_start = RAM_DISK_START,	/* initrd window computed by the macros above */
	.initrd_size = RAM_DISK_SIZE,
	.io_res_p = io_resources,
	.io_res_count = RES_COUNT(io_resources),
	.kram_res_p = kram_resources,
	.kram_res_count = RES_COUNT(kram_resources),
	.xram_res_p = xram_resources,
	.xram_res_count = RES_COUNT(xram_resources),
	.rom_res_p = rom_resources,
	.rom_res_count = RES_COUNT(rom_resources),
};
132 | |||
133 | int platform_int_priority[NR_IRQS] = { | ||
134 | IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ | ||
135 | RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ | ||
136 | PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ | ||
137 | RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ | ||
138 | TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ | ||
139 | RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ | ||
140 | RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ | ||
141 | RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ | ||
142 | }; | ||
143 | |||
/*
 * Early board setup.  Records the FPU-in-use flag in the platform
 * parameter block; the rest of the decision is left to head.S.
 */
void __init platform_setup(void)
{
	/* Simulator platform leaves the decision to head.S */
	platform_parms.fpu_flags = fpu_in_use;
}
149 | |||
/* Board monitor hook -- no monitor support on the simulator. */
void __init platform_monitor(void)
{
	/* Nothing yet .. */
}
154 | |||
/* Board-specific bootmem reservations -- nothing to reserve here. */
void __init platform_reserve(void)
{
	/* Nothing yet .. */
}
159 | |||
/* Board name reported through the generic procfs cpuinfo interface. */
const char *get_system_type(void)
{
	return "SH-5 Simulator";
}
164 | |||
diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile new file mode 100644 index 000000000000..ff19378ac90a --- /dev/null +++ b/arch/sh64/mm/Makefile | |||
@@ -0,0 +1,44 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2000, 2001 Paolo Alberelli | ||
7 | # Copyright (C) 2003, 2004 Paul Mundt | ||
8 | # | ||
9 | # Makefile for the sh64-specific parts of the Linux memory manager. | ||
10 | # | ||
11 | # Note! Dependencies are done automagically by 'make dep', which also | ||
12 | # removes any old dependencies. DON'T put your own dependencies here | ||
13 | # unless it's something special (ie not a .c file). | ||
14 | # | ||
15 | |||
obj-y := init.o fault.o ioremap.o extable.o cache.o tlbmiss.o tlb.o

obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o

# Special flags for tlbmiss.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
# very few registers have been saved in the exception handler (for speed
# reasons).
# The caller save registers that have been saved and which can be used are
# r2,r3,r4,r5 : argument passing
# r15, r18 : SP and LINK
# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
#         use of them, so it's probably beneficial to performance to save them
#         and have them available for it.
#
# The resources not listed below are callee save, i.e. the compiler is free to
# use any of them and will spill them to the stack itself.

# -ffixed-<reg> tells gcc never to allocate <reg>, keeping tlbmiss.o
# within the register set that entry.S has actually saved.
CFLAGS_tlbmiss.o += -ffixed-r7 \
	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
	-ffixed-r41 -ffixed-r42 -ffixed-r43 \
	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
	-fomit-frame-pointer
44 | |||
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c new file mode 100644 index 000000000000..3b87e25ea773 --- /dev/null +++ b/arch/sh64/mm/cache.c | |||
@@ -0,0 +1,1041 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/cache.c | ||
7 | * | ||
8 | * Original version Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Second version Copyright (C) benedict.gaster@superh.com 2002 | ||
10 | * Third version Copyright Richard.Curnow@superh.com 2003 | ||
11 | * Hacks to third version Copyright (C) 2003 Paul Mundt | ||
12 | */ | ||
13 | |||
14 | /****************************************************************************/ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/threads.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/cache.h> | ||
25 | #include <asm/tlb.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/pgalloc.h> /* for flush_itlb_range */ | ||
30 | |||
31 | #include <linux/proc_fs.h> | ||
32 | |||
33 | /* This function is in entry.S */ | ||
34 | extern unsigned long switch_and_save_asid(unsigned long new_asid); | ||
35 | |||
36 | /* Wired TLB entry for the D-cache */ | ||
37 | static unsigned long long dtlb_cache_slot; | ||
38 | |||
39 | /** | ||
40 | * sh64_cache_init() | ||
41 | * | ||
42 | * This is pretty much just a straightforward clone of the SH | ||
43 | * detect_cpu_and_cache_system(). | ||
44 | * | ||
45 | * This function is responsible for setting up all of the cache | ||
46 | * info dynamically as well as taking care of CPU probing and | ||
47 | * setting up the relevant subtype data. | ||
48 | * | ||
49 | * FIXME: For the time being, we only really support the SH5-101 | ||
50 | * out of the box, and don't support dynamic probing for things | ||
51 | * like the SH5-103 or even cut2 of the SH5-101. Implement this | ||
52 | * later! | ||
53 | */ | ||
int __init sh64_cache_init(void)
{
	/* Fill in cpu_data cache geometry for the SH5-101 and reserve the
	   wired DTLB slot used by the D-cache maintenance code below. */

	/*
	 * First, setup some sane values for the I-cache.
	 */
	cpu_data->icache.ways		= 4;
	cpu_data->icache.sets		= 256;
	cpu_data->icache.linesz		= L1_CACHE_BYTES;

	/*
	 * FIXME: This can probably be cleaned up a bit as well.. for example,
	 * do we really need the way shift _and_ the way_step_shift ?? Judging
	 * by the existing code, I would guess no.. is there any valid reason
	 * why we need to be tracking this around?
	 */
	cpu_data->icache.way_shift	= 13;
	cpu_data->icache.entry_shift	= 5;
	cpu_data->icache.set_shift	= 4;
	cpu_data->icache.way_step_shift	= 16;
	cpu_data->icache.asid_shift	= 2;

	/*
	 * way offset = cache size / associativity, so just don't factor in
	 * associativity in the first place..
	 */
	cpu_data->icache.way_ofs	= cpu_data->icache.sets *
					  cpu_data->icache.linesz;

	cpu_data->icache.asid_mask	= 0x3fc;
	cpu_data->icache.idx_mask	= 0x1fe0;
	cpu_data->icache.epn_mask	= 0xffffe000;
	cpu_data->icache.flags		= 0;

	/*
	 * Next, setup some sane values for the D-cache.
	 *
	 * On the SH5, these are pretty consistent with the I-cache settings,
	 * so we just copy over the existing definitions.. these can be fixed
	 * up later, especially if we add runtime CPU probing.
	 *
	 * Though in the meantime it saves us from having to duplicate all of
	 * the above definitions..
	 */
	cpu_data->dcache		= cpu_data->icache;

	/*
	 * Setup any cache-related flags here
	 */
#if defined(CONFIG_DCACHE_WRITE_THROUGH)
	set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
#elif defined(CONFIG_DCACHE_WRITE_BACK)
	set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
#endif

	/*
	 * We also need to reserve a slot for the D-cache in the DTLB, so we
	 * do this now ..
	 */
	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();

	/* Always succeeds. */
	return 0;
}
116 | |||
#ifdef CONFIG_DCACHE_DISABLED
/* With the D-cache disabled there is nothing to purge or write back:
   stub out every D-cache maintenance operation defined below. */
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#define sh64_dcache_purge_kernel_range(start, end)		do { } while (0)
#define sh64_dcache_wback_current_user_range(start, end)	do { } while (0)
#endif
126 | |||
127 | /*##########################################################################*/ | ||
128 | |||
129 | /* From here onwards, a rewrite of the implementation, | ||
130 | by Richard.Curnow@superh.com. | ||
131 | |||
132 | The major changes in this compared to the old version are; | ||
133 | 1. use more selective purging through OCBP instead of using ALLOCO to purge | ||
134 | by natural replacement. This avoids purging out unrelated cache lines | ||
135 | that happen to be in the same set. | ||
136 | 2. exploit the APIs copy_user_page and clear_user_page better | ||
137 | 3. be more selective about I-cache purging, in particular use invalidate_all | ||
138 | more sparingly. | ||
139 | |||
140 | */ | ||
141 | |||
142 | /*########################################################################## | ||
143 | SUPPORT FUNCTIONS | ||
144 | ##########################################################################*/ | ||
145 | |||
146 | /****************************************************************************/ | ||
147 | /* The following group of functions deal with mapping and unmapping a temporary | ||
148 | page into the DTLB slot that have been set aside for our exclusive use. */ | ||
149 | /* In order to accomplish this, we use the generic interface for adding and | ||
150 | removing a wired slot entry as defined in arch/sh64/mm/tlb.c */ | ||
151 | /****************************************************************************/ | ||
152 | |||
/* IRQ flags saved by sh64_setup_dtlb_cache_slot() and restored by
   sh64_teardown_dtlb_cache_slot().  The file-scope variable makes the
   setup/teardown pair one critical section spanning both calls -- the
   wired slot is therefore NOT re-entrant. */
static unsigned long slot_own_flags;

/* Map the page at 'paddr' to virtual address 'eaddr' under 'asid'
   through the wired DTLB slot reserved for cache maintenance.
   Interrupts stay disabled until the matching teardown call. */
static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
{
	local_irq_save(slot_own_flags);
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

/* Unmap the wired cache-maintenance slot and re-enable interrupts
   (restores the flags saved by the setup call above). */
static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_restore(slot_own_flags);
}
166 | |||
167 | /****************************************************************************/ | ||
168 | |||
169 | #ifndef CONFIG_ICACHE_DISABLED | ||
170 | |||
171 | static void __inline__ sh64_icache_inv_all(void) | ||
172 | { | ||
173 | unsigned long long addr, flag, data; | ||
174 | unsigned int flags; | ||
175 | |||
176 | addr=ICCR0; | ||
177 | flag=ICCR0_ICI; | ||
178 | data=0; | ||
179 | |||
180 | /* Make this a critical section for safety (probably not strictly necessary.) */ | ||
181 | local_irq_save(flags); | ||
182 | |||
183 | /* Without %1 it gets unexplicably wrong */ | ||
184 | asm volatile("getcfg %3, 0, %0\n\t" | ||
185 | "or %0, %2, %0\n\t" | ||
186 | "putcfg %3, 0, %0\n\t" | ||
187 | "synci" | ||
188 | : "=&r" (data) | ||
189 | : "0" (data), "r" (flag), "r" (addr)); | ||
190 | |||
191 | local_irq_restore(flags); | ||
192 | } | ||
193 | |||
static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
#if (NEFF == 32)
	/* Sign-extend the 32-bit effective address into the 64-bit form
	   that the icbi operand expects. */
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
#else
#error "NEFF != 32"
#endif
	aligned_start &= L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
#if (NEFF == 32)
	ullend = (unsigned long long) (signed long long) (signed long) end;
#else
#error "NEFF != 32"
#endif
	/* '<=' : end is inclusive; over-invalidating at either edge of the
	   range is harmless. */
	while (addr <= ullend) {
		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
217 | |||
static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */

	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		/* NOTE(review): the 4x unroll with a 128-byte stride assumes
		   32-byte I-cache lines (L1_CACHE_BYTES == 32) -- confirm if
		   the line size ever changes. */
		asm __volatile__("icbi %0,  0" : : "r" (addr));
		asm __volatile__("icbi %0, 32" : : "r" (addr));
		asm __volatile__("icbi %0, 64" : : "r" (addr));
		asm __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}
262 | |||
263 | /****************************************************************************/ | ||
264 | |||
265 | static void sh64_icache_inv_user_page_range(struct mm_struct *mm, | ||
266 | unsigned long start, unsigned long end) | ||
267 | { | ||
268 | /* Used for invalidating big chunks of I-cache, i.e. assume the range | ||
269 | is whole pages. If 'start' or 'end' is not page aligned, the code | ||
270 | is conservative and invalidates to the ends of the enclosing pages. | ||
271 | This is functionally OK, just a performance loss. */ | ||
272 | |||
273 | /* See the comments below in sh64_dcache_purge_user_range() regarding | ||
274 | the choice of algorithm. However, for the I-cache option (2) isn't | ||
275 | available because there are no physical tags so aliases can't be | ||
276 | resolved. The icbi instruction has to be used through the user | ||
277 | mapping. Because icbi is cheaper than ocbp on a cache hit, it | ||
278 | would be cheaper to use the selective code for a large range than is | ||
279 | possible with the D-cache. Just assume 64 for now as a working | ||
280 | figure. | ||
281 | */ | ||
282 | |||
283 | int n_pages; | ||
284 | |||
285 | if (!mm) return; | ||
286 | |||
287 | n_pages = ((end - start) >> PAGE_SHIFT); | ||
288 | if (n_pages >= 64) { | ||
289 | sh64_icache_inv_all(); | ||
290 | } else { | ||
291 | unsigned long aligned_start; | ||
292 | unsigned long eaddr; | ||
293 | unsigned long after_last_page_start; | ||
294 | unsigned long mm_asid, current_asid; | ||
295 | unsigned long long flags = 0ULL; | ||
296 | |||
297 | mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; | ||
298 | current_asid = get_asid(); | ||
299 | |||
300 | if (mm_asid != current_asid) { | ||
301 | /* Switch ASID and run the invalidate loop under cli */ | ||
302 | local_irq_save(flags); | ||
303 | switch_and_save_asid(mm_asid); | ||
304 | } | ||
305 | |||
306 | aligned_start = start & PAGE_MASK; | ||
307 | after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK); | ||
308 | |||
309 | while (aligned_start < after_last_page_start) { | ||
310 | struct vm_area_struct *vma; | ||
311 | unsigned long vma_end; | ||
312 | vma = find_vma(mm, aligned_start); | ||
313 | if (!vma || (aligned_start <= vma->vm_end)) { | ||
314 | /* Avoid getting stuck in an error condition */ | ||
315 | aligned_start += PAGE_SIZE; | ||
316 | continue; | ||
317 | } | ||
318 | vma_end = vma->vm_end; | ||
319 | if (vma->vm_flags & VM_EXEC) { | ||
320 | /* Executable */ | ||
321 | eaddr = aligned_start; | ||
322 | while (eaddr < vma_end) { | ||
323 | sh64_icache_inv_user_page(vma, eaddr); | ||
324 | eaddr += PAGE_SIZE; | ||
325 | } | ||
326 | } | ||
327 | aligned_start = vma->vm_end; /* Skip to start of next region */ | ||
328 | } | ||
329 | if (mm_asid != current_asid) { | ||
330 | switch_and_save_asid(current_asid); | ||
331 | local_irq_restore(flags); | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | static void sh64_icache_inv_user_small_range(struct mm_struct *mm, | ||
337 | unsigned long start, int len) | ||
338 | { | ||
339 | |||
340 | /* Invalidate a small range of user context I-cache, not necessarily | ||
341 | page (or even cache-line) aligned. */ | ||
342 | |||
343 | unsigned long long eaddr = start; | ||
344 | unsigned long long eaddr_end = start + len; | ||
345 | unsigned long current_asid, mm_asid; | ||
346 | unsigned long long flags; | ||
347 | unsigned long long epage_start; | ||
348 | |||
349 | /* Since this is used inside ptrace, the ASID in the mm context | ||
350 | typically won't match current_asid. We'll have to switch ASID to do | ||
351 | this. For safety, and given that the range will be small, do all | ||
352 | this under cli. | ||
353 | |||
354 | Note, there is a hazard that the ASID in mm->context is no longer | ||
355 | actually associated with mm, i.e. if the mm->context has started a | ||
356 | new cycle since mm was last active. However, this is just a | ||
357 | performance issue: all that happens is that we invalidate lines | ||
358 | belonging to another mm, so the owning process has to refill them | ||
359 | when that mm goes live again. mm itself can't have any cache | ||
360 | entries because there will have been a flush_cache_all when the new | ||
361 | mm->context cycle started. */ | ||
362 | |||
363 | /* Align to start of cache line. Otherwise, suppose len==8 and start | ||
364 | was at 32N+28 : the last 4 bytes wouldn't get invalidated. */ | ||
365 | eaddr = start & L1_CACHE_ALIGN_MASK; | ||
366 | eaddr_end = start + len; | ||
367 | |||
368 | local_irq_save(flags); | ||
369 | mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; | ||
370 | current_asid = switch_and_save_asid(mm_asid); | ||
371 | |||
372 | epage_start = eaddr & PAGE_MASK; | ||
373 | |||
374 | while (eaddr < eaddr_end) | ||
375 | { | ||
376 | asm __volatile__("icbi %0, 0" : : "r" (eaddr)); | ||
377 | eaddr += L1_CACHE_BYTES; | ||
378 | } | ||
379 | switch_and_save_asid(current_asid); | ||
380 | local_irq_restore(flags); | ||
381 | } | ||
382 | |||
static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another processes I-cache entries : no worries, just a
	   performance drop for him. */
	aligned_start = start & L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
	while (addr < ull_end) {
		asm __volatile__ ("icbi %0, 0" : : "r" (addr));
		/* NOTE(review): the two nops look like deliberate issue
		   padding between icbi's -- intent undocumented; confirm
		   before removing. */
		asm __volatile__ ("nop");
		asm __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
410 | |||
411 | #endif /* !CONFIG_ICACHE_DISABLED */ | ||
412 | |||
413 | /****************************************************************************/ | ||
414 | |||
415 | #ifndef CONFIG_DCACHE_DISABLED | ||
416 | |||
417 | /* Buffer used as the target of alloco instructions to purge data from cache | ||
418 | sets by natural eviction. -- RPC */ | ||
419 | #define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4) | ||
420 | static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, }; | ||
421 | |||
422 | /****************************************************************************/ | ||
423 | |||
424 | static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) | ||
425 | { | ||
426 | /* Purge all ways in a particular block of sets, specified by the base | ||
427 | set number and number of sets. Can handle wrap-around, if that's | ||
428 | needed. */ | ||
429 | |||
430 | int dummy_buffer_base_set; | ||
431 | unsigned long long eaddr, eaddr0, eaddr1; | ||
432 | int j; | ||
433 | int set_offset; | ||
434 | |||
435 | dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift; | ||
436 | set_offset = sets_to_purge_base - dummy_buffer_base_set; | ||
437 | |||
438 | for (j=0; j<n_sets; j++, set_offset++) { | ||
439 | set_offset &= (cpu_data->dcache.sets - 1); | ||
440 | eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift); | ||
441 | |||
442 | /* Do one alloco which hits the required set per cache way. For | ||
443 | write-back mode, this will purge the #ways resident lines. There's | ||
444 | little point unrolling this loop because the allocos stall more if | ||
445 | they're too close together. */ | ||
446 | eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; | ||
447 | for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) { | ||
448 | asm __volatile__ ("alloco %0, 0" : : "r" (eaddr)); | ||
449 | asm __volatile__ ("synco"); /* TAKum03020 */ | ||
450 | } | ||
451 | |||
452 | eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; | ||
453 | for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) { | ||
454 | /* Load from each address. Required because alloco is a NOP if | ||
455 | the cache is write-through. Write-through is a config option. */ | ||
456 | if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags))) | ||
457 | *(volatile unsigned char *)(int)eaddr; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | /* Don't use OCBI to invalidate the lines. That costs cycles directly. | ||
462 | If the dummy block is just left resident, it will naturally get | ||
463 | evicted as required. */ | ||
464 | |||
465 | return; | ||
466 | } | ||
467 | |||
468 | /****************************************************************************/ | ||
469 | |||
470 | static void sh64_dcache_purge_all(void) | ||
471 | { | ||
472 | /* Purge the entire contents of the dcache. The most efficient way to | ||
473 | achieve this is to use alloco instructions on a region of unused | ||
474 | memory equal in size to the cache, thereby causing the current | ||
475 | contents to be discarded by natural eviction. The alternative, | ||
476 | namely reading every tag, setting up a mapping for the corresponding | ||
477 | page and doing an OCBP for the line, would be much more expensive. | ||
478 | */ | ||
479 | |||
480 | sh64_dcache_purge_sets(0, cpu_data->dcache.sets); | ||
481 | |||
482 | return; | ||
483 | |||
484 | } | ||
485 | |||
486 | /****************************************************************************/ | ||
487 | |||
static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
{
	/* Purge the range of addresses [start,end] from the D-cache.  The
	   addresses lie in the superpage mapping.  There's no harm if we
	   overpurge at either end - just a small performance loss. */
	unsigned long long ullend, addr, aligned_start;
#if (NEFF == 32)
	/* Sign-extend the 32-bit effective address into the 64-bit form
	   that the ocbp operand expects. */
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
#else
#error "NEFF != 32"
#endif
	aligned_start &= L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
#if (NEFF == 32)
	ullend = (unsigned long long) (signed long long) (signed long) end;
#else
#error "NEFF != 32"
#endif
	/* '<=' : end is inclusive (see comment above). */
	while (addr <= ullend) {
		asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
	return;
}
512 | |||
/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
   anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
{
	/* Purge the physical page 'paddr' from the cache.  It's known that any
	   cache lines requiring attention have the same page colour as the
	   address 'eaddr'.

	   This relies on the fact that the D-cache matches on physical tags
	   when no virtual tag matches.  So we create an alias for the original
	   page and purge through that.  (Alternatively, we could have done
	   this by switching ASID to match the original mapping and purged
	   through that, but that involves ASID switching cost + probably a
	   TLBMISS + refill anyway.)
	*/

	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	/* Pick the magic alias with the same cache colour as 'eaddr'. */
	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */

	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	/* Unmap the alias and re-enable interrupts. */
	sh64_teardown_dtlb_cache_slot();
}
553 | |||
554 | /****************************************************************************/ | ||
555 | |||
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	/* Purge a page given its physical start address, by creating a
	   temporary 1 page mapping and purging across that.  Even if we know
	   the virtual address (& vma or mm) of the page, the method here is
	   more elegant because it avoids issues of coping with page faults on
	   the purge instructions (i.e. no special-case code required in the
	   critical path in the TLB miss handling). */

	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */

	/* Walk every possible cache colour for the page: one iteration per
	   synonym (2**CACHE_OC_N_SYNBITS of them), each with its own
	   temporary mapping. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}
586 | |||
/* Purge from the D-cache the user page mapped at 'eaddr' in 'mm', by
   resolving its physical address through the page tables and purging the
   matching colour alias.  Silently does nothing if the address has no
   present mapping (nothing can be cached for it through this eaddr). */
static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long paddr;

	/* NOTE : all the callers of this have mm->page_table_lock held, so the
	   following page table traversal is safe even on SMP/pre-emptible. */

	if (!mm) return; /* No way to find physical address of page */
	pgd = pgd_offset(mm, eaddr);
	if (pgd_bad(*pgd)) return;

	pmd = pmd_offset(pgd, eaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd)) return;

	pte = pte_offset_kernel(pmd, eaddr);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry)) return;

	paddr = pte_val(entry) & PAGE_MASK;

	/* Only lines with the same colour as 'eaddr' can hold this page. */
	sh64_dcache_purge_coloured_phy_page(paddr, eaddr);

}
614 | /****************************************************************************/ | ||
615 | |||
/* Purge the D-cache for the user address range [start,end) of 'mm'.
   Small ranges are purged page-by-page through temporary alias mappings;
   large ranges (>= 64 pages) fall back to purging the whole D-cache. */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* There are at least 5 choices for the implementation of this, with
	   pros (+), cons(-), comments(*):

	   1. ocbp each line in the range through the original user's ASID
	      + no lines spuriously evicted
	      - tlbmiss handling (must either handle faults on demand => extra
		special-case code in tlbmiss critical path), or map the page in
		advance (=> flush_tlb_range in advance to avoid multiple hits)
	      - ASID switching
	      - expensive for large ranges

	   2. temporarily map each page in the range to a special effective
	      address and ocbp through the temporary mapping; relies on the
	      fact that SH-5 OCB* always do TLB lookup and match on ptags (they
	      never look at the etags)
	      + no spurious evictions
	      - expensive for large ranges
	      * surely cheaper than (1)

	   3. walk all the lines in the cache, check the tags, if a match
	      occurs create a page mapping to ocbp the line through
	      + no spurious evictions
	      - tag inspection overhead
	      - (especially for small ranges)
	      - potential cost of setting up/tearing down page mapping for
		every line that matches the range
	      * cost partly independent of range size

	   4. walk all the lines in the cache, check the tags, if a match
	      occurs use 4 * alloco to purge the line (+3 other probably
	      innocent victims) by natural eviction
	      + no tlb mapping overheads
	      - spurious evictions
	      - tag inspection overhead

	   5. implement like flush_cache_all
	      + no tag inspection overhead
	      - spurious evictions
	      - bad for small ranges

	   (1) can be ruled out as more expensive than (2).  (2) appears best
	   for small ranges.  The choice between (3), (4) and (5) for large
	   ranges and the range size for the large/small boundary need
	   benchmarking to determine.

	   For now use approach (2) for small ranges and (5) for large ones.

	*/

	int n_pages;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		/* 'Large' range: approach (5).  The '#else' arm below is an
		   unused implementation of approach (3), kept for reference. */
#if 1
		sh64_dcache_purge_all();
#else
		unsigned long long set, way;
		unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
		for (set = 0; set < cpu_data->dcache.sets; set++) {
			unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
			for (way = 0; way < cpu_data->dcache.ways; way++) {
				unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
				unsigned long long tag0;
				unsigned long line_valid;

				asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
				line_valid = tag0 & SH_CACHE_VALID;
				if (line_valid) {
					unsigned long cache_asid;
					unsigned long epn;

					cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
					/* The next line needs some
					   explanation.  The virtual tags
					   encode bits [31:13] of the virtual
					   address, bit [12] of the 'tag' being
					   implied by the cache set index. */
					epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);

					if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
						/* TODO : could optimise this
						   call by batching multiple
						   adjacent sets together. */
						sh64_dcache_purge_sets(set, 1);
						break; /* Don't waste time inspecting other ways for this set */
					}
				}
			}
		}
#endif
	} else {
		/* 'Small' range: approach (2), one purge per page. */
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long last_page_start;

		aligned_start = start & PAGE_MASK;
		/* 'end' is 1 byte beyond the end of the range */
		last_page_start = (end - 1) & PAGE_MASK;

		eaddr = aligned_start;
		while (eaddr <= last_page_start) {
			sh64_dcache_purge_user_page(mm, eaddr);
			eaddr += PAGE_SIZE;
		}
	}
	return;
}
727 | |||
/* Write back (without invalidating) the D-cache lines covering the current
   process's user address range [start,end). */
static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
{
	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just wback over the range using the natural addresses.  TLB miss
	   handling will be OK (TBC) : the range has just been written to by
	   the signal frame setup code, so the PTEs must exist.

	   Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
	   it doesn't matter, even if the pid->ASID mapping changes whilst
	   we're away.  In that case the cache will have been flushed when the
	   mapping was renewed.  So the writebacks below will be nugatory (and
	   we'll doubtless have to fault the TLB entry/ies in again with the
	   new ASID), but it's a rare case.
	*/
	aligned_start = start & L1_CACHE_ALIGN_MASK;
	addr = aligned_start;
	/* OCBWB = write back one cache line, leaving it valid. */
	while (addr < ull_end) {
		asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
754 | |||
755 | /****************************************************************************/ | ||
756 | |||
757 | /* These *MUST* lie in an area of virtual address space that's otherwise unused. */ | ||
758 | #define UNIQUE_EADDR_START 0xe0000000UL | ||
759 | #define UNIQUE_EADDR_END 0xe8000000UL | ||
760 | |||
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
{
	/* Given a physical address paddr, and a user virtual address
	   user_eaddr which will eventually be mapped to it, create a one-off
	   kernel-private eaddr mapped to the same paddr.  This is used for
	   creating special destination pages for copy_user_page and
	   clear_user_page.

	   The caller is expected to release the mapping again via
	   sh64_teardown_dtlb_cache_slot() when done with the returned
	   address. */

	/* Next unused slot in the [UNIQUE_EADDR_START,UNIQUE_EADDR_END)
	   window; persists across calls. */
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	/* Window exhausted: purge everything so stale lines for recycled
	   addresses can't survive, then start over. */
	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	/* Force the returned address to have the same cache colour as the
	   eventual user mapping. */
	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	/* Advance past all synonyms of this slot. */
	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}
784 | |||
785 | /****************************************************************************/ | ||
786 | |||
/* Copy a page to 'to' through a temporary kernel alias that has the same
   cache colour as the user address where 'to' will be mapped, so the user
   sees the data without bouncing through memory. */
static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
{
	void *coloured_to;

	/* Discard any existing cache entries of the wrong colour.  These are
	   present quite often, if the kernel has recently used the page
	   internally, then given it up, then it's been allocated to the user.
	*/
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);

	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
	sh64_page_copy(from, coloured_to);

	/* Release the temporary mapping set up by sh64_make_unique_eaddr. */
	sh64_teardown_dtlb_cache_slot();
}
802 | |||
/* Zero a page through a temporary kernel alias with the same cache colour
   as the user address where 'to' will be mapped (see the copy variant
   above for rationale). */
static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/* Discard any existing kernel-originated lines of the wrong colour (as
	   above) */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);

	coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
	sh64_page_clear(coloured_to);

	/* Release the temporary mapping set up by sh64_make_unique_eaddr. */
	sh64_teardown_dtlb_cache_slot();
}
816 | |||
817 | #endif /* !CONFIG_DCACHE_DISABLED */ | ||
818 | |||
819 | /****************************************************************************/ | ||
820 | |||
821 | /*########################################################################## | ||
822 | EXTERNALLY CALLABLE API. | ||
823 | ##########################################################################*/ | ||
824 | |||
825 | /* These functions are described in Documentation/cachetlb.txt. | ||
826 | Each one of these functions varies in behaviour depending on whether the | ||
827 | I-cache and/or D-cache are configured out. | ||
828 | |||
829 | Note that the Linux term 'flush' corresponds to what is termed 'purge' in | ||
830 | the sh/sh64 jargon for the D-cache, i.e. write back dirty data then | ||
831 | invalidate the cache lines, and 'invalidate' for the I-cache. | ||
832 | */ | ||
833 | |||
834 | #undef FLUSH_TRACE | ||
835 | |||
void flush_cache_all(void)
{
	/* Invalidate the entire contents of both caches, after writing back to
	   memory any dirty data from the D-cache. */
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}
843 | |||
844 | /****************************************************************************/ | ||
845 | |||
void flush_cache_mm(struct mm_struct *mm)
{
	/* Invalidate an entire user-address space from both caches, after
	   writing back dirty data (e.g. for shared mmap etc).

	   Note: 'mm' itself is not consulted - a full D-cache purge is done
	   regardless (see below). */

	/* This could be coded selectively by inspecting all the tags then
	   doing 4*alloco on any set containing a match (as for
	   flush_cache_range), but fork/exit/execve (where this is called from)
	   are expensive anyway. */

	/* Have to do a purge here, despite the comments re I-cache below.
	   There could be odd-coloured dirty data associated with the mm still
	   in the cache - if this gets written out through natural eviction
	   after the kernel has reused the page there will be chaos.
	*/

	sh64_dcache_purge_all();

	/* The mm being torn down won't ever be active again, so any Icache
	   lines tagged with its ASID won't be visible for the rest of the
	   lifetime of this ASID cycle.  Before the ASID gets reused, there
	   will be a flush_cache_all.  Hence we don't need to touch the
	   I-cache.  This is similar to the lack of action needed in
	   flush_tlb_mm - see fault.c. */
}
871 | |||
872 | /****************************************************************************/ | ||
873 | |||
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Invalidate (from both caches) the range [start,end) of virtual
	   addresses from the user address space specified by mm, after writing
	   back any dirty data.

	   Note(1), 'end' is 1 byte beyond the end of the range to flush.

	   Note(2), this is called with mm->page_table_lock held.*/

	/* D-cache first, then I-cache - both helpers take the same
	   half-open [start,end) range. */
	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}
890 | |||
891 | /****************************************************************************/ | ||
892 | |||
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
{
	/* Invalidate any entries in either cache for the vma within the user
	   address space vma->vm_mm for the page starting at virtual address
	   'eaddr'.  This seems to be used primarily in breaking COW.  Note,
	   the I-cache must be searched too in case the page in question is
	   both writable and being executed from (e.g. stack trampolines.)

	   Note(1), this is called with mm->page_table_lock held.
	*/

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	/* Only executable mappings can have stale I-cache lines. */
	if (vma->vm_flags & VM_EXEC) {
		sh64_icache_inv_user_page(vma, eaddr);
	}
}
910 | |||
911 | /****************************************************************************/ | ||
912 | |||
913 | #ifndef CONFIG_DCACHE_DISABLED | ||
914 | |||
void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
{
	/* 'from' and 'to' are kernel virtual addresses (within the superpage
	   mapping of the physical RAM).  'address' is the user virtual address
	   where the copy 'to' will be mapped after.  This allows a custom
	   mapping to be used to ensure that the new copy is placed in the
	   right cache sets for the user to see it without having to bounce it
	   out via memory.  Note however : the call to flush_page_to_ram in
	   (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
	   very important case!

	   TBD : can we guarantee that on every call, any cache entries for
	   'from' are in the same colour sets as 'address' also?  i.e. is this
	   always used just to deal with COW?  (I suspect not). */

	/* There are two possibilities here for when the page 'from' was last accessed:
	   * by the kernel : this is OK, no purge required.
	   * by the/a user (e.g. for break_COW) : need to purge.

	   If the potential user mapping at 'address' is the same colour as
	   'from' there is no need to purge any cache lines from the 'from'
	   page mapped into cache sets of colour 'address'.  (The copy will be
	   accessing the page through 'from').
	*/

	/* Source and destination colours differ => possible stale lines of
	   the destination colour for 'from'; purge them first. */
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
	}

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
		/* No synonym problem on destination */
		sh64_page_copy(from, to);
	} else {
		/* Destination colour mismatch: copy via a coloured alias. */
		sh64_copy_user_page_coloured(to, from, address);
	}

	/* Note, don't need to flush 'from' page from the cache again - it's
	   done anyway by the generic code */
}
954 | |||
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	/* 'to' is a kernel virtual address (within the superpage
	   mapping of the physical RAM).  'address' is the user virtual address
	   where the 'to' page will be mapped after.  This allows a custom
	   mapping to be used to ensure that the new copy is placed in the
	   right cache sets for the user to see it without having to bounce it
	   out via memory.
	*/

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
		/* No synonym problem on destination */
		sh64_page_clear(to);
	} else {
		/* Colour mismatch: clear via a coloured alias instead. */
		sh64_clear_user_page_coloured(to, address);
	}
}
972 | |||
973 | #endif /* !CONFIG_DCACHE_DISABLED */ | ||
974 | |||
975 | /****************************************************************************/ | ||
976 | |||
void flush_dcache_page(struct page *page)
{
	/* Write back and invalidate all D-cache lines for the physical frame
	   backing 'page'.  The wmb() orders the purge's writebacks before any
	   subsequent stores by the caller. */
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}
982 | |||
983 | /****************************************************************************/ | ||
984 | |||
void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Flush the range [start,end] of kernel virtual address space from
	   the I-cache.  The corresponding range must be purged from the
	   D-cache also because the SH-5 doesn't have cache snooping between
	   the caches.  The addresses will be visible through the superpage
	   mapping, therefore it's guaranteed that there no cache entries for
	   the range in cache sets of the wrong colour.

	   Primarily used for cohering the I-cache after a module has
	   been loaded. */

	/* We also make sure to purge the same range from the D-cache since
	   flush_page_to_ram() won't be doing this for us! */

	sh64_dcache_purge_kernel_range(start, end);
	/* Ensure the D-cache writebacks complete before invalidating the
	   I-cache, so refetched instructions see the new data. */
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}
1004 | |||
1005 | /****************************************************************************/ | ||
1006 | |||
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	/* Flush the range of user (defined by vma->vm_mm) address space
	   starting at 'addr' for 'len' bytes from the cache.  The range does
	   not straddle a page boundary, the unique physical page containing
	   the range is 'page'.  This seems to be used mainly for invalidating
	   an address range following a poke into the program text through the
	   ptrace() call from another process (e.g. for BRK instruction
	   insertion). */

	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	/* Full barrier: make the purged data visible before touching the
	   I-cache below. */
	mb();

	/* Only executable mappings can have I-cache lines to invalidate. */
	if (vma->vm_flags & VM_EXEC) {
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
	}
}
1025 | |||
1026 | /*########################################################################## | ||
1027 | ARCH/SH64 PRIVATE CALLABLE API. | ||
1028 | ##########################################################################*/ | ||
1029 | |||
void flush_cache_sigtramp(unsigned long start, unsigned long end)
{
	/* For the address range [start,end), write back the data from the
	   D-cache and invalidate the corresponding region of the I-cache for
	   the current process.  Used to flush signal trampolines on the stack
	   to make them executable. */

	sh64_dcache_wback_current_user_range(start, end);
	/* Order the writebacks before the I-cache invalidate so the CPU
	   refetches the freshly written trampoline code. */
	wmb();
	sh64_icache_inv_current_user_range(start, end);
}
1041 | |||
diff --git a/arch/sh64/mm/extable.c b/arch/sh64/mm/extable.c new file mode 100644 index 000000000000..9da50e28b3fa --- /dev/null +++ b/arch/sh64/mm/extable.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/extable.c | ||
7 | * | ||
8 | * Copyright (C) 2003 Richard Curnow | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * | ||
11 | * Cloned from the 2.5 SH version.. | ||
12 | */ | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/rwsem.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | |||
18 | extern unsigned long copy_user_memcpy, copy_user_memcpy_end; | ||
19 | extern void __copy_user_fixup(void); | ||
20 | |||
/* Catch-all fixup entry for any fault inside the fast copy_user memcpy.
   Matching is done by address range (see check_exception_ranges()), so the
   .insn field is deliberately left unset. */
static const struct exception_table_entry __copy_user_fixup_ex = {
	.fixup = (unsigned long)&__copy_user_fixup,
};
24 | |||
25 | /* Some functions that may trap due to a bad user-mode address have too many loads | ||
26 | and stores in them to make it at all practical to label each one and put them all in | ||
27 | the main exception table. | ||
28 | |||
29 | In particular, the fast memcpy routine is like this. Its fixup is just to fall back | ||
30 | to a slow byte-at-a-time copy, which is handled the conventional way. So it's functionally | ||
31 | OK to just handle any trap occurring in the fast memcpy with that fixup. */ | ||
32 | static const struct exception_table_entry *check_exception_ranges(unsigned long addr) | ||
33 | { | ||
34 | if ((addr >= (unsigned long)©_user_memcpy) && | ||
35 | (addr <= (unsigned long)©_user_memcpy_end)) | ||
36 | return &__copy_user_fixup_ex; | ||
37 | |||
38 | return NULL; | ||
39 | } | ||
40 | |||
41 | /* Simple binary search */ | ||
42 | const struct exception_table_entry * | ||
43 | search_extable(const struct exception_table_entry *first, | ||
44 | const struct exception_table_entry *last, | ||
45 | unsigned long value) | ||
46 | { | ||
47 | const struct exception_table_entry *mid; | ||
48 | |||
49 | mid = check_exception_ranges(value); | ||
50 | if (mid) | ||
51 | return mid; | ||
52 | |||
53 | while (first <= last) { | ||
54 | long diff; | ||
55 | |||
56 | mid = (last - first) / 2 + first; | ||
57 | diff = mid->insn - value; | ||
58 | if (diff == 0) | ||
59 | return mid; | ||
60 | else if (diff < 0) | ||
61 | first = mid+1; | ||
62 | else | ||
63 | last = mid-1; | ||
64 | } | ||
65 | |||
66 | return NULL; | ||
67 | } | ||
68 | |||
69 | int fixup_exception(struct pt_regs *regs) | ||
70 | { | ||
71 | const struct exception_table_entry *fixup; | ||
72 | |||
73 | fixup = search_exception_tables(regs->pc); | ||
74 | if (fixup) { | ||
75 | regs->pc = fixup->fixup; | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c new file mode 100644 index 000000000000..a24932881dbb --- /dev/null +++ b/arch/sh64/mm/fault.c | |||
@@ -0,0 +1,601 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/fault.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes) | ||
10 | * Copyright (C) 2003 Paul Mundt | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/signal.h> | ||
15 | #include <linux/rwsem.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/mman.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/smp.h> | ||
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | |||
28 | #include <asm/system.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/tlb.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/pgalloc.h> | ||
33 | #include <asm/mmu_context.h> | ||
34 | #include <asm/registers.h> /* required by inline asm statements */ | ||
35 | |||
36 | #if defined(CONFIG_SH64_PROC_TLB) | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/proc_fs.h> | ||
39 | /* Count numbers of tlb refills in each region */ | ||
40 | static unsigned long long calls_to_update_mmu_cache = 0ULL; | ||
41 | static unsigned long long calls_to_flush_tlb_page = 0ULL; | ||
42 | static unsigned long long calls_to_flush_tlb_range = 0ULL; | ||
43 | static unsigned long long calls_to_flush_tlb_mm = 0ULL; | ||
44 | static unsigned long long calls_to_flush_tlb_all = 0ULL; | ||
45 | unsigned long long calls_to_do_slow_page_fault = 0ULL; | ||
46 | unsigned long long calls_to_do_fast_page_fault = 0ULL; | ||
47 | |||
48 | /* Count size of ranges for flush_tlb_range */ | ||
49 | static unsigned long long flush_tlb_range_1 = 0ULL; | ||
50 | static unsigned long long flush_tlb_range_2 = 0ULL; | ||
51 | static unsigned long long flush_tlb_range_3_4 = 0ULL; | ||
52 | static unsigned long long flush_tlb_range_5_7 = 0ULL; | ||
53 | static unsigned long long flush_tlb_range_8_11 = 0ULL; | ||
54 | static unsigned long long flush_tlb_range_12_15 = 0ULL; | ||
55 | static unsigned long long flush_tlb_range_16_up = 0ULL; | ||
56 | |||
57 | static unsigned long long page_not_present = 0ULL; | ||
58 | |||
59 | #endif | ||
60 | |||
61 | extern void die(const char *,struct pt_regs *,long); | ||
62 | |||
63 | #define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" ) | ||
64 | #define PPROT(flag) PFLAG(pgprot_val(prot),flag) | ||
65 | |||
/* Debug helper: dump a pgprot value and the names of the protection flags
   set in it (via the PPROT/PFLAG macros above). */
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n",pgprot_val(prot));

	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}
73 | |||
/* Debug helper: dump a vma's extent, page protections and flags. */
static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end 0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
82 | |||
/* Debug helper: identify the task involved in a fault by its pid. */
static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", tsk->pid);
}
87 | |||
88 | static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address) | ||
89 | { | ||
90 | pgd_t *dir; | ||
91 | pmd_t *pmd; | ||
92 | pte_t *pte; | ||
93 | pte_t entry; | ||
94 | |||
95 | dir = pgd_offset(mm, address); | ||
96 | if (pgd_none(*dir)) { | ||
97 | return NULL; | ||
98 | } | ||
99 | |||
100 | pmd = pmd_offset(dir, address); | ||
101 | if (pmd_none(*pmd)) { | ||
102 | return NULL; | ||
103 | } | ||
104 | |||
105 | pte = pte_offset_kernel(pmd, address); | ||
106 | entry = *pte; | ||
107 | |||
108 | if (pte_none(entry)) { | ||
109 | return NULL; | ||
110 | } | ||
111 | if (!pte_present(entry)) { | ||
112 | return NULL; | ||
113 | } | ||
114 | |||
115 | return pte; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * This routine handles page faults. It determines the address, | ||
120 | * and the problem, and then passes it off to one of the appropriate | ||
121 | * routines. | ||
122 | */ | ||
123 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | ||
124 | unsigned long textaccess, unsigned long address) | ||
125 | { | ||
126 | struct task_struct *tsk; | ||
127 | struct mm_struct *mm; | ||
128 | struct vm_area_struct * vma; | ||
129 | const struct exception_table_entry *fixup; | ||
130 | pte_t *pte; | ||
131 | |||
132 | #if defined(CONFIG_SH64_PROC_TLB) | ||
133 | ++calls_to_do_slow_page_fault; | ||
134 | #endif | ||
135 | |||
136 | /* SIM | ||
137 | * Note this is now called with interrupts still disabled | ||
138 | * This is to cope with being called for a missing IO port | ||
139 | * address with interrupts disabled. This should be fixed as | ||
140 | * soon as we have a better 'fast path' miss handler. | ||
141 | * | ||
142 | * Plus take care how you try and debug this stuff. | ||
143 | * For example, writing debug data to a port which you | ||
144 | * have just faulted on is not going to work. | ||
145 | */ | ||
146 | |||
147 | tsk = current; | ||
148 | mm = tsk->mm; | ||
149 | |||
150 | /* Not an IO address, so reenable interrupts */ | ||
151 | local_irq_enable(); | ||
152 | |||
153 | /* | ||
154 | * If we're in an interrupt or have no user | ||
155 | * context, we must not take the fault.. | ||
156 | */ | ||
157 | if (in_interrupt() || !mm) | ||
158 | goto no_context; | ||
159 | |||
160 | /* TLB misses upon some cache flushes get done under cli() */ | ||
161 | down_read(&mm->mmap_sem); | ||
162 | |||
163 | vma = find_vma(mm, address); | ||
164 | |||
165 | if (!vma) { | ||
166 | #ifdef DEBUG_FAULT | ||
167 | print_task(tsk); | ||
168 | printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", | ||
169 | __FUNCTION__,__LINE__, | ||
170 | address,regs->pc,textaccess,writeaccess); | ||
171 | show_regs(regs); | ||
172 | #endif | ||
173 | goto bad_area; | ||
174 | } | ||
175 | if (vma->vm_start <= address) { | ||
176 | goto good_area; | ||
177 | } | ||
178 | |||
179 | if (!(vma->vm_flags & VM_GROWSDOWN)) { | ||
180 | #ifdef DEBUG_FAULT | ||
181 | print_task(tsk); | ||
182 | printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", | ||
183 | __FUNCTION__,__LINE__, | ||
184 | address,regs->pc,textaccess,writeaccess); | ||
185 | show_regs(regs); | ||
186 | |||
187 | print_vma(vma); | ||
188 | #endif | ||
189 | goto bad_area; | ||
190 | } | ||
191 | if (expand_stack(vma, address)) { | ||
192 | #ifdef DEBUG_FAULT | ||
193 | print_task(tsk); | ||
194 | printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", | ||
195 | __FUNCTION__,__LINE__, | ||
196 | address,regs->pc,textaccess,writeaccess); | ||
197 | show_regs(regs); | ||
198 | #endif | ||
199 | goto bad_area; | ||
200 | } | ||
201 | /* | ||
202 | * Ok, we have a good vm_area for this memory access, so | ||
203 | * we can handle it.. | ||
204 | */ | ||
205 | good_area: | ||
206 | if (textaccess) { | ||
207 | if (!(vma->vm_flags & VM_EXEC)) | ||
208 | goto bad_area; | ||
209 | } else { | ||
210 | if (writeaccess) { | ||
211 | if (!(vma->vm_flags & VM_WRITE)) | ||
212 | goto bad_area; | ||
213 | } else { | ||
214 | if (!(vma->vm_flags & VM_READ)) | ||
215 | goto bad_area; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * If for any reason at all we couldn't handle the fault, | ||
221 | * make sure we exit gracefully rather than endlessly redo | ||
222 | * the fault. | ||
223 | */ | ||
224 | survive: | ||
225 | switch (handle_mm_fault(mm, vma, address, writeaccess)) { | ||
226 | case 1: | ||
227 | tsk->min_flt++; | ||
228 | break; | ||
229 | case 2: | ||
230 | tsk->maj_flt++; | ||
231 | break; | ||
232 | case 0: | ||
233 | goto do_sigbus; | ||
234 | default: | ||
235 | goto out_of_memory; | ||
236 | } | ||
237 | /* If we get here, the page fault has been handled. Do the TLB refill | ||
238 | now from the newly-setup PTE, to avoid having to fault again right | ||
239 | away on the same instruction. */ | ||
240 | pte = lookup_pte (mm, address); | ||
241 | if (!pte) { | ||
242 | /* From empirical evidence, we can get here, due to | ||
243 | !pte_present(pte). (e.g. if a swap-in occurs, and the page | ||
244 | is swapped back out again before the process that wanted it | ||
245 | gets rescheduled?) */ | ||
246 | goto no_pte; | ||
247 | } | ||
248 | |||
249 | __do_tlb_refill(address, textaccess, pte); | ||
250 | |||
251 | no_pte: | ||
252 | |||
253 | up_read(&mm->mmap_sem); | ||
254 | return; | ||
255 | |||
256 | /* | ||
257 | * Something tried to access memory that isn't in our memory map.. | ||
258 | * Fix it, but check if it's kernel or user first.. | ||
259 | */ | ||
260 | bad_area: | ||
261 | #ifdef DEBUG_FAULT | ||
262 | printk("fault:bad area\n"); | ||
263 | #endif | ||
264 | up_read(&mm->mmap_sem); | ||
265 | |||
266 | if (user_mode(regs)) { | ||
267 | static int count=0; | ||
268 | siginfo_t info; | ||
269 | if (count < 4) { | ||
270 | /* This is really to help debug faults when starting | ||
271 | * usermode, so only need a few */ | ||
272 | count++; | ||
273 | printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n", | ||
274 | address, current->pid, current->comm, | ||
275 | (unsigned long) regs->pc); | ||
276 | #if 0 | ||
277 | show_regs(regs); | ||
278 | #endif | ||
279 | } | ||
280 | if (tsk->pid == 1) { | ||
281 | panic("INIT had user mode bad_area\n"); | ||
282 | } | ||
283 | tsk->thread.address = address; | ||
284 | tsk->thread.error_code = writeaccess; | ||
285 | info.si_signo = SIGSEGV; | ||
286 | info.si_errno = 0; | ||
287 | info.si_addr = (void *) address; | ||
288 | force_sig_info(SIGSEGV, &info, tsk); | ||
289 | return; | ||
290 | } | ||
291 | |||
292 | no_context: | ||
293 | #ifdef DEBUG_FAULT | ||
294 | printk("fault:No context\n"); | ||
295 | #endif | ||
296 | /* Are we prepared to handle this kernel fault? */ | ||
297 | fixup = search_exception_tables(regs->pc); | ||
298 | if (fixup) { | ||
299 | regs->pc = fixup->fixup; | ||
300 | return; | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Oops. The kernel tried to access some bad page. We'll have to | ||
305 | * terminate things with extreme prejudice. | ||
306 | * | ||
307 | */ | ||
308 | if (address < PAGE_SIZE) | ||
309 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
310 | else | ||
311 | printk(KERN_ALERT "Unable to handle kernel paging request"); | ||
312 | printk(" at virtual address %08lx\n", address); | ||
313 | printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff); | ||
314 | die("Oops", regs, writeaccess); | ||
315 | do_exit(SIGKILL); | ||
316 | |||
317 | /* | ||
318 | * We ran out of memory, or some other thing happened to us that made | ||
319 | * us unable to handle the page fault gracefully. | ||
320 | */ | ||
321 | out_of_memory: | ||
322 | if (current->pid == 1) { | ||
323 | panic("INIT out of memory\n"); | ||
324 | yield(); | ||
325 | goto survive; | ||
326 | } | ||
327 | printk("fault:Out of memory\n"); | ||
328 | up_read(&mm->mmap_sem); | ||
329 | if (current->pid == 1) { | ||
330 | yield(); | ||
331 | down_read(&mm->mmap_sem); | ||
332 | goto survive; | ||
333 | } | ||
334 | printk("VM: killing process %s\n", tsk->comm); | ||
335 | if (user_mode(regs)) | ||
336 | do_exit(SIGKILL); | ||
337 | goto no_context; | ||
338 | |||
339 | do_sigbus: | ||
340 | printk("fault:Do sigbus\n"); | ||
341 | up_read(&mm->mmap_sem); | ||
342 | |||
343 | /* | ||
344 | * Send a sigbus, regardless of whether we were in kernel | ||
345 | * or user mode. | ||
346 | */ | ||
347 | tsk->thread.address = address; | ||
348 | tsk->thread.error_code = writeaccess; | ||
349 | tsk->thread.trap_no = 14; | ||
350 | force_sig(SIGBUS, tsk); | ||
351 | |||
352 | /* Kernel mode? Handle exceptions or die */ | ||
353 | if (!user_mode(regs)) | ||
354 | goto no_context; | ||
355 | } | ||
356 | |||
357 | |||
358 | void flush_tlb_all(void); | ||
359 | |||
360 | void update_mmu_cache(struct vm_area_struct * vma, | ||
361 | unsigned long address, pte_t pte) | ||
362 | { | ||
363 | #if defined(CONFIG_SH64_PROC_TLB) | ||
364 | ++calls_to_update_mmu_cache; | ||
365 | #endif | ||
366 | |||
367 | /* | ||
368 | * This appears to get called once for every pte entry that gets | ||
369 | * established => I don't think it's efficient to try refilling the | ||
370 | * TLBs with the pages - some may not get accessed even. Also, for | ||
371 | * executable pages, it is impossible to determine reliably here which | ||
372 | * TLB they should be mapped into (or both even). | ||
373 | * | ||
374 | * So, just do nothing here and handle faults on demand. In the | ||
375 | * TLBMISS handling case, the refill is now done anyway after the pte | ||
376 | * has been fixed up, so that deals with most useful cases. | ||
377 | */ | ||
378 | } | ||
379 | |||
380 | static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
381 | { | ||
382 | unsigned long long match, pteh=0, lpage; | ||
383 | unsigned long tlb; | ||
384 | struct mm_struct *mm; | ||
385 | |||
386 | mm = vma->vm_mm; | ||
387 | |||
388 | if (mm->context == NO_CONTEXT) | ||
389 | return; | ||
390 | |||
391 | /* | ||
392 | * Sign-extend based on neff. | ||
393 | */ | ||
394 | lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; | ||
395 | match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID; | ||
396 | match |= lpage; | ||
397 | |||
398 | /* Do ITLB : don't bother for pages in non-exectutable VMAs */ | ||
399 | if (vma->vm_flags & VM_EXEC) { | ||
400 | for_each_itlb_entry(tlb) { | ||
401 | asm volatile ("getcfg %1, 0, %0" | ||
402 | : "=r" (pteh) | ||
403 | : "r" (tlb) ); | ||
404 | |||
405 | if (pteh == match) { | ||
406 | __flush_tlb_slot(tlb); | ||
407 | break; | ||
408 | } | ||
409 | |||
410 | } | ||
411 | } | ||
412 | |||
413 | /* Do DTLB : any page could potentially be in here. */ | ||
414 | for_each_dtlb_entry(tlb) { | ||
415 | asm volatile ("getcfg %1, 0, %0" | ||
416 | : "=r" (pteh) | ||
417 | : "r" (tlb) ); | ||
418 | |||
419 | if (pteh == match) { | ||
420 | __flush_tlb_slot(tlb); | ||
421 | break; | ||
422 | } | ||
423 | |||
424 | } | ||
425 | } | ||
426 | |||
427 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
428 | { | ||
429 | unsigned long flags; | ||
430 | |||
431 | #if defined(CONFIG_SH64_PROC_TLB) | ||
432 | ++calls_to_flush_tlb_page; | ||
433 | #endif | ||
434 | |||
435 | if (vma->vm_mm) { | ||
436 | page &= PAGE_MASK; | ||
437 | local_irq_save(flags); | ||
438 | __flush_tlb_page(vma, page); | ||
439 | local_irq_restore(flags); | ||
440 | } | ||
441 | } | ||
442 | |||
443 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
444 | unsigned long end) | ||
445 | { | ||
446 | unsigned long flags; | ||
447 | unsigned long long match, pteh=0, pteh_epn, pteh_low; | ||
448 | unsigned long tlb; | ||
449 | struct mm_struct *mm; | ||
450 | |||
451 | mm = vma->vm_mm; | ||
452 | |||
453 | #if defined(CONFIG_SH64_PROC_TLB) | ||
454 | ++calls_to_flush_tlb_range; | ||
455 | |||
456 | { | ||
457 | unsigned long size = (end - 1) - start; | ||
458 | size >>= 12; /* divide by PAGE_SIZE */ | ||
459 | size++; /* end=start+4096 => 1 page */ | ||
460 | switch (size) { | ||
461 | case 1 : flush_tlb_range_1++; break; | ||
462 | case 2 : flush_tlb_range_2++; break; | ||
463 | case 3 ... 4 : flush_tlb_range_3_4++; break; | ||
464 | case 5 ... 7 : flush_tlb_range_5_7++; break; | ||
465 | case 8 ... 11 : flush_tlb_range_8_11++; break; | ||
466 | case 12 ... 15 : flush_tlb_range_12_15++; break; | ||
467 | default : flush_tlb_range_16_up++; break; | ||
468 | } | ||
469 | } | ||
470 | #endif | ||
471 | |||
472 | if (mm->context == NO_CONTEXT) | ||
473 | return; | ||
474 | |||
475 | local_irq_save(flags); | ||
476 | |||
477 | start &= PAGE_MASK; | ||
478 | end &= PAGE_MASK; | ||
479 | |||
480 | match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID; | ||
481 | |||
482 | /* Flush ITLB */ | ||
483 | for_each_itlb_entry(tlb) { | ||
484 | asm volatile ("getcfg %1, 0, %0" | ||
485 | : "=r" (pteh) | ||
486 | : "r" (tlb) ); | ||
487 | |||
488 | pteh_epn = pteh & PAGE_MASK; | ||
489 | pteh_low = pteh & ~PAGE_MASK; | ||
490 | |||
491 | if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) | ||
492 | __flush_tlb_slot(tlb); | ||
493 | } | ||
494 | |||
495 | /* Flush DTLB */ | ||
496 | for_each_dtlb_entry(tlb) { | ||
497 | asm volatile ("getcfg %1, 0, %0" | ||
498 | : "=r" (pteh) | ||
499 | : "r" (tlb) ); | ||
500 | |||
501 | pteh_epn = pteh & PAGE_MASK; | ||
502 | pteh_low = pteh & ~PAGE_MASK; | ||
503 | |||
504 | if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) | ||
505 | __flush_tlb_slot(tlb); | ||
506 | } | ||
507 | |||
508 | local_irq_restore(flags); | ||
509 | } | ||
510 | |||
511 | void flush_tlb_mm(struct mm_struct *mm) | ||
512 | { | ||
513 | unsigned long flags; | ||
514 | |||
515 | #if defined(CONFIG_SH64_PROC_TLB) | ||
516 | ++calls_to_flush_tlb_mm; | ||
517 | #endif | ||
518 | |||
519 | if (mm->context == NO_CONTEXT) | ||
520 | return; | ||
521 | |||
522 | local_irq_save(flags); | ||
523 | |||
524 | mm->context=NO_CONTEXT; | ||
525 | if(mm==current->mm) | ||
526 | activate_context(mm); | ||
527 | |||
528 | local_irq_restore(flags); | ||
529 | |||
530 | } | ||
531 | |||
532 | void flush_tlb_all(void) | ||
533 | { | ||
534 | /* Invalidate all, including shared pages, excluding fixed TLBs */ | ||
535 | |||
536 | unsigned long flags, tlb; | ||
537 | |||
538 | #if defined(CONFIG_SH64_PROC_TLB) | ||
539 | ++calls_to_flush_tlb_all; | ||
540 | #endif | ||
541 | |||
542 | local_irq_save(flags); | ||
543 | |||
544 | /* Flush each ITLB entry */ | ||
545 | for_each_itlb_entry(tlb) { | ||
546 | __flush_tlb_slot(tlb); | ||
547 | } | ||
548 | |||
549 | /* Flush each DTLB entry */ | ||
550 | for_each_dtlb_entry(tlb) { | ||
551 | __flush_tlb_slot(tlb); | ||
552 | } | ||
553 | |||
554 | local_irq_restore(flags); | ||
555 | } | ||
556 | |||
/*
 * Flush kernel-space translations in [start, end].  Currently implemented
 * as a full TLB flush rather than a range walk; the arguments are ignored.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}
562 | |||
563 | #if defined(CONFIG_SH64_PROC_TLB) | ||
564 | /* Procfs interface to read the performance information */ | ||
565 | |||
566 | static int | ||
567 | tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) | ||
568 | { | ||
569 | int len=0; | ||
570 | len += sprintf(buf+len, "do_fast_page_fault called %12lld times\n", calls_to_do_fast_page_fault); | ||
571 | len += sprintf(buf+len, "do_slow_page_fault called %12lld times\n", calls_to_do_slow_page_fault); | ||
572 | len += sprintf(buf+len, "update_mmu_cache called %12lld times\n", calls_to_update_mmu_cache); | ||
573 | len += sprintf(buf+len, "flush_tlb_page called %12lld times\n", calls_to_flush_tlb_page); | ||
574 | len += sprintf(buf+len, "flush_tlb_range called %12lld times\n", calls_to_flush_tlb_range); | ||
575 | len += sprintf(buf+len, "flush_tlb_mm called %12lld times\n", calls_to_flush_tlb_mm); | ||
576 | len += sprintf(buf+len, "flush_tlb_all called %12lld times\n", calls_to_flush_tlb_all); | ||
577 | len += sprintf(buf+len, "flush_tlb_range_sizes\n" | ||
578 | " 1 : %12lld\n" | ||
579 | " 2 : %12lld\n" | ||
580 | " 3 - 4 : %12lld\n" | ||
581 | " 5 - 7 : %12lld\n" | ||
582 | " 8 - 11 : %12lld\n" | ||
583 | "12 - 15 : %12lld\n" | ||
584 | "16+ : %12lld\n", | ||
585 | flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4, | ||
586 | flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15, | ||
587 | flush_tlb_range_16_up); | ||
588 | len += sprintf(buf+len, "page not present %12lld times\n", page_not_present); | ||
589 | *eof = 1; | ||
590 | return len; | ||
591 | } | ||
592 | |||
593 | static int __init register_proc_tlb(void) | ||
594 | { | ||
595 | create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL); | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | __initcall(register_proc_tlb); | ||
600 | |||
601 | #endif | ||
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c new file mode 100644 index 000000000000..bcad2aefa4ee --- /dev/null +++ b/arch/sh64/mm/hugetlbpage.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * arch/sh64/mm/hugetlbpage.c | ||
3 | * | ||
4 | * SuperH HugeTLB page support. | ||
5 | * | ||
6 | * Cloned from sparc64 by Paul Mundt. | ||
7 | * | ||
8 | * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/hugetlb.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | |||
21 | #include <asm/mman.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/tlb.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | |||
27 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | ||
28 | { | ||
29 | pgd_t *pgd; | ||
30 | pmd_t *pmd; | ||
31 | pte_t *pte = NULL; | ||
32 | |||
33 | pgd = pgd_offset(mm, addr); | ||
34 | if (pgd) { | ||
35 | pmd = pmd_alloc(mm, pgd, addr); | ||
36 | if (pmd) | ||
37 | pte = pte_alloc_map(mm, pmd, addr); | ||
38 | } | ||
39 | return pte; | ||
40 | } | ||
41 | |||
42 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
43 | { | ||
44 | pgd_t *pgd; | ||
45 | pmd_t *pmd; | ||
46 | pte_t *pte = NULL; | ||
47 | |||
48 | pgd = pgd_offset(mm, addr); | ||
49 | if (pgd) { | ||
50 | pmd = pmd_offset(pgd, addr); | ||
51 | if (pmd) | ||
52 | pte = pte_offset_map(pmd, addr); | ||
53 | } | ||
54 | return pte; | ||
55 | } | ||
56 | |||
/* Mark a pte as covering a huge page by setting the size bit. */
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

/*
 * Install the ptes mapping one huge page at @page_table.
 *
 * A huge page is represented as 1 << HUGETLB_PAGE_ORDER consecutive
 * normal-size ptes, each advanced by PAGE_SIZE in physical address.
 * The pte is built writable+dirty when @write_access is set, otherwise
 * write-protected, and always young with the huge-size bit set.
 * Also accounts the full huge page in the mm's rss.
 */
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t * page_table, int write_access)
{
	unsigned long i;
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
						       vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);

	/* One pte per PAGE_SIZE sub-page, stepping the physical address. */
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(page_table, entry);
		page_table++;

		pte_val(entry) += PAGE_SIZE;
	}
}
82 | |||
83 | /* | ||
84 | * This function checks for proper alignment of input addr and len parameters. | ||
85 | */ | ||
86 | int is_aligned_hugepage_range(unsigned long addr, unsigned long len) | ||
87 | { | ||
88 | if (len & ~HPAGE_MASK) | ||
89 | return -EINVAL; | ||
90 | if (addr & ~HPAGE_MASK) | ||
91 | return -EINVAL; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
/*
 * Duplicate the huge-page mappings of @vma from @src into @dst (fork path).
 *
 * For each huge page: allocate destination ptes, copy the source pte,
 * take one page reference per huge page, then write one pte per
 * PAGE_SIZE sub-page, advancing the physical address in the pte value by
 * PAGE_SIZE each step (mirrors set_huge_pte()).
 *
 * Returns 0 on success, -ENOMEM if a destination pte couldn't be allocated.
 */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		/* hugetlb areas are prefaulted, so the source pte must exist */
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte(dst_pte, entry);
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
		}
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}
126 | |||
/*
 * get_user_pages() back-end for hugetlb VMAs: collect the struct pages
 * (and/or vmas) backing [*position, vma->vm_end) one PAGE_SIZE step at a
 * time, starting at slot @i of @pages/@vmas.
 *
 * Relies on hugetlb areas being prefaulted, so every pte must already be
 * present (BUG otherwise).  Each returned page gets an extra reference.
 * On return, *position and *length are advanced past what was consumed;
 * the updated slot index is returned.
 */
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		/* Advance by base pages, not huge pages. */
		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}
167 | |||
/*
 * Not supported on sh64: huge pages are found via the normal page tables,
 * so there is no special address-based lookup.  Always fails.
 */
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
173 | |||
/*
 * sh64 never maps huge pages at the pmd level (they are runs of ptes),
 * so no pmd is ever "huge".
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}
178 | |||
/*
 * Companion to pmd_huge() above: since pmd_huge() is always 0 this can
 * never be reached with a huge pmd; unconditionally returns NULL.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
184 | |||
/*
 * Tear down the huge-page mappings in [start, end) of @vma.
 *
 * Both bounds must be huge-page aligned.  For each huge page present we
 * drop the single page reference taken when it was mapped, then clear
 * every one of its constituent PAGE_SIZE ptes.  rss is debited for the
 * whole range and the TLB flushed once at the end.
 */
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			pte_clear(mm, address+(i*PAGE_SIZE), pte);
			pte++;
		}
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}
212 | |||
/*
 * Prefault every huge page of @vma: for each huge-page slot, look the
 * page up in @mapping's page cache, allocating (with fs quota charged)
 * and inserting a fresh huge page when absent, then install its ptes via
 * set_huge_pte().
 *
 * Runs under mm->page_table_lock.  Returns 0 on success or -ENOMEM on
 * pte-allocation, quota, page-allocation, or page-cache-insert failure.
 * NOTE(review): an -EEXIST race from add_to_page_cache() is treated as a
 * hard failure rather than retried — confirm callers tolerate this.
 */
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		/* Already populated (e.g. shared mapping) — skip. */
		if (!pte_none(*pte))
			continue;

		/* Page-cache index of this huge page within the file. */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (! ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c new file mode 100644 index 000000000000..a65e8bb2c3cc --- /dev/null +++ b/arch/sh64/mm/init.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/init.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/rwsem.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/swap.h> | ||
17 | #include <linux/bootmem.h> | ||
18 | |||
19 | #include <asm/mmu_context.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/tlb.h> | ||
24 | |||
25 | #ifdef CONFIG_BLK_DEV_INITRD | ||
26 | #include <linux/blk.h> | ||
27 | #endif | ||
28 | |||
29 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
30 | |||
31 | /* | ||
32 | * Cache of MMU context last used. | ||
33 | */ | ||
34 | unsigned long mmu_context_cache; | ||
35 | pgd_t * mmu_pdtp_cache; | ||
36 | int after_bootmem = 0; | ||
37 | |||
38 | /* | ||
39 | * BAD_PAGE is the page that is used for page faults when linux | ||
40 | * is out-of-memory. Older versions of linux just did a | ||
41 | * do_exit(), but using this instead means there is less risk | ||
42 | * for a process dying in kernel mode, possibly leaving an inode | ||
43 | * unused etc.. | ||
44 | * | ||
45 | * BAD_PAGETABLE is the accompanying page-table: it is initialized | ||
46 | * to point to BAD_PAGE entries. | ||
47 | * | ||
48 | * ZERO_PAGE is a special page that is used for zero-initialized | ||
49 | * data and COW. | ||
50 | */ | ||
51 | |||
52 | extern unsigned char empty_zero_page[PAGE_SIZE]; | ||
53 | extern unsigned char empty_bad_page[PAGE_SIZE]; | ||
54 | extern pte_t empty_bad_pte_table[PTRS_PER_PTE]; | ||
55 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
56 | |||
57 | extern char _text, _etext, _edata, __bss_start, _end; | ||
58 | extern char __init_begin, __init_end; | ||
59 | |||
60 | /* It'd be good if these lines were in the standard header file. */ | ||
61 | #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) | ||
62 | #define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) | ||
63 | |||
64 | |||
65 | void show_mem(void) | ||
66 | { | ||
67 | int i, total = 0, reserved = 0; | ||
68 | int shared = 0, cached = 0; | ||
69 | |||
70 | printk("Mem-info:\n"); | ||
71 | show_free_areas(); | ||
72 | printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); | ||
73 | i = max_mapnr; | ||
74 | while (i-- > 0) { | ||
75 | total++; | ||
76 | if (PageReserved(mem_map+i)) | ||
77 | reserved++; | ||
78 | else if (PageSwapCache(mem_map+i)) | ||
79 | cached++; | ||
80 | else if (page_count(mem_map+i)) | ||
81 | shared += page_count(mem_map+i) - 1; | ||
82 | } | ||
83 | printk("%d pages of RAM\n",total); | ||
84 | printk("%d reserved pages\n",reserved); | ||
85 | printk("%d pages shared\n",shared); | ||
86 | printk("%d pages swap cached\n",cached); | ||
87 | printk("%ld pages in page table cache\n",pgtable_cache_size); | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * paging_init() sets up the page tables. | ||
92 | * | ||
93 | * head.S already did a lot to set up address translation for the kernel. | ||
94 | * Here we comes with: | ||
95 | * . MMU enabled | ||
96 | * . ASID set (SR) | ||
97 | * . some 512MB regions being mapped of which the most relevant here is: | ||
98 | * . CACHED segment (ASID 0 [irrelevant], shared AND NOT user) | ||
99 | * . possible variable length regions being mapped as: | ||
100 | * . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user) | ||
101 | * . All of the memory regions are placed, independently from the platform | ||
102 | * on high addresses, above 0x80000000. | ||
103 | * . swapper_pg_dir is already cleared out by the .space directive | ||
104 | * in any case swapper does not require a real page directory since | ||
105 | * it's all kernel contained. | ||
106 | * | ||
107 | * Those pesky NULL-reference errors in the kernel are then | ||
108 | * dealt with by not mapping address 0x00000000 at all. | ||
109 | * | ||
110 | */ | ||
111 | void __init paging_init(void) | ||
112 | { | ||
113 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | ||
114 | |||
115 | pgd_init((unsigned long)swapper_pg_dir); | ||
116 | pgd_init((unsigned long)swapper_pg_dir + | ||
117 | sizeof(pgd_t) * USER_PTRS_PER_PGD); | ||
118 | |||
119 | mmu_context_cache = MMU_CONTEXT_FIRST_VERSION; | ||
120 | |||
121 | /* | ||
122 | * All memory is good as ZONE_NORMAL (fall-through) and ZONE_DMA. | ||
123 | */ | ||
124 | zones_size[ZONE_DMA] = MAX_LOW_PFN - START_PFN; | ||
125 | NODE_DATA(0)->node_mem_map = NULL; | ||
126 | free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0); | ||
127 | } | ||
128 | |||
129 | void __init mem_init(void) | ||
130 | { | ||
131 | int codesize, reservedpages, datasize, initsize; | ||
132 | int tmp; | ||
133 | |||
134 | max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; | ||
135 | high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE); | ||
136 | |||
137 | /* | ||
138 | * Clear the zero-page. | ||
139 | * This is not required but we might want to re-use | ||
140 | * this very page to pass boot parameters, one day. | ||
141 | */ | ||
142 | memset(empty_zero_page, 0, PAGE_SIZE); | ||
143 | |||
144 | /* this will put all low memory onto the freelists */ | ||
145 | totalram_pages += free_all_bootmem_node(NODE_DATA(0)); | ||
146 | reservedpages = 0; | ||
147 | for (tmp = 0; tmp < num_physpages; tmp++) | ||
148 | /* | ||
149 | * Only count reserved RAM pages | ||
150 | */ | ||
151 | if (PageReserved(mem_map+tmp)) | ||
152 | reservedpages++; | ||
153 | |||
154 | after_bootmem = 1; | ||
155 | |||
156 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
157 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
158 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
159 | |||
160 | printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | ||
161 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
162 | max_mapnr << (PAGE_SHIFT-10), | ||
163 | codesize >> 10, | ||
164 | reservedpages << (PAGE_SHIFT-10), | ||
165 | datasize >> 10, | ||
166 | initsize >> 10); | ||
167 | } | ||
168 | |||
169 | void free_initmem(void) | ||
170 | { | ||
171 | unsigned long addr; | ||
172 | |||
173 | addr = (unsigned long)(&__init_begin); | ||
174 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | ||
175 | ClearPageReserved(virt_to_page(addr)); | ||
176 | set_page_count(virt_to_page(addr), 1); | ||
177 | free_page(addr); | ||
178 | totalram_pages++; | ||
179 | } | ||
180 | printk ("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10); | ||
181 | } | ||
182 | |||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages of a no-longer-needed initrd image, [start, end), to
 * the page allocator.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *pg = virt_to_page(addr);

		ClearPageReserved(pg);
		set_page_count(pg, 1);
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
196 | |||
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c new file mode 100644 index 000000000000..f4003da556bc --- /dev/null +++ b/arch/sh64/mm/ioremap.c | |||
@@ -0,0 +1,469 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/ioremap.c | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2003, 2004 Paul Mundt | ||
10 | * | ||
11 | * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly | ||
12 | * derived from arch/i386/mm/ioremap.c . | ||
13 | * | ||
14 | * (C) Copyright 1995 1996 Linus Torvalds | ||
15 | */ | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <asm/io.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | #include <linux/proc_fs.h> | ||
27 | |||
28 | static void shmedia_mapioaddr(unsigned long, unsigned long); | ||
29 | static unsigned long shmedia_ioremap(struct resource *, u32, int); | ||
30 | |||
/*
 * Populate PTEs within a single PMD span, mapping virtual [address,
 * address+size) to the physical range starting at phys_addr.  Entries
 * are created present, read/write, dirty, accessed and shared, with the
 * caller's extra hardware flag bits OR'd in.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
				   _PAGE_WRITE | _PAGE_DIRTY |
				   _PAGE_ACCESSED | _PAGE_SHARED | flags);

	/* Reduce to an offset within this PMD and clamp to its end. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();

	pfn = phys_addr >> PAGE_SHIFT;

	pr_debug("    %s: pte %p address %lx size %lx phys_addr %lx\n",
		 __FUNCTION__,pte,address,size,phys_addr);

	do {
		/* ioremap must never overwrite an existing mapping. */
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}

		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
64 | |||
/*
 * Walk/allocate the PTE tables for one PGD span.  Note the classic
 * i386-style trick: phys_addr is rebased by subtracting the starting
 * virtual offset, so "address + phys_addr" inside the loop always
 * yields the physical address matching the current virtual address.
 * Returns 0 on success, -ENOMEM if a PTE page cannot be allocated.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Reduce to an offset within this PGD and clamp to its end. */
	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;

	if (address >= end)
		BUG();

	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		/* Advance to the start of the next PMD span. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
91 | |||
92 | static int remap_area_pages(unsigned long address, unsigned long phys_addr, | ||
93 | unsigned long size, unsigned long flags) | ||
94 | { | ||
95 | int error; | ||
96 | pgd_t * dir; | ||
97 | unsigned long end = address + size; | ||
98 | |||
99 | phys_addr -= address; | ||
100 | dir = pgd_offset_k(address); | ||
101 | flush_cache_all(); | ||
102 | if (address >= end) | ||
103 | BUG(); | ||
104 | spin_lock(&init_mm.page_table_lock); | ||
105 | do { | ||
106 | pmd_t *pmd = pmd_alloc(&init_mm, dir, address); | ||
107 | error = -ENOMEM; | ||
108 | if (!pmd) | ||
109 | break; | ||
110 | if (remap_area_pmd(pmd, address, end - address, | ||
111 | phys_addr + address, flags)) { | ||
112 | break; | ||
113 | } | ||
114 | error = 0; | ||
115 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
116 | dir++; | ||
117 | } while (address && (address < end)); | ||
118 | spin_unlock(&init_mm.page_table_lock); | ||
119 | flush_tlb_all(); | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Generic mapping function (not visible outside): | ||
125 | */ | ||
126 | |||
127 | /* | ||
128 | * Remap an arbitrary physical address space into the kernel virtual | ||
129 | * address space. Needed when the kernel wants to access high addresses | ||
130 | * directly. | ||
131 | * | ||
132 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
133 | * have to convert them into an offset in a page-aligned mapping, but the | ||
134 | * caller shouldn't need to know that small detail. | ||
135 | */ | ||
136 | void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) | ||
137 | { | ||
138 | void * addr; | ||
139 | struct vm_struct * area; | ||
140 | unsigned long offset, last_addr; | ||
141 | |||
142 | /* Don't allow wraparound or zero size */ | ||
143 | last_addr = phys_addr + size - 1; | ||
144 | if (!size || last_addr < phys_addr) | ||
145 | return NULL; | ||
146 | |||
147 | /* | ||
148 | * Mappings have to be page-aligned | ||
149 | */ | ||
150 | offset = phys_addr & ~PAGE_MASK; | ||
151 | phys_addr &= PAGE_MASK; | ||
152 | size = PAGE_ALIGN(last_addr + 1) - phys_addr; | ||
153 | |||
154 | /* | ||
155 | * Ok, go for it.. | ||
156 | */ | ||
157 | area = get_vm_area(size, VM_IOREMAP); | ||
158 | pr_debug("Get vm_area returns %p addr %p\n",area,area->addr); | ||
159 | if (!area) | ||
160 | return NULL; | ||
161 | area->phys_addr = phys_addr; | ||
162 | addr = area->addr; | ||
163 | if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) { | ||
164 | vunmap(addr); | ||
165 | return NULL; | ||
166 | } | ||
167 | return (void *) (offset + (char *)addr); | ||
168 | } | ||
169 | |||
170 | void iounmap(void *addr) | ||
171 | { | ||
172 | struct vm_struct *area; | ||
173 | |||
174 | vfree((void *) (PAGE_MASK & (unsigned long) addr)); | ||
175 | area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr)); | ||
176 | if (!area) { | ||
177 | printk(KERN_ERR "iounmap: bad address %p\n", addr); | ||
178 | return; | ||
179 | } | ||
180 | |||
181 | kfree(area); | ||
182 | } | ||
183 | |||
/*
 * Resource tree root describing the on-chip I/O remap window.
 * NOTE(review): the first page at IOBASE_VADDR is excluded from the
 * window — presumably so that a "zero" mapping is never handed out;
 * confirm against the IOBASE_VADDR users.
 */
static struct resource shmedia_iomap = {
	.name = "shmedia_iomap",
	.start = IOBASE_VADDR + PAGE_SIZE,
	.end = IOBASE_END - 1,
};
189 | |||
190 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va); | ||
191 | static void shmedia_unmapioaddr(unsigned long vaddr); | ||
192 | static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz); | ||
193 | |||
194 | /* | ||
195 | * We have the same problem as the SPARC, so lets have the same comment: | ||
196 | * Our mini-allocator... | ||
197 | * Boy this is gross! We need it because we must map I/O for | ||
198 | * timers and interrupt controller before the kmalloc is available. | ||
199 | */ | ||
200 | |||
201 | #define XNMLN 15 | ||
202 | #define XNRES 10 | ||
203 | |||
204 | struct xresource { | ||
205 | struct resource xres; /* Must be first */ | ||
206 | int xflag; /* 1 == used */ | ||
207 | char xname[XNMLN+1]; | ||
208 | }; | ||
209 | |||
210 | static struct xresource xresv[XNRES]; | ||
211 | |||
212 | static struct xresource *xres_alloc(void) | ||
213 | { | ||
214 | struct xresource *xrp; | ||
215 | int n; | ||
216 | |||
217 | xrp = xresv; | ||
218 | for (n = 0; n < XNRES; n++) { | ||
219 | if (xrp->xflag == 0) { | ||
220 | xrp->xflag = 1; | ||
221 | return xrp; | ||
222 | } | ||
223 | xrp++; | ||
224 | } | ||
225 | return NULL; | ||
226 | } | ||
227 | |||
/* Return a slot obtained from xres_alloc() to the static pool. */
static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}
232 | |||
233 | static struct resource *shmedia_find_resource(struct resource *root, | ||
234 | unsigned long vaddr) | ||
235 | { | ||
236 | struct resource *res; | ||
237 | |||
238 | for (res = root->child; res; res = res->sibling) | ||
239 | if (res->start <= vaddr && res->end >= vaddr) | ||
240 | return res; | ||
241 | |||
242 | return NULL; | ||
243 | } | ||
244 | |||
/*
 * Allocate a named resource descriptor — from the static xresv pool
 * while kmalloc is not yet usable, from the heap afterwards — and map
 * @size bytes of physical space at @phys into the shmedia_iomap window.
 *
 * Returns the virtual address of the mapping.
 * NOTE(review): on kmalloc failure this returns -ENOMEM cast to
 * unsigned long, which callers could mistake for a valid address —
 * verify how callers check the result.
 */
static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name)
{
	static int printed_full = 0;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("%s: done with statics, switching to kmalloc\n",
			       __FUNCTION__);
			printed_full = 1;
		}
		/* Allocate the resource and its name string in one chunk;
		 * the name lives immediately after the struct. */
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (!tack)
			return -ENOMEM;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	/* Copy at most XNMLN chars and guarantee NUL termination. */
	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size);
}
280 | |||
/*
 * Reserve a page-aligned slice of the shmedia_iomap window large enough
 * for @sz bytes at physical @pa, wire up kernel page tables for it, and
 * return the virtual address corresponding to @pa (sub-page offset
 * included).  Panics if the window cannot satisfy the request.
 */
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	/* psz is reused here as a remaining-bytes countdown. */
	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	/* Shift the resource so res->start is the caller's exact address. */
	res->start += offset;
	res->end = res->start + sz - 1;	 /* not strictly necessary.. */

	return res->start;
}
316 | |||
317 | static void shmedia_free_io(struct resource *res) | ||
318 | { | ||
319 | unsigned long len = res->end - res->start + 1; | ||
320 | |||
321 | BUG_ON((len & (PAGE_SIZE - 1)) != 0); | ||
322 | |||
323 | while (len) { | ||
324 | len -= PAGE_SIZE; | ||
325 | shmedia_unmapioaddr(res->start + len); | ||
326 | } | ||
327 | |||
328 | release_resource(res); | ||
329 | } | ||
330 | |||
331 | static void *sh64_get_page(void) | ||
332 | { | ||
333 | extern int after_bootmem; | ||
334 | void *page; | ||
335 | |||
336 | if (after_bootmem) { | ||
337 | page = (void *)get_zeroed_page(GFP_ATOMIC); | ||
338 | } else { | ||
339 | page = alloc_bootmem_pages(PAGE_SIZE); | ||
340 | } | ||
341 | |||
342 | if (!page || ((unsigned long)page & ~PAGE_MASK)) | ||
343 | panic("sh64_get_page: Out of memory already?\n"); | ||
344 | |||
345 | return page; | ||
346 | } | ||
347 | |||
348 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va) | ||
349 | { | ||
350 | pgd_t *pgdp; | ||
351 | pmd_t *pmdp; | ||
352 | pte_t *ptep, pte; | ||
353 | pgprot_t prot; | ||
354 | unsigned long flags = 1; /* 1 = CB0-1 device */ | ||
355 | |||
356 | pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va); | ||
357 | |||
358 | pgdp = pgd_offset_k(va); | ||
359 | if (pgd_none(*pgdp) || !pgd_present(*pgdp)) { | ||
360 | pmdp = (pmd_t *)sh64_get_page(); | ||
361 | set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE)); | ||
362 | } | ||
363 | |||
364 | pmdp = pmd_offset(pgdp, va); | ||
365 | if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) { | ||
366 | ptep = (pte_t *)sh64_get_page(); | ||
367 | set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE)); | ||
368 | } | ||
369 | |||
370 | prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | | ||
371 | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); | ||
372 | |||
373 | pte = pfn_pte(pa >> PAGE_SHIFT, prot); | ||
374 | ptep = pte_offset_kernel(pmdp, va); | ||
375 | |||
376 | if (!pte_none(*ptep) && | ||
377 | pte_val(*ptep) != pte_val(pte)) | ||
378 | pte_ERROR(*ptep); | ||
379 | |||
380 | set_pte(ptep, pte); | ||
381 | |||
382 | flush_tlb_kernel_range(va, PAGE_SIZE); | ||
383 | } | ||
384 | |||
385 | static void shmedia_unmapioaddr(unsigned long vaddr) | ||
386 | { | ||
387 | pgd_t *pgdp; | ||
388 | pmd_t *pmdp; | ||
389 | pte_t *ptep; | ||
390 | |||
391 | pgdp = pgd_offset_k(vaddr); | ||
392 | pmdp = pmd_offset(pgdp, vaddr); | ||
393 | |||
394 | if (pmd_none(*pmdp) || pmd_bad(*pmdp)) | ||
395 | return; | ||
396 | |||
397 | ptep = pte_offset_kernel(pmdp, vaddr); | ||
398 | |||
399 | if (pte_none(*ptep) || !pte_present(*ptep)) | ||
400 | return; | ||
401 | |||
402 | clear_page((void *)ptep); | ||
403 | pte_clear(&init_mm, vaddr, ptep); | ||
404 | } | ||
405 | |||
406 | unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name) | ||
407 | { | ||
408 | if (size < PAGE_SIZE) | ||
409 | size = PAGE_SIZE; | ||
410 | |||
411 | return shmedia_alloc_io(phys, size, name); | ||
412 | } | ||
413 | |||
/*
 * Undo an onchip_remap(): find the resource covering @vaddr, unmap its
 * pages, release the resource, and return its descriptor to wherever
 * it came from (static pool or heap).
 *
 * NOTE(review): shmedia_free_io() BUG_ONs if the region length is not a
 * page multiple, but shmedia_ioremap() shrank res to the caller's exact
 * size — confirm the callers only ever pass page-multiple sizes.
 */
void onchip_unmap(unsigned long vaddr)
{
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	/* Page count, for the log message only. */
	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
	       res->name, psz, psz == 1 ? " " : "s");

	shmedia_free_io(res);

	/* Descriptors inside xresv[] came from the static mini-allocator;
	 * everything else was kmalloc'd by shmedia_alloc_io(). */
	if ((char *)res >= (char *)xresv &&
	    (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
440 | |||
#ifdef CONFIG_PROC_FS
/*
 * /proc read callback: emit one "start-end: name" line for every child
 * of the resource tree passed via @data (the shmedia_iomap window).
 * Stops early once fewer than ~32 bytes of buffer remain.
 */
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)        /* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p-buf;
}
#endif /* CONFIG_PROC_FS */
460 | |||
/*
 * Boot-time initcall: expose the on-chip I/O mapping table as
 * /proc/io_map (no-op when CONFIG_PROC_FS is disabled).
 */
static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
#endif
	return 0;
}

__initcall(register_proc_onchip);
diff --git a/arch/sh64/mm/tlb.c b/arch/sh64/mm/tlb.c new file mode 100644 index 000000000000..d517e7d70340 --- /dev/null +++ b/arch/sh64/mm/tlb.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * arch/sh64/mm/tlb.c | ||
3 | * | ||
4 | * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> | ||
5 | * Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com> | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/tlb.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | |||
18 | /** | ||
19 | * sh64_tlb_init | ||
20 | * | ||
21 | * Perform initial setup for the DTLB and ITLB. | ||
22 | */ | ||
23 | int __init sh64_tlb_init(void) | ||
24 | { | ||
25 | /* Assign some sane DTLB defaults */ | ||
26 | cpu_data->dtlb.entries = 64; | ||
27 | cpu_data->dtlb.step = 0x10; | ||
28 | |||
29 | cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step; | ||
30 | cpu_data->dtlb.next = cpu_data->dtlb.first; | ||
31 | |||
32 | cpu_data->dtlb.last = DTLB_FIXED | | ||
33 | ((cpu_data->dtlb.entries - 1) * | ||
34 | cpu_data->dtlb.step); | ||
35 | |||
36 | /* And again for the ITLB */ | ||
37 | cpu_data->itlb.entries = 64; | ||
38 | cpu_data->itlb.step = 0x10; | ||
39 | |||
40 | cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step; | ||
41 | cpu_data->itlb.next = cpu_data->itlb.first; | ||
42 | cpu_data->itlb.last = ITLB_FIXED | | ||
43 | ((cpu_data->itlb.entries - 1) * | ||
44 | cpu_data->itlb.step); | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
/**
 * sh64_next_free_dtlb_entry
 *
 * Find the next available DTLB entry.
 *
 * Simply reports the refill cursor; it does not advance it.
 */
unsigned long long sh64_next_free_dtlb_entry(void)
{
	return cpu_data->dtlb.next;
}
58 | |||
/**
 * sh64_get_wired_dtlb_entry
 *
 * Allocate a wired (locked-in) entry in the DTLB.
 *
 * Hands out the current 'next' slot and advances both 'first' and
 * 'next' by one step, so the normal refill path never recycles the
 * wired slot.  Pair with sh64_put_wired_dtlb_entry(), which frees in
 * LIFO order.
 */
unsigned long long sh64_get_wired_dtlb_entry(void)
{
	unsigned long long entry = sh64_next_free_dtlb_entry();

	cpu_data->dtlb.first += cpu_data->dtlb.step;
	cpu_data->dtlb.next += cpu_data->dtlb.step;

	return entry;
}
73 | |||
/**
 * sh64_put_wired_dtlb_entry
 *
 * @entry: Address of TLB slot.
 *
 * Free a wired (locked-in) entry in the DTLB.
 *
 * Works like a stack, last one to allocate must be first one to free.
 *
 * Returns 0 on success, -EINVAL if @entry is out of range or not the
 * most recently wired slot.
 */
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
	/* Invalidate the slot before giving it back. */
	__flush_tlb_slot(entry);

	/*
	 * We don't do any particularly useful tracking of wired entries,
	 * so this approach works like a stack .. last one to be allocated
	 * has to be the first one to be freed.
	 *
	 * We could potentially load wired entries into a list and work on
	 * rebalancing the list periodically (which also entails moving the
	 * contents of a TLB entry) .. though I have a feeling that this is
	 * more trouble than it's worth.
	 */

	/*
	 * Entry must be valid .. we don't want any ITLB addresses!
	 */
	if (entry <= DTLB_FIXED)
		return -EINVAL;

	/*
	 * Next, check if we're within range to be freed. (ie, must be the
	 * entry beneath the first 'free' entry!
	 */
	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
		return -EINVAL;

	/* If we are, then bring this entry back into the list */
	cpu_data->dtlb.first -= cpu_data->dtlb.step;
	cpu_data->dtlb.next = entry;

	return 0;
}
117 | |||
/**
 * sh64_setup_tlb_slot
 *
 * @config_addr: Address of TLB slot.
 * @eaddr: Virtual address.
 * @asid: Address Space Identifier.
 * @paddr: Physical address.
 *
 * Load up a virtual<->physical translation for @eaddr<->@paddr in the
 * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
 * The entry is created cachable and read/write.
 */
inline void sh64_setup_tlb_slot(unsigned long long config_addr,
				unsigned long eaddr,
				unsigned long asid,
				unsigned long paddr)
{
	unsigned long long pteh, ptel;

	/* Sign extension */
#if (NEFF == 32)
	pteh = (unsigned long long)(signed long long)(signed long) eaddr;
#else
#error "Can't sign extend more than 32 bits yet"
#endif
	pteh &= PAGE_MASK;
	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
#if (NEFF == 32)
	ptel = (unsigned long long)(signed long long)(signed long) paddr;
#else
#error "Can't sign extend more than 32 bits yet"
#endif
	ptel &= PAGE_MASK;
	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);

	/* Write PTEL (cfg index 1) before PTEH (cfg index 0), so the
	 * entry only becomes valid once both halves are in place. */
	asm volatile("putcfg %0, 1, %1\n\t"
		     "putcfg %0, 0, %2\n"
		     : : "r" (config_addr), "r" (ptel), "r" (pteh));
}
156 | |||
/**
 * sh64_teardown_tlb_slot
 *
 * @config_addr: Address of TLB slot.
 *
 * Teardown any existing mapping in the TLB slot @config_addr.
 *
 * Implemented as an alias of __flush_tlb_slot(), which invalidates the
 * slot; no separate function body is emitted.
 */
inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
	__attribute__ ((alias("__flush_tlb_slot")));
166 | |||
diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c new file mode 100644 index 000000000000..c8615954aaa9 --- /dev/null +++ b/arch/sh64/mm/tlbmiss.c | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/mm/tlbmiss.c | ||
7 | * | ||
8 | * Original code from fault.c | ||
9 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
10 | * | ||
11 | * Fast PTE->TLB refill path | ||
12 | * Copyright (C) 2003 Richard.Curnow@superh.com | ||
13 | * | ||
14 | * IMPORTANT NOTES : | ||
15 | * The do_fast_page_fault function is called from a context in entry.S where very few registers | ||
16 | * have been saved. In particular, the code in this file must be compiled not to use ANY | ||
 * caller-save registers that are not part of the restricted save set. Also, it means that
18 | * code in this file must not make calls to functions elsewhere in the kernel, or else the | ||
19 | * excepting context will see corruption in its caller-save registers. Plus, the entry.S save | ||
20 | * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside | ||
21 | * it and panic on any exception. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/signal.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/ptrace.h> | ||
32 | #include <linux/mman.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/smp.h> | ||
35 | #include <linux/smp_lock.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | |||
38 | #include <asm/system.h> | ||
39 | #include <asm/tlb.h> | ||
40 | #include <asm/io.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgalloc.h> | ||
43 | #include <asm/mmu_context.h> | ||
44 | #include <asm/registers.h> /* required by inline asm statements */ | ||
45 | |||
/* Callable from fault.c, so not static.
 *
 * Build PTEH/PTEL values from the given PTE and load them into the
 * next round-robin slot of the ITLB (textaccess != 0) or DTLB.
 *
 * Runs on the restricted-register fast path (see file header): no
 * calls out, no exceptions — keep it that way when editing.
 */
inline void __do_tlb_refill(unsigned long address,
                            unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh=0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
	/* PTEL (cfg 1) is written before PTEH (cfg 0), so the slot only
	 * becomes valid once both halves are in place. */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel) );

	/* Advance the round-robin cursor, wrapping past the last slot. */
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;

}
91 | |||
92 | static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags, | ||
93 | unsigned long long textaccess, | ||
94 | unsigned long address) | ||
95 | { | ||
96 | pgd_t *dir; | ||
97 | pmd_t *pmd; | ||
98 | static pte_t *pte; | ||
99 | pte_t entry; | ||
100 | |||
101 | dir = pgd_offset_k(address); | ||
102 | pmd = pmd_offset(dir, address); | ||
103 | |||
104 | if (pmd_none(*pmd)) { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | if (pmd_bad(*pmd)) { | ||
109 | pmd_clear(pmd); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | pte = pte_offset_kernel(pmd, address); | ||
114 | entry = *pte; | ||
115 | |||
116 | if (pte_none(entry) || !pte_present(entry)) { | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | if ((pte_val(entry) & protection_flags) != protection_flags) { | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | __do_tlb_refill(address, textaccess, pte); | ||
125 | |||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags, | ||
130 | unsigned long long textaccess, | ||
131 | unsigned long address) | ||
132 | { | ||
133 | pgd_t *dir; | ||
134 | pmd_t *pmd; | ||
135 | pte_t *pte; | ||
136 | pte_t entry; | ||
137 | |||
138 | /* NB. The PGD currently only contains a single entry - there is no | ||
139 | page table tree stored for the top half of the address space since | ||
140 | virtual pages in that region should never be mapped in user mode. | ||
141 | (In kernel mode, the only things in that region are the 512Mb super | ||
142 | page (locked in), and vmalloc (modules) + I/O device pages (handled | ||
143 | by handle_vmalloc_fault), so no PGD for the upper half is required | ||
144 | by kernel mode either). | ||
145 | |||
146 | See how mm->pgd is allocated and initialised in pgd_alloc to see why | ||
147 | the next test is necessary. - RPC */ | ||
148 | if (address >= (unsigned long) TASK_SIZE) { | ||
149 | /* upper half - never has page table entries. */ | ||
150 | return 0; | ||
151 | } | ||
152 | dir = pgd_offset(mm, address); | ||
153 | if (pgd_none(*dir)) { | ||
154 | return 0; | ||
155 | } | ||
156 | if (!pgd_present(*dir)) { | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | pmd = pmd_offset(dir, address); | ||
161 | if (pmd_none(*pmd)) { | ||
162 | return 0; | ||
163 | } | ||
164 | if (!pmd_present(*pmd)) { | ||
165 | return 0; | ||
166 | } | ||
167 | pte = pte_offset_kernel(pmd, address); | ||
168 | entry = *pte; | ||
169 | if (pte_none(entry)) { | ||
170 | return 0; | ||
171 | } | ||
172 | if (!pte_present(entry)) { | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | /* If the page doesn't have sufficient protection bits set to service the | ||
177 | kind of fault being handled, there's not much point doing the TLB refill. | ||
178 | Punt the fault to the general handler. */ | ||
179 | if ((pte_val(entry) & protection_flags) != protection_flags) { | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | __do_tlb_refill(address, textaccess, pte); | ||
184 | |||
185 | return 1; | ||
186 | } | ||
187 | |||
/* Put all this information into one structure so that everything is just arithmetic
   relative to a single base address. This reduces the number of movi/shori pairs needed
   just to load addresses of static data. */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

/* Hardware PTE protection bits checked against the faulting access. */
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode.
   Note: is_write_access is intentionally left zero-initialised here; only
   protection_flags and is_text_access carry data. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
211 | |||
/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry.  (This case represents a very
   large majority of page faults.)  Return 1 if the fault was successfully
   handled.  Return 0 if the fault could not be handled.  (This leads into the
   general fault handling in fault.c which deals with mapping file-backed
   pages, stack growth, segmentation faults, swapping etc etc)
 */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a small array index
	   which can be used to lookup parameters specific to the type of TLBMISS being
	   handled.  Note:
	   ITLBMISS has EXPEVT==0xa40
	   RTLBMISS has EXPEVT==0x040
	   WTLBMISS has EXPEVT==0x060
	*/

	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too. Then we can check that PRU is set
	   when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];

#ifdef CONFIG_SH64_PROC_TLB
	/* NOTE(review): counter declared elsewhere, compiled in only with
	   CONFIG_SH64_PROC_TLB. */
	++calls_to_do_fast_page_fault;
#endif

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
		if (ssr_md) {
			/* Process-contexts can never have this address range mapped */
			if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) {
				return 1;
			}
		}
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address)) {
			return 1;
		}
	}

	return 0;
}
280 | |||
diff --git a/arch/sh64/oprofile/Kconfig b/arch/sh64/oprofile/Kconfig new file mode 100644 index 000000000000..19d37730b664 --- /dev/null +++ b/arch/sh64/oprofile/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | |||
2 | menu "Profiling support" | ||
3 | depends on EXPERIMENTAL | ||
4 | |||
5 | config PROFILING | ||
6 | bool "Profiling support (EXPERIMENTAL)" | ||
7 | help | ||
8 | Say Y here to enable the extended profiling support mechanisms used | ||
9 | by profilers such as OProfile. | ||
10 | |||
11 | |||
12 | config OPROFILE | ||
13 | tristate "OProfile system profiling (EXPERIMENTAL)" | ||
14 | depends on PROFILING | ||
15 | help | ||
16 | OProfile is a profiling system capable of profiling the | ||
17 | whole system, including the kernel, kernel modules, libraries, | ||
18 | and applications. | ||
19 | |||
20 | If unsure, say N. | ||
21 | |||
22 | endmenu | ||
23 | |||
diff --git a/arch/sh64/oprofile/Makefile b/arch/sh64/oprofile/Makefile new file mode 100644 index 000000000000..11a451f6a9c3 --- /dev/null +++ b/arch/sh64/oprofile/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | obj-$(CONFIG_OPROFILE) += oprofile.o | ||
2 | |||
3 | DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ | ||
4 | oprof.o cpu_buffer.o buffer_sync.o \ | ||
5 | event_buffer.o oprofile_files.o \ | ||
6 | oprofilefs.o oprofile_stats.o \ | ||
7 | timer_int.o ) | ||
8 | |||
9 | profdrvr-y := op_model_null.o | ||
10 | |||
11 | oprofile-y := $(DRIVER_OBJS) $(profdrvr-y) | ||
12 | |||
diff --git a/arch/sh64/oprofile/op_model_null.c b/arch/sh64/oprofile/op_model_null.c new file mode 100644 index 000000000000..a845b088edb4 --- /dev/null +++ b/arch/sh64/oprofile/op_model_null.c | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * arch/sh64/oprofile/op_model_null.c | ||
3 | * | ||
4 | * Copyright (C) 2003 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/oprofile.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/errno.h> | ||
14 | |||
15 | int __init oprofile_arch_init(struct oprofile_operations *ops) | ||
16 | { | ||
17 | return -ENODEV; | ||
18 | } | ||
19 | |||
20 | void oprofile_arch_exit(void) | ||
21 | { | ||
22 | } | ||
23 | |||