aboutsummaryrefslogtreecommitdiffstats
path: root/arch/xtensa
diff options
context:
space:
mode:
Diffstat (limited to 'arch/xtensa')
-rw-r--r--arch/xtensa/Kconfig258
-rw-r--r--arch/xtensa/Kconfig.debug7
-rw-r--r--arch/xtensa/Makefile102
-rw-r--r--arch/xtensa/boot/Makefile37
-rw-r--r--arch/xtensa/boot/boot-elf/Makefile52
-rw-r--r--arch/xtensa/boot/boot-elf/boot.ld71
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S37
-rw-r--r--arch/xtensa/boot/boot-redboot/Makefile35
-rw-r--r--arch/xtensa/boot/boot-redboot/boot.ld66
-rw-r--r--arch/xtensa/boot/boot-redboot/bootstrap.S246
-rw-r--r--arch/xtensa/boot/include/zlib.h433
-rw-r--r--arch/xtensa/boot/lib/Makefile6
-rw-r--r--arch/xtensa/boot/lib/memcpy.S36
-rw-r--r--arch/xtensa/boot/lib/zlib.c2150
-rw-r--r--arch/xtensa/boot/lib/zmem.c87
-rw-r--r--arch/xtensa/boot/ramdisk/Makefile23
-rw-r--r--arch/xtensa/configs/common_defconfig662
-rw-r--r--arch/xtensa/configs/iss_defconfig531
-rw-r--r--arch/xtensa/kernel/Makefile18
-rw-r--r--arch/xtensa/kernel/align.S459
-rw-r--r--arch/xtensa/kernel/asm-offsets.c94
-rw-r--r--arch/xtensa/kernel/coprocessor.S201
-rw-r--r--arch/xtensa/kernel/entry.S1996
-rw-r--r--arch/xtensa/kernel/head.S237
-rw-r--r--arch/xtensa/kernel/irq.c192
-rw-r--r--arch/xtensa/kernel/module.c78
-rw-r--r--arch/xtensa/kernel/pci-dma.c73
-rw-r--r--arch/xtensa/kernel/pci.c563
-rw-r--r--arch/xtensa/kernel/platform.c49
-rw-r--r--arch/xtensa/kernel/process.c482
-rw-r--r--arch/xtensa/kernel/ptrace.c407
-rw-r--r--arch/xtensa/kernel/semaphore.c226
-rw-r--r--arch/xtensa/kernel/setup.c520
-rw-r--r--arch/xtensa/kernel/signal.c713
-rw-r--r--arch/xtensa/kernel/syscalls.c418
-rw-r--r--arch/xtensa/kernel/syscalls.h248
-rw-r--r--arch/xtensa/kernel/time.c227
-rw-r--r--arch/xtensa/kernel/traps.c498
-rw-r--r--arch/xtensa/kernel/vectors.S464
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S341
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c123
-rw-r--r--arch/xtensa/lib/Makefile7
-rw-r--r--arch/xtensa/lib/checksum.S410
-rw-r--r--arch/xtensa/lib/memcopy.S315
-rw-r--r--arch/xtensa/lib/memset.S160
-rw-r--r--arch/xtensa/lib/pci-auto.c352
-rw-r--r--arch/xtensa/lib/strcasecmp.c32
-rw-r--r--arch/xtensa/lib/strncpy_user.S224
-rw-r--r--arch/xtensa/lib/strnlen_user.S147
-rw-r--r--arch/xtensa/lib/usercopy.S321
-rw-r--r--arch/xtensa/mm/Makefile13
-rw-r--r--arch/xtensa/mm/fault.c241
-rw-r--r--arch/xtensa/mm/init.c551
-rw-r--r--arch/xtensa/mm/misc.S374
-rw-r--r--arch/xtensa/mm/pgtable.c76
-rw-r--r--arch/xtensa/mm/tlb.c545
-rw-r--r--arch/xtensa/platform-iss/Makefile13
-rw-r--r--arch/xtensa/platform-iss/console.c303
-rw-r--r--arch/xtensa/platform-iss/io.c32
-rw-r--r--arch/xtensa/platform-iss/network.c855
-rw-r--r--arch/xtensa/platform-iss/setup.c112
61 files changed, 18549 insertions, 0 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
new file mode 100644
index 000000000000..3e89767cea72
--- /dev/null
+++ b/arch/xtensa/Kconfig
@@ -0,0 +1,258 @@
1# For a description of the syntax of this configuration file,
2# see Documentation/kbuild/config-language.txt.
3
4mainmenu "Linux/Xtensa Kernel Configuration"
5
6config FRAME_POINTER
7 bool
8 default n
9
10config XTENSA
11 bool
12 default y
13 help
14 Xtensa processors are 32-bit RISC machines designed by Tensilica
15 primarily for embedded systems. These processors are both
16 configurable and extensible. The Linux port to the Xtensa
17 architecture supports all processor configurations and extensions,
18 with reasonable minimum requirements. The Xtensa Linux project has
19 a home page at <http://xtensa.sourceforge.net/>.
20
21config UID16
22 bool
23 default n
24
25config RWSEM_XCHGADD_ALGORITHM
26 bool
27 default y
28
29config HAVE_DEC_LOCK
30 bool
31 default y
32
33config GENERIC_HARDIRQS
34 bool
35 default y
36
37source "init/Kconfig"
38
39menu "Processor type and features"
40
41choice
42 prompt "Xtensa Processor Configuration"
43 default XTENSA_CPU_LINUX_BE
44
45config XTENSA_CPU_LINUX_BE
46 bool "linux_be"
47 ---help---
48 The linux_be processor configuration is the baseline Xtensa
49 configurations included in this kernel and also used by
50 binutils, gcc, and gdb. It contains no TIE, no coprocessors,
51 and the following configuration options:
52
53 Code Density Option 2 Misc Special Registers
54 NSA/NSAU Instructions 128-bit Data Bus Width
55 Processor ID 8K, 2-way I and D Caches
56 Zero-Overhead Loops 2 Inst Address Break Registers
57 Big Endian 2 Data Address Break Registers
58 64 General-Purpose Registers JTAG Interface and Trace Port
59 17 Interrupts MMU w/ TLBs and Autorefill
60 3 Interrupt Levels 8 Autorefill Ways (I/D TLBs)
61 3 Timers Unaligned Exceptions
62endchoice
63
64config MMU
65 bool
66 default y
67
68config XTENSA_UNALIGNED_USER
69	bool "Unaligned memory access in user space"
70 ---help---
71 The Xtensa architecture currently does not handle unaligned
72 memory accesses in hardware but through an exception handler.
73 Per default, unaligned memory accesses are disabled in user space.
74
75 Say Y here to enable unaligned memory access in user space.
76
77config PREEMPT
78 bool "Preemptible Kernel"
79 ---help---
80 This option reduces the latency of the kernel when reacting to
81 real-time or interactive events by allowing a low priority process to
82 be preempted even if it is in kernel mode executing a system call.
83 Unfortunately the kernel code has some race conditions if both
84 CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is
85 currently disabled if you are building an SMP kernel.
86
87 Say Y here if you are building a kernel for a desktop, embedded
88 or real-time system. Say N if you are unsure.
89
90config MATH_EMULATION
91 bool "Math emulation"
92 help
93	  Can we use information from the configuration file?
94
95config HIGHMEM
96 bool "High memory support"
97
98endmenu
99
100menu "Platform options"
101
102choice
103 prompt "Xtensa System Type"
104 default XTENSA_PLATFORM_ISS
105
106config XTENSA_PLATFORM_ISS
107 bool "ISS"
108 help
109 ISS is an acronym for Tensilica's Instruction Set Simulator.
110
111config XTENSA_PLATFORM_XT2000
112 bool "XT2000"
113 help
114 XT2000 is the name of Tensilica's feature-rich emulation platform.
115 This hardware is capable of running a full Linux distribution.
116
117endchoice
118
119
120config XTENSA_CALIBRATE_CCOUNT
121 bool "Auto calibration of the CPU clock rate"
122 ---help---
123 On some platforms (XT2000, for example), the CPU clock rate can
124 vary. The frequency can be determined, however, by measuring
125	  against a well known, fixed frequency, such as a UART oscillator.
126
127config XTENSA_CPU_CLOCK
128 int "CPU clock rate [MHz]"
129 depends on !XTENSA_CALIBRATE_CCOUNT
130 default "16"
131
132config GENERIC_CALIBRATE_DELAY
133 bool "Auto calibration of the BogoMIPS value"
134 ---help---
135	  The BogoMIPS value can easily be derived from the CPU frequency.
136
137config CMDLINE_BOOL
138 bool "Default bootloader kernel arguments"
139
140config CMDLINE
141 string "Initial kernel command string"
142 depends on CMDLINE_BOOL
143 default "console=ttyS0,38400 root=/dev/ram"
144 help
145 On some architectures (EBSA110 and CATS), there is currently no way
146 for the boot loader to pass arguments to the kernel. For these
147 architectures, you should supply some command-line options at build
148 time by entering them here. As a minimum, you should specify the
149 memory size and the root device (e.g., mem=64M root=/dev/nfs).
150
151config SERIAL_CONSOLE
152 bool
153 depends on XTENSA_PLATFORM_ISS
154 default y
155
156config XTENSA_ISS_NETWORK
157 bool
158 depends on XTENSA_PLATFORM_ISS
159 default y
160
161endmenu
162
163menu "Bus options"
164
165config PCI
166 bool "PCI support" if !XTENSA_PLATFORM_ISS
167 depends on !XTENSA_PLATFORM_ISS
168 default y
169 help
170 Find out whether you have a PCI motherboard. PCI is the name of a
171 bus system, i.e. the way the CPU talks to the other stuff inside
172 your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
173 VESA. If you have PCI, say Y, otherwise N.
174
175 The PCI-HOWTO, available from
176 <http://www.linuxdoc.org/docs.html#howto>, contains valuable
177 information about which PCI hardware does work under Linux and which
178	  doesn't.
179
180source "drivers/pci/Kconfig"
181
182config HOTPLUG
183
184 bool "Support for hot-pluggable devices"
185 ---help---
186 Say Y here if you want to plug devices into your computer while
187 the system is running, and be able to use them quickly. In many
188 cases, the devices can likewise be unplugged at any time too.
189
190 One well known example of this is PCMCIA- or PC-cards, credit-card
191 size devices such as network cards, modems or hard drives which are
192 plugged into slots found on all modern laptop computers. Another
193 example, used on modern desktops as well as laptops, is USB.
194
195 Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
196 software (at <http://linux-hotplug.sourceforge.net/>) and install it.
197 Then your kernel will automatically call out to a user mode "policy
198 agent" (/sbin/hotplug) to load modules and set up software needed
199 to use devices as you hotplug them.
200
201source "drivers/pcmcia/Kconfig"
202
203source "drivers/pci/hotplug/Kconfig"
204
205endmenu
206
207menu "Executable file formats"
208
209# only elf supported
210config KCORE_ELF
211 bool
212 depends on PROC_FS
213 default y
214 help
215 If you enabled support for /proc file system then the file
216 /proc/kcore will contain the kernel core image in ELF format. This
217 can be used in gdb:
218
219 $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
220
221 This is especially useful if you have compiled the kernel with the
222 "-g" option to preserve debugging information. It is mainly used
223 for examining kernel data structures on the live kernel.
224
225source "fs/Kconfig.binfmt"
226
227endmenu
228
229source "drivers/Kconfig"
230
231source "fs/Kconfig"
232
233menu "Xtensa initrd options"
234 depends on BLK_DEV_INITRD
235
236 config EMBEDDED_RAMDISK
237 bool "Embed root filesystem ramdisk into the kernel"
238
239config EMBEDDED_RAMDISK_IMAGE
240	string "Filename of gzipped ramdisk image"
241 depends on EMBEDDED_RAMDISK
242 default "ramdisk.gz"
243 help
244 This is the filename of the ramdisk image to be built into the
245 kernel. Relative pathnames are relative to arch/xtensa/boot/ramdisk/.
246 The ramdisk image is not part of the kernel distribution; you must
247 provide one yourself.
248endmenu
249
250source "arch/xtensa/Kconfig.debug"
251
252source "security/Kconfig"
253
254source "crypto/Kconfig"
255
256source "lib/Kconfig"
257
258
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
new file mode 100644
index 000000000000..11c585295dd7
--- /dev/null
+++ b/arch/xtensa/Kconfig.debug
@@ -0,0 +1,7 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5endmenu
6
7
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
new file mode 100644
index 000000000000..4fa27453b1f9
--- /dev/null
+++ b/arch/xtensa/Makefile
@@ -0,0 +1,102 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2001 - 2005 Tensilica Inc.
7#
8# This file is included by the global makefile so that you can add your own
9# architecture-specific flags and dependencies. Remember to do have actions
10# for "archclean" and "archdep" for cleaning up and making dependencies for
11# this architecture
12
13# Core configuration.
14# (Use CPU=<xtensa_config> to use another default compiler.)
15
16cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be
17cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom
18
19CPU = $(cpu-y)
20export CPU
21
22# Platform configuration
23
24platform-y := common
25platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
26platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
27
28PLATFORM = $(platform-y)
29export PLATFORM
30
31#LDFLAGS_vmlinux := -T$(word 1,$(LINKSCRIPT))
32AFLAGS_vmlinux.lds.o := -Uxtensa
33CPPFLAGS += -Iarch/xtensa -Iinclude/asm -mlongcalls -g
34AFLAGS += -Iarch/xtensa -Iinclude/asm
35CPP = $(CC) -E $(CFLAGS)
36
37cflags-y += -Iarch/xtensa -pipe -mlongcalls
38
39
40KBUILD_DEFCONFIG := common_defconfig
41
42# ramdisk/initrd support
43# You need a compressed ramdisk image, named ramdisk.gz in
44# arch/xtensa/boot/ramdisk
45
46core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
47
48# Test for cross compiling
49
50ifneq ($(CPU),)
51 COMPILE_ARCH = $(shell uname -m)
52
53 ifneq ($(COMPILE_ARCH), xtensa)
54 ifndef CROSS_COMPILE
55 CROSS_COMPILE = xtensa_$(CPU)-
56 endif
57 endif
58endif
59
60#
61
62LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
63
64head-y := arch/xtensa/kernel/head.o
65core-y += arch/xtensa/kernel/ \
66 arch/xtensa/mm/ arch/xtensa/platform-$(PLATFORM)/
67libs-y += arch/xtensa/lib/ $(LIBGCC)
68
69boot := arch/xtensa/boot
70
71arch/xtensa/kernel/asm-offsets.s: \
72 arch/xtensa/kernel/asm-offsets.c \
73 include/asm-xtensa/.platform
74
75include/asm-xtensa/offsets.h: arch/xtensa/kernel/asm-offsets.s
76 $(call filechk,gen-asm-offsets)
77
78prepare: include/asm-xtensa/.platform include/asm-xtensa/offsets.h
79
80# Update machine cpu and platform symlinks if something which affects
81# them changed.
82
83include/asm-xtensa/.platform: $(wildcard include/config/arch/*.h)
84 @echo ' Setting up cpu ($(CPU)) and platform ($(PLATFORM)) symlinks'
85 $(Q)rm -f include/asm-xtensa/platform
86 $(Q)rm -f include/asm-xtensa/xtensa/config
87 $(Q)(cd include/asm-xtensa/; ln -sf platform-$(PLATFORM) platform)
88 $(Q)(cd include/asm-xtensa/xtensa; ln -sf config-$(CPU) config)
89
90all: zImage
91
92bzImage : zImage
93
94zImage zImage.initrd: vmlinux
95 $(Q)$(MAKE) $(build)=$(boot) $@
96
97CLEAN_FILES += arch/xtensa/vmlinux.lds include/asm-xtensa/offset.h
98
99define archhelp
100 @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
101endef
102
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
new file mode 100644
index 000000000000..260f456ccf0b
--- /dev/null
+++ b/arch/xtensa/boot/Makefile
@@ -0,0 +1,37 @@
1#
2# arch/xtensa/boot/Makefile
3#
4# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive
6# for more details.
7#
8#
9
10
11CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
12HOSTFLAGS += -Iarch/$(ARCH)/boot/include
13
14BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
15
16
17export CFLAGS
18export AFLAGS
19export BIG_ENDIAN
20
21# Subdirs for the boot loader(s)
22
23bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
24bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
25
26subdir-y := lib/
27
28subdir-y += boot-elf/ boot-redboot/
29
30zImage zImage.initrd Image Image.initrd: $(bootdir-y)
31
32$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
33 $(addprefix $(obj)/,$(host-progs))
34 $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
35
36
37
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
new file mode 100644
index 000000000000..f6ef6a369667
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -0,0 +1,52 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9
10ifeq ($(BIG_ENDIAN),1)
11OBJCOPY_ARGS := -O elf32-xtensa-be
12else
13OBJCOPY_ARGS := -O elf32-xtensa-le
14endif
15
16export OBJCOPY_ARGS
17
18boot-y := bootstrap.o
19
20OBJS := $(addprefix $(obj)/,$(boot-y))
21
22Image: vmlinux $(OBJS)
23 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
24 vmlinux vmlinux.tmp
25 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
26 --add-section image=vmlinux.tmp \
27 --set-section-flags image=contents,alloc,load,load,data \
28 $(OBJS) $@.tmp
29 $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
30 -T arch/$(ARCH)/boot/boot-elf/boot.ld \
31 -o arch/$(ARCH)/boot/$@.elf $@.tmp
32 rm -f $@.tmp vmlinux.tmp
33
34Image.initrd: vmlinux $(OBJS)
35 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
36 --add-section .initrd=arch/$(ARCH)/boot/ramdisk \
37 --set-section-flags .initrd=contents,alloc,load,load,data \
38 vmlinux vmlinux.tmp
39 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
40 --add-section image=vmlinux.tmp \
41 --set-section-flags image=contents,alloc,load,load,data \
42 $(OBJS) $@.tmp
43 $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
44 -T arch/$(ARCH)/boot/boot-elf/boot.ld \
45 -o arch/$(ARCH)/boot/$@.elf $@.tmp
46 rm -f $@.tmp vmlinux.tmp
47
48
49zImage: Image
50
51zImage.initrd: Image.initrd
52
diff --git a/arch/xtensa/boot/boot-elf/boot.ld b/arch/xtensa/boot/boot-elf/boot.ld
new file mode 100644
index 000000000000..4ab06a0a7a6b
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/boot.ld
@@ -0,0 +1,71 @@
1OUTPUT_ARCH(xtensa)
2
3SECTIONS
4{
5 .start 0xD0000000 : { *(.start) }
6
7 .text 0xD0000000:
8 {
9 __reloc_start = . ;
10 _text_start = . ;
11 *(.literal .text.literal .text)
12 _text_end = . ;
13 }
14
15 .rodata ALIGN(0x04):
16 {
17 *(.rodata)
18 *(.rodata1)
19 }
20
21 .data ALIGN(0x04):
22 {
23 *(.data)
24 *(.data1)
25 *(.sdata)
26 *(.sdata2)
27 *(.got.plt)
28 *(.got)
29 *(.dynamic)
30 }
31
32 __reloc_end = . ;
33
34 .initrd ALIGN(0x10) :
35 {
36 boot_initrd_start = . ;
37 *(.initrd)
38 boot_initrd_end = .;
39 }
40
41 . = ALIGN(0x10);
42 __image_load = . ;
43 .image 0xd0001000:
44 {
45 _image_start = .;
46 *(image)
47 . = (. + 3) & ~ 3;
48 _image_end = . ;
49 }
50
51
52 .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
53 {
54 __bss_start = .;
55 *(.sbss)
56 *(.scommon)
57 *(.dynbss)
58 *(.bss)
59 __bss_end = .;
60 }
61 _end = .;
62 _param_start = .;
63
64 .ResetVector.text 0xfe000020 :
65 {
66 *(.ResetVector.text)
67 }
68
69
70 PROVIDE (end = .);
71}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
new file mode 100644
index 000000000000..7cba94abdab8
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -0,0 +1,37 @@
1
2#include <xtensa/config/specreg.h>
3#include <xtensa/config/core.h>
4
5#include <linux/config.h>
6#include <asm/bootparam.h>
7
8
9/* ResetVector
10 */
11 .section .ResetVector.text, "ax"
12 .global _ResetVector
13_ResetVector:
14 _j reset
15 .align 4
16RomInitAddr:
17 .word 0xd0001000
18RomBootParam:
19 .word _bootparam
20reset:
21 l32r a0, RomInitAddr
22 l32r a2, RomBootParam
23 movi a3, 0
24 movi a4, 0
25 jx a0
26
27 .align 4
28 .section .bootstrap.data, "aw"
29
30 .globl _bootparam
31_bootparam:
32 .short BP_TAG_FIRST
33 .short 4
34 .long BP_VERSION
35 .short BP_TAG_LAST
36 .short 0
37 .long 0
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
new file mode 100644
index 000000000000..ca8a68bc8472
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -0,0 +1,35 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9ifeq ($(BIG_ENDIAN),1)
10OBJCOPY_ARGS := -O elf32-xtensa-be
11else
12OBJCOPY_ARGS := -O elf32-xtensa-le
13endif
14
15LD_ARGS = -T $(obj)/boot.ld
16
17boot-y := bootstrap.o
18
19OBJS := $(addprefix $(obj)/,$(boot-y))
20LIBS := arch/$(ARCH)/boot/lib/lib.a arch/$(ARCH)/lib/lib.a
21
22LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
23
24zImage: vmlinux $(OBJS) $(LIBS)
25 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
26 $(TOPDIR)/vmlinux vmlinux.tmp
27 gzip -vf9 vmlinux.tmp
28 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
29 --add-section image=vmlinux.tmp.gz \
30 --set-section-flags image=contents,alloc,load,load,data \
31 $(OBJS) $@.tmp
32 $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
33 $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/images/$@.redboot
34# rm -f $@.tmp $@.elf vmlinux.tmp.gz
35
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
new file mode 100644
index 000000000000..65b726410e8a
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -0,0 +1,66 @@
1OUTPUT_ARCH(xtensa)
2
3SECTIONS
4{
5 .start 0xD0200000 : { *(.start) }
6
7 .text :
8 {
9 __reloc_start = . ;
10 _text_start = . ;
11 *(.literal .text.literal .text)
12 _text_end = . ;
13 }
14
15 .rodata ALIGN(0x04):
16 {
17 *(.rodata)
18 *(.rodata1)
19 }
20
21 .data ALIGN(0x04):
22 {
23 *(.data)
24 *(.data1)
25 *(.sdata)
26 *(.sdata2)
27 *(.got.plt)
28 *(.got)
29 *(.dynamic)
30 }
31
32 __reloc_end = . ;
33
34 .initrd ALIGN(0x10) :
35 {
36 boot_initrd_start = . ;
37 *(.initrd)
38 boot_initrd_end = .;
39 }
40
41 . = ALIGN(0x10);
42 __image_load = . ;
43 .image 0xd0001000: AT(__image_load)
44 {
45 _image_start = .;
46 *(image)
47 . = (. + 3) & ~ 3;
48 _image_end = . ;
49 }
50
51
52 .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
53 {
54 __bss_start = .;
55 *(.sbss)
56 *(.scommon)
57 *(.dynbss)
58 *(.bss)
59 __bss_end = .;
60 }
61 _end = .;
62 _param_start = .;
63
64
65 PROVIDE (end = .);
66}
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
new file mode 100644
index 000000000000..ee636b0da81c
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -0,0 +1,246 @@
1
2#define _ASMLANGUAGE
3#include <xtensa/config/specreg.h>
4#include <xtensa/config/core.h>
5#include <xtensa/cacheasm.h>
6
7 /*
8 * RB-Data: RedBoot data/bss
9 * P: Boot-Parameters
10 * L: Kernel-Loader
11 *
12 * The Linux-Kernel image including the loader must be loaded
13 * to a position so that the kernel and the boot parameters
14 * can fit in the space before the load address.
15 * ______________________________________________________
16 * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______|
17 * ^
18 * ^ Load address
19 * ______________________________________________________
20 * |___Linux-Kernel___|_P_|_L_|___________________________|
21 *
22 * The loader copies the parameter to the position that will
23 * be the end of the kernel and itself to the end of the
24 * parameter list.
25 */
26
27/* Make sure we have enough space for the 'uncompressor' */
28
29#define STACK_SIZE 32768
30#define HEAP_SIZE (131072*4)
31
32 # a2: Parameter list
33 # a3: Size of parameter list
34
35 .section .start, "ax"
36
37 .globl __start
38 /* this must be the first byte of the loader! */
39__start:
40 entry sp, 32 # we do not intend to return
41 _call0 _start
42__start_a0:
43 .align 4
44
45 .section .text, "ax"
46 .begin literal_prefix .text
47
48 /* put literals in here! */
49
50 .globl _start
51_start:
52
53 /* 'reset' window registers */
54
55 movi a4, 1
56 wsr a4, PS
57 rsync
58
59 rsr a5, WINDOWBASE
60 ssl a5
61 sll a4, a4
62 wsr a4, WINDOWSTART
63 rsync
64
65 movi a4, 0x00040000
66 wsr a4, PS
67 rsync
68
69 /* copy the loader to its address
70 * Note: The loader itself is a very small piece, so we assume we
71 * don't partially overlap. We also assume (even more important)
72 * that the kernel image is out of the way. Usually, when the
73 * load address of this image is not at an arbitrary address,
74 * but aligned to some 10K's we shouldn't overlap.
75 */
76
77 /* Note: The assembler cannot relax "addi a0, a0, ..." to an
78 l32r, so we load to a4 first. */
79
80 addi a4, a0, __start - __start_a0
81 mov a0, a4
82 movi a4, __start
83 movi a5, __reloc_end
84
85 # a0: address where this code has been loaded
86 # a4: compiled address of __start
87 # a5: compiled end address
88
89 mov.n a7, a0
90 mov.n a8, a4
91
921:
93 l32i a10, a7, 0
94 l32i a11, a7, 4
95 s32i a10, a8, 0
96 s32i a11, a8, 4
97 l32i a10, a7, 8
98 l32i a11, a7, 12
99 s32i a10, a8, 8
100 s32i a11, a8, 12
101 addi a8, a8, 16
102 addi a7, a7, 16
103 blt a8, a5, 1b
104
105
106 /* We have to flush and invalidate the caches here before we jump. */
107
108#if XCHAL_DCACHE_IS_WRITEBACK
109 dcache_writeback_all a5, a6
110#endif
111 icache_invalidate_all a5, a6
112
113 movi a11, _reloc
114 jx a11
115
116 .globl _reloc
117_reloc:
118
119 /* RedBoot is now at the end of the memory, so we don't have
120 * to copy the parameter list. Keep the code around; in case
121 * we need it again. */
122#if 0
123 # a0: load address
124 # a2: start address of parameter list
125 # a3: length of parameter list
126 # a4: __start
127
128 /* copy the parameter list out of the way */
129
130 movi a6, _param_start
131 add a3, a2, a3
1322:
133 l32i a8, a2, 0
134 s32i a8, a6, 0
135 addi a2, a2, 4
136 addi a6, a6, 4
137 blt a2, a3, 2b
138#endif
139
140 /* clear BSS section */
141 movi a6, __bss_start
142 movi a7, __bss_end
143 movi.n a5, 0
1443:
145 s32i a5, a6, 0
146 addi a6, a6, 4
147 blt a6, a7, 3b
148
149 movi a5, -16
150 movi a1, _stack + STACK_SIZE
151 and a1, a1, a5
152
153 /* Uncompress the kernel */
154
155 # a0: load address
156 # a2: boot parameter
157 # a4: __start
158
159 movi a3, __image_load
160 sub a4, a3, a4
161 add a8, a0, a4
162
163 # a1 Stack
164 # a8(a4) Load address of the image
165
166 movi a6, _image_start
167 movi a10, _image_end
168 movi a7, 0x1000000
169 sub a11, a10, a6
170 movi a9, complen
171 s32i a11, a9, 0
172
173 movi a0, 0
174
175 # a6 destination
176 # a7 maximum size of destination
177 # a8 source
178 # a9 ptr to length
179
180 .extern gunzip
181 movi a4, gunzip
182 beqz a4, 1f
183
184 callx4 a4
185
186 j 2f
187
188
189 # a6 destination start
190 # a7 maximum size of destination
191 # a8 source start
192 # a9 ptr to length
193 # a10 destination end
194
1951:
196 l32i a9, a8, 0
197 l32i a11, a8, 4
198 s32i a9, a6, 0
199 s32i a11, a6, 4
200 l32i a9, a8, 8
201 l32i a11, a8, 12
202 s32i a9, a6, 8
203 s32i a11, a6, 12
204 addi a6, a6, 16
205 addi a8, a8, 16
206 blt a6, a10, 1b
207
208
209 /* jump to the kernel */
2102:
211#if XCHAL_DCACHE_IS_WRITEBACK
212 dcache_writeback_all a5, a6
213#endif
214 icache_invalidate_all a5, a6
215
216 movi a5, __start
217 movi a3, boot_initrd_start
218 movi a4, boot_initrd_end
219 sub a3, a3, a5
220 sub a4, a4, a5
221 add a3, a0, a3
222 add a4, a0, a4
223
224 # a2 Boot parameter list
225 # a3 initrd_start (virtual load address)
226 # a4 initrd_end (virtual load address)
227
228 movi a0, _image_start
229 jx a0
230
231 .align 16
232 .data
233 .globl avail_ram
234avail_ram:
235 .long _heap
236 .globl end_avail
237end_avail:
238 .long _heap + HEAP_SIZE
239
240 .comm _stack, STACK_SIZE
241 .comm _heap, HEAP_SIZE
242
243 .globl end_avail
244 .comm complen, 4
245
246 .end literal_prefix
diff --git a/arch/xtensa/boot/include/zlib.h b/arch/xtensa/boot/include/zlib.h
new file mode 100644
index 000000000000..ea29b6237852
--- /dev/null
+++ b/arch/xtensa/boot/include/zlib.h
@@ -0,0 +1,433 @@
1/*
2 * BK Id: SCCS/s.zlib.h 1.8 05/18/01 15:17:23 cort
3 */
4/*
5 * This file is derived from zlib.h and zconf.h from the zlib-0.95
6 * distribution by Jean-loup Gailly and Mark Adler, with some additions
7 * by Paul Mackerras to aid in implementing Deflate compression and
8 * decompression for PPP packets.
9 */
10
11/*
12 * ==FILEVERSION 960122==
13 *
14 * This marker is used by the Linux installation script to determine
15 * whether an up-to-date version of this file is already installed.
16 */
17
18/* zlib.h -- interface of the 'zlib' general purpose compression library
19 version 0.95, Aug 16th, 1995.
20
21 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
22
23 This software is provided 'as-is', without any express or implied
24 warranty. In no event will the authors be held liable for any damages
25 arising from the use of this software.
26
27 Permission is granted to anyone to use this software for any purpose,
28 including commercial applications, and to alter it and redistribute it
29 freely, subject to the following restrictions:
30
31 1. The origin of this software must not be misrepresented; you must not
32 claim that you wrote the original software. If you use this software
33 in a product, an acknowledgment in the product documentation would be
34 appreciated but is not required.
35 2. Altered source versions must be plainly marked as such, and must not be
36 misrepresented as being the original software.
37 3. This notice may not be removed or altered from any source distribution.
38
39 Jean-loup Gailly Mark Adler
40 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
41 */
42
43#ifndef _ZLIB_H
44#define _ZLIB_H
45
46/* #include "zconf.h" */ /* included directly here */
47
48/* zconf.h -- configuration of the zlib compression library
49 * Copyright (C) 1995 Jean-loup Gailly.
50 * For conditions of distribution and use, see copyright notice in zlib.h
51 */
52
53/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
54
55/*
56 The library does not install any signal handler. It is recommended to
57 add at least a handler for SIGSEGV when decompressing; the library checks
58 the consistency of the input data whenever possible but may go nuts
59 for some forms of corrupted input.
60 */
61
62/*
63 * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
64 * than 64k bytes at a time (needed on systems with 16-bit int).
65 * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
66 * at addresses which are not a multiple of their size.
67 * Under DOS, -DFAR=far or -DFAR=__far may be needed.
68 */
69
70#ifndef STDC
71# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
72# define STDC
73# endif
74#endif
75
76#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
77# include <unix.h>
78#endif
79
80/* Maximum value for memLevel in deflateInit2 */
81#ifndef MAX_MEM_LEVEL
82# ifdef MAXSEG_64K
83# define MAX_MEM_LEVEL 8
84# else
85# define MAX_MEM_LEVEL 9
86# endif
87#endif
88
89#ifndef FAR
90# define FAR
91#endif
92
93/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
94#ifndef MAX_WBITS
95# define MAX_WBITS 15 /* 32K LZ77 window */
96#endif
97
98/* The memory requirements for deflate are (in bytes):
99 1 << (windowBits+2) + 1 << (memLevel+9)
100 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
101 plus a few kilobytes for small objects. For example, if you want to reduce
102 the default memory requirements from 256K to 128K, compile with
103 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
104 Of course this will generally degrade compression (there's no free lunch).
105
106 The memory requirements for inflate are (in bytes) 1 << windowBits
107 that is, 32K for windowBits=15 (default value) plus a few kilobytes
108 for small objects.
109*/
110
111 /* Type declarations */
112
113#ifndef OF /* function prototypes */
114# ifdef STDC
115# define OF(args) args
116# else
117# define OF(args) ()
118# endif
119#endif
120
121typedef unsigned char Byte; /* 8 bits */
122typedef unsigned int uInt; /* 16 bits or more */
123typedef unsigned long uLong; /* 32 bits or more */
124
125typedef Byte FAR Bytef;
126typedef char FAR charf;
127typedef int FAR intf;
128typedef uInt FAR uIntf;
129typedef uLong FAR uLongf;
130
131#ifdef STDC
132 typedef void FAR *voidpf;
133 typedef void *voidp;
134#else
135 typedef Byte FAR *voidpf;
136 typedef Byte *voidp;
137#endif
138
139/* end of original zconf.h */
140
141#define ZLIB_VERSION "0.95P"
142
143/*
144 The 'zlib' compression library provides in-memory compression and
145 decompression functions, including integrity checks of the uncompressed
146 data. This version of the library supports only one compression method
147 (deflation) but other algorithms may be added later and will have the same
148 stream interface.
149
150 For compression the application must provide the output buffer and
151 may optionally provide the input buffer for optimization. For decompression,
152 the application must provide the input buffer and may optionally provide
153 the output buffer for optimization.
154
155 Compression can be done in a single step if the buffers are large
156 enough (for example if an input file is mmap'ed), or can be done by
157 repeated calls of the compression function. In the latter case, the
158 application must provide more input and/or consume the output
159 (providing more output space) before each call.
160*/
161
162typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
163typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
164
165struct internal_state;
166
/*
 * Stream descriptor shared between the application and the library.
 * The application maintains next_in/avail_in and next_out/avail_out;
 * the library maintains the totals, msg and the opaque internal state
 * (see the usage comment that follows this definition).
 */
 167typedef struct z_stream_s {
 168    Bytef    *next_in;  /* next input byte */
 169    uInt     avail_in;  /* number of bytes available at next_in */
 170    uLong    total_in;  /* total nb of input bytes read so far */
 171
 172    Bytef    *next_out; /* next output byte should be put there */
 173    uInt     avail_out; /* remaining free space at next_out */
 174    uLong    total_out; /* total nb of bytes output so far */
 175
 176    char     *msg;      /* last error message, NULL if no error */
 177    struct internal_state FAR *state; /* not visible by applications */
 178
 179    alloc_func zalloc;  /* used to allocate the internal state */
 180    free_func  zfree;   /* used to free the internal state */
 181    voidp      opaque;  /* private data object passed to zalloc and zfree */
 182
 183    Byte     data_type; /* best guess about the data type: ascii or binary */
 184
 185} z_stream;
186
187/*
188 The application must update next_in and avail_in when avail_in has
189 dropped to zero. It must update next_out and avail_out when avail_out
190 has dropped to zero. The application must initialize zalloc, zfree and
191 opaque before calling the init function. All other fields are set by the
192 compression library and must not be updated by the application.
193
194 The opaque value provided by the application will be passed as the first
195 parameter for calls of zalloc and zfree. This can be useful for custom
196 memory management. The compression library attaches no meaning to the
197 opaque value.
198
199 zalloc must return Z_NULL if there is not enough memory for the object.
200 On 16-bit systems, the functions zalloc and zfree must be able to allocate
201 exactly 65536 bytes, but will not be required to allocate more than this
202 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
203 pointers returned by zalloc for objects of exactly 65536 bytes *must*
204 have their offset normalized to zero. The default allocation function
205 provided by this library ensures this (see zutil.c). To reduce memory
206 requirements and avoid any allocation of 64K objects, at the expense of
207 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
208
209 The fields total_in and total_out can be used for statistics or
210 progress reports. After compression, total_in holds the total size of
211 the uncompressed data and may be saved for use in the decompressor
212 (particularly if the decompressor wants to decompress everything in
213 a single step).
214*/
215
216 /* constants */
217
218#define Z_NO_FLUSH 0
219#define Z_PARTIAL_FLUSH 1
220#define Z_FULL_FLUSH 2
221#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
222#define Z_FINISH 4
223#define Z_PACKET_FLUSH 5
224/* See deflate() below for the usage of these constants */
225
226#define Z_OK 0
227#define Z_STREAM_END 1
228#define Z_ERRNO (-1)
229#define Z_STREAM_ERROR (-2)
230#define Z_DATA_ERROR (-3)
231#define Z_MEM_ERROR (-4)
232#define Z_BUF_ERROR (-5)
233/* error codes for the compression/decompression functions */
234
235#define Z_BEST_SPEED 1
236#define Z_BEST_COMPRESSION 9
237#define Z_DEFAULT_COMPRESSION (-1)
238/* compression levels */
239
240#define Z_FILTERED 1
241#define Z_HUFFMAN_ONLY 2
242#define Z_DEFAULT_STRATEGY 0
243
244#define Z_BINARY 0
245#define Z_ASCII 1
246#define Z_UNKNOWN 2
247/* Used to set the data_type field */
248
249#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
250
251extern char *zlib_version;
252/* The application can compare zlib_version and ZLIB_VERSION for consistency.
253 If the first character differs, the library code actually used is
254 not compatible with the zlib.h header file used by the application.
255 */
256
257 /* basic functions */
258
259extern int inflateInit OF((z_stream *strm));
260/*
261 Initializes the internal stream state for decompression. The fields
262 zalloc and zfree must be initialized before by the caller. If zalloc and
263 zfree are set to Z_NULL, inflateInit updates them to use default allocation
264 functions.
265
266 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
267 enough memory. msg is set to null if there is no error message.
268 inflateInit does not perform any decompression: this will be done by
269 inflate().
270*/
271
272
273extern int inflate OF((z_stream *strm, int flush));
274/*
275 Performs one or both of the following actions:
276
277 - Decompress more input starting at next_in and update next_in and avail_in
278 accordingly. If not all input can be processed (because there is not
279 enough room in the output buffer), next_in is updated and processing
280 will resume at this point for the next call of inflate().
281
282 - Provide more output starting at next_out and update next_out and avail_out
283 accordingly. inflate() always provides as much output as possible
284 (until there is no more input data or no more space in the output buffer).
285
286 Before the call of inflate(), the application should ensure that at least
287 one of the actions is possible, by providing more input and/or consuming
288 more output, and updating the next_* and avail_* values accordingly.
289 The application can consume the uncompressed output when it wants, for
290 example when the output buffer is full (avail_out == 0), or after each
291 call of inflate().
292
293 If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
294 inflate flushes as much output as possible to the output buffer. The
295 flushing behavior of inflate is not specified for values of the flush
296 parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
297 current implementation actually flushes as much output as possible
298 anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
299 has been consumed, it is expecting to see the length field of a stored
300 block; if not, it returns Z_DATA_ERROR.
301
302 inflate() should normally be called until it returns Z_STREAM_END or an
303 error. However if all decompression is to be performed in a single step
304 (a single call of inflate), the parameter flush should be set to
305 Z_FINISH. In this case all pending input is processed and all pending
306 output is flushed; avail_out must be large enough to hold all the
307 uncompressed data. (The size of the uncompressed data may have been saved
308 by the compressor for this purpose.) The next operation on this stream must
309 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
310 is never required, but can be used to inform inflate that a faster routine
311 may be used for the single inflate() call.
312
313 inflate() returns Z_OK if some progress has been made (more input
314 processed or more output produced), Z_STREAM_END if the end of the
315 compressed data has been reached and all uncompressed output has been
316 produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
317 the stream structure was inconsistent (for example if next_in or next_out
318 was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
319 progress is possible or if there was not enough room in the output buffer
320 when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
321 call inflateSync to look for a good compression block. */
322
323
324extern int inflateEnd OF((z_stream *strm));
325/*
326 All dynamically allocated data structures for this stream are freed.
327 This function discards any unprocessed input and does not flush any
328 pending output.
329
330 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
331 was inconsistent. In the error case, msg may be set but then points to a
332 static string (which must not be deallocated).
333*/
334
335 /* advanced functions */
336
337extern int inflateInit2 OF((z_stream *strm,
338 int windowBits));
339/*
340 This is another version of inflateInit with more compression options. The
341 fields next_out, zalloc and zfree must be initialized before by the caller.
342
343 The windowBits parameter is the base two logarithm of the maximum window
344 size (the size of the history buffer). It should be in the range 8..15 for
345 this version of the library (the value 16 will be allowed soon). The
346 default value is 15 if inflateInit is used instead. If a compressed stream
347 with a larger window size is given as input, inflate() will return with
348 the error code Z_DATA_ERROR instead of trying to allocate a larger window.
349
350 If next_out is not null, the library will use this buffer for the history
351 buffer; the buffer must either be large enough to hold the entire output
352 data, or have at least 1<<windowBits bytes. If next_out is null, the
353 library will allocate its own buffer (and leave next_out null). next_in
354 need not be provided here but must be provided by the application for the
355 next call of inflate().
356
357 If the history buffer is provided by the application, next_out must
358 never be changed by the application since the decompressor maintains
359 history information inside this buffer from call to call; the application
360 can only reset next_out to the beginning of the history buffer when
361 avail_out is zero and all output has been consumed.
362
363 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
364 not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
365 windowBits < 8). msg is set to null if there is no error message.
366 inflateInit2 does not perform any decompression: this will be done by
367 inflate().
368*/
369
370extern int inflateSync OF((z_stream *strm));
371/*
372 Skips invalid compressed data until the special marker (see deflate()
373 above) can be found, or until all available input is skipped. No output
374 is provided.
375
376 inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
377 if no more input was provided, Z_DATA_ERROR if no marker has been found,
378 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
379 case, the application may save the current value of total_in which
380 indicates where valid compressed data was found. In the error case, the
381 application may repeatedly call inflateSync, providing more input each time,
382 until success or end of the input data.
383*/
384
385extern int inflateReset OF((z_stream *strm));
386/*
387 This function is equivalent to inflateEnd followed by inflateInit,
388 but does not free and reallocate all the internal decompression state.
389 The stream will keep attributes that may have been set by inflateInit2.
390
391 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
392 stream state was inconsistent (such as zalloc or state being NULL).
393*/
394
395extern int inflateIncomp OF((z_stream *strm));
396/*
397 This function adds the data at next_in (avail_in bytes) to the output
398 history without performing any output. There must be no pending output,
399 and the decompressor must be expecting to see the start of a block.
400 Calling this function is equivalent to decompressing a stored block
401 containing the data at next_in (except that the data is not output).
402*/
403
404 /* checksum functions */
405
406/*
407 This function is not related to compression but is exported
408 anyway because it might be useful in applications using the
409 compression library.
410*/
411
412extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
413
414/*
415 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
416 return the updated checksum. If buf is NULL, this function returns
417 the required initial value for the checksum.
418 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
419 much faster. Usage example:
420
421 uLong adler = adler32(0L, Z_NULL, 0);
422
423 while (read_buffer(buffer, length) != EOF) {
424 adler = adler32(adler, buffer, length);
425 }
426 if (adler != original_adler) error();
427*/
428
429#ifndef _Z_UTIL_H
430 struct internal_state {int dummy;}; /* hack for buggy compilers */
431#endif
432
433#endif /* _ZLIB_H */
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
new file mode 100644
index 000000000000..c0a74dc3a0df
--- /dev/null
+++ b/arch/xtensa/boot/lib/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for some libs needed by zImage.
3#
4
5
6lib-y := zlib.o zmem.o
diff --git a/arch/xtensa/boot/lib/memcpy.S b/arch/xtensa/boot/lib/memcpy.S
new file mode 100644
index 000000000000..a029f5df2d5c
--- /dev/null
+++ b/arch/xtensa/boot/lib/memcpy.S
@@ -0,0 +1,36 @@
1/*
2 *  arch/xtensa/boot/lib/memcpy.S
3 *
4 * ANSI C standard library function memcpy
5 *
6 * This file is subject to the terms and conditions of the GNU General
7 * Public License. See the file "COPYING" in the main directory of
8 * this archive for more details.
9 *
10 * Copyright (C) 2002 Tensilica Inc.
11 */
12
13#define _ASMLANGUAGE
14#include <xtensa/config/core.h>
15
16.text
17.align	4
18.global	bcopy
19.type	bcopy,@function
// bcopy: tail-call trampoline into the Xtensa core HAL's xthal_bcopy.
// No stack frame is built here, so the caller's argument registers are
// passed through to the HAL routine unchanged.
20bcopy:
21	movi	a14, xthal_bcopy	// a14 safe to use regardless of whether caller
22					//  used call4 or call8 (can't have used call12)
23	jx	a14			// let the Core HAL do the work
24
25.text
26.align	4
27.global	memcpy
28.type	memcpy,@function
// memcpy and memmove share a single entry point: both tail-call into the
// Xtensa core HAL's xthal_memcpy without building a frame.
// NOTE(review): aliasing memmove to memcpy is only correct if xthal_memcpy
// tolerates overlapping source/destination regions — confirm against the
// Core HAL documentation.
29memcpy:
30.global	memmove
31.type	memmove,@function
32memmove:
33	movi	a14, xthal_memcpy	// a14 safe to use regardless of whether caller
34					//  used call4 or call8 (can't have used call12)
35	jx	a14			// let the Core HAL do the work
36
diff --git a/arch/xtensa/boot/lib/zlib.c b/arch/xtensa/boot/lib/zlib.c
new file mode 100644
index 000000000000..e3859f631077
--- /dev/null
+++ b/arch/xtensa/boot/lib/zlib.c
@@ -0,0 +1,2150 @@
1/*
2 * BK Id: SCCS/s.zlib.c 1.8 05/18/01 15:17:24 cort
3 */
4/*
5 * This file is derived from various .h and .c files from the zlib-0.95
6 * distribution by Jean-loup Gailly and Mark Adler, with some additions
7 * by Paul Mackerras to aid in implementing Deflate compression and
8 * decompression for PPP packets. See zlib.h for conditions of
9 * distribution and use.
10 *
11 * Changes that have been made include:
12 * - changed functions not used outside this file to "local"
13 * - added minCompression parameter to deflateInit2
14 * - added Z_PACKET_FLUSH (see zlib.h for details)
15 * - added inflateIncomp
16 *
17 */
18
19/*+++++*/
20/* zutil.h -- internal interface and configuration of the compression library
21 * Copyright (C) 1995 Jean-loup Gailly.
22 * For conditions of distribution and use, see copyright notice in zlib.h
23 */
24
25/* WARNING: this file should *not* be used by applications. It is
26 part of the implementation of the compression library and is
27 subject to change. Applications should only use zlib.h.
28 */
29
30/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
31
32#define _Z_UTIL_H
33
34#include "zlib.h"
35
36#ifndef local
37# define local static
38#endif
39/* compile with -Dlocal if your debugger can't find static symbols */
40
41#define FAR
42
43typedef unsigned char uch;
44typedef uch FAR uchf;
45typedef unsigned short ush;
46typedef ush FAR ushf;
47typedef unsigned long ulg;
48
49extern char *z_errmsg[]; /* indexed by 1-zlib_error */
50
51#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
52/* To be used only when the state is known to be valid */
53
54#ifndef NULL
55#define NULL ((void *) 0)
56#endif
57
58 /* common constants */
59
60#define DEFLATED 8
61
62#ifndef DEF_WBITS
63# define DEF_WBITS MAX_WBITS
64#endif
65/* default windowBits for decompression. MAX_WBITS is for compression only */
66
67#if MAX_MEM_LEVEL >= 8
68# define DEF_MEM_LEVEL 8
69#else
70# define DEF_MEM_LEVEL MAX_MEM_LEVEL
71#endif
72/* default memLevel */
73
74#define STORED_BLOCK 0
75#define STATIC_TREES 1
76#define DYN_TREES 2
77/* The three kinds of block type */
78
79#define MIN_MATCH 3
80#define MAX_MATCH 258
81/* The minimum and maximum match lengths */
82
83 /* functions */
84
85#include <linux/string.h>
86#define zmemcpy memcpy
87#define zmemzero(dest, len) memset(dest, 0, len)
88
89/* Diagnostic functions */
90#ifdef DEBUG_ZLIB
91# include <stdio.h>
92# ifndef verbose
93# define verbose 0
94# endif
95# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
96# define Trace(x) fprintf x
97# define Tracev(x) {if (verbose) fprintf x ;}
98# define Tracevv(x) {if (verbose>1) fprintf x ;}
99# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
100# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
101#else
102# define Assert(cond,msg)
103# define Trace(x)
104# define Tracev(x)
105# define Tracevv(x)
106# define Tracec(c,x)
107# define Tracecv(c,x)
108#endif
109
110
111typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
112
113/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
114/* void zcfree OF((voidpf opaque, voidpf ptr)); */
115
116#define ZALLOC(strm, items, size) \
117 (*((strm)->zalloc))((strm)->opaque, (items), (size))
118#define ZFREE(strm, addr, size) \
119 (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
120#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
121
122/* deflate.h -- internal compression state
123 * Copyright (C) 1995 Jean-loup Gailly
124 * For conditions of distribution and use, see copyright notice in zlib.h
125 */
126
127/* WARNING: this file should *not* be used by applications. It is
128 part of the implementation of the compression library and is
129 subject to change. Applications should only use zlib.h.
130 */
131
132/*+++++*/
133/* infblock.h -- header to use infblock.c
134 * Copyright (C) 1995 Mark Adler
135 * For conditions of distribution and use, see copyright notice in zlib.h
136 */
137
138/* WARNING: this file should *not* be used by applications. It is
139 part of the implementation of the compression library and is
140 subject to change. Applications should only use zlib.h.
141 */
142
143struct inflate_blocks_state;
144typedef struct inflate_blocks_state FAR inflate_blocks_statef;
145
146local inflate_blocks_statef * inflate_blocks_new OF((
147 z_stream *z,
148 check_func c, /* check function */
149 uInt w)); /* window size */
150
151local int inflate_blocks OF((
152 inflate_blocks_statef *,
153 z_stream *,
154 int)); /* initial return code */
155
156local void inflate_blocks_reset OF((
157 inflate_blocks_statef *,
158 z_stream *,
159 uLongf *)); /* check value on output */
160
161local int inflate_blocks_free OF((
162 inflate_blocks_statef *,
163 z_stream *,
164 uLongf *)); /* check value on output */
165
166local int inflate_addhistory OF((
167 inflate_blocks_statef *,
168 z_stream *));
169
170local int inflate_packet_flush OF((
171 inflate_blocks_statef *));
172
173/*+++++*/
174/* inftrees.h -- header to use inftrees.c
175 * Copyright (C) 1995 Mark Adler
176 * For conditions of distribution and use, see copyright notice in zlib.h
177 */
178
179/* WARNING: this file should *not* be used by applications. It is
180 part of the implementation of the compression library and is
181 subject to change. Applications should only use zlib.h.
182 */
183
184/* Huffman code lookup table entry--this entry is four bytes for machines
185 that have 16-bit pointers (e.g. PC's in the small or medium model). */
186
187typedef struct inflate_huft_s FAR inflate_huft;
188
189struct inflate_huft_s {
190 union {
191 struct {
192 Byte Exop; /* number of extra bits or operation */
193 Byte Bits; /* number of bits in this code or subcode */
194 } what;
195 uInt Nalloc; /* number of these allocated here */
196 Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
197 } word; /* 16-bit, 8 bytes for 32-bit machines) */
198 union {
199 uInt Base; /* literal, length base, or distance base */
200 inflate_huft *Next; /* pointer to next level of table */
201 } more;
202};
203
204#ifdef DEBUG_ZLIB
205 local uInt inflate_hufts;
206#endif
207
208local int inflate_trees_bits OF((
209 uIntf *, /* 19 code lengths */
210 uIntf *, /* bits tree desired/actual depth */
211 inflate_huft * FAR *, /* bits tree result */
212 z_stream *)); /* for zalloc, zfree functions */
213
214local int inflate_trees_dynamic OF((
215 uInt, /* number of literal/length codes */
216 uInt, /* number of distance codes */
217 uIntf *, /* that many (total) code lengths */
218 uIntf *, /* literal desired/actual bit depth */
219 uIntf *, /* distance desired/actual bit depth */
220 inflate_huft * FAR *, /* literal/length tree result */
221 inflate_huft * FAR *, /* distance tree result */
222 z_stream *)); /* for zalloc, zfree functions */
223
224local int inflate_trees_fixed OF((
225 uIntf *, /* literal desired/actual bit depth */
226 uIntf *, /* distance desired/actual bit depth */
227 inflate_huft * FAR *, /* literal/length tree result */
228 inflate_huft * FAR *)); /* distance tree result */
229
230local int inflate_trees_free OF((
231 inflate_huft *, /* tables to free */
232 z_stream *)); /* for zfree function */
233
234
235/*+++++*/
236/* infcodes.h -- header to use infcodes.c
237 * Copyright (C) 1995 Mark Adler
238 * For conditions of distribution and use, see copyright notice in zlib.h
239 */
240
241/* WARNING: this file should *not* be used by applications. It is
242 part of the implementation of the compression library and is
243 subject to change. Applications should only use zlib.h.
244 */
245
246struct inflate_codes_state;
247typedef struct inflate_codes_state FAR inflate_codes_statef;
248
249local inflate_codes_statef *inflate_codes_new OF((
250 uInt, uInt,
251 inflate_huft *, inflate_huft *,
252 z_stream *));
253
254local int inflate_codes OF((
255 inflate_blocks_statef *,
256 z_stream *,
257 int));
258
259local void inflate_codes_free OF((
260 inflate_codes_statef *,
261 z_stream *));
262
263
264/*+++++*/
265/* inflate.c -- zlib interface to inflate modules
266 * Copyright (C) 1995 Mark Adler
267 * For conditions of distribution and use, see copyright notice in zlib.h
268 */
269
270/* inflate private state */
/*
 * Private decompression state: a mode automaton ('mode') plus a union of
 * per-mode scratch data ('sub') — only the member matching the current
 * mode is meaningful at any time.
 */
 271struct internal_state {
 272
 273  /* mode */
 274  enum {
 275      METHOD,	/* waiting for method byte */
 276      FLAG,	/* waiting for flag byte */
 277      BLOCKS,	/* decompressing blocks */
 278      CHECK4,	/* four check bytes to go */
 279      CHECK3,	/* three check bytes to go */
 280      CHECK2,	/* two check bytes to go */
 281      CHECK1,	/* one check byte to go */
 282      DONE,	/* finished check, done */
 283      BAD}	/* got an error--stay here */
 284    mode;		/* current inflate mode */
 285
 286  /* mode dependent information */
 287  union {
 288    uInt method;	/* if FLAGS, method byte */
 289    struct {
 290      uLong was;		/* computed check value */
 291      uLong need;		/* stream check value */
 292    } check;		/* if CHECK, check values to compare */
 293    uInt marker;	/* if BAD, inflateSync's marker bytes count */
 294  } sub;	/* submode */
 295
 296  /* mode independent information */
 297  int  nowrap;	/* flag for no wrapper */
 298  uInt wbits;	/* log2(window size)  (8..15, defaults to 15) */
 299  inflate_blocks_statef
 300    *blocks;	/* current inflate_blocks state */
 301
 302};
303
304
/*
 * Reset the stream for a new decompression run while keeping all allocated
 * internal state (cheaper than inflateEnd + inflateInit).  Clears the byte
 * counters and error message, restarts the mode machine (skipping the zlib
 * header when nowrap is set), and resets the blocks sub-state; the check
 * value returned through 'c' is deliberately discarded.
 * Returns Z_OK, or Z_STREAM_ERROR if z or z->state is NULL.
 */
 305int inflateReset(z)
 306z_stream *z;
 307{
 308  uLong c;
 309
 310  if (z == Z_NULL || z->state == Z_NULL)
 311    return Z_STREAM_ERROR;
 312  z->total_in = z->total_out = 0;
 313  z->msg = Z_NULL;
 314  z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
 315  inflate_blocks_reset(z->state->blocks, z, &c);
 316  Trace((stderr, "inflate: reset\n"));
 317  return Z_OK;
 318}
319
320
/*
 * Free all decompression state for this stream: the blocks sub-state first
 * (if any), then the internal_state itself via the caller-supplied zfree.
 * z->state is nulled afterwards so a repeated call returns Z_STREAM_ERROR
 * instead of double-freeing.
 * Returns Z_OK, or Z_STREAM_ERROR if z, z->state or z->zfree is NULL.
 */
 321int inflateEnd(z)
 322z_stream *z;
 323{
 324  uLong c;
 325
 326  if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
 327    return Z_STREAM_ERROR;
 328  if (z->state->blocks != Z_NULL)
 329    inflate_blocks_free(z->state->blocks, z, &c);
 330  ZFREE(z, z->state, sizeof(struct internal_state));
 331  z->state = Z_NULL;
 332  Trace((stderr, "inflate: end\n"));
 333  return Z_OK;
 334}
335
336
/*
 * Initialize a stream for decompression with an explicit window size.
 * 'w' is log2 of the window (8..15); a NEGATIVE w selects the undocumented
 * "nowrap" mode (raw deflate: no zlib header or trailing check) with
 * window size -w.  On any failure after the state is allocated, the state
 * is torn down again via inflateEnd before returning.
 * Returns Z_OK, Z_MEM_ERROR (allocation failed) or Z_STREAM_ERROR
 * (z NULL, or window size out of range).
 */
 337int inflateInit2(z, w)
 338z_stream *z;
 339int w;
 340{
 341  /* initialize state */
 342  if (z == Z_NULL)
 343    return Z_STREAM_ERROR;
 344/*  if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
 345/*  if (z->zfree == Z_NULL) z->zfree = zcfree; */
 346  if ((z->state = (struct internal_state FAR *)
 347       ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
 348    return Z_MEM_ERROR;
 349  z->state->blocks = Z_NULL;
 350
 351  /* handle undocumented nowrap option (no zlib header or check) */
 352  z->state->nowrap = 0;
 353  if (w < 0)
 354  {
 355    w = - w;
 356    z->state->nowrap = 1;
 357  }
 358
 359  /* set window size */
 360  if (w < 8 || w > 15)
 361  {
 362    inflateEnd(z);
 363    return Z_STREAM_ERROR;
 364  }
 365  z->state->wbits = (uInt)w;
 366
 367  /* create inflate_blocks state */
 368  if ((z->state->blocks =
 369       inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
 370      == Z_NULL)
 371  {
 372    inflateEnd(z);
 373    return Z_MEM_ERROR;
 374  }
 375  Trace((stderr, "inflate: allocated\n"));
 376
 377  /* reset state */
 378  inflateReset(z);
 379  return Z_OK;
 380}
381
382
/*
 * Convenience wrapper: initialize decompression with the default window
 * size (DEF_WBITS).  Same return values as inflateInit2.
 */
 383int inflateInit(z)
 384z_stream *z;
 385{
 386  return inflateInit2(z, DEF_WBITS);
 387}
388
389
/* NEEDBYTE: if no input is left, jump to the 'empty' handler at the bottom
   of inflate(); otherwise mark that progress is possible (r = Z_OK). */
 390#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
/* NEXTBYTE: consume one input byte, updating avail_in/total_in. */
 391#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
 392
/*
 * Main decompression entry point.  Drives the internal_state mode machine:
 * METHOD/FLAG parse the 2-byte zlib header, BLOCKS runs the block
 * decompressor, CHECK4..CHECK1 read the big-endian adler32 trailer and
 * compare it against the computed value.  The switch cases deliberately
 * fall through so one call advances as far as the available input allows;
 * NEEDBYTE bails out to 'empty' whenever input runs dry mid-state.
 * 'f' is the flush mode; Z_PACKET_FLUSH gets special handling in BLOCKS
 * and at 'empty' (running out of input is then a Z_DATA_ERROR).
 */
 393int inflate(z, f)
 394z_stream *z;
 395int f;
 396{
 397  int r;
 398  uInt b;
 399
 400  if (z == Z_NULL || z->next_in == Z_NULL)
 401    return Z_STREAM_ERROR;
 402  r = Z_BUF_ERROR;
 403  while (1) switch (z->state->mode)
 404  {
 405    case METHOD:
 406      NEEDBYTE
 407      if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
 408      {
 409        z->state->mode = BAD;
 410        z->msg = "unknown compression method";
 411        z->state->sub.marker = 5;       /* can't try inflateSync */
 412        break;
 413      }
 414      if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
 415      {
 416        z->state->mode = BAD;
 417        z->msg = "invalid window size";
 418        z->state->sub.marker = 5;       /* can't try inflateSync */
 419        break;
 420      }
 421      z->state->mode = FLAG;
      /* fall through */
 422    case FLAG:
 423      NEEDBYTE
 424      if ((b = NEXTBYTE) & 0x20)
 425      {
 426        z->state->mode = BAD;
 427        z->msg = "invalid reserved bit";
 428        z->state->sub.marker = 5;       /* can't try inflateSync */
 429        break;
 430      }
 431      if (((z->state->sub.method << 8) + b) % 31)
 432      {
 433        z->state->mode = BAD;
 434        z->msg = "incorrect header check";
 435        z->state->sub.marker = 5;       /* can't try inflateSync */
 436        break;
 437      }
 438      Trace((stderr, "inflate: zlib header ok\n"));
 439      z->state->mode = BLOCKS;
      /* fall through */
 440    case BLOCKS:
 441      r = inflate_blocks(z->state->blocks, z, r);
 442      if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
 443	  r = inflate_packet_flush(z->state->blocks);
 444      if (r == Z_DATA_ERROR)
 445      {
 446        z->state->mode = BAD;
 447        z->state->sub.marker = 0;       /* can try inflateSync */
 448        break;
 449      }
 450      if (r != Z_STREAM_END)
 451        return r;
 452      r = Z_OK;
      /* end of deflate data: capture the computed check value, then either
         finish (nowrap) or go read the 4-byte stream check */
 453      inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
 454      if (z->state->nowrap)
 455      {
 456        z->state->mode = DONE;
 457        break;
 458      }
 459      z->state->mode = CHECK4;
      /* fall through */
 460    case CHECK4:
 461      NEEDBYTE
 462      z->state->sub.check.need = (uLong)NEXTBYTE << 24;
 463      z->state->mode = CHECK3;
      /* fall through */
 464    case CHECK3:
 465      NEEDBYTE
 466      z->state->sub.check.need += (uLong)NEXTBYTE << 16;
 467      z->state->mode = CHECK2;
      /* fall through */
 468    case CHECK2:
 469      NEEDBYTE
 470      z->state->sub.check.need += (uLong)NEXTBYTE << 8;
 471      z->state->mode = CHECK1;
      /* fall through */
 472    case CHECK1:
 473      NEEDBYTE
 474      z->state->sub.check.need += (uLong)NEXTBYTE;
 475
 476      if (z->state->sub.check.was != z->state->sub.check.need)
 477      {
 478        z->state->mode = BAD;
 479        z->msg = "incorrect data check";
 480        z->state->sub.marker = 5;       /* can't try inflateSync */
 481        break;
 482      }
 483      Trace((stderr, "inflate: zlib check ok\n"));
 484      z->state->mode = DONE;
      /* fall through */
 485    case DONE:
 486      return Z_STREAM_END;
 487    case BAD:
 488      return Z_DATA_ERROR;
 489    default:
 490      return Z_STREAM_ERROR;
 491  }
 492
 493 empty:
 494  if (f != Z_PACKET_FLUSH)
 495    return r;
 496  z->state->mode = BAD;
 497  z->state->sub.marker = 0;       /* can try inflateSync */
 498  return Z_DATA_ERROR;
 499}
500
501/*
502 * This subroutine adds the data at next_in/avail_in to the output history
503 * without performing any output. The output buffer must be "caught up";
504 * i.e. no pending output (hence s->read equals s->write), and the state must
505 * be BLOCKS (i.e. we should be willing to see the start of a series of
506 * BLOCKS). On exit, the output will also be caught up, and the checksum
507 * will have been updated if need be.
508 */
509
510int inflateIncomp(z)
511z_stream *z;
512{
513 if (z->state->mode != BLOCKS)
514 return Z_DATA_ERROR;
515 return inflate_addhistory(z->state->blocks, z);
516}
517
518
/*
 * Try to resynchronize after a data error by scanning the input for
 * the byte pattern 00 00 FF FF (the empty stored block that deflate
 * emits on a full flush).  sub.marker records how many pattern bytes
 * have been matched so far, persisting across calls (a value of 5 set
 * elsewhere means sync is impossible and keeps m != 4 here).
 * Returns Z_OK once the marker is found and the stream is restarted
 * at BLOCKS, Z_BUF_ERROR when there is no input, Z_DATA_ERROR when
 * the marker was not found in the available input.
 */
int inflateSync(z)
z_stream *z;
{
  uInt n;       /* number of bytes to look at */
  Bytef *p;     /* pointer to bytes */
  uInt m;       /* number of marker bytes found in a row */
  uLong r, w;   /* temporaries to save total_in and total_out */

  /* set up */
  if (z == Z_NULL || z->state == Z_NULL)
    return Z_STREAM_ERROR;
  if (z->state->mode != BAD)
  {
    z->state->mode = BAD;
    z->state->sub.marker = 0;
  }
  if ((n = z->avail_in) == 0)
    return Z_BUF_ERROR;
  p = z->next_in;
  m = z->state->sub.marker;

  /* search: bytes 0..1 of the marker are 0x00, bytes 2..3 are 0xff */
  while (n && m < 4)
  {
    if (*p == (Byte)(m < 2 ? 0 : 0xff))
      m++;
    else if (*p)
      m = 0;                    /* non-zero, non-matching: start over */
    else
      m = 4 - m;                /* a zero can restart a partial match,
                                   keeping the zeros already seen */
    p++, n--;
  }

  /* restore */
  z->total_in += p - z->next_in;
  z->next_in = p;
  z->avail_in = n;
  z->state->sub.marker = m;

  /* return no joy or set up to restart on a new block */
  if (m != 4)
    return Z_DATA_ERROR;
  /* inflateReset clears the totals, so save and restore them */
  r = z->total_in; w = z->total_out;
  inflateReset(z);
  z->total_in = r; z->total_out = w;
  z->state->mode = BLOCKS;
  return Z_OK;
}
567
568#undef NEEDBYTE
569#undef NEXTBYTE
570
571/*+++++*/
572/* infutil.h -- types and macros common to blocks and codes
573 * Copyright (C) 1995 Mark Adler
574 * For conditions of distribution and use, see copyright notice in zlib.h
575 */
576
577/* WARNING: this file should *not* be used by applications. It is
578 part of the implementation of the compression library and is
579 subject to change. Applications should only use zlib.h.
580 */
581
582/* inflate blocks semi-private state */
/* inflate blocks semi-private state.
   The window is used as a circular buffer: `read' and `write' chase
   each other and read == write means no pending output (see the
   WAVAIL/WRAP/FLUSH macros below). */
struct inflate_blocks_state {

  /* mode */
  enum {
      TYPE,     /* get type bits (3, including end bit) */
      LENS,     /* get lengths for stored */
      STORED,   /* processing stored block */
      TABLE,    /* get table lengths */
      BTREE,    /* get bit lengths tree for a dynamic block */
      DTREE,    /* get length, distance trees for a dynamic block */
      CODES,    /* processing fixed or dynamic block */
      DRY,      /* output remaining window bytes */
      DONEB,    /* finished last block, done */
      BADB}     /* got a data error--stuck here */
    mode;               /* current inflate_block mode */

  /* mode dependent information (only the member matching `mode' is valid) */
  union {
    uInt left;          /* if STORED, bytes left to copy */
    struct {
      uInt table;               /* table lengths (14 bits) */
      uInt index;               /* index into blens (or border) */
      uIntf *blens;             /* bit lengths of codes */
      uInt bb;                  /* bit length tree depth */
      inflate_huft *tb;         /* bit length decoding tree */
      int nblens;               /* # elements allocated at blens */
    } trees;            /* if DTREE, decoding info for trees */
    struct {
      inflate_huft *tl, *td;    /* trees to free */
      inflate_codes_statef
         *codes;
    } decode;           /* if CODES, current state */
  } sub;                /* submode */
  uInt last;            /* true if this block is the last block */

  /* mode independent information */
  uInt bitk;            /* bits in bit buffer */
  uLong bitb;           /* bit buffer */
  Bytef *window;        /* sliding window */
  Bytef *end;           /* one byte after sliding window */
  Bytef *read;          /* window read pointer */
  Bytef *write;         /* window write pointer */
  check_func checkfn;   /* check function */
  uLong check;          /* check on output */

};
629
630
631/* defines for inflate input/output */
632/* update pointers and return */
633#define UPDBITS {s->bitb=b;s->bitk=k;}
634#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
635#define UPDOUT {s->write=q;}
636#define UPDATE {UPDBITS UPDIN UPDOUT}
637#define LEAVE {UPDATE return inflate_flush(s,z,r);}
638/* get bytes and bits */
639#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
640#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
641#define NEXTBYTE (n--,*p++)
642#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
643#define DUMPBITS(j) {b>>=(j);k-=(j);}
644/* output bytes */
645#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
646#define LOADOUT {q=s->write;m=WAVAIL;}
647#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
648#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
649#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
650#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
651/* load local pointers */
652#define LOAD {LOADIN LOADOUT}
653
654/*
655 * The IBM 150 firmware munges the data right after _etext[]. This
656 * protects it. -- Cort
657 */
658local uInt protect_mask[] = {0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0};
659/* And'ing with mask[n] masks the lower n bits */
660local uInt inflate_mask[] = {
661 0x0000,
662 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
663 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
664};
665
666/* copy as much as possible from the sliding window to the output area */
667local int inflate_flush OF((
668 inflate_blocks_statef *,
669 z_stream *,
670 int));
671
672/*+++++*/
673/* inffast.h -- header to use inffast.c
674 * Copyright (C) 1995 Mark Adler
675 * For conditions of distribution and use, see copyright notice in zlib.h
676 */
677
678/* WARNING: this file should *not* be used by applications. It is
679 part of the implementation of the compression library and is
680 subject to change. Applications should only use zlib.h.
681 */
682
683local int inflate_fast OF((
684 uInt,
685 uInt,
686 inflate_huft *,
687 inflate_huft *,
688 inflate_blocks_statef *,
689 z_stream *));
690
691
692/*+++++*/
693/* infblock.c -- interpret and process block types to last block
694 * Copyright (C) 1995 Mark Adler
695 * For conditions of distribution and use, see copyright notice in zlib.h
696 */
697
698/* Table for deflate from PKZIP's appnote.txt. */
699local uInt border[] = { /* Order of the bit length code lengths */
700 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
701
702/*
703 Notes beyond the 1.93a appnote.txt:
704
705 1. Distance pointers never point before the beginning of the output
706 stream.
707 2. Distance pointers can point back across blocks, up to 32k away.
708 3. There is an implied maximum of 7 bits for the bit length table and
709 15 bits for the actual data.
710 4. If only one code exists, then it is encoded using one bit. (Zero
711 would be more efficient, but perhaps a little confusing.) If two
712 codes exist, they are coded using one bit each (0 and 1).
713 5. There is no way of sending zero distance codes--a dummy must be
714 sent if there are none. (History: a pre 2.0 version of PKZIP would
715 store blocks with no distance codes, but this was discovered to be
716 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
717 zero distance codes, which is sent as one code of zero bits in
718 length.
719 6. There are up to 286 literal/length codes. Code 256 represents the
720 end-of-block. Note however that the static length tree defines
721 288 codes just to fill out the Huffman codes. Codes 286 and 287
722 cannot be used though, since there is no length base or extra bits
 723 defined for them. Similarly, there are up to 30 distance codes.
724 However, static trees define 32 codes (all 5 bits) to fill out the
725 Huffman codes, but the last two had better not show up in the data.
726 7. Unzip can check dynamic Huffman blocks for complete code sets.
727 The exception is that a single code would not be complete (see #4).
728 8. The five bits following the block type is really the number of
729 literal codes sent minus 257.
730 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
731 (1+6+6). Therefore, to output three times the length, you output
732 three codes (1+1+1), whereas to output four times the same length,
733 you only need two codes (1+3). Hmm.
734 10. In the tree reconstruction algorithm, Code = Code + Increment
735 only if BitLength(i) is not zero. (Pretty obvious.)
736 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
737 12. Note: length code 284 can represent 227-258, but length code 285
738 really is 258. The last length deserves its own, short code
739 since it gets used a lot in very redundant files. The length
740 258 is special since 258 - 3 (the min match length) is 255.
741 13. The literal/length and distance code bit lengths are read as a
742 single stream of lengths. It is possible (and advantageous) for
743 a repeat code (16, 17, or 18) to go across the boundary between
744 the two sets of lengths.
745 */
746
747
/*
 * Return a blocks state to the initial TYPE mode.  Frees any
 * mode-dependent allocations (the dynamic-tree length array in
 * BTREE/DTREE, the codes state and trees in CODES), empties the bit
 * buffer and window, and hands the running check value back through
 * *c before re-seeding it.
 */
local void inflate_blocks_reset(s, z, c)
inflate_blocks_statef *s;
z_stream *z;
uLongf *c;
{
  if (s->checkfn != Z_NULL)
    *c = s->check;              /* report the current check to the caller */
  if (s->mode == BTREE || s->mode == DTREE)
    ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
  if (s->mode == CODES)
  {
    inflate_codes_free(s->sub.decode.codes, z);
    inflate_trees_free(s->sub.decode.td, z);
    inflate_trees_free(s->sub.decode.tl, z);
  }
  s->mode = TYPE;
  s->bitk = 0;
  s->bitb = 0;
  s->read = s->write = s->window;       /* empty window */
  if (s->checkfn != Z_NULL)
    s->check = (*s->checkfn)(0L, Z_NULL, 0);    /* re-seed the check */
  Trace((stderr, "inflate: blocks reset\n"));
}
771
772
773local inflate_blocks_statef *inflate_blocks_new(z, c, w)
774z_stream *z;
775check_func c;
776uInt w;
777{
778 inflate_blocks_statef *s;
779
780 if ((s = (inflate_blocks_statef *)ZALLOC
781 (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
782 return s;
783 if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
784 {
785 ZFREE(z, s, sizeof(struct inflate_blocks_state));
786 return Z_NULL;
787 }
788 s->end = s->window + w;
789 s->checkfn = c;
790 s->mode = TYPE;
791 Trace((stderr, "inflate: blocks allocated\n"));
792 inflate_blocks_reset(s, z, &s->check);
793 return s;
794}
795
796
/*
 * Decode deflate blocks from the input to the sliding window.
 * State machine over s->mode; the NEEDBITS/NEEDOUT/LEAVE macros
 * operate on the locals loaded by LOAD and may return from this
 * function after restoring the stream state via UPDATE.  Cases fall
 * through intentionally as one phase completes and the next begins.
 */
local int inflate_blocks(s, z, r)
inflate_blocks_statef *s;
z_stream *z;
int r;
{
  uInt t;               /* temporary storage */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */

  /* copy input/output information to locals (UPDATE macro restores) */
  LOAD

  /* process input based on current state */
  while (1) switch (s->mode)
  {
    case TYPE:
      /* 3 header bits: 1 last-block bit + 2 block-type bits */
      NEEDBITS(3)
      t = (uInt)b & 7;
      s->last = t & 1;
      switch (t >> 1)
      {
        case 0:                         /* stored */
          Trace((stderr, "inflate: stored block%s\n",
                 s->last ? " (last)" : ""));
          DUMPBITS(3)
          t = k & 7;                    /* go to byte boundary */
          DUMPBITS(t)
          s->mode = LENS;               /* get length of stored block */
          break;
        case 1:                         /* fixed */
          Trace((stderr, "inflate: fixed codes block%s\n",
                 s->last ? " (last)" : ""));
          {
            uInt bl, bd;
            inflate_huft *tl, *td;

            inflate_trees_fixed(&bl, &bd, &tl, &td);
            s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
            if (s->sub.decode.codes == Z_NULL)
            {
              r = Z_MEM_ERROR;
              LEAVE
            }
            s->sub.decode.tl = Z_NULL;  /* don't try to free these */
            s->sub.decode.td = Z_NULL;
          }
          DUMPBITS(3)
          s->mode = CODES;
          break;
        case 2:                         /* dynamic */
          Trace((stderr, "inflate: dynamic codes block%s\n",
                 s->last ? " (last)" : ""));
          DUMPBITS(3)
          s->mode = TABLE;
          break;
        case 3:                         /* illegal */
          DUMPBITS(3)
          s->mode = BADB;
          z->msg = "invalid block type";
          r = Z_DATA_ERROR;
          LEAVE
      }
      break;
    case LENS:
      /* 16-bit length plus its one's complement */
      NEEDBITS(32)
      if (((~b) >> 16) != (b & 0xffff))
      {
        s->mode = BADB;
        z->msg = "invalid stored block lengths";
        r = Z_DATA_ERROR;
        LEAVE
      }
      s->sub.left = (uInt)b & 0xffff;
      b = k = 0;                        /* dump bits */
      Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
      s->mode = s->sub.left ? STORED : TYPE;
      break;
    case STORED:
      /* copy min(bytes left, input available, window room) at a time */
      if (n == 0)
        LEAVE
      NEEDOUT
      t = s->sub.left;
      if (t > n) t = n;
      if (t > m) t = m;
      zmemcpy(q, p, t);
      p += t;  n -= t;
      q += t;  m -= t;
      if ((s->sub.left -= t) != 0)
        break;
      Tracev((stderr, "inflate: stored end, %lu total out\n",
              z->total_out + (q >= s->read ? q - s->read :
              (s->end - s->read) + (q - s->window))));
      s->mode = s->last ? DRY : TYPE;
      break;
    case TABLE:
      /* 14 bits: 5 hlit, 5 hdist, 4 hclen */
      NEEDBITS(14)
      s->sub.trees.table = t = (uInt)b & 0x3fff;
#ifndef PKZIP_BUG_WORKAROUND
      if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
      {
        s->mode = BADB;
        z->msg = "too many length or distance symbols";
        r = Z_DATA_ERROR;
        LEAVE
      }
#endif
      t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
      if (t < 19)
        t = 19;                 /* need room for the 19 bit-length codes */
      if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
      {
        r = Z_MEM_ERROR;
        LEAVE
      }
      s->sub.trees.nblens = t;
      DUMPBITS(14)
      s->sub.trees.index = 0;
      Tracev((stderr, "inflate: table sizes ok\n"));
      s->mode = BTREE;
      /* fall through */
    case BTREE:
      /* read the bit-length code lengths, in `border' order */
      while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
      {
        NEEDBITS(3)
        s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
        DUMPBITS(3)
      }
      while (s->sub.trees.index < 19)
        s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
      s->sub.trees.bb = 7;
      t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
                             &s->sub.trees.tb, z);
      if (t != Z_OK)
      {
        r = t;
        if (r == Z_DATA_ERROR)
          s->mode = BADB;
        LEAVE
      }
      s->sub.trees.index = 0;
      Tracev((stderr, "inflate: bits tree ok\n"));
      s->mode = DTREE;
      /* fall through */
    case DTREE:
      /* decode the literal/length and distance code lengths */
      while (t = s->sub.trees.table,
             s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
      {
        inflate_huft *h;
        uInt i, j, c;

        t = s->sub.trees.bb;
        NEEDBITS(t)
        h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
        t = h->word.what.Bits;
        c = h->more.Base;
        if (c < 16)                     /* literal code length */
        {
          DUMPBITS(t)
          s->sub.trees.blens[s->sub.trees.index++] = c;
        }
        else /* c == 16..18: repeat codes */
        {
          i = c == 18 ? 7 : c - 14;     /* extra bits to read */
          j = c == 18 ? 11 : 3;         /* base repeat count */
          NEEDBITS(t + i)
          DUMPBITS(t)
          j += (uInt)b & inflate_mask[i];
          DUMPBITS(i)
          i = s->sub.trees.index;
          t = s->sub.trees.table;
          if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
              (c == 16 && i < 1))       /* 16 repeats the previous length */
          {
            s->mode = BADB;
            z->msg = "invalid bit length repeat";
            r = Z_DATA_ERROR;
            LEAVE
          }
          c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
          do {
            s->sub.trees.blens[i++] = c;
          } while (--j);
          s->sub.trees.index = i;
        }
      }
      inflate_trees_free(s->sub.trees.tb, z);
      s->sub.trees.tb = Z_NULL;
      {
        uInt bl, bd;
        inflate_huft *tl, *td;
        inflate_codes_statef *c;

        bl = 9;         /* must be <= 9 for lookahead assumptions */
        bd = 6;         /* must be <= 9 for lookahead assumptions */
        t = s->sub.trees.table;
        t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
                                  s->sub.trees.blens, &bl, &bd, &tl, &td, z);
        if (t != Z_OK)
        {
          if (t == (uInt)Z_DATA_ERROR)
            s->mode = BADB;
          r = t;
          LEAVE
        }
        Tracev((stderr, "inflate: trees ok\n"));
        if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
        {
          inflate_trees_free(td, z);
          inflate_trees_free(tl, z);
          r = Z_MEM_ERROR;
          LEAVE
        }
        ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
        s->sub.decode.codes = c;
        s->sub.decode.tl = tl;
        s->sub.decode.td = td;
      }
      s->mode = CODES;
      /* fall through */
    case CODES:
      UPDATE
      if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
        return inflate_flush(s, z, r);
      r = Z_OK;
      inflate_codes_free(s->sub.decode.codes, z);
      inflate_trees_free(s->sub.decode.td, z);
      inflate_trees_free(s->sub.decode.tl, z);
      LOAD
      Tracev((stderr, "inflate: codes end, %lu total out\n",
              z->total_out + (q >= s->read ? q - s->read :
              (s->end - s->read) + (q - s->window))));
      if (!s->last)
      {
        s->mode = TYPE;
        break;
      }
      if (k > 7)              /* return unused byte, if any */
      {
        Assert(k < 16, "inflate_codes grabbed too many bytes")
        k -= 8;
        n++;
        p--;                    /* can always return one */
      }
      s->mode = DRY;
      /* fall through */
    case DRY:
      /* flush whatever remains in the window */
      FLUSH
      if (s->read != s->write)
        LEAVE
      s->mode = DONEB;
      /* fall through */
    case DONEB:
      r = Z_STREAM_END;
      LEAVE
    case BADB:
      r = Z_DATA_ERROR;
      LEAVE
    default:
      r = Z_STREAM_ERROR;
      LEAVE
  }
}
1058
1059
/*
 * Dispose of a blocks state allocated by inflate_blocks_new().
 * The reset must come first: it frees any mode-dependent allocations
 * and reports the final check value through *c.  Always returns Z_OK.
 */
local int inflate_blocks_free(s, z, c)
inflate_blocks_statef *s;
z_stream *z;
uLongf *c;
{
  inflate_blocks_reset(s, z, c);
  ZFREE(z, s->window, s->end - s->window);
  ZFREE(z, s, sizeof(struct inflate_blocks_state));
  Trace((stderr, "inflate: blocks freed\n"));
  return Z_OK;
}
1071
/*
 * This subroutine adds the data at next_in/avail_in to the output history
 * without performing any output.  The output buffer must be "caught up";
 * i.e. no pending output (hence s->read equals s->write), and the state must
 * be BLOCKS (i.e. we should be willing to see the start of a series of
 * BLOCKS).  On exit, the output will also be caught up, and the checksum
 * will have been updated if need be.
 */
local int inflate_addhistory(s, z)
inflate_blocks_statef *s;
z_stream *z;
{
  uLong b;              /* bit buffer */  /* NOT USED HERE */
  uInt k;               /* bits in bit buffer */ /* NOT USED HERE */
  uInt t;               /* temporary storage */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */

  /* b and k exist only because the LOAD/UPDATE macros reference them */
  if (s->read != s->write)
    return Z_STREAM_ERROR;
  if (s->mode != TYPE)
    return Z_DATA_ERROR;

  /* we're ready to rock */
  LOAD
  /* while there is input ready, copy to output buffer, moving
   * pointers as needed.
   */
  while (n) {
    t = n;  /* how many to do */
    /* is there room until end of buffer? */
    if (t > m) t = m;
    /* update check information */
    if (s->checkfn != Z_NULL)
      s->check = (*s->checkfn)(s->check, q, t);
    zmemcpy(q, p, t);
    q += t;
    p += t;
    n -= t;
    z->total_out += t;
    s->read = q;    /* drag read pointer forward */
/*  WRAP  */ /* expand WRAP macro by hand to handle s->read */
    if (q == s->end) {
      s->read = q = s->window;
      m = WAVAIL;
    }
  }
  UPDATE
  return Z_OK;
}
1124
1125
1126/*
1127 * At the end of a Deflate-compressed PPP packet, we expect to have seen
1128 * a `stored' block type value but not the (zero) length bytes.
1129 */
1130local int inflate_packet_flush(s)
1131 inflate_blocks_statef *s;
1132{
1133 if (s->mode != LENS)
1134 return Z_DATA_ERROR;
1135 s->mode = TYPE;
1136 return Z_OK;
1137}
1138
1139
1140/*+++++*/
1141/* inftrees.c -- generate Huffman trees for efficient decoding
1142 * Copyright (C) 1995 Mark Adler
1143 * For conditions of distribution and use, see copyright notice in zlib.h
1144 */
1145
1146/* simplify the use of the inflate_huft type with some defines */
1147#define base more.Base
1148#define next more.Next
1149#define exop word.what.Exop
1150#define bits word.what.Bits
1151
1152
1153local int huft_build OF((
1154 uIntf *, /* code lengths in bits */
1155 uInt, /* number of codes */
1156 uInt, /* number of "simple" codes */
1157 uIntf *, /* list of base values for non-simple codes */
1158 uIntf *, /* list of extra bits for non-simple codes */
1159 inflate_huft * FAR*,/* result: starting table */
1160 uIntf *, /* maximum lookup bits (returns actual) */
1161 z_stream *)); /* for zalloc function */
1162
1163local voidpf falloc OF((
1164 voidpf, /* opaque pointer (not used) */
1165 uInt, /* number of items */
1166 uInt)); /* size of item */
1167
1168local void ffree OF((
1169 voidpf q, /* opaque pointer (not used) */
1170 voidpf p, /* what to free (not used) */
1171 uInt n)); /* number of bytes (not used) */
1172
1173/* Tables for deflate from PKZIP's appnote.txt. */
1174local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
1175 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
1176 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
1177 /* actually lengths - 2; also see note #13 above about 258 */
1178local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
1179 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1180 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
1181local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
1182 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
1183 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
1184 8193, 12289, 16385, 24577};
1185local uInt cpdext[] = { /* Extra bits for distance codes */
1186 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
1187 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
1188 12, 12, 13, 13};
1189
1190/*
1191 Huffman code decoding is performed using a multi-level table lookup.
1192 The fastest way to decode is to simply build a lookup table whose
1193 size is determined by the longest code. However, the time it takes
1194 to build this table can also be a factor if the data being decoded
1195 is not very long. The most common codes are necessarily the
1196 shortest codes, so those codes dominate the decoding time, and hence
1197 the speed. The idea is you can have a shorter table that decodes the
1198 shorter, more probable codes, and then point to subsidiary tables for
1199 the longer codes. The time it costs to decode the longer codes is
1200 then traded against the time it takes to make longer tables.
1201
 1202 The results of this trade-off are in the variables lbits and dbits
1203 below. lbits is the number of bits the first level table for literal/
1204 length codes can decode in one step, and dbits is the same thing for
1205 the distance codes. Subsequent tables are also less than or equal to
1206 those sizes. These values may be adjusted either when all of the
1207 codes are shorter than that, in which case the longest code length in
1208 bits is used, or when the shortest code is *longer* than the requested
1209 table size, in which case the length of the shortest code in bits is
1210 used.
1211
1212 There are two different values for the two tables, since they code a
1213 different number of possibilities each. The literal/length table
1214 codes 286 possible values, or in a flat code, a little over eight
1215 bits. The distance table codes 30 possible values, or a little less
1216 than five bits, flat. The optimum values for speed end up being
1217 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
1218 The optimum values may differ though from machine to machine, and
1219 possibly even between compilers. Your mileage may vary.
1220 */
1221
1222
1223/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
1224#define BMAX 15 /* maximum bit length of any code */
1225#define N_MAX 288 /* maximum number of codes in any set */
1226
1227#ifdef DEBUG_ZLIB
1228 uInt inflate_hufts;
1229#endif
1230
local int huft_build(b, n, s, d, e, t, m, zs)
uIntf *b;               /* code lengths in bits (all assumed <= BMAX) */
uInt n;                 /* number of codes (assumed <= N_MAX) */
uInt s;                 /* number of simple-valued codes (0..s-1) */
uIntf *d;               /* list of base values for non-simple codes */
uIntf *e;               /* list of extra bits for non-simple codes */
inflate_huft * FAR *t;  /* result: starting table */
uIntf *m;               /* maximum lookup bits, returns actual */
z_stream *zs;           /* for zalloc function */
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return Z_OK on success, Z_BUF_ERROR
   if the given code set is incomplete (the tables are still built in this
   case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
   over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory.
   The tables are linked through q->next so inflate_trees_free() can
   release them all from *t. */
{

  uInt a;                       /* counter for codes of length k */
  uInt c[BMAX+1];               /* bit length count table */
  uInt f;                       /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register uInt i;              /* counter, current code */
  register uInt j;              /* counter */
  register int k;               /* number of bits in current code */
  int l;                        /* bits per table (returned in m) */
  register uIntf *p;            /* pointer into c[], b[], or v[] */
  inflate_huft *q;              /* points to current table */
  struct inflate_huft_s r;      /* table entry for structure assignment */
  inflate_huft *u[BMAX];        /* table stack */
  uInt v[N_MAX];                /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  uInt x[BMAX+1];               /* bit offsets, then code stack */
  uIntf *xp;                    /* pointer into x */
  int y;                        /* number of dummy codes added */
  uInt z;                       /* number of entries in current table */


  /* Generate counts for each bit length */
  p = c;
#define C0 *p++ = 0;
#define C2 C0 C0 C0 C0
#define C4 C2 C2 C2 C2
  C4                            /* clear c[]--assume BMAX+1 is 16 */
  p = b;  i = n;
  do {
    c[*p++]++;                  /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (inflate_huft *)Z_NULL;
    *m = 0;
    return Z_OK;
  }


  /* Find minimum and maximum length, bound *m by those */
  l = *m;
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((uInt)l < j)
    l = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((uInt)l > i)
    l = i;
  *m = l;


  /* Adjust last length count to fill out codes, if needed */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return Z_DATA_ERROR;      /* over-subscribed set of lengths */
  if ((y -= c[i]) < 0)
    return Z_DATA_ERROR;
  c[i] += y;                    /* y > 0 here means an incomplete code set */


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  p = b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = -l;                       /* bits decoded == (l * h) */
  u[0] = (inflate_huft *)Z_NULL;        /* just to keep compilers happy */
  q = (inflate_huft *)Z_NULL;   /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l)
      {
        h++;
        w += l;                 /* previous table always l bits */

        /* compute minimum size table less than or equal to l bits */
        z = (z = g - w) > (uInt)l ? l : z;      /* table size upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          if (j < z)
            while (++j < z)     /* try smaller tables up to z bits */
            {
              if ((f <<= 1) <= *++xp)
                break;          /* enough codes to use up j bits */
              f -= *xp;         /* else deduct codes from patterns */
            }
        }
        z = 1 << j;             /* table entries for j-bit table */

        /* allocate and link in new table */
        if ((q = (inflate_huft *)ZALLOC
             (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
        {
          if (h)
            inflate_trees_free(u[0], zs);
          return Z_MEM_ERROR;   /* not enough memory */
        }
        q->word.Nalloc = z + 1;
#ifdef DEBUG_ZLIB
        inflate_hufts += z + 1;
#endif
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->next)) = Z_NULL;
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.bits = (Byte)l;     /* bits to dump before this table */
          r.exop = (Byte)j;     /* bits in this table */
          r.next = q;           /* pointer to this table */
          j = i >> (w - l);     /* (get around Turbo C bug) */
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.bits = (Byte)(k - w);
      if (p >= v + n)
        r.exop = 128 + 64;      /* out of values--invalid code */
      else if (*p < s)
      {
        r.exop = (Byte)(*p < 256 ? 0 : 32 + 64);    /* 256 is end-of-block */
        r.base = *p++;          /* simple code is just the value */
      }
      else
      {
        r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
        r.base = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
      {
        h--;                    /* don't need to update q */
        w -= l;
      }
    }
  }


  /* Return Z_BUF_ERROR if we were given an incomplete table
     (a single code, g == 1, is allowed to be incomplete) */
  return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
}
1432
1433
1434local int inflate_trees_bits(c, bb, tb, z)
1435uIntf *c; /* 19 code lengths */
1436uIntf *bb; /* bits tree desired/actual depth */
1437inflate_huft * FAR *tb; /* bits tree result */
1438z_stream *z; /* for zfree function */
1439{
1440 int r;
1441
1442 r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
1443 if (r == Z_DATA_ERROR)
1444 z->msg = "oversubscribed dynamic bit lengths tree";
1445 else if (r == Z_BUF_ERROR)
1446 {
1447 inflate_trees_free(*tb, z);
1448 z->msg = "incomplete dynamic bit lengths tree";
1449 r = Z_DATA_ERROR;
1450 }
1451 return r;
1452}
1453
1454
1455local int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
1456uInt nl; /* number of literal/length codes */
1457uInt nd; /* number of distance codes */
1458uIntf *c; /* that many (total) code lengths */
1459uIntf *bl; /* literal desired/actual bit depth */
1460uIntf *bd; /* distance desired/actual bit depth */
1461inflate_huft * FAR *tl; /* literal/length tree result */
1462inflate_huft * FAR *td; /* distance tree result */
1463z_stream *z; /* for zfree function */
1464{
1465 int r;
1466
1467 /* build literal/length tree */
1468 if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
1469 {
1470 if (r == Z_DATA_ERROR)
1471 z->msg = "oversubscribed literal/length tree";
1472 else if (r == Z_BUF_ERROR)
1473 {
1474 inflate_trees_free(*tl, z);
1475 z->msg = "incomplete literal/length tree";
1476 r = Z_DATA_ERROR;
1477 }
1478 return r;
1479 }
1480
1481 /* build distance tree */
1482 if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
1483 {
1484 if (r == Z_DATA_ERROR)
1485 z->msg = "oversubscribed literal/length tree";
1486 else if (r == Z_BUF_ERROR) {
1487#ifdef PKZIP_BUG_WORKAROUND
1488 r = Z_OK;
1489 }
1490#else
1491 inflate_trees_free(*td, z);
1492 z->msg = "incomplete literal/length tree";
1493 r = Z_DATA_ERROR;
1494 }
1495 inflate_trees_free(*tl, z);
1496 return r;
1497#endif
1498 }
1499
1500 /* done */
1501 return Z_OK;
1502}
1503
1504
/* build fixed tables only once--keep them here */
local int fixed_lock = 0;             /* crude reentrancy guard for the build */
local int fixed_built = 0;            /* set once the fixed trees exist */
#define FIXEDH 530                    /* number of hufts used by fixed tables */
local uInt fixed_left = FIXEDH;       /* entries still free in fixed_mem */
local inflate_huft fixed_mem[FIXEDH]; /* static pool carved up by falloc() */
local uInt fixed_bl;                  /* lookup bits for fixed literal tree */
local uInt fixed_bd;                  /* lookup bits for fixed distance tree */
local inflate_huft *fixed_tl;         /* fixed literal/length tree */
local inflate_huft *fixed_td;         /* fixed distance tree */
1515
1516
1517local voidpf falloc(q, n, s)
1518voidpf q; /* opaque pointer (not used) */
1519uInt n; /* number of items */
1520uInt s; /* size of item */
1521{
1522 Assert(s == sizeof(inflate_huft) && n <= fixed_left,
1523 "inflate_trees falloc overflow");
1524 if (q) s++; /* to make some compilers happy */
1525 fixed_left -= n;
1526 return (voidpf)(fixed_mem + fixed_left);
1527}
1528
1529
1530local void ffree(q, p, n)
1531voidpf q;
1532voidpf p;
1533uInt n;
1534{
1535 Assert(0, "inflate_trees ffree called!");
1536 if (q) q = p; /* to make some compilers happy */
1537}
1538
1539
local int inflate_trees_fixed(bl, bd, tl, td)
uIntf *bl;               /* literal desired/actual bit depth */
uIntf *bd;               /* distance desired/actual bit depth */
inflate_huft * FAR *tl;  /* literal/length tree result */
inflate_huft * FAR *td;  /* distance tree result */
/* Return the fixed-Huffman decoding trees, building them on first use
   out of the static fixed_mem pool (via falloc/ffree). */
{
  /* build fixed tables if not built already--lock out other instances */
  /* NOTE(review): this increment/decrement "lock" is not atomic, so it
     only guards against reentrancy on one CPU -- confirm callers are
     effectively single-threaded */
  while (++fixed_lock > 1)
    fixed_lock--;
  if (!fixed_built)
  {
    int k;               /* temporary variable */
    unsigned c[288];     /* length list for huft_build */
    z_stream z;          /* for falloc function */

    /* set up fake z_stream for memory routines */
    z.zalloc = falloc;
    z.zfree = ffree;
    z.opaque = Z_NULL;

    /* literal table: the fixed code lengths 8/9/7/8 by symbol range */
    for (k = 0; k < 144; k++)
      c[k] = 8;
    for (; k < 256; k++)
      c[k] = 9;
    for (; k < 280; k++)
      c[k] = 7;
    for (; k < 288; k++)
      c[k] = 8;
    fixed_bl = 7;
    huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);

    /* distance table: all 30 distance codes are 5 bits */
    for (k = 0; k < 30; k++)
      c[k] = 5;
    fixed_bd = 5;
    huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);

    /* done */
    fixed_built = 1;
  }
  fixed_lock--;
  *bl = fixed_bl;
  *bd = fixed_bd;
  *tl = fixed_tl;
  *td = fixed_td;
  return Z_OK;
}
1588
1589
1590local int inflate_trees_free(t, z)
1591inflate_huft *t; /* table to free */
1592z_stream *z; /* for zfree function */
1593/* Free the malloc'ed tables built by huft_build(), which makes a linked
1594 list of the tables it made, with the links in a dummy first entry of
1595 each table. */
1596{
1597 register inflate_huft *p, *q;
1598
1599 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1600 p = t;
1601 while (p != Z_NULL)
1602 {
1603 q = (--p)->next;
1604 ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
1605 p = q;
1606 }
1607 return Z_OK;
1608}
1609
1610/*+++++*/
1611/* infcodes.c -- process literals and length/distance pairs
1612 * Copyright (C) 1995 Mark Adler
1613 * For conditions of distribution and use, see copyright notice in zlib.h
1614 */
1615
/* simplify the use of the inflate_huft type with some defines */
#define base more.Base          /* literal, length base, or distance base */
#define next more.Next          /* pointer to next-level sub-table */
#define exop word.what.Exop     /* extra-bit count / operation flag bits */
#define bits word.what.Bits     /* bits consumed by this table entry */
1621
/* inflate codes private state */
struct inflate_codes_state {

  /* mode */
  enum {        /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
      START,    /* x: set up for LEN */
      LEN,      /* i: get length/literal/eob next */
      LENEXT,   /* i: getting length extra (have base) */
      DIST,     /* i: get distance next */
      DISTEXT,  /* i: getting distance extra */
      COPY,     /* o: copying bytes in window, waiting for space */
      LIT,      /* o: got literal, waiting for output space */
      WASH,     /* o: got eob, possibly still output waiting */
      END,      /* x: got eob and all data flushed */
      BADCODE}  /* x: got error */
    mode;               /* current inflate_codes mode */

  /* mode dependent information */
  uInt len;             /* match length being decoded (base + extra) */
  union {
    struct {
      inflate_huft *tree;       /* pointer into tree */
      uInt need;                /* bits needed */
    } code;             /* if LEN or DIST, where in tree */
    uInt lit;           /* if LIT, literal */
    struct {
      uInt get;                 /* bits to get for extra */
      uInt dist;                /* distance back to copy from */
    } copy;             /* if EXT or COPY, where and how much */
  } sub;                /* submode */

  /* mode independent information */
  Byte lbits;           /* ltree bits decoded per branch */
  Byte dbits;           /* dtree bits decoded per branch */
  inflate_huft *ltree;  /* literal/length/eob tree */
  inflate_huft *dtree;  /* distance tree */

};
1660
1661
1662local inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
1663uInt bl, bd;
1664inflate_huft *tl, *td;
1665z_stream *z;
1666{
1667 inflate_codes_statef *c;
1668
1669 if ((c = (inflate_codes_statef *)
1670 ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
1671 {
1672 c->mode = START;
1673 c->lbits = (Byte)bl;
1674 c->dbits = (Byte)bd;
1675 c->ltree = tl;
1676 c->dtree = td;
1677 Tracev((stderr, "inflate: codes new\n"));
1678 }
1679 return c;
1680}
1681
1682
local int inflate_codes(s, z, r)
inflate_blocks_statef *s;
z_stream *z;
int r;
/* Decode literal and length/distance codes into the sliding window,
   driven by the state machine in s->sub.decode.codes.  The LOAD/UPDATE
   macros cache/restore stream state in locals; NEEDBITS, NEEDOUT and
   LEAVE can return from this function when input or output runs out.
   Several cases fall through intentionally once their setup is done. */
{
  uInt j;               /* temporary storage */
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  Bytef *f;             /* pointer to copy strings from */
  inflate_codes_statef *c = s->sub.decode.codes;  /* codes state */

  /* copy input/output information to locals (UPDATE macro restores) */
  LOAD

  /* process input and output based on current state */
  while (1) switch (c->mode)
  {             /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
    case START:         /* x: set up for LEN */
#ifndef SLOW
      /* fast path needs >= 258 window bytes (longest match) and >= 10
         input bytes (see the comment above inflate_fast) */
      if (m >= 258 && n >= 10)
      {
        UPDATE
        r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
        LOAD
        if (r != Z_OK)
        {
          c->mode = r == Z_STREAM_END ? WASH : BADCODE;
          break;
        }
      }
#endif /* !SLOW */
      c->sub.code.need = c->lbits;
      c->sub.code.tree = c->ltree;
      c->mode = LEN;
      /* fall through */
    case LEN:           /* i: get length/literal/eob next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e == 0)               /* literal */
      {
        c->sub.lit = t->base;
        Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                 "inflate: literal '%c'\n" :
                 "inflate: literal 0x%02x\n", t->base));
        c->mode = LIT;
        break;
      }
      if (e & 16)               /* length */
      {
        c->sub.copy.get = e & 15;
        c->len = t->base;
        c->mode = LENEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      if (e & 32)               /* end of block */
      {
        Tracevv((stderr, "inflate: end of block\n"));
        c->mode = WASH;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid literal/length code";
      r = Z_DATA_ERROR;
      LEAVE
    case LENEXT:        /* i: getting length extra (have base) */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->len += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      c->sub.code.need = c->dbits;
      c->sub.code.tree = c->dtree;
      Tracevv((stderr, "inflate: length %u\n", c->len));
      c->mode = DIST;
      /* fall through */
    case DIST:          /* i: get distance next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e & 16)               /* distance */
      {
        c->sub.copy.get = e & 15;
        c->sub.copy.dist = t->base;
        c->mode = DISTEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid distance code";
      r = Z_DATA_ERROR;
      LEAVE
    case DISTEXT:       /* i: getting distance extra */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->sub.copy.dist += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
      c->mode = COPY;
      /* fall through */
    case COPY:          /* o: copying bytes in window, waiting for space */
      /* compute the copy source, wrapping to the end of the window when
         the distance reaches back past the write pointer */
#ifndef __TURBOC__ /* Turbo C bug for following expression */
      f = (uInt)(q - s->window) < c->sub.copy.dist ?
          s->end - (c->sub.copy.dist - (q - s->window)) :
          q - c->sub.copy.dist;
#else
      f = q - c->sub.copy.dist;
      if ((uInt)(q - s->window) < c->sub.copy.dist)
        f = s->end - (c->sub.copy.dist - (q - s->window));
#endif
      while (c->len)
      {
        NEEDOUT
        OUTBYTE(*f++)
        if (f == s->end)
          f = s->window;        /* source wraps around the window */
        c->len--;
      }
      c->mode = START;
      break;
    case LIT:           /* o: got literal, waiting for output space */
      NEEDOUT
      OUTBYTE(c->sub.lit)
      c->mode = START;
      break;
    case WASH:          /* o: got eob, possibly more output */
      FLUSH
      if (s->read != s->write)
        LEAVE
      c->mode = END;
      /* fall through */
    case END:
      r = Z_STREAM_END;
      LEAVE
    case BADCODE:       /* x: got error */
      r = Z_DATA_ERROR;
      LEAVE
    default:
      r = Z_STREAM_ERROR;
      LEAVE
  }
}
1841
1842
local void inflate_codes_free(c, z)
inflate_codes_statef *c;
z_stream *z;
/* Release a codes state allocated by inflate_codes_new(). */
{
  ZFREE(z, c, sizeof(struct inflate_codes_state));
  Tracev((stderr, "inflate: codes free\n"));
}
1850
1851/*+++++*/
1852/* inflate_util.c -- data and routines common to blocks and codes
1853 * Copyright (C) 1995 Mark Adler
1854 * For conditions of distribution and use, see copyright notice in zlib.h
1855 */
1856
/* copy as much as possible from the sliding window to the output area */
local int inflate_flush(s, z, r)
inflate_blocks_statef *s;
z_stream *z;
int r;
/* Copies pending window bytes to z->next_out in at most two pieces:
   first up to the window end (or the write pointer), then -- after the
   read pointer wraps -- from the start of the window.  Updates
   avail_out/total_out, the running check value, and s->read.  Returns
   r, upgraded from Z_BUF_ERROR to Z_OK when any bytes were moved. */
{
  uInt n;
  Bytef *p, *q;

  /* local copies of source and destination pointers */
  p = z->next_out;
  q = s->read;

  /* compute number of bytes to copy as far as end of window */
  n = (uInt)((q <= s->write ? s->write : s->end) - q);
  if (n > z->avail_out) n = z->avail_out;
  if (n && r == Z_BUF_ERROR) r = Z_OK;  /* progress was made after all */

  /* update counters */
  z->avail_out -= n;
  z->total_out += n;

  /* update check information */
  if (s->checkfn != Z_NULL)
    s->check = (*s->checkfn)(s->check, q, n);

  /* copy as far as end of window */
  zmemcpy(p, q, n);
  p += n;
  q += n;

  /* see if more to copy at beginning of window */
  if (q == s->end)
  {
    /* wrap pointers */
    q = s->window;
    if (s->write == s->end)
      s->write = s->window;

    /* compute bytes to copy */
    n = (uInt)(s->write - q);
    if (n > z->avail_out) n = z->avail_out;
    if (n && r == Z_BUF_ERROR) r = Z_OK;

    /* update counters */
    z->avail_out -= n;
    z->total_out += n;

    /* update check information */
    if (s->checkfn != Z_NULL)
      s->check = (*s->checkfn)(s->check, q, n);

    /* copy */
    zmemcpy(p, q, n);
    p += n;
    q += n;
  }

  /* update pointers */
  z->next_out = p;
  s->read = q;

  /* done */
  return r;
}
1922
1923
1924/*+++++*/
1925/* inffast.c -- process literals and length/distance pairs fast
1926 * Copyright (C) 1995 Mark Adler
1927 * For conditions of distribution and use, see copyright notice in zlib.h
1928 */
1929
/* simplify the use of the inflate_huft type with some defines */
/* (these repeat the earlier definitions with identical bodies; the
   original separate zlib .c files were concatenated into this file) */
#define base more.Base
#define next more.Next
#define exop word.what.Exop
#define bits word.what.Bits

/* macros for bit input with no checking and for returning unused bytes */
/* GRABBITS tops the bit buffer b up to at least j bits; UNGRAB returns
   the whole unused bytes of the bit buffer to the input stream */
#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
1939
1940/* Called with number of bytes left to write in window at least 258
1941 (the maximum string length) and number of input bytes available
1942 at least ten. The ten bytes are six bytes for the longest length/
1943 distance pair plus four bytes for overloading the bit buffer. */
1944
local int inflate_fast(bl, bd, tl, td, s, z)
uInt bl, bd;              /* lookup bits for literal/length and distance trees */
inflate_huft *tl, *td;    /* literal/length and distance trees */
inflate_blocks_statef *s;
z_stream *z;
/* Fast decode loop.  The caller guarantees at least 258 bytes of window
   space and at least ten input bytes per iteration (see the comment
   above), so no per-byte bounds checks are needed inside the loop. */
{
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  uInt ml;              /* mask for literal/length tree */
  uInt md;              /* mask for distance tree */
  uInt c;               /* bytes to copy */
  uInt d;               /* distance back to copy from */
  Bytef *r;             /* copy source pointer */

  /* load input, output, bit values */
  LOAD

  /* initialize masks */
  ml = inflate_mask[bl];
  md = inflate_mask[bd];

  /* do until not enough input or output space for fast loop */
  do {                          /* assume called with m >= 258 && n >= 10 */
    /* get literal/length code */
    GRABBITS(20)                /* max bits for literal/length code */
    if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
    {
      DUMPBITS(t->bits)
      Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                "inflate: * literal '%c'\n" :
                "inflate: * literal 0x%02x\n", t->base));
      *q++ = (Byte)t->base;
      m--;
      continue;
    }
    do {
      DUMPBITS(t->bits)
      if (e & 16)
      {
        /* get extra bits for length */
        e &= 15;
        c = t->base + ((uInt)b & inflate_mask[e]);
        DUMPBITS(e)
        Tracevv((stderr, "inflate: * length %u\n", c));

        /* decode distance base of block to copy */
        GRABBITS(15);           /* max bits for distance code */
        e = (t = td + ((uInt)b & md))->exop;
        do {
          DUMPBITS(t->bits)
          if (e & 16)
          {
            /* get extra bits to add to distance base */
            e &= 15;
            GRABBITS(e)         /* get extra bits (up to 13) */
            d = t->base + ((uInt)b & inflate_mask[e]);
            DUMPBITS(e)
            Tracevv((stderr, "inflate: * distance %u\n", d));

            /* do the copy */
            m -= c;
            if ((uInt)(q - s->window) >= d)     /* offset before dest */
            {                                   /* just copy */
              r = q - d;
              *q++ = *r++; c--; /* minimum count is three, */
              *q++ = *r++; c--; /* so unroll loop a little */
            }
            else                /* else offset after destination */
            {
              e = d - (q - s->window);  /* bytes from offset to end */
              r = s->end - e;           /* pointer to offset */
              if (c > e)        /* if source crosses, */
              {
                c -= e;         /* copy to end of window */
                do {
                  *q++ = *r++;
                } while (--e);
                r = s->window;  /* copy rest from start of window */
              }
            }
            do {                /* copy all or what's left */
              *q++ = *r++;
            } while (--c);
            break;
          }
          else if ((e & 64) == 0)
            /* incomplete code: descend into the distance sub-table */
            e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
          else
          {
            z->msg = "invalid distance code";
            UNGRAB
            UPDATE
            return Z_DATA_ERROR;
          }
        } while (1);
        break;
      }
      if ((e & 64) == 0)
      {
        /* incomplete code: descend into the literal/length sub-table */
        if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
        {
          DUMPBITS(t->bits)
          Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                    "inflate: * literal '%c'\n" :
                    "inflate: * literal 0x%02x\n", t->base));
          *q++ = (Byte)t->base;
          m--;
          break;
        }
      }
      else if (e & 32)
      {
        Tracevv((stderr, "inflate: * end of block\n"));
        UNGRAB
        UPDATE
        return Z_STREAM_END;
      }
      else
      {
        z->msg = "invalid literal/length code";
        UNGRAB
        UPDATE
        return Z_DATA_ERROR;
      }
    } while (1);
  } while (m >= 258 && n >= 10);

  /* not enough input or output--restore pointers and return */
  UNGRAB
  UPDATE
  return Z_OK;
}
2083
2084
2085/*+++++*/
2086/* zutil.c -- target dependent utility functions for the compression library
2087 * Copyright (C) 1995 Jean-loup Gailly.
2088 * For conditions of distribution and use, see copyright notice in zlib.h
2089 */
2090
2091/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
2092
char *zlib_version = ZLIB_VERSION;

/* error strings indexed by 1 - error code (Z_STREAM_END=1 maps to index
   0, Z_OK=0 to index 1, and so on); keep the order in sync with the
   Z_* return codes */
char *z_errmsg[] = {
"stream end",          /* Z_STREAM_END    1  */
"",                    /* Z_OK            0  */
"file error",          /* Z_ERRNO        (-1) */
"stream error",        /* Z_STREAM_ERROR (-2) */
"data error",          /* Z_DATA_ERROR   (-3) */
"insufficient memory", /* Z_MEM_ERROR    (-4) */
"buffer error",        /* Z_BUF_ERROR    (-5) */
""};
2104
2105
2106/*+++++*/
2107/* adler32.c -- compute the Adler-32 checksum of a data stream
2108 * Copyright (C) 1995 Mark Adler
2109 * For conditions of distribution and use, see copyright notice in zlib.h
2110 */
2111
2112/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
2113
#define BASE 65521L /* largest prime smaller than 65536 */
#define NMAX 5552
/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */

/* unrolled inner-loop steps: each DO1 folds one input byte into the
   running sums s1 and s2 (names referenced from adler32() below) */
#define DO1(buf)  {s1 += *buf++; s2 += s1;}
#define DO2(buf)  DO1(buf); DO1(buf);
#define DO4(buf)  DO2(buf); DO2(buf);
#define DO8(buf)  DO4(buf); DO4(buf);
#define DO16(buf) DO8(buf); DO8(buf);
2123
2124/* ========================================================================= */
2125uLong adler32(adler, buf, len)
2126 uLong adler;
2127 Bytef *buf;
2128 uInt len;
2129{
2130 unsigned long s1 = adler & 0xffff;
2131 unsigned long s2 = (adler >> 16) & 0xffff;
2132 int k;
2133
2134 if (buf == Z_NULL) return 1L;
2135
2136 while (len > 0) {
2137 k = len < NMAX ? len : NMAX;
2138 len -= k;
2139 while (k >= 16) {
2140 DO16(buf);
2141 k -= 16;
2142 }
2143 if (k != 0) do {
2144 DO1(buf);
2145 } while (--k);
2146 s1 %= BASE;
2147 s2 %= BASE;
2148 }
2149 return (s2 << 16) | s1;
2150}
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
new file mode 100644
index 000000000000..7848f126d67d
--- /dev/null
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -0,0 +1,87 @@
1#include "zlib.h"
2
3/* bits taken from ppc */
4
5extern void *avail_ram, *end_avail;
6
/* There is no OS to return to in the boot loader, so "exit" simply
   parks the CPU in an infinite loop. */
void exit (void)
{
	while (1)
		;
}
11
/*
 * zlib allocation hook: a simple bump allocator over the scratch RAM
 * between avail_ram and end_avail.  Blocks are never reclaimed (see
 * zfree below); x is zlib's opaque pointer and is unused.
 */
void *zalloc(void *x, unsigned items, unsigned size)
{
	void *p = avail_ram;

	size *= items;
	size = (size + 7) & -8;		/* round up to an 8-byte multiple */
	/* NOTE(review): arithmetic on void* is a GCC extension */
	avail_ram += size;
	if (avail_ram > end_avail) {
		//puts("oops... out of memory\n");
		//pause();
		exit ();		/* no recovery possible: hang */
	}
	return p;
}
26
/* zlib free hook: the bump allocator in zalloc() cannot release
   individual blocks, so freeing is a no-op. */
void zfree(void *x, void *addr, unsigned nb)
{
}
30
31
/* gzip header FLG bits tested by gunzip() below */
#define HEAD_CRC	2	/* header CRC16 present */
#define EXTRA_FIELD	4	/* extra field present */
#define ORIG_NAME	8	/* original file name present */
#define COMMENT		0x10	/* file comment present */
#define RESERVED	0xe0	/* reserved bits, must be zero */

/* compression method byte expected in the gzip header */
#define DEFLATED	8
39
/*
 * Decompress a gzip-wrapped image.
 *   dst, dstlen: output buffer and its capacity
 *   src:         gzip-compressed input
 *   lenp:        in: bytes available at src; out: bytes written to dst
 * Parses the gzip header by hand, then runs raw inflate (negative
 * window bits) over the deflate payload.  Any error hangs in exit().
 */
void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp)
{
	z_stream s;
	int r, i, flags;

	/* skip header */

	i = 10;				/* fixed-size part of the gzip header */
	flags = src[3];
	if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
		//puts("bad gzipped data\n");
		exit();
	}
	if ((flags & EXTRA_FIELD) != 0)
		i = 12 + src[10] + (src[11] << 8);
	if ((flags & ORIG_NAME) != 0)
		/* NOTE(review): these scans are not bounded by *lenp; a
		   corrupt header missing its NUL would run past the end */
		while (src[i++] != 0)
			;
	if ((flags & COMMENT) != 0)
		while (src[i++] != 0)
			;
	if ((flags & HEAD_CRC) != 0)
		i += 2;
	if (i >= *lenp) {
		//puts("gunzip: ran out of data in header\n");
		exit();
	}

	s.zalloc = zalloc;
	s.zfree = zfree;
	r = inflateInit2(&s, -MAX_WBITS);	/* negative: raw deflate, no zlib wrapper */
	if (r != Z_OK) {
		//puts("inflateInit2 returned "); puthex(r); puts("\n");
		exit();
	}
	s.next_in = src + i;
	s.avail_in = *lenp - i;
	s.next_out = dst;
	s.avail_out = dstlen;
	r = inflate(&s, Z_FINISH);
	if (r != Z_OK && r != Z_STREAM_END) {
		//puts("inflate returned "); puthex(r); puts("\n");
		exit();
	}
	*lenp = s.next_out - (unsigned char *) dst;
	inflateEnd(&s);
}
87
diff --git a/arch/xtensa/boot/ramdisk/Makefile b/arch/xtensa/boot/ramdisk/Makefile
new file mode 100644
index 000000000000..b12f76352438
--- /dev/null
+++ b/arch/xtensa/boot/ramdisk/Makefile
@@ -0,0 +1,23 @@
1#
2# Makefile for a ramdisk image
3#
4
5BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
6
7ifeq ($(BIG_ENDIAN),1)
8OBJCOPY_ARGS := -O elf32-xtensa-be
9else
10OBJCOPY_ARGS := -O elf32-xtensa-le
11endif
12
13obj-y = ramdisk.o
14
15RAMDISK_IMAGE = arch/$(ARCH)/boot/ramdisk/$(CONFIG_EMBEDDED_RAMDISK_IMAGE)
16
17arch/$(ARCH)/boot/ramdisk/ramdisk.o:
18 $(Q)echo -e "dummy:" | $(AS) -o $@;
19 $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) \
20 --add-section .initrd=$(RAMDISK_IMAGE) \
21 --set-section-flags .initrd=contents,alloc,load,load,data \
22 arch/$(ARCH)/boot/ramdisk/ramdisk.o $@
23
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
new file mode 100644
index 000000000000..1d230ee081b4
--- /dev/null
+++ b/arch/xtensa/configs/common_defconfig
@@ -0,0 +1,662 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11-rc2
4# Tue Mar 1 16:36:53 2005
5#
6# CONFIG_FRAME_POINTER is not set
7CONFIG_XTENSA=y
8# CONFIG_UID16 is not set
9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
10CONFIG_HAVE_DEC_LOCK=y
11CONFIG_GENERIC_HARDIRQS=y
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_SWAP=y
25CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set
27CONFIG_BSD_PROCESS_ACCT=y
28# CONFIG_BSD_PROCESS_ACCT_V3 is not set
29CONFIG_SYSCTL=y
30# CONFIG_AUDIT is not set
31CONFIG_LOG_BUF_SHIFT=14
32# CONFIG_HOTPLUG is not set
33CONFIG_KOBJECT_UEVENT=y
34# CONFIG_IKCONFIG is not set
35# CONFIG_EMBEDDED is not set
36CONFIG_KALLSYMS=y
37# CONFIG_KALLSYMS_ALL is not set
38# CONFIG_KALLSYMS_EXTRA_PASS is not set
39CONFIG_FUTEX=y
40CONFIG_EPOLL=y
41# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
42CONFIG_SHMEM=y
43CONFIG_CC_ALIGN_FUNCTIONS=0
44CONFIG_CC_ALIGN_LABELS=0
45CONFIG_CC_ALIGN_LOOPS=0
46CONFIG_CC_ALIGN_JUMPS=0
47# CONFIG_TINY_SHMEM is not set
48
49#
50# Loadable module support
51#
52CONFIG_MODULES=y
53# CONFIG_MODULE_UNLOAD is not set
54CONFIG_OBSOLETE_MODPARM=y
55CONFIG_MODVERSIONS=y
56# CONFIG_MODULE_SRCVERSION_ALL is not set
57CONFIG_KMOD=y
58
59#
60# Processor type and features
61#
62CONFIG_XTENSA_ARCH_LINUX_BE=y
63# CONFIG_XTENSA_ARCH_LINUX_LE is not set
64# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
65# CONFIG_XTENSA_ARCH_S5 is not set
66# CONFIG_XTENSA_CUSTOM is not set
67CONFIG_MMU=y
68# CONFIG_XTENSA_UNALIGNED_USER is not set
69# CONFIG_PREEMPT is not set
70# CONFIG_MATH_EMULATION is not set
71# CONFIG_HIGHMEM is not set
72
73#
74# Platform options
75#
76# CONFIG_XTENSA_PLATFORM_ISS is not set
77CONFIG_XTENSA_PLATFORM_XT2000=y
78CONFIG_XTENSA_CALIBRATE_CCOUNT=y
79CONFIG_GENERIC_CALIBRATE_DELAY=y
80CONFIG_CMDLINE_BOOL=y
81CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target"
82
83#
84# Bus options
85#
86CONFIG_PCI=y
87# CONFIG_PCI_LEGACY_PROC is not set
88# CONFIG_PCI_NAMES is not set
89
90#
91# PCCARD (PCMCIA/CardBus) support
92#
93# CONFIG_PCCARD is not set
94
95#
96# PC-card bridges
97#
98
99#
100# PCI Hotplug Support
101#
102# CONFIG_HOTPLUG_PCI is not set
103
104#
# Executable file formats
106#
107CONFIG_KCORE_ELF=y
108CONFIG_BINFMT_ELF=y
109CONFIG_BINFMT_MISC=y
110
111#
112# Device Drivers
113#
114
115#
116# Generic Driver Options
117#
118CONFIG_STANDALONE=y
119CONFIG_PREVENT_FIRMWARE_BUILD=y
120# CONFIG_FW_LOADER is not set
121# CONFIG_DEBUG_DRIVER is not set
122
123#
124# Memory Technology Devices (MTD)
125#
126# CONFIG_MTD is not set
127
128#
129# Parallel port support
130#
131# CONFIG_PARPORT is not set
132
133#
134# Plug and Play support
135#
136
137#
138# Block devices
139#
140# CONFIG_BLK_DEV_FD is not set
141# CONFIG_BLK_CPQ_DA is not set
142# CONFIG_BLK_CPQ_CISS_DA is not set
143# CONFIG_BLK_DEV_DAC960 is not set
144# CONFIG_BLK_DEV_UMEM is not set
145# CONFIG_BLK_DEV_COW_COMMON is not set
146# CONFIG_BLK_DEV_LOOP is not set
147# CONFIG_BLK_DEV_NBD is not set
148# CONFIG_BLK_DEV_SX8 is not set
149# CONFIG_BLK_DEV_RAM is not set
150CONFIG_BLK_DEV_RAM_COUNT=16
151CONFIG_INITRAMFS_SOURCE=""
152# CONFIG_CDROM_PKTCDVD is not set
153
154#
155# IO Schedulers
156#
157CONFIG_IOSCHED_NOOP=y
158CONFIG_IOSCHED_AS=y
159CONFIG_IOSCHED_DEADLINE=y
160CONFIG_IOSCHED_CFQ=y
161# CONFIG_ATA_OVER_ETH is not set
162
163#
164# ATA/ATAPI/MFM/RLL support
165#
166# CONFIG_IDE is not set
167
168#
169# SCSI device support
170#
171# CONFIG_SCSI is not set
172
173#
174# Multi-device support (RAID and LVM)
175#
176# CONFIG_MD is not set
177
178#
179# Fusion MPT device support
180#
181
182#
183# IEEE 1394 (FireWire) support
184#
185# CONFIG_IEEE1394 is not set
186
187#
188# I2O device support
189#
190# CONFIG_I2O is not set
191
192#
193# Networking support
194#
195CONFIG_NET=y
196
197#
198# Networking options
199#
200# CONFIG_PACKET is not set
201# CONFIG_NETLINK_DEV is not set
202CONFIG_UNIX=y
203# CONFIG_NET_KEY is not set
204CONFIG_INET=y
205CONFIG_IP_MULTICAST=y
206CONFIG_IP_ADVANCED_ROUTER=y
207CONFIG_IP_MULTIPLE_TABLES=y
208CONFIG_IP_ROUTE_MULTIPATH=y
209CONFIG_IP_ROUTE_VERBOSE=y
210CONFIG_IP_PNP=y
211CONFIG_IP_PNP_DHCP=y
212CONFIG_IP_PNP_BOOTP=y
213CONFIG_IP_PNP_RARP=y
214# CONFIG_NET_IPIP is not set
215# CONFIG_NET_IPGRE is not set
216# CONFIG_IP_MROUTE is not set
217# CONFIG_ARPD is not set
218# CONFIG_SYN_COOKIES is not set
219# CONFIG_INET_AH is not set
220# CONFIG_INET_ESP is not set
221# CONFIG_INET_IPCOMP is not set
222# CONFIG_INET_TUNNEL is not set
223# CONFIG_IP_TCPDIAG is not set
224# CONFIG_IP_TCPDIAG_IPV6 is not set
225# CONFIG_IPV6 is not set
226# CONFIG_NETFILTER is not set
227
228#
229# SCTP Configuration (EXPERIMENTAL)
230#
231# CONFIG_IP_SCTP is not set
232# CONFIG_ATM is not set
233# CONFIG_BRIDGE is not set
234# CONFIG_VLAN_8021Q is not set
235# CONFIG_DECNET is not set
236# CONFIG_LLC2 is not set
237# CONFIG_IPX is not set
238# CONFIG_ATALK is not set
239# CONFIG_X25 is not set
240# CONFIG_LAPB is not set
241# CONFIG_NET_DIVERT is not set
242# CONFIG_ECONET is not set
243# CONFIG_WAN_ROUTER is not set
244
245#
246# QoS and/or fair queueing
247#
248CONFIG_NET_SCHED=y
249CONFIG_NET_SCH_CLK_JIFFIES=y
250# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
251# CONFIG_NET_SCH_CLK_CPU is not set
252CONFIG_NET_SCH_CBQ=m
253CONFIG_NET_SCH_HTB=m
254# CONFIG_NET_SCH_HFSC is not set
255CONFIG_NET_SCH_PRIO=m
256CONFIG_NET_SCH_RED=m
257CONFIG_NET_SCH_SFQ=m
258CONFIG_NET_SCH_TEQL=m
259CONFIG_NET_SCH_TBF=m
260CONFIG_NET_SCH_GRED=m
261CONFIG_NET_SCH_DSMARK=m
262# CONFIG_NET_SCH_NETEM is not set
263CONFIG_NET_SCH_INGRESS=m
264CONFIG_NET_QOS=y
265CONFIG_NET_ESTIMATOR=y
266CONFIG_NET_CLS=y
267CONFIG_NET_CLS_TCINDEX=m
268CONFIG_NET_CLS_ROUTE4=m
269CONFIG_NET_CLS_ROUTE=y
270CONFIG_NET_CLS_FW=m
271CONFIG_NET_CLS_U32=m
272# CONFIG_CLS_U32_PERF is not set
273# CONFIG_NET_CLS_IND is not set
274CONFIG_NET_CLS_RSVP=m
275CONFIG_NET_CLS_RSVP6=m
276# CONFIG_NET_CLS_ACT is not set
277CONFIG_NET_CLS_POLICE=y
278
279#
280# Network testing
281#
282# CONFIG_NET_PKTGEN is not set
283# CONFIG_NETPOLL is not set
284# CONFIG_NET_POLL_CONTROLLER is not set
285# CONFIG_HAMRADIO is not set
286# CONFIG_IRDA is not set
287# CONFIG_BT is not set
288CONFIG_NETDEVICES=y
289CONFIG_DUMMY=y
290# CONFIG_BONDING is not set
291# CONFIG_EQUALIZER is not set
292# CONFIG_TUN is not set
293
294#
295# ARCnet devices
296#
297# CONFIG_ARCNET is not set
298
299#
300# Ethernet (10 or 100Mbit)
301#
302CONFIG_NET_ETHERNET=y
303# CONFIG_MII is not set
304CONFIG_XT2000_SONIC=y
305# CONFIG_HAPPYMEAL is not set
306# CONFIG_SUNGEM is not set
307# CONFIG_NET_VENDOR_3COM is not set
308
309#
310# Tulip family network device support
311#
312# CONFIG_NET_TULIP is not set
313# CONFIG_HP100 is not set
314# CONFIG_NET_PCI is not set
315
316#
317# Ethernet (1000 Mbit)
318#
319# CONFIG_ACENIC is not set
320# CONFIG_DL2K is not set
321# CONFIG_E1000 is not set
322# CONFIG_NS83820 is not set
323# CONFIG_HAMACHI is not set
324# CONFIG_YELLOWFIN is not set
325# CONFIG_R8169 is not set
326# CONFIG_SK98LIN is not set
327# CONFIG_TIGON3 is not set
328
329#
330# Ethernet (10000 Mbit)
331#
332# CONFIG_IXGB is not set
333# CONFIG_S2IO is not set
334
335#
336# Token Ring devices
337#
338# CONFIG_TR is not set
339
340#
341# Wireless LAN (non-hamradio)
342#
343CONFIG_NET_RADIO=y
344
345#
346# Obsolete Wireless cards support (pre-802.11)
347#
348CONFIG_STRIP=m
349
350#
351# Wireless 802.11b ISA/PCI cards support
352#
353CONFIG_HERMES=m
354# CONFIG_PLX_HERMES is not set
355# CONFIG_TMD_HERMES is not set
356# CONFIG_PCI_HERMES is not set
357# CONFIG_ATMEL is not set
358
359#
360# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
361#
362# CONFIG_PRISM54 is not set
363CONFIG_NET_WIRELESS=y
364
365#
366# Wan interfaces
367#
368# CONFIG_WAN is not set
369# CONFIG_FDDI is not set
370# CONFIG_HIPPI is not set
371# CONFIG_PPP is not set
372# CONFIG_SLIP is not set
373# CONFIG_SHAPER is not set
374# CONFIG_NETCONSOLE is not set
375
376#
377# ISDN subsystem
378#
379# CONFIG_ISDN is not set
380
381#
382# Telephony Support
383#
384# CONFIG_PHONE is not set
385
386#
387# Input device support
388#
389CONFIG_INPUT=y
390
391#
392# Userland interfaces
393#
394CONFIG_INPUT_MOUSEDEV=y
395# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
396CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
397CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
398# CONFIG_INPUT_JOYDEV is not set
399# CONFIG_INPUT_TSDEV is not set
400# CONFIG_INPUT_EVDEV is not set
401# CONFIG_INPUT_EVBUG is not set
402
403#
404# Input I/O drivers
405#
406# CONFIG_GAMEPORT is not set
407CONFIG_SOUND_GAMEPORT=y
408CONFIG_SERIO=y
409# CONFIG_SERIO_I8042 is not set
410# CONFIG_SERIO_SERPORT is not set
411# CONFIG_SERIO_CT82C710 is not set
412# CONFIG_SERIO_PCIPS2 is not set
413# CONFIG_SERIO_RAW is not set
414
415#
416# Input Device Drivers
417#
418# CONFIG_INPUT_KEYBOARD is not set
419# CONFIG_INPUT_MOUSE is not set
420# CONFIG_INPUT_JOYSTICK is not set
421# CONFIG_INPUT_TOUCHSCREEN is not set
422# CONFIG_INPUT_MISC is not set
423
424#
425# Character devices
426#
427CONFIG_VT=y
428CONFIG_VT_CONSOLE=y
429CONFIG_HW_CONSOLE=y
430# CONFIG_SERIAL_NONSTANDARD is not set
431
432#
433# Serial drivers
434#
435CONFIG_SERIAL_8250=y
436CONFIG_SERIAL_8250_CONSOLE=y
437CONFIG_SERIAL_8250_NR_UARTS=4
438# CONFIG_SERIAL_8250_EXTENDED is not set
439
440#
441# Non-8250 serial port support
442#
443CONFIG_SERIAL_CORE=y
444CONFIG_SERIAL_CORE_CONSOLE=y
445CONFIG_UNIX98_PTYS=y
446CONFIG_LEGACY_PTYS=y
447CONFIG_LEGACY_PTY_COUNT=256
448
449#
450# IPMI
451#
452# CONFIG_IPMI_HANDLER is not set
453
454#
455# Watchdog Cards
456#
457# CONFIG_WATCHDOG is not set
458# CONFIG_RTC is not set
459# CONFIG_GEN_RTC is not set
460# CONFIG_DTLK is not set
461# CONFIG_R3964 is not set
462# CONFIG_APPLICOM is not set
463
464#
465# Ftape, the floppy tape device driver
466#
467# CONFIG_DRM is not set
468# CONFIG_RAW_DRIVER is not set
469
470#
471# I2C support
472#
473# CONFIG_I2C is not set
474
475#
476# Dallas's 1-wire bus
477#
478# CONFIG_W1 is not set
479
480#
481# Misc devices
482#
483
484#
485# Multimedia devices
486#
487# CONFIG_VIDEO_DEV is not set
488
489#
490# Digital Video Broadcasting Devices
491#
492# CONFIG_DVB is not set
493
494#
495# Graphics support
496#
497# CONFIG_FB is not set
498
499#
500# Console display driver support
501#
502# CONFIG_VGA_CONSOLE is not set
503CONFIG_DUMMY_CONSOLE=y
504# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
505
506#
507# Sound
508#
509# CONFIG_SOUND is not set
510
511#
512# USB support
513#
514# CONFIG_USB is not set
515CONFIG_USB_ARCH_HAS_HCD=y
516CONFIG_USB_ARCH_HAS_OHCI=y
517
518#
519# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
520#
521
522#
523# USB Gadget Support
524#
525# CONFIG_USB_GADGET is not set
526
527#
528# MMC/SD Card support
529#
530# CONFIG_MMC is not set
531
532#
533# InfiniBand support
534#
535# CONFIG_INFINIBAND is not set
536
537#
538# File systems
539#
540# CONFIG_EXT2_FS is not set
541# CONFIG_EXT3_FS is not set
542# CONFIG_JBD is not set
543# CONFIG_REISERFS_FS is not set
544# CONFIG_JFS_FS is not set
545# CONFIG_XFS_FS is not set
546# CONFIG_MINIX_FS is not set
547# CONFIG_ROMFS_FS is not set
548# CONFIG_QUOTA is not set
549CONFIG_DNOTIFY=y
550# CONFIG_AUTOFS_FS is not set
551# CONFIG_AUTOFS4_FS is not set
552
553#
554# CD-ROM/DVD Filesystems
555#
556# CONFIG_ISO9660_FS is not set
557# CONFIG_UDF_FS is not set
558
559#
560# DOS/FAT/NT Filesystems
561#
562# CONFIG_MSDOS_FS is not set
563# CONFIG_VFAT_FS is not set
564# CONFIG_NTFS_FS is not set
565
566#
567# Pseudo filesystems
568#
569CONFIG_PROC_FS=y
570# CONFIG_PROC_KCORE is not set
571CONFIG_SYSFS=y
572CONFIG_DEVFS_FS=y
573# CONFIG_DEVFS_MOUNT is not set
574# CONFIG_DEVFS_DEBUG is not set
575# CONFIG_DEVPTS_FS_XATTR is not set
576# CONFIG_TMPFS is not set
577# CONFIG_HUGETLB_PAGE is not set
578CONFIG_RAMFS=y
579
580#
581# Miscellaneous filesystems
582#
583# CONFIG_ADFS_FS is not set
584# CONFIG_AFFS_FS is not set
585# CONFIG_HFS_FS is not set
586# CONFIG_HFSPLUS_FS is not set
587# CONFIG_BEFS_FS is not set
588# CONFIG_BFS_FS is not set
589# CONFIG_EFS_FS is not set
590# CONFIG_CRAMFS is not set
591# CONFIG_VXFS_FS is not set
592# CONFIG_HPFS_FS is not set
593# CONFIG_QNX4FS_FS is not set
594# CONFIG_SYSV_FS is not set
595# CONFIG_UFS_FS is not set
596
597#
598# Network File Systems
599#
600CONFIG_NFS_FS=y
601CONFIG_NFS_V3=y
602# CONFIG_NFS_V4 is not set
603# CONFIG_NFS_DIRECTIO is not set
604# CONFIG_NFSD is not set
605CONFIG_ROOT_NFS=y
606CONFIG_LOCKD=y
607CONFIG_LOCKD_V4=y
608# CONFIG_EXPORTFS is not set
609CONFIG_SUNRPC=y
610# CONFIG_RPCSEC_GSS_KRB5 is not set
611# CONFIG_RPCSEC_GSS_SPKM3 is not set
612# CONFIG_SMB_FS is not set
613# CONFIG_CIFS is not set
614# CONFIG_NCP_FS is not set
615# CONFIG_CODA_FS is not set
616# CONFIG_AFS_FS is not set
617
618#
619# Partition Types
620#
621# CONFIG_PARTITION_ADVANCED is not set
622CONFIG_MSDOS_PARTITION=y
623
624#
625# Native Language Support
626#
627# CONFIG_NLS is not set
628
629#
630# Kernel hacking
631#
632CONFIG_DEBUG_KERNEL=y
633# CONFIG_DEBUG_STACKOVERFLOW is not set
634# CONFIG_DEBUG_SLAB is not set
635CONFIG_MAGIC_SYSRQ=y
636# CONFIG_DEBUG_SPINLOCK is not set
637# CONFIG_DEBUG_PAGEALLOC is not set
638# CONFIG_DEBUG_INFO is not set
639# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
640# CONFIG_KGDB is not set
641
642#
643# Security options
644#
645# CONFIG_KEYS is not set
646# CONFIG_SECURITY is not set
647
648#
649# Cryptographic options
650#
651# CONFIG_CRYPTO is not set
652
653#
654# Hardware crypto devices
655#
656
657#
658# Library routines
659#
660# CONFIG_CRC_CCITT is not set
661# CONFIG_CRC32 is not set
662# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
new file mode 100644
index 000000000000..802621dd4867
--- /dev/null
+++ b/arch/xtensa/configs/iss_defconfig
@@ -0,0 +1,531 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11-rc2
4# Fri Feb 25 19:21:24 2005
5#
6CONFIG_FRAME_POINTER=y
7CONFIG_XTENSA=y
8# CONFIG_UID16 is not set
9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
10CONFIG_HAVE_DEC_LOCK=y
11CONFIG_GENERIC_HARDIRQS=y
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_SWAP=y
25CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set
27# CONFIG_BSD_PROCESS_ACCT is not set
28CONFIG_SYSCTL=y
29# CONFIG_AUDIT is not set
30CONFIG_LOG_BUF_SHIFT=14
31# CONFIG_HOTPLUG is not set
32# CONFIG_KOBJECT_UEVENT is not set
33# CONFIG_IKCONFIG is not set
34CONFIG_EMBEDDED=y
35CONFIG_KALLSYMS=y
36# CONFIG_KALLSYMS_ALL is not set
37# CONFIG_KALLSYMS_EXTRA_PASS is not set
38CONFIG_FUTEX=y
39CONFIG_EPOLL=y
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
41CONFIG_SHMEM=y
42CONFIG_CC_ALIGN_FUNCTIONS=0
43CONFIG_CC_ALIGN_LABELS=0
44CONFIG_CC_ALIGN_LOOPS=0
45CONFIG_CC_ALIGN_JUMPS=0
46# CONFIG_TINY_SHMEM is not set
47
48#
49# Loadable module support
50#
51# CONFIG_MODULES is not set
52
53#
54# Processor type and features
55#
56CONFIG_XTENSA_ARCH_LINUX_BE=y
57# CONFIG_XTENSA_ARCH_LINUX_LE is not set
58# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
59# CONFIG_XTENSA_ARCH_S5 is not set
60# CONFIG_XTENSA_CUSTOM is not set
61CONFIG_MMU=y
62# CONFIG_XTENSA_UNALIGNED_USER is not set
63# CONFIG_PREEMPT is not set
64# CONFIG_MATH_EMULATION is not set
65# CONFIG_HIGHMEM is not set
66
67#
68# Platform options
69#
70CONFIG_XTENSA_PLATFORM_ISS=y
71# CONFIG_XTENSA_PLATFORM_XT2000 is not set
72# CONFIG_XTENSA_PLATFORM_ARUBA is not set
73# CONFIG_XTENSA_CALIBRATE_CCOUNT is not set
74CONFIG_XTENSA_CPU_CLOCK=10
75# CONFIG_GENERIC_CALIBRATE_DELAY is not set
76CONFIG_CMDLINE_BOOL=y
77CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
78CONFIG_SERIAL_CONSOLE=y
79CONFIG_XTENSA_ISS_NETWORK=y
80
81#
82# Bus options
83#
84
85#
86# PCCARD (PCMCIA/CardBus) support
87#
88# CONFIG_PCCARD is not set
89
90#
91# PC-card bridges
92#
93
94#
95# PCI Hotplug Support
96#
97
98#
99# Exectuable file formats
100#
101CONFIG_KCORE_ELF=y
102CONFIG_BINFMT_ELF=y
103# CONFIG_BINFMT_MISC is not set
104
105#
106# Device Drivers
107#
108
109#
110# Generic Driver Options
111#
112# CONFIG_STANDALONE is not set
113CONFIG_PREVENT_FIRMWARE_BUILD=y
114# CONFIG_FW_LOADER is not set
115# CONFIG_DEBUG_DRIVER is not set
116
117#
118# Memory Technology Devices (MTD)
119#
120# CONFIG_MTD is not set
121
122#
123# Parallel port support
124#
125# CONFIG_PARPORT is not set
126
127#
128# Plug and Play support
129#
130
131#
132# Block devices
133#
134# CONFIG_BLK_DEV_FD is not set
135# CONFIG_BLK_DEV_COW_COMMON is not set
136# CONFIG_BLK_DEV_LOOP is not set
137# CONFIG_BLK_DEV_NBD is not set
138# CONFIG_BLK_DEV_RAM is not set
139CONFIG_BLK_DEV_RAM_COUNT=16
140CONFIG_INITRAMFS_SOURCE=""
141# CONFIG_CDROM_PKTCDVD is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147# CONFIG_IOSCHED_AS is not set
148# CONFIG_IOSCHED_DEADLINE is not set
149# CONFIG_IOSCHED_CFQ is not set
150# CONFIG_ATA_OVER_ETH is not set
151
152#
153# ATA/ATAPI/MFM/RLL support
154#
155# CONFIG_IDE is not set
156
157#
158# SCSI device support
159#
160# CONFIG_SCSI is not set
161
162#
163# Multi-device support (RAID and LVM)
164#
165# CONFIG_MD is not set
166
167#
168# Fusion MPT device support
169#
170
171#
172# IEEE 1394 (FireWire) support
173#
174
175#
176# I2O device support
177#
178
179#
180# Networking support
181#
182CONFIG_NET=y
183
184#
185# Networking options
186#
187CONFIG_PACKET=y
188# CONFIG_PACKET_MMAP is not set
189# CONFIG_NETLINK_DEV is not set
190CONFIG_UNIX=y
191# CONFIG_NET_KEY is not set
192CONFIG_INET=y
193# CONFIG_IP_MULTICAST is not set
194# CONFIG_IP_ADVANCED_ROUTER is not set
195CONFIG_IP_PNP=y
196CONFIG_IP_PNP_DHCP=y
197CONFIG_IP_PNP_BOOTP=y
198CONFIG_IP_PNP_RARP=y
199# CONFIG_NET_IPIP is not set
200# CONFIG_NET_IPGRE is not set
201# CONFIG_ARPD is not set
202# CONFIG_SYN_COOKIES is not set
203# CONFIG_INET_AH is not set
204# CONFIG_INET_ESP is not set
205# CONFIG_INET_IPCOMP is not set
206# CONFIG_INET_TUNNEL is not set
207# CONFIG_IP_TCPDIAG is not set
208# CONFIG_IP_TCPDIAG_IPV6 is not set
209# CONFIG_IPV6 is not set
210# CONFIG_NETFILTER is not set
211
212#
213# SCTP Configuration (EXPERIMENTAL)
214#
215# CONFIG_IP_SCTP is not set
216# CONFIG_SCTP_HMAC_NONE is not set
217# CONFIG_SCTP_HMAC_SHA1 is not set
218# CONFIG_SCTP_HMAC_MD5 is not set
219# CONFIG_ATM is not set
220# CONFIG_BRIDGE is not set
221# CONFIG_VLAN_8021Q is not set
222# CONFIG_DECNET is not set
223# CONFIG_LLC2 is not set
224# CONFIG_IPX is not set
225# CONFIG_ATALK is not set
226# CONFIG_X25 is not set
227# CONFIG_LAPB is not set
228# CONFIG_NET_DIVERT is not set
229# CONFIG_ECONET is not set
230# CONFIG_WAN_ROUTER is not set
231
232#
233# QoS and/or fair queueing
234#
235# CONFIG_NET_SCHED is not set
236# CONFIG_NET_SCH_CLK_JIFFIES is not set
237# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
238# CONFIG_NET_SCH_CLK_CPU is not set
239# CONFIG_NET_CLS_ROUTE is not set
240
241#
242# Network testing
243#
244# CONFIG_NET_PKTGEN is not set
245# CONFIG_NETPOLL is not set
246# CONFIG_NET_POLL_CONTROLLER is not set
247# CONFIG_HAMRADIO is not set
248# CONFIG_IRDA is not set
249# CONFIG_BT is not set
250# CONFIG_NETDEVICES is not set
251
252#
253# ISDN subsystem
254#
255# CONFIG_ISDN is not set
256
257#
258# Telephony Support
259#
260# CONFIG_PHONE is not set
261
262#
263# Input device support
264#
265CONFIG_INPUT=y
266
267#
268# Userland interfaces
269#
270# CONFIG_INPUT_MOUSEDEV is not set
271# CONFIG_INPUT_JOYDEV is not set
272# CONFIG_INPUT_TSDEV is not set
273# CONFIG_INPUT_EVDEV is not set
274# CONFIG_INPUT_EVBUG is not set
275
276#
277# Input I/O drivers
278#
279# CONFIG_GAMEPORT is not set
280CONFIG_SOUND_GAMEPORT=y
281# CONFIG_SERIO is not set
282# CONFIG_SERIO_I8042 is not set
283
284#
285# Input Device Drivers
286#
287# CONFIG_INPUT_KEYBOARD is not set
288# CONFIG_INPUT_MOUSE is not set
289# CONFIG_INPUT_JOYSTICK is not set
290# CONFIG_INPUT_TOUCHSCREEN is not set
291# CONFIG_INPUT_MISC is not set
292
293#
294# Character devices
295#
296CONFIG_VT=y
297CONFIG_VT_CONSOLE=y
298CONFIG_HW_CONSOLE=y
299# CONFIG_SERIAL_NONSTANDARD is not set
300
301#
302# Serial drivers
303#
304# CONFIG_SERIAL_8250 is not set
305
306#
307# Non-8250 serial port support
308#
309CONFIG_UNIX98_PTYS=y
310CONFIG_LEGACY_PTYS=y
311CONFIG_LEGACY_PTY_COUNT=256
312
313#
314# IPMI
315#
316# CONFIG_IPMI_HANDLER is not set
317
318#
319# Watchdog Cards
320#
321CONFIG_WATCHDOG=y
322CONFIG_WATCHDOG_NOWAYOUT=y
323
324#
325# Watchdog Device Drivers
326#
327CONFIG_SOFT_WATCHDOG=y
328# CONFIG_RTC is not set
329# CONFIG_GEN_RTC is not set
330# CONFIG_DTLK is not set
331# CONFIG_R3964 is not set
332
333#
334# Ftape, the floppy tape device driver
335#
336# CONFIG_DRM is not set
337# CONFIG_RAW_DRIVER is not set
338
339#
340# I2C support
341#
342# CONFIG_I2C is not set
343
344#
345# Dallas's 1-wire bus
346#
347# CONFIG_W1 is not set
348
349#
350# Misc devices
351#
352
353#
354# Multimedia devices
355#
356# CONFIG_VIDEO_DEV is not set
357
358#
359# Digital Video Broadcasting Devices
360#
361# CONFIG_DVB is not set
362
363#
364# Graphics support
365#
366# CONFIG_FB is not set
367
368#
369# Console display driver support
370#
371# CONFIG_VGA_CONSOLE is not set
372CONFIG_DUMMY_CONSOLE=y
373# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
374
375#
376# Sound
377#
378# CONFIG_SOUND is not set
379
380#
381# USB support
382#
383# CONFIG_USB_ARCH_HAS_HCD is not set
384# CONFIG_USB_ARCH_HAS_OHCI is not set
385
386#
387# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
388#
389
390#
391# USB Gadget Support
392#
393# CONFIG_USB_GADGET is not set
394
395#
396# MMC/SD Card support
397#
398# CONFIG_MMC is not set
399
400#
401# InfiniBand support
402#
403# CONFIG_INFINIBAND is not set
404
405#
406# File systems
407#
408# CONFIG_EXT2_FS is not set
409# CONFIG_EXT3_FS is not set
410# CONFIG_JBD is not set
411# CONFIG_REISERFS_FS is not set
412# CONFIG_JFS_FS is not set
413# CONFIG_XFS_FS is not set
414# CONFIG_MINIX_FS is not set
415# CONFIG_ROMFS_FS is not set
416# CONFIG_QUOTA is not set
417# CONFIG_DNOTIFY is not set
418# CONFIG_AUTOFS_FS is not set
419# CONFIG_AUTOFS4_FS is not set
420
421#
422# CD-ROM/DVD Filesystems
423#
424# CONFIG_ISO9660_FS is not set
425# CONFIG_UDF_FS is not set
426
427#
428# DOS/FAT/NT Filesystems
429#
430# CONFIG_MSDOS_FS is not set
431# CONFIG_VFAT_FS is not set
432# CONFIG_NTFS_FS is not set
433
434#
435# Pseudo filesystems
436#
437CONFIG_PROC_FS=y
438CONFIG_PROC_KCORE=y
439CONFIG_SYSFS=y
440CONFIG_DEVFS_FS=y
441CONFIG_DEVFS_MOUNT=y
442# CONFIG_DEVFS_DEBUG is not set
443# CONFIG_DEVPTS_FS_XATTR is not set
444CONFIG_TMPFS=y
445# CONFIG_TMPFS_XATTR is not set
446# CONFIG_HUGETLB_PAGE is not set
447CONFIG_RAMFS=y
448
449#
450# Miscellaneous filesystems
451#
452# CONFIG_ADFS_FS is not set
453# CONFIG_AFFS_FS is not set
454# CONFIG_HFS_FS is not set
455# CONFIG_HFSPLUS_FS is not set
456# CONFIG_BEFS_FS is not set
457# CONFIG_BFS_FS is not set
458# CONFIG_EFS_FS is not set
459# CONFIG_CRAMFS is not set
460# CONFIG_VXFS_FS is not set
461# CONFIG_HPFS_FS is not set
462# CONFIG_QNX4FS_FS is not set
463# CONFIG_SYSV_FS is not set
464# CONFIG_UFS_FS is not set
465
466#
467# Network File Systems
468#
469CONFIG_NFS_FS=y
470CONFIG_NFS_V3=y
471# CONFIG_NFS_V4 is not set
472CONFIG_NFS_DIRECTIO=y
473# CONFIG_NFSD is not set
474CONFIG_ROOT_NFS=y
475CONFIG_LOCKD=y
476CONFIG_LOCKD_V4=y
477# CONFIG_EXPORTFS is not set
478CONFIG_SUNRPC=y
479# CONFIG_RPCSEC_GSS_KRB5 is not set
480# CONFIG_RPCSEC_GSS_SPKM3 is not set
481# CONFIG_SMB_FS is not set
482# CONFIG_CIFS is not set
483# CONFIG_NCP_FS is not set
484# CONFIG_CODA_FS is not set
485# CONFIG_AFS_FS is not set
486
487#
488# Partition Types
489#
490# CONFIG_PARTITION_ADVANCED is not set
491CONFIG_MSDOS_PARTITION=y
492
493#
494# Native Language Support
495#
496# CONFIG_NLS is not set
497
498#
499# Kernel hacking
500#
501CONFIG_DEBUG_KERNEL=y
502# CONFIG_DEBUG_STACKOVERFLOW is not set
503# CONFIG_DEBUG_SLAB is not set
504# CONFIG_MAGIC_SYSRQ is not set
505# CONFIG_DEBUG_SPINLOCK is not set
506# CONFIG_DEBUG_PAGEALLOC is not set
507# CONFIG_DEBUG_INFO is not set
508# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
509# CONFIG_KGDB is not set
510
511#
512# Security options
513#
514# CONFIG_KEYS is not set
515# CONFIG_SECURITY is not set
516
517#
518# Cryptographic options
519#
520# CONFIG_CRYPTO is not set
521
522#
523# Hardware crypto devices
524#
525
526#
527# Library routines
528#
529# CONFIG_CRC_CCITT is not set
530# CONFIG_CRC32 is not set
531# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 000000000000..d573017a5dde
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for the Linux/Xtensa kernel.
3#
4
5extra-y := head.o vmlinux.lds
6
7
8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
9 setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
10 pci-dma.o
11
12## windowspill.o
13
14obj-$(CONFIG_KGDB) += xtensa-stub.o
15obj-$(CONFIG_PCI) += pci.o
16obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
17
18
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 000000000000..74b1e90ef08c
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,459 @@
1/*
2 * arch/xtensa/kernel/align.S
3 *
4 * Handle unalignment exceptions in kernel space.
5 *
6 * This file is subject to the terms and conditions of the GNU General
7 * Public License. See the file "COPYING" in the main directory of
8 * this archive for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica, Inc.
11 *
12 * Rewritten by Chris Zankel <chris@zankel.net>
13 *
14 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
16 */
17
18#include <linux/linkage.h>
19#include <asm/ptrace.h>
20#include <asm/ptrace.h>
21#include <asm/current.h>
22#include <asm/offsets.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h>
25#include <asm/page.h>
26#include <asm/thread_info.h>
27
28#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
29
30/* First-level exception handler for unaligned exceptions.
31 *
32 * Note: This handler works only for kernel exceptions. Unaligned user
33 * access should get a seg fault.
34 */
35
36/* Big and little endian 16-bit values are located in
37 * different halves of a register. HWORD_START helps to
38 * abstract the notion of extracting a 16-bit value from a
39 * register.
40 * We also have to define new shifting instructions because
41 * lsb and msb are on 'opposite' ends in a register for
42 * different endian machines.
43 *
44 * Assume a memory region in ascending address:
45 * 0 1 2 3|4 5 6 7
46 *
47 * When loading one word into a register, the content of that register is:
48 * LE 3 2 1 0, 7 6 5 4
49 * BE 0 1 2 3, 4 5 6 7
50 *
51 * Masking the bits of the higher/lower address means:
52 * LE X X 0 0, 0 0 X X
53 * BE 0 0 X X, X X 0 0
54 *
55 * Shifting to higher/lower addresses, means:
56 * LE shift left / shift right
57 * BE shift right / shift left
58 *
59 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
60 * LE mask 0 0 X X / shift left
61 * BE shift left / mask 0 0 X X
62 */
63
64#define UNALIGNED_USER_EXCEPTION
65
66#if XCHAL_HAVE_BE
67
68#define HWORD_START 16
69#define INSN_OP0 28
70#define INSN_T 24
71#define INSN_OP1 16
72
73.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
74.macro __ssa8 r; ssa8b \r; .endm
75.macro __ssa8r r; ssa8l \r; .endm
76.macro __sh r, s; srl \r, \s; .endm
77.macro __sl r, s; sll \r, \s; .endm
78.macro __exth r, s; extui \r, \s, 0, 16; .endm
79.macro __extl r, s; slli \r, \s, 16; .endm
80
81#else
82
83#define HWORD_START 0
84#define INSN_OP0 0
85#define INSN_T 4
86#define INSN_OP1 12
87
88.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
89.macro __ssa8 r; ssa8l \r; .endm
90.macro __ssa8r r; ssa8b \r; .endm
91.macro __sh r, s; sll \r, \s; .endm
92.macro __sl r, s; srl \r, \s; .endm
93.macro __exth r, s; slli \r, \s, 16; .endm
94.macro __extl r, s; extui \r, \s, 0, 16; .endm
95
96#endif
97
98/*
99 * xxxx xxxx = imm8 field
100 * yyyy = imm4 field
101 * ssss = s field
102 * tttt = t field
103 *
104 * 16 0
105 * -------------------
106 * L32I.N yyyy ssss tttt 1000
107 * S32I.N yyyy ssss tttt 1001
108 *
109 * 23 0
110 * -----------------------------
111 * res 0000 0010
112 * L16UI xxxx xxxx 0001 ssss tttt 0010
113 * L32I xxxx xxxx 0010 ssss tttt 0010
114 * XXX 0011 ssss tttt 0010
115 * XXX 0100 ssss tttt 0010
116 * S16I xxxx xxxx 0101 ssss tttt 0010
117 * S32I xxxx xxxx 0110 ssss tttt 0010
118 * XXX 0111 ssss tttt 0010
119 * XXX 1000 ssss tttt 0010
120 * L16SI xxxx xxxx 1001 ssss tttt 0010
121 * XXX 1010 0010
122 * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
123 * XXX 1100 0010
124 * XXX 1101 0010
125 * XXX 1110 0010
126 * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
127 * -----------------------------
128 * ^ ^ ^
129 * sub-opcode (NIBBLE_R) -+ | |
130 * t field (NIBBLE_T) -----------+ |
131 * major opcode (NIBBLE_OP0) --------------+
132 */
133
134#define OP0_L32I_N 0x8 /* load immediate narrow */
135#define OP0_S32I_N 0x9 /* store immediate narrow */
136#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
137#define OP1_SI_BIT 2 /* OP1 bit number for stores */
138
139#define OP1_L32I 0x2
140#define OP1_L16UI 0x1
141#define OP1_L16SI 0x9
142#define OP1_L32AI 0xb
143
144#define OP1_S32I 0x6
145#define OP1_S16I 0x5
146#define OP1_S32RI 0xf
147
148/*
149 * Entry condition:
150 *
151 * a0: trashed, original value saved on stack (PT_AREG0)
152 * a1: a1
153 * a2: new stack pointer, original in DEPC
154 * a3: dispatch table
155 * depc: a2, original value saved on stack (PT_DEPC)
156 * excsave_1: a3
157 *
158 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
159 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
160 */
161
162
163ENTRY(fast_unaligned)
164
165 /* Note: We don't expect the address to be aligned on a word
166 * boundary. After all, the processor generated that exception
167 * and it would be a hardware fault.
168 */
169
170 /* Save some working register */
171
172 s32i a4, a2, PT_AREG4
173 s32i a5, a2, PT_AREG5
174 s32i a6, a2, PT_AREG6
175 s32i a7, a2, PT_AREG7
176 s32i a8, a2, PT_AREG8
177
178 rsr a0, DEPC
179 xsr a3, EXCSAVE_1
180 s32i a0, a2, PT_AREG2
181 s32i a3, a2, PT_AREG3
182
183 /* Keep value of SAR in a0 */
184
185 rsr a0, SAR
186 rsr a8, EXCVADDR # load unaligned memory address
187
188 /* Now, identify one of the following load/store instructions.
189 *
190 * The only possible danger of a double exception on the
191 * following l32i instructions is kernel code in vmalloc
192 * memory. The processor was just executing at the EPC_1
193 * address, and indeed, already fetched the instruction. That
194 * guarantees a TLB mapping, which hasn't been replaced by
195 * this unaligned exception handler that uses only static TLB
196 * mappings. However, high-level interrupt handlers might
197 * modify TLB entries, so for the generic case, we register a
198 * TABLE_FIXUP handler here, too.
199 */
200
201 /* a3...a6 saved on stack, a2 = SP */
202
203 /* Extract the instruction that caused the unaligned access. */
204
205 rsr a7, EPC_1 # load exception address
206 movi a3, ~3
207 and a3, a3, a7 # mask lower bits
208
209 l32i a4, a3, 0 # load 2 words
210 l32i a5, a3, 4
211
212 __ssa8 a7
213 __src_b a4, a4, a5 # a4 has the instruction
214
215 /* Analyze the instruction (load or store?). */
216
217 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
218
219#if XCHAL_HAVE_NARROW
220 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
221 addi a6, a5, -OP0_S32I_N
222 _beqz a6, .Lstore # S32I.N, do a store
223#endif
224 /* 'store indicator bit' not set, jump */
225 _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
226
227 /* Store: Jump to table entry to get the value in the source register.*/
228
229.Lstore:movi a5, .Lstore_table # table
230 extui a6, a4, INSN_T, 4 # get source register
231 addx8 a5, a6, a5
232 jx a5 # jump into table
233
234 /* Invalid instruction, CRITICAL! */
235.Linvalid_instruction_load:
236 j .Linvalid_instruction
237
238 /* Load: Load memory address. */
239
240.Lload: movi a3, ~3
241 and a3, a3, a8 # align memory address
242
243 __ssa8 a8
244#ifdef UNALIGNED_USER_EXCEPTION
245 addi a3, a3, 8
246 l32e a5, a3, -8
247 l32e a6, a3, -4
248#else
249 l32i a5, a3, 0
250 l32i a6, a3, 4
251#endif
252 __src_b a3, a5, a6 # a3 has the data word
253
254#if XCHAL_HAVE_NARROW
255 addi a7, a7, 2 # increment PC (assume 16-bit insn)
256
257 extui a5, a4, INSN_OP0, 4
258 _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
259
260 addi a7, a7, 1
261#else
262 addi a7, a7, 3
263#endif
264
265 extui a5, a4, INSN_OP1, 4
266 _beqi a5, OP1_L32I, 1f # l32i: jump
267
268 extui a3, a3, 0, 16 # extract lower 16 bits
269 _beqi a5, OP1_L16UI, 1f
270 addi a5, a5, -OP1_L16SI
271 _bnez a5, .Linvalid_instruction_load
272
273 /* sign extend value */
274
275 slli a3, a3, 16
276 srai a3, a3, 16
277
278 /* Set target register. */
279
2801:
281
282#if XCHAL_HAVE_LOOP
283 rsr a3, LEND # check if we reached LEND
284 bne a7, a3, 1f
285 rsr a3, LCOUNT # and LCOUNT != 0
286 beqz a3, 1f
287 addi a3, a3, -1 # decrement LCOUNT and set
288 rsr a7, LBEG # set PC to LBEGIN
289 wsr a3, LCOUNT
290#endif
291
2921: wsr a7, EPC_1 # skip load instruction
293 extui a4, a4, INSN_T, 4 # extract target register
294 movi a5, .Lload_table
295 addx8 a4, a4, a5
296 jx a4 # jump to entry for target register
297
298 .align 8
299.Lload_table:
300 s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
301 mov a1, a3; _j .Lexit; .align 8 # fishy??
302 s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
303 s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
304 s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
305 s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
306 s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
307 s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
308 s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
309 mov a9, a3 ; _j .Lexit; .align 8
310 mov a10, a3 ; _j .Lexit; .align 8
311 mov a11, a3 ; _j .Lexit; .align 8
312 mov a12, a3 ; _j .Lexit; .align 8
313 mov a13, a3 ; _j .Lexit; .align 8
314 mov a14, a3 ; _j .Lexit; .align 8
315 mov a15, a3 ; _j .Lexit; .align 8
316
317.Lstore_table:
318 l32i a3, a2, PT_AREG0; _j 1f; .align 8
319 mov a3, a1; _j 1f; .align 8 # fishy??
320 l32i a3, a2, PT_AREG2; _j 1f; .align 8
321 l32i a3, a2, PT_AREG3; _j 1f; .align 8
322 l32i a3, a2, PT_AREG4; _j 1f; .align 8
323 l32i a3, a2, PT_AREG5; _j 1f; .align 8
324 l32i a3, a2, PT_AREG6; _j 1f; .align 8
325 l32i a3, a2, PT_AREG7; _j 1f; .align 8
326 l32i a3, a2, PT_AREG8; _j 1f; .align 8
327 mov a3, a9 ; _j 1f; .align 8
328 mov a3, a10 ; _j 1f; .align 8
329 mov a3, a11 ; _j 1f; .align 8
330 mov a3, a12 ; _j 1f; .align 8
331 mov a3, a13 ; _j 1f; .align 8
332 mov a3, a14 ; _j 1f; .align 8
333 mov a3, a15 ; _j 1f; .align 8
334
3351: # a7: instruction pointer, a4: instruction, a3: value
336
337 movi a6, 0 # mask: ffffffff:00000000
338
339#if XCHAL_HAVE_NARROW
340 addi a7, a7, 2 # incr. PC,assume 16-bit instruction
341
342 extui a5, a4, INSN_OP0, 4 # extract OP0
343 addi a5, a5, -OP0_S32I_N
344 _beqz a5, 1f # s32i.n: jump
345
346 addi a7, a7, 1 # increment PC, 32-bit instruction
347#else
348 addi a7, a7, 3 # increment PC, 32-bit instruction
349#endif
350
351 extui a5, a4, INSN_OP1, 4 # extract OP1
352 _beqi a5, OP1_S32I, 1f # jump if 32 bit store
353 _bnei a5, OP1_S16I, .Linvalid_instruction_store
354
355 movi a5, -1
356 __extl a3, a3 # get 16-bit value
357 __exth a6, a5 # get 16-bit mask ffffffff:ffff0000
358
359 /* Get memory address */
360
3611:
362#if XCHAL_HAVE_LOOP
363 rsr a3, LEND # check if we reached LEND
364 bne a7, a3, 1f
365 rsr a3, LCOUNT # and LCOUNT != 0
366 beqz a3, 1f
367 addi a3, a3, -1 # decrement LCOUNT and set
368 rsr a7, LBEG # set PC to LBEGIN
369 wsr a3, LCOUNT
370#endif
371
3721: wsr a7, EPC_1 # skip store instruction
373 movi a4, ~3
374 and a4, a4, a8 # align memory address
375
376 /* Insert value into memory */
377
378 movi a5, -1 # mask: ffffffff:XXXX0000
379#ifdef UNALIGNED_USER_EXCEPTION
380 addi a4, a4, 8
381#endif
382
383 __ssa8r a8
384 __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
385 __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
386#ifdef UNALIGNED_USER_EXCEPTION
387 l32e a5, a4, -8
388#else
389 l32i a5, a4, 0 # load lower address word
390#endif
391 and a5, a5, a7 # mask
392 __sh a7, a3 # shift value
393 or a5, a5, a7 # or with original value
394#ifdef UNALIGNED_USER_EXCEPTION
395 s32e a5, a4, -8
396 l32e a7, a4, -4
397#else
398 s32i a5, a4, 0 # store
399 l32i a7, a4, 4 # same for upper address word
400#endif
401 __sl a5, a3
402 and a6, a7, a6
403 or a6, a6, a5
404#ifdef UNALIGNED_USER_EXCEPTION
405 s32e a6, a4, -4
406#else
407 s32i a6, a4, 4
408#endif
409
410 /* Done. restore stack and return */
411
412.Lexit:
413 movi a4, 0
414 rsr a3, EXCSAVE_1
415 s32i a4, a3, EXC_TABLE_FIXUP
416
417 /* Restore working register */
418
419 l32i a7, a2, PT_AREG7
420 l32i a6, a2, PT_AREG6
421 l32i a5, a2, PT_AREG5
422 l32i a4, a2, PT_AREG4
423 l32i a3, a2, PT_AREG3
424
425 /* restore SAR and return */
426
427 wsr a0, SAR
428 l32i a0, a2, PT_AREG0
429 l32i a2, a2, PT_AREG2
430 rfe
431
432 /* We cannot handle this exception. */
433
434 .extern _kernel_exception
435.Linvalid_instruction_store:
436.Linvalid_instruction:
437
438 /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
439
440 l32i a8, a2, PT_AREG8
441 l32i a7, a2, PT_AREG7
442 l32i a6, a2, PT_AREG6
443 l32i a5, a2, PT_AREG5
444 l32i a4, a2, PT_AREG4
445 wsr a0, SAR
446 mov a1, a2
447
448 rsr a0, PS
449 bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
450
451 movi a0, _kernel_exception
452 jx a0
453
4541: movi a0, _user_exception
455 jx a0
456
457
458#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
459
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 000000000000..840cd9a1d3d2
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,94 @@
1/*
2 * arch/xtensa/kernel/asm-offsets.c
3 *
4 * Generates definitions from c-type structures used by assembly sources.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <asm/processor.h>
16
17#include <linux/types.h>
18#include <linux/sched.h>
19#include <linux/stddef.h>
20#include <linux/thread_info.h>
21#include <linux/ptrace.h>
22#include <asm/ptrace.h>
23#include <asm/processor.h>
24#include <asm/uaccess.h>
25
26#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
27#define BLANK() asm volatile("\n->" : : )
28
29int main(void)
30{
31 /* struct pt_regs */
32 DEFINE(PT_PC, offsetof (struct pt_regs, pc));
33 DEFINE(PT_PS, offsetof (struct pt_regs, ps));
34 DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
35 DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
36 DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
37 DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
38 DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
39 DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
40 DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
41 DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
42 DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
43 DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
44 DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
45 DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
46 DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
47 DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
48 DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
49 DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
50 DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
51 DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
52 DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
53 DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
54 DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
55 DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
56 DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
57 DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
58 DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
59 DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
60 DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
61 DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
62 DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
63 DEFINE(PT_SIZE, sizeof(struct pt_regs));
64 DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
65 DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
66 BLANK();
67
68 /* struct task_struct */
69 DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
70 DEFINE(TASK_MM, offsetof (struct task_struct, mm));
71 DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
72 DEFINE(TASK_PID, offsetof (struct task_struct, pid));
73 DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
74 DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
75 DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
76 BLANK();
77
78 /* struct thread_info (offset from start_struct) */
79 DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
80 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
81 DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
82 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
83 BLANK();
84
85 /* struct mm_struct */
86 DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
87 DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
88 DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
89 BLANK();
90 DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
91 return 0;
92}
93
94
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 000000000000..356192a4d39d
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,201 @@
1/*
2 * arch/xtensa/kernel/coprocessor.S
3 *
4 * Xtensa processor configuration-specific table of coprocessor and
5 * other custom register layout information.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2003 - 2005 Tensilica Inc.
12 *
13 * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
14 */
15
16/*
17 * This module contains a table that describes the layout of the various
18 * custom registers and states associated with each coprocessor, as well
19 * as those not associated with any coprocessor ("extra state").
20 * This table is included with core dumps and is available via the ptrace
21 * interface, allowing the layout of such register/state information to
22 * be modified in the kernel without affecting the debugger. Each
23 * register or state is identified using a 32-bit "libdb target number"
24 * assigned when the Xtensa processor is generated.
25 */
26
27#include <linux/config.h>
28#include <linux/linkage.h>
29#include <asm/processor.h>
30
31#if XCHAL_HAVE_CP
32
33#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
34
35ENTRY(release_coprocessors)
36
	/* Release every coprocessor owned by the task passed in a2:
	 * walk the coprocessor_info owner table from the last entry down,
	 * clearing the owner field and the matching CPENABLE bit.
	 * Interrupts are raised to LOCKLEVEL for the whole walk and the
	 * previous PS is restored on exit.
	 */
37 entry a1, 16
38 # a2: task
39 movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
40 movi a4, coprocessor_info+CP_LAST # a4: owner-table
41 # a5: tmp
42 movi a6, 0 # a6: 0
43 rsil a7, LOCKLEVEL # a7: PS
44
451: /* Check if task is coprocessor owner of coprocessor[i]. */
46
47 l32i a5, a4, COPROCESSOR_INFO_OWNER
48 srli a3, a3, 1
49 beqz a3, 1f
50 addi a4, a4, -8
	/* NOTE(review): this branch loops while owner == task and falls
	 * through (to the clearing code) when owner != task, which is the
	 * opposite of what the comments above and below describe; also a4
	 * has already been decremented, so the store below clears the NEXT
	 * (lower) table entry, and the -8 stride is hard-coded instead of
	 * using COPROCESSOR_INFO_SIZE — verify all three against the
	 * coprocessor_info layout in processor.h.
	 */
51 beq a2, a5, 1b
52
53 /* Found an entry: Clear entry CPENABLE bit to disable CP. */
54
55 rsr a5, CPENABLE
56 s32i a6, a4, COPROCESSOR_INFO_OWNER
57 xor a5, a3, a5
58 wsr a5, CPENABLE
59
60 bnez a3, 1b
61
	/* Done: restore the caller's PS (interrupt level) and return. */
621: wsr a7, PS
63 rsync
64 retw
65
66
67ENTRY(disable_coprocessor)
	/* Clear the CPENABLE bit selected by the coprocessor number in a2,
	 * with interrupts masked to LOCKLEVEL around the read-modify-write.
	 * a3 = CPENABLE; a4 = 1 << a2 (built via SAR shift); the and/xor
	 * pair clears exactly that bit without touching the others.
	 */
68 entry sp, 16
69 rsil a7, LOCKLEVEL
70 rsr a3, CPENABLE
71 movi a4, 1
72 ssl a2
73 sll a4, a4
74 and a4, a3, a4
75 xor a3, a3, a4
76 wsr a3, CPENABLE
77 wsr a7, PS
78 rsync
79 retw
80
81ENTRY(enable_coprocessor)
	/* Set the CPENABLE bit selected by the coprocessor number in a2,
	 * mirroring disable_coprocessor but using 'or' to set the bit.
	 * Interrupts are masked to LOCKLEVEL around the read-modify-write.
	 */
82 entry sp, 16
83 rsil a7, LOCKLEVEL
84 rsr a3, CPENABLE
85 movi a4, 1
86 ssl a2
87 sll a4, a4
88 or a3, a3, a4
89 wsr a3, CPENABLE
90 wsr a7, PS
91 rsync
92 retw
93
94#endif
95
	/* Thin windowed-ABI wrappers around the Xtensa HAL save/restore
	 * macros. The save area pointer is supplied by the caller in a2
	 * (per the funcbody macro calling convention); the macros may
	 * clobber a2..a15 — callers must not rely on those registers.
	 */
96ENTRY(save_coprocessor_extra)
97 entry sp, 16
98 xchal_extra_store_funcbody
99 retw
100
101ENTRY(restore_coprocessor_extra)
102 entry sp, 16
103 xchal_extra_load_funcbody
104 retw
105
106ENTRY(save_coprocessor_registers)
107 entry sp, 16
108 xchal_cpi_store_funcbody
109 retw
110
111ENTRY(restore_coprocessor_registers)
112 entry sp, 16
113 xchal_cpi_load_funcbody
114 retw
115
116
117/*
118 * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
119 * describe the contents of coprocessor & extra save areas in terms of
120 * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
121 * latter macros here; they expand into a table of the format we want.
122 * The general format is:
123 *
124 * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
125 * bitmask, rsv2, rsv3)
126 * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
127 * bitmask, rsv2, rsv3)
128 * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
129 * numentries, contentsize, regname_base,
130 * regfile_name, rsv2, rsv3)
131 *
132 * For this table, we only care about the <libdbnum>, <offset> and <size>
133 * fields.
134 */
135
136/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
137
	/* Each CONTENTS_LIBDB_* macro invocation (expanded from the
	 * XCHAL_*_SA_CONTENTS_LIBDB lists in core.h) becomes one reg_entry,
	 * so only libdbnum/offset/size survive into the emitted table.
	 */
138#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
139 bitmask, rsv2, rsv3) \
140 reg_entry libdbnum, offset, size ;
141#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
142 bitmask, rsv2, rsv3) \
143 reg_entry libdbnum, offset, size ;
144#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
145 numentries, contentsize, regname_base, \
146 regfile_name, rsv2, rsv3) \
147 reg_entry libdbnum, offset, size ;
148
149/* A single table entry: */
150 .macro reg_entry libdbnum, offset, size
	/* If the save area has a gap before this entry, emit a 0xFCxxxxxx
	 * padding word encoding the gap size so consumers can skip it.
	 */
151 .ifne (__last_offset-(__last_group_offset+\offset))
152 /* padding entry */
153 .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
154 .endif
155 .word \libdbnum /* actual entry */
156 .set __last_offset, __last_group_offset+\offset+\size
157 .endm /* reg_entry */
158
159
160/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
	/* Rounds the running offset up to the group's alignment and, when the
	 * group is non-empty, emits a 0xFD marker word carrying the
	 * coprocessor number and entry count.
	 */
161 .macro reg_group cpnum, num_entries, align
162 .set __last_group_offset, (__last_offset + \align- 1) & -\align
163 .ifne \num_entries
164 .word 0xFD000000+(\cpnum<<16)+\num_entries
165 .endif
166 .endm /* reg_group */
167
168/*
169 * Register info tables.
170 * Emitted into .rodata; group 0xFF is the "extra" (non-coprocessor)
171 * state, followed by one group per coprocessor 0..7.
172 */
173 .section .rodata, "a"
174 .globl _xtensa_reginfo_tables
175 .globl _xtensa_reginfo_table_size
176 .align 4
_xtensa_reginfo_table_size:
177 .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
178
_xtensa_reginfo_tables:
180 .set __last_offset, 0
181 reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
182 XCHAL_EXTRA_SA_CONTENTS_LIBDB
183 reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
184 XCHAL_CP0_SA_CONTENTS_LIBDB
185 reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
186 XCHAL_CP1_SA_CONTENTS_LIBDB
187 reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
188 XCHAL_CP2_SA_CONTENTS_LIBDB
189 reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
190 XCHAL_CP3_SA_CONTENTS_LIBDB
191 reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
192 XCHAL_CP4_SA_CONTENTS_LIBDB
193 reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
194 XCHAL_CP5_SA_CONTENTS_LIBDB
195 reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
196 XCHAL_CP6_SA_CONTENTS_LIBDB
197 reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
198 XCHAL_CP7_SA_CONTENTS_LIBDB
199 .word 0xFC000000 /* invalid register number, marks end of table */
_xtensa_reginfo_table_end:
201
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 000000000000..c64a01f71de6
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,1996 @@
1/*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2004-2005 by Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/linkage.h>
17#include <asm/offsets.h>
18#include <asm/processor.h>
19#include <asm/thread_info.h>
20#include <asm/uaccess.h>
21#include <asm/unistd.h>
22#include <asm/ptrace.h>
23#include <asm/current.h>
24#include <asm/pgtable.h>
25#include <asm/page.h>
26#include <asm/signal.h>
27#include <xtensa/coreasm.h>
28
29/* Unimplemented features. */
30
31#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
32#undef KERNEL_STACK_OVERFLOW_CHECK
33#undef PREEMPTIBLE_KERNEL
34#undef ALLOCA_EXCEPTION_IN_IRAM
35
36/* Not well tested.
37 *
38 * - fast_coprocessor
39 */
40
41/*
42 * Macro to find first bit set in WINDOWBASE from the left + 1
43 *
44 * 100....0 -> 1
45 * 010....0 -> 2
46 * 000....1 -> WSBITS
47 */
48
	/* ffs_ws: find-first-set from the left within a WSBITS-wide mask.
	 * Uses the single NSAU instruction when the core has it; otherwise
	 * a branch-free-ish binary search over 16/8/4/2/1-bit halves.
	 * \mask is clobbered in the fallback path.
	 */
49 .macro ffs_ws bit mask
50
51#if XCHAL_HAVE_NSA
52 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
53 addi \bit, \bit, WSBITS - 32 + 1 # uppest bit set -> return 1
54#else
55 movi \bit, WSBITS
56#if WSBITS > 16
57 _bltui \mask, 0x10000, 99f
58 addi \bit, \bit, -16
59 extui \mask, \mask, 16, 16
60#endif
61#if WSBITS > 8
6299: _bltui \mask, 0x100, 99f
63 addi \bit, \bit, -8
64 srli \mask, \mask, 8
65#endif
6699: _bltui \mask, 0x10, 99f
67 addi \bit, \bit, -4
68 srli \mask, \mask, 4
6999: _bltui \mask, 0x4, 99f
70 addi \bit, \bit, -2
71 srli \mask, \mask, 2
7299: _bltui \mask, 0x2, 99f
73 addi \bit, \bit, -1
7499:
75
76#endif
77 .endm
78
79/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
80
81/*
82 * First-level exception handler for user exceptions.
83 * Save some special registers, extra states and all registers in the AR
84 * register file that were in use in the user task, and jump to the common
85 * exception code.
86 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
87 * save them for kernel exceptions).
88 *
89 * Entry condition for user_exception:
90 *
91 * a0: trashed, original value saved on stack (PT_AREG0)
92 * a1: a1
93 * a2: new stack pointer, original value in depc
94 * a3: dispatch table
95 * depc: a2, original value saved on stack (PT_DEPC)
96 * excsave1: a3
97 *
98 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
99 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
100 *
101 * Entry condition for _user_exception:
102 *
103 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
104 * excsave has been restored, and
105 * stack pointer (a1) has been set.
106 *
107 * Note: _user_exception might be at an odd adress. Don't use call0..call12
108 */
109
110ENTRY(user_exception)
111
112 /* Save a2, a3, and depc, restore excsave_1 and set SP. */
113
114 xsr a3, EXCSAVE_1
115 rsr a0, DEPC
116 s32i a1, a2, PT_AREG1
117 s32i a0, a2, PT_AREG2
118 s32i a3, a2, PT_AREG3
119 mov a1, a2
120
121 .globl _user_exception
_user_exception:
123
124 /* Save SAR and turn off single stepping */
125
126 movi a2, 0
127 rsr a3, SAR
128 wsr a2, ICOUNTLEVEL
129 s32i a3, a1, PT_SAR
130
131 /* Rotate ws so that the current windowbase is at bit0. */
132 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
133
134 rsr a2, WINDOWBASE
135 rsr a3, WINDOWSTART
136 ssr a2
137 s32i a2, a1, PT_WINDOWBASE
138 s32i a3, a1, PT_WINDOWSTART
139 slli a2, a3, 32-WSBITS
140 src a2, a3, a2
141 srli a2, a2, 32-WSBITS
142 s32i a2, a1, PT_WMASK # needed for restoring registers
143
144 /* Save only live registers. */
145
146 _bbsi.l a2, 1, 1f
147 s32i a4, a1, PT_AREG4
148 s32i a5, a1, PT_AREG5
149 s32i a6, a1, PT_AREG6
150 s32i a7, a1, PT_AREG7
151 _bbsi.l a2, 2, 1f
152 s32i a8, a1, PT_AREG8
153 s32i a9, a1, PT_AREG9
154 s32i a10, a1, PT_AREG10
155 s32i a11, a1, PT_AREG11
156 _bbsi.l a2, 3, 1f
157 s32i a12, a1, PT_AREG12
158 s32i a13, a1, PT_AREG13
159 s32i a14, a1, PT_AREG14
160 s32i a15, a1, PT_AREG15
161 _bnei a2, 1, 1f # only one valid frame?
162
163 /* Only one valid frame, skip saving regs. */
164
165 j 2f
166
167 /* Save the remaining registers.
168 * We have to save all registers up to the first '1' from
169 * the right, except the current frame (bit 0).
170 * Assume a2 is: 001001000110001
171 * All register frames starting from the top field to the marked '1'
172 * must be saved.
173 */
174
1751: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
176 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
177 and a3, a3, a2 # max. only one bit is set
178
179 /* Find number of frames to save */
180
181 ffs_ws a0, a3 # number of frames to the '1' from left
182
183 /* Store information into WMASK:
184 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
185 * bits 4...: number of valid 4-register frames
186 */
187
188 slli a3, a0, 4 # number of frames to save in bits 8..4
189 extui a2, a2, 0, 4 # mask for the first 16 registers
190 or a2, a3, a2
191 s32i a2, a1, PT_WMASK # needed when we restore the reg-file
192
193 /* Save 4 registers at a time */
194
1951: rotw -1
196 s32i a0, a5, PT_AREG_END - 16
197 s32i a1, a5, PT_AREG_END - 12
198 s32i a2, a5, PT_AREG_END - 8
199 s32i a3, a5, PT_AREG_END - 4
200 addi a0, a4, -1
201 addi a1, a5, -16
202 _bnez a0, 1b
203
204 /* WINDOWBASE still in SAR! */
205
206 rsr a2, SAR # original WINDOWBASE
207 movi a3, 1
208 ssl a2
209 sll a3, a3
210 wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
211 wsr a2, WINDOWBASE # and WINDOWSTART
212 rsync
213
214 /* We are back to the original stack pointer (a1) */
215
2162:
217#if XCHAL_EXTRA_SA_SIZE
218
219 /* For user exceptions, save the extra state into the user's TCB.
220 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
221 */
222
223 GET_CURRENT(a2,a1)
224 addi a2, a2, THREAD_CP_SAVE
225 xchal_extra_store_funcbody
226#endif
227
228 /* Now, jump to the common exception handler. */
229
230 j common_exception
231
232
233/*
234 * First-level exit handler for kernel exceptions
235 * Save special registers and the live window frame.
236 * Note: Even though we changes the stack pointer, we don't have to do a
237 * MOVSP here, as we do that when we return from the exception.
238 * (See comment in the kernel exception exit code)
239 *
240 * Entry condition for kernel_exception:
241 *
242 * a0: trashed, original value saved on stack (PT_AREG0)
243 * a1: a1
244 * a2: new stack pointer, original in DEPC
245 * a3: dispatch table
246 * depc: a2, original value saved on stack (PT_DEPC)
247 * excsave_1: a3
248 *
249 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
250 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
251 *
252 * Entry condition for _kernel_exception:
253 *
254 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
255 * excsave has been restored, and
256 * stack pointer (a1) has been set.
257 *
258 * Note: _kernel_exception might be at an odd adress. Don't use call0..call12
259 */
260
261ENTRY(kernel_exception)
262
263 /* Save a0, a2, a3, DEPC and set SP. */
264
265 xsr a3, EXCSAVE_1 # restore a3, excsave_1
266 rsr a0, DEPC # get a2
267 s32i a1, a2, PT_AREG1
268 s32i a0, a2, PT_AREG2
269 s32i a3, a2, PT_AREG3
270 mov a1, a2
271
272 .globl _kernel_exception
_kernel_exception:
274
275 /* Save SAR and turn off single stepping */
276
277 movi a2, 0
278 rsr a3, SAR
279 wsr a2, ICOUNTLEVEL
280 s32i a3, a1, PT_SAR
281
282 /* Rotate ws so that the current windowbase is at bit0. */
283 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
284
285 rsr a2, WINDOWBASE # don't need to save these, we only
286 rsr a3, WINDOWSTART # need shifted windowstart: windowmask
287 ssr a2
288 slli a2, a3, 32-WSBITS
289 src a2, a3, a2
290 srli a2, a2, 32-WSBITS
291 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
292
293 /* Save only the live window-frame */
294
295 _bbsi.l a2, 1, 1f
296 s32i a4, a1, PT_AREG4
297 s32i a5, a1, PT_AREG5
298 s32i a6, a1, PT_AREG6
299 s32i a7, a1, PT_AREG7
300 _bbsi.l a2, 2, 1f
301 s32i a8, a1, PT_AREG8
302 s32i a9, a1, PT_AREG9
303 s32i a10, a1, PT_AREG10
304 s32i a11, a1, PT_AREG11
305 _bbsi.l a2, 3, 1f
306 s32i a12, a1, PT_AREG12
307 s32i a13, a1, PT_AREG13
308 s32i a14, a1, PT_AREG14
309 s32i a15, a1, PT_AREG15
310
3111:
312
313#ifdef KERNEL_STACK_OVERFLOW_CHECK
314
315 /* Stack overflow check, for debugging */
	/* NOTE(review): placeholder code — the XX width and SIZE?? constant
	 * are not real symbols; this #ifdef branch cannot assemble as-is
	 * (the feature is #undef'd at the top of the file).
	 */
316 extui a2, a1, TASK_SIZE_BITS,XX
317 movi a3, SIZE??
318 _bge a2, a3, out_of_stack_panic
319
320#endif
321
322/*
323 * This is the common exception handler.
324 * We get here from the user exception handler or simply by falling through
325 * from the kernel exception handler.
326 * Save the remaining special registers, switch to kernel mode, and jump
327 * to the second-level exception handler.
328 *
329 */
330
common_exception:
332
333 /* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */
334
335 rsr a2, DEBUGCAUSE
336 rsr a3, EPC_1
337 s32i a2, a1, PT_DEBUGCAUSE
338 s32i a3, a1, PT_PC
339
340 rsr a3, EXCVADDR
341 movi a2, 0
342 s32i a3, a1, PT_EXCVADDR
343 xsr a2, LCOUNT
344 s32i a2, a1, PT_LCOUNT
345
346 /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
347
348 rsr a0, EXCCAUSE
349 movi a3, 0
350 rsr a2, EXCSAVE_1
351 s32i a0, a1, PT_EXCCAUSE
352 s32i a3, a2, EXC_TABLE_FIXUP
353
354 /* All unrecoverable states are saved on stack, now, and a1 is valid,
355 * so we can allow exceptions and interrupts (*) again.
356 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
357 *
358 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
359 * (interrupts disabled) and if this exception is not an interrupt.
360 */
361
	/* EXCCAUSE 4 is the level-1 interrupt cause; a0-4 == 0 detects it. */
362 rsr a3, PS
363 addi a0, a0, -4
364 movi a2, 1
365 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
366 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
367 movi a2, PS_WOE_MASK
368 or a3, a3, a2
369 rsr a0, EXCCAUSE
370 xsr a3, PS
371
372 s32i a3, a1, PT_PS # save ps
373
374 /* Save LBEG, LEND */
375
376 rsr a2, LBEG
377 rsr a3, LEND
378 s32i a2, a1, PT_LBEG
379 s32i a3, a1, PT_LEND
380
381 /* Go to second-level dispatcher. Set up parameters to pass to the
382 * exception handler and call the exception handler.
383 */
384
385 movi a4, exc_table
386 mov a6, a1 # pass stack frame
387 mov a7, a0 # pass EXCCAUSE
388 addx4 a4, a0, a4
389 l32i a4, a4, EXC_TABLE_DEFAULT # load handler
390
391 /* Call the second-level handler */
392
393 callx4 a4
394
395 /* Jump here for exception exit */
396
common_exception_return:
398
399 /* Jump if we are returning from kernel exceptions. */
400
4011: l32i a3, a1, PT_PS
402 _bbsi.l a3, PS_UM_SHIFT, 2f
403 j kernel_exception_exit
404
405 /* Specific to a user exception exit:
406 * We need to check some flags for signal handling and rescheduling,
407 * and have to restore WB and WS, extra states, and all registers
408 * in the register file that were in use in the user task.
409 */
410
4112: wsr a3, PS /* disable interrupts */
412
413 /* Check for signals (keep interrupts disabled while we read TI_FLAGS)
414 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
415 */
416
417 GET_THREAD_INFO(a2,a1)
418 l32i a4, a2, TI_FLAGS
419
420 /* Enable interrupts again.
421 * Note: When we get here, we certainly have handled any interrupts.
422 * (Hint: There is only one user exception frame on stack)
423 */
424
425 movi a3, PS_WOE_MASK
426
427 _bbsi.l a4, TIF_NEED_RESCHED, 3f
428 _bbci.l a4, TIF_SIGPENDING, 4f
429
430#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
431 l32i a4, a1, PT_DEPC
432 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
433#endif
434
435 /* Reenable interrupts and call do_signal() */
436
437 wsr a3, PS
438 movi a4, do_signal # int do_signal(struct pt_regs*, sigset_t*)
439 mov a6, a1
440 movi a7, 0
441 callx4 a4
442 j 1b
443
4443: /* Reenable interrupts and reschedule */
445
446 wsr a3, PS
447 movi a4, schedule # void schedule (void)
448 callx4 a4
449 j 1b
450
451 /* Restore the state of the task and return from the exception. */
452
453
454 /* If we are returning from a user exception, and the process
455 * to run next has PT_SINGLESTEP set, we want to setup
456 * ICOUNT and ICOUNTLEVEL to step one instruction.
457 * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
458 */
459
4604: /* a2 holds GET_CURRENT(a2,a1) */
461
462 l32i a3, a2, TI_TASK
463 l32i a3, a3, TASK_PTRACE
464 bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set
465
466 movi a3, -2 # PT_SINGLESTEP flag is set,
467 movi a4, 1 # icountlevel of 1 means it won't
468 wsr a3, ICOUNT # start counting until after rfe
469 wsr a4, ICOUNTLEVEL # so setup icount & icountlevel.
470 isync
471
4721:
473
474#if XCHAL_EXTRA_SA_SIZE
475
476 /* For user exceptions, restore the extra state from the user's TCB. */
477
478 /* Note: a2 still contains GET_CURRENT(a2,a1) */
479 addi a2, a2, THREAD_CP_SAVE
480 xchal_extra_load_funcbody
481
482 /* We must assume that xchal_extra_store_funcbody destroys
483 * registers a2..a15. FIXME, this list can eventually be
484 * reduced once real register requirements of the macro are
485 * finalized. */
486
487#endif /* XCHAL_EXTRA_SA_SIZE */
488
489
490 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
491
492 l32i a2, a1, PT_WINDOWBASE
493 l32i a3, a1, PT_WINDOWSTART
494 wsr a1, DEPC # use DEPC as temp storage
495 wsr a3, WINDOWSTART # restore WINDOWSTART
496 ssr a2 # preserve user's WB in the SAR
497 wsr a2, WINDOWBASE # switch to user's saved WB
498 rsync
499 rsr a1, DEPC # restore stack pointer
500 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
501 rotw -1 # we restore a4..a7
502 _bltui a6, 16, 1f # only have to restore current window?
503
504 /* The working registers are a0 and a3. We are restoring to
505 * a4..a7. Be careful not to destroy what we have just restored.
506 * Note: wmask has the format YYYYM:
507 * Y: number of registers saved in groups of 4
508 * M: 4 bit mask of first 16 registers
509 */
510
511 mov a2, a6
512 mov a3, a5
513
5142: rotw -1 # a0..a3 become a4..a7
515 addi a3, a7, -4*4 # next iteration
516 addi a2, a6, -16 # decrementing Y in WMASK
517 l32i a4, a3, PT_AREG_END + 0
518 l32i a5, a3, PT_AREG_END + 4
519 l32i a6, a3, PT_AREG_END + 8
520 l32i a7, a3, PT_AREG_END + 12
521 _bgeui a2, 16, 2b
522
523 /* Clear unrestored registers (don't leak anything to user-land) */
524
5251: rsr a0, WINDOWBASE
526 rsr a3, SAR
527 sub a3, a0, a3
528 beqz a3, 2f
529 extui a3, a3, 0, WBBITS
530
5311: rotw -1
532 addi a3, a7, -1
533 movi a4, 0
534 movi a5, 0
535 movi a6, 0
536 movi a7, 0
537 bgei a3, 1, 1b
538
539 /* We are back where we were when we started.
540 * Note: a2 still contains WMASK (if we've returned to the original
541 * frame where we had loaded a2), or at least the lower 4 bits
542 * (if we have restored WSBITS-1 frames).
543 */
544
5452: j common_exception_exit
546
547 /* This is the kernel exception exit.
548 * We avoided to do a MOVSP when we entered the exception, but we
549 * have to do it here.
550 */
551
kernel_exception_exit:
553
554 /* Disable interrupts (a3 holds PT_PS) */
555
556 wsr a3, PS
557
558#ifdef PREEMPTIBLE_KERNEL
559
560#ifdef CONFIG_PREEMPT
561
562 /*
563 * Note: We've just returned from a call4, so we have
564 * at least 4 addt'l regs.
565 */
566
567 /* Check current_thread_info->preempt_count */
	/* NOTE(review): this preemption stub loads TI_FLAGS but never acts
	 * on it — incomplete code behind a feature that is #undef'd above.
	 */
568
569 GET_THREAD_INFO(a2)
570 l32i a3, a2, TI_PREEMPT
571 bnez a3, 1f
572
573 l32i a2, a2, TI_FLAGS
574
5751:
576
577#endif
578
579#endif
580
581 /* Check if we have to do a movsp.
582 *
583 * We only have to do a movsp if the previous window-frame has
584 * been spilled to the *temporary* exception stack instead of the
585 * task's stack. This is the case if the corresponding bit in
586 * WINDOWSTART for the previous window-frame was set before
587 * (not spilled) but is zero now (spilled).
588 * If this bit is zero, all other bits except the one for the
589 * current window frame are also zero. So, we can use a simple test:
590 * 'and' WINDOWSTART and WINDOWSTART-1:
591 *
592 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
593 *
594 * The result is zero only if one bit was set.
595 *
596 * (Note: We might have gone through several task switches before
597 * we come back to the current task, so WINDOWBASE might be
598 * different from the time the exception occurred.)
599 */
600
601 /* Test WINDOWSTART before and after the exception.
602 * We actually have WMASK, so we only have to test if it is 1 or not.
603 */
604
605 l32i a2, a1, PT_WMASK
606 _beqi a2, 1, common_exception_exit # Spilled before exception, jump
607
608 /* Test WINDOWSTART now. If spilled, do the movsp */
609
610 rsr a3, WINDOWSTART
611 addi a0, a3, -1
612 and a3, a3, a0
613 _bnez a3, common_exception_exit
614
615 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
616
617 addi a0, a1, -16
618 l32i a3, a0, 0
619 l32i a4, a0, 4
620 s32i a3, a1, PT_SIZE+0
621 s32i a4, a1, PT_SIZE+4
622 l32i a3, a0, 8
623 l32i a4, a0, 12
624 s32i a3, a1, PT_SIZE+8
625 s32i a4, a1, PT_SIZE+12
626
627 /* Common exception exit.
628 * We restore the special register and the current window frame, and
629 * return from the exception.
630 *
631 * Note: We expect a2 to hold PT_WMASK
632 */
633
common_exception_exit:
635
636 _bbsi.l a2, 1, 1f
637 l32i a4, a1, PT_AREG4
638 l32i a5, a1, PT_AREG5
639 l32i a6, a1, PT_AREG6
640 l32i a7, a1, PT_AREG7
641 _bbsi.l a2, 2, 1f
642 l32i a8, a1, PT_AREG8
643 l32i a9, a1, PT_AREG9
644 l32i a10, a1, PT_AREG10
645 l32i a11, a1, PT_AREG11
646 _bbsi.l a2, 3, 1f
647 l32i a12, a1, PT_AREG12
648 l32i a13, a1, PT_AREG13
649 l32i a14, a1, PT_AREG14
650 l32i a15, a1, PT_AREG15
651
652 /* Restore PC, SAR */
653
6541: l32i a2, a1, PT_PC
655 l32i a3, a1, PT_SAR
656 wsr a2, EPC_1
657 wsr a3, SAR
658
659 /* Restore LBEG, LEND, LCOUNT */
660
661 l32i a2, a1, PT_LBEG
662 l32i a3, a1, PT_LEND
663 wsr a2, LBEG
664 l32i a2, a1, PT_LCOUNT
665 wsr a3, LEND
666 wsr a2, LCOUNT
667
668 /* Check if it was double exception. */
669
670 l32i a0, a1, PT_DEPC
671 l32i a3, a1, PT_AREG3
672 l32i a2, a1, PT_AREG2
673 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
674
675 /* Restore a0...a3 and return */
676
677 l32i a0, a1, PT_AREG0
678 l32i a1, a1, PT_AREG1
679 rfe
680
	/* Double exception: restore DEPC first, then return with rfde. */
6811: wsr a0, DEPC
682 l32i a0, a1, PT_AREG0
683 l32i a1, a1, PT_AREG1
684 rfde
685
686/*
687 * Debug exception handler.
688 *
689 * Currently, we don't support KGDB, so only user application can be debugged.
690 *
691 * When we get here, a0 is trashed and saved to excsave[debuglevel]
692 */
693
694ENTRY(debug_exception)
695
696 rsr a0, EPS + XCHAL_DEBUGLEVEL
697 bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode
698
699 /* Set EPC_1 and EXCCAUSE */
700
701 wsr a2, DEPC # save a2 temporarily
702 rsr a2, EPC + XCHAL_DEBUGLEVEL
703 wsr a2, EPC_1
704
705 movi a2, EXCCAUSE_MAPPED_DEBUG
706 wsr a2, EXCCAUSE
707
708 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
709
710 movi a2, 1 << PS_EXCM_SHIFT
711 or a2, a0, a2
712 movi a0, debug_exception # restore a0, reinstall debug jump vector
713 wsr a2, PS
714 xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
715
716 /* Switch to kernel/user stack, restore jump vector, and save a0 */
717
718 bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode
719
	/* Kernel mode: build a pt_regs frame below the current kernel SP
	 * and hand off to the generic kernel exception path.
	 */
720 addi a2, a1, -16-PT_SIZE # assume kernel stack
721 s32i a0, a2, PT_AREG0
722 movi a0, 0
723 s32i a1, a2, PT_AREG1
724 s32i a0, a2, PT_DEPC # mark it as a regular exception
725 xsr a0, DEPC
726 s32i a3, a2, PT_AREG3
727 s32i a0, a2, PT_AREG2
728 mov a1, a2
729 j _kernel_exception
730
	/* User mode: fetch the kernel stack from the exception table and
	 * hand off to the generic user exception path.
	 */
7312: rsr a2, EXCSAVE_1
732 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
733 s32i a0, a2, PT_AREG0
734 movi a0, 0
735 s32i a1, a2, PT_AREG1
736 s32i a0, a2, PT_DEPC
737 xsr a0, DEPC
738 s32i a3, a2, PT_AREG3
739 s32i a0, a2, PT_AREG2
740 mov a1, a2
741 j _user_exception
742
743 /* Debug exception while in exception mode. Unhandled: spin forever. */
7441: j 1b // FIXME!!
745
746
747/*
748 * We get here in case of an unrecoverable exception.
749 * The only thing we can do is to be nice and print a panic message.
750 * We only produce a single stack frame for panic, so ???
751 *
752 *
753 * Entry conditions:
754 *
755 * - a0 contains the caller address; original value saved in excsave1.
756 * - the original a0 contains a valid return address (backtrace) or 0.
757 * - a2 contains a valid stackpointer
758 *
759 * Notes:
760 *
761 * - If the stack pointer could be invalid, the caller has to setup a
762 * dummy stack pointer (e.g. the stack of the init_task)
763 *
764 * - If the return address could be invalid, the caller has to set it
765 * to 0, so the backtrace would stop.
766 *
767 */
768 .align 4
unrecoverable_text:
770 .ascii "Unrecoverable error in exception handler\0"
771
772ENTRY(unrecoverable_exception)
773
	/* Reset the window machinery to a single live frame (WINDOWSTART=1,
	 * WINDOWBASE=0) so panic() can be called from any broken state.
	 */
774 movi a0, 1
775 movi a1, 0
776
777 wsr a0, WINDOWSTART
778 wsr a1, WINDOWBASE
779 rsync
780
781 movi a1, PS_WOE_MASK | 1
782 wsr a1, PS
783 rsync
784
	/* Use init_task's stack as a known-good stack; a0 = 0 ends the
	 * backtrace. panic() never returns; spin just in case.
	 */
785 movi a1, init_task
786 movi a0, 0
787 addi a1, a1, PT_REGS_OFFSET
788
789 movi a4, panic
790 movi a6, unrecoverable_text
791
792 callx4 a4
793
7941: j 1b
795
796
797/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
798
799/*
800 * Fast-handler for alloca exceptions
801 *
802 * The ALLOCA handler is entered when user code executes the MOVSP
803 * instruction and the caller's frame is not in the register file.
804 * In this case, the caller frame's a0..a3 are on the stack just
805 * below sp (a1), and this handler moves them.
806 *
807 * For "MOVSP <ar>,<as>" without destination register a1, this routine
808 * simply moves the value from <as> to <ar> without moving the save area.
809 *
810 * Entry condition:
811 *
812 * a0: trashed, original value saved on stack (PT_AREG0)
813 * a1: a1
814 * a2: new stack pointer, original in DEPC
815 * a3: dispatch table
816 * depc: a2, original value saved on stack (PT_DEPC)
817 * excsave_1: a3
818 *
819 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
820 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
821 */
822
	/* The MOVSP operand fields swap nibble positions between big- and
	 * little-endian instruction encodings; these extract src/dst
	 * register numbers from the byte read out of the instruction.
	 */
823#if XCHAL_HAVE_BE
824#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
825#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
826#else
827#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
828#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
829#endif
830
831ENTRY(fast_alloca)
832
833 /* We shouldn't be in a double exception. */
834
835 l32i a0, a2, PT_DEPC
836 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
837
838 rsr a0, DEPC # get a2
839 s32i a4, a2, PT_AREG4 # save a4 and
840 s32i a0, a2, PT_AREG2 # a2 to stack
841
842 /* Exit critical section. */
843
844 movi a0, 0
845 s32i a0, a3, EXC_TABLE_FIXUP
846
847 /* Restore a3, excsave_1 */
848
849 xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl.
850 rsr a4, EPC_1 # get exception address
851 s32i a3, a2, PT_AREG3 # save a3 to stack
852
853#ifdef ALLOCA_EXCEPTION_IN_IRAM
854#error iram not supported
855#else
856 /* Note: l8ui not allowed in IRAM/IROM!! */
857 l8ui a0, a4, 1 # read as(src) from MOVSP instruction
858#endif
	/* Dispatch into an 8-byte-aligned jump table indexed by the source
	 * register number; each slot loads the MOVSP source value into a3.
	 */
859 movi a3, .Lmovsp_src
860 _EXTUI_MOVSP_SRC(a0) # extract source register number
861 addx8 a3, a0, a3
862 jx a3
863
.Lunhandled_double:
865 wsr a0, EXCSAVE_1
866 movi a0, unrecoverable_exception
867 callx0 a0
868
869 .align 8
.Lmovsp_src:
	/* a0..a4 were spilled to pt_regs above, so those come from the
	 * stack frame; a5..a15 are still live in the register file.
	 */
871 l32i a3, a2, PT_AREG0; _j 1f; .align 8
872 mov a3, a1; _j 1f; .align 8
873 l32i a3, a2, PT_AREG2; _j 1f; .align 8
874 l32i a3, a2, PT_AREG3; _j 1f; .align 8
875 l32i a3, a2, PT_AREG4; _j 1f; .align 8
876 mov a3, a5; _j 1f; .align 8
877 mov a3, a6; _j 1f; .align 8
878 mov a3, a7; _j 1f; .align 8
879 mov a3, a8; _j 1f; .align 8
880 mov a3, a9; _j 1f; .align 8
881 mov a3, a10; _j 1f; .align 8
882 mov a3, a11; _j 1f; .align 8
883 mov a3, a12; _j 1f; .align 8
884 mov a3, a13; _j 1f; .align 8
885 mov a3, a14; _j 1f; .align 8
886 mov a3, a15; _j 1f; .align 8
887
8881:
889
890#ifdef ALLOCA_EXCEPTION_IN_IRAM
891#error iram not supported
892#else
893 l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
894#endif
895 addi a4, a4, 3 # step over movsp
896 _EXTUI_MOVSP_DST(a0) # extract destination register
897 wsr a4, EPC_1 # save new epc_1
898
899 _bnei a0, 1, 1f # no 'movsp a1, ax': jump
900
901 /* Move the save area. This implies the use of the L32E
902 * and S32E instructions, because this move must be done with
903 * the user's PS.RING privilege levels, not with ring 0
904 * (kernel's) privileges currently active with PS.EXCM
905 * set. Note that we have still registered a fixup routine with the
906 * double exception vector in case a double exception occurs.
907 */
908
909 /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
910
911 l32e a0, a1, -16
912 l32e a4, a1, -12
913 s32e a0, a3, -16
914 s32e a4, a3, -12
915 l32e a0, a1, -8
916 l32e a4, a1, -4
917 s32e a0, a3, -8
918 s32e a4, a3, -4
919
920 /* Restore stack-pointer and all the other saved registers. */
921
922 mov a1, a3
923
924 l32i a4, a2, PT_AREG4
925 l32i a3, a2, PT_AREG3
926 l32i a0, a2, PT_AREG0
927 l32i a2, a2, PT_AREG2
928 rfe
929
930 /* MOVSP <at>,<as> was invoked with <at> != a1.
931 * Because the stack pointer is not being modified,
932 * we should be able to just modify the pointer
933 * without moving any save area.
934 * The processor only traps these occurrences if the
935 * caller window isn't live, so unfortunately we can't
936 * use this as an alternate trap mechanism.
937 * So we just do the move. This requires that we
938 * resolve the destination register, not just the source,
939 * so there's some extra work.
940 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
941 */
942
943 /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
944
9451: movi a4, .Lmovsp_dst
946 addx8 a4, a0, a4
947 jx a4
948
949 .align 8
.Lmovsp_dst:
	/* Write the source value (a3) into the destination register;
	 * destinations a0..a4 go to the saved pt_regs slots, a5..a15
	 * are written directly.
	 */
951 s32i a3, a2, PT_AREG0; _j 1f; .align 8
952 mov a1, a3; _j 1f; .align 8
953 s32i a3, a2, PT_AREG2; _j 1f; .align 8
954 s32i a3, a2, PT_AREG3; _j 1f; .align 8
955 s32i a3, a2, PT_AREG4; _j 1f; .align 8
956 mov a5, a3; _j 1f; .align 8
957 mov a6, a3; _j 1f; .align 8
958 mov a7, a3; _j 1f; .align 8
959 mov a8, a3; _j 1f; .align 8
960 mov a9, a3; _j 1f; .align 8
961 mov a10, a3; _j 1f; .align 8
962 mov a11, a3; _j 1f; .align 8
963 mov a12, a3; _j 1f; .align 8
964 mov a13, a3; _j 1f; .align 8
965 mov a14, a3; _j 1f; .align 8
966 mov a15, a3; _j 1f; .align 8
967
9681: l32i a4, a2, PT_AREG4
969 l32i a3, a2, PT_AREG3
970 l32i a0, a2, PT_AREG0
971 l32i a2, a2, PT_AREG2
972 rfe
973
974
/*
 * fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call. These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip the syscall instruction (3 bytes) so we don't re-execute
	 * it when returning from the exception.
	 */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	/* A double exception during dispatch cannot be handled here. */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	/* Not a fast syscall: take the generic kernel exception path. */

	j	kernel_exception
1012
1013
ENTRY(fast_syscall_user)

	/* Skip the syscall instruction (3 bytes); same dispatch as
	 * fast_syscall_kernel, but unhandled numbers fall back to the
	 * user exception path.
	 */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	/* A double exception during dispatch cannot be handled here. */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	j	user_exception
1032
ENTRY(fast_syscall_unrecoverable)

	/* Restore the entry state (a0, a2/DEPC, a3) and bail out through
	 * unrecoverable_exception; a0 is parked in EXCSAVE_1 because we
	 * need a register for the call target.
	 */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, DEPC		# restore a2, depc
	rsr	a3, EXCSAVE_1

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0			# should not return
1044
1045
1046
/*
 * sysxtensa syscall handler
 *
 *     int sysxtensa (XTENSA_ATOMIC_SET,     ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there
 *
 * Usage	TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

/* TRY: register the next instruction (label 66) in __ex_table so that a
 * fault there resumes at the matching CATCH label (67).
 */
#define TRY	 \
	.section __ex_table, "a";	\
	.word	66f, 67f;		\
	.text;				\
66:

#define CATCH				\
67:

ENTRY(fast_syscall_sysxtensa)

	/* Valid sub-function codes are 1..SYSXTENSA_COUNT-1; 0 and
	 * out-of-range codes take the slow path via user_exception.
	 */

	_beqz	a6, 1f
	_blti	a6, SYSXTENSA_COUNT, 2f

1:	j	user_exception

2:	xsr	a3, EXCSAVE_1		# restore a3, excsave1
	s32i	a7, a2, PT_AREG7	# a7 is scratch/return-value below

	movi	a7, 4			# sizeof(unsigned int)
	verify_area a3, a7, a0, a2, .Leac	# fault to .Leac on bad user ptr

	_beqi	a6, SYSXTENSA_ATOMIC_SET, .Lset
	_beqi	a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
	_beqi	a6, SYSXTENSA_ATOMIC_ADD, .Ladd

	/* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */

.Lswp:	/* Atomic compare and swap: if (*ptr == oldval) { *ptr = newval;
	 * return 1; } else return 0;
	 * NOTE(review): the read-modify-write below relies on this fast
	 * handler running with exceptions masked (PS.EXCM) -- confirm.
	 */

TRY	l32i	a7, a3, 0		# read old value
	bne	a7, a4, 1f		# values differ: return 0
	s32i	a5, a3, 0		# equal: store new value
	movi	a7, 1			# and return 1
	j	.Lret

1:	movi	a7, 0			# mismatch: return 0
	j	.Lret

.Ladd:	/* Atomic add */
.Lexg:	/* Atomic (exchange) add; both variants share this code and
	 * return the pre-add value left in a7. */

TRY	l32i	a7, a3, 0
	add	a4, a4, a7
	s32i	a4, a3, 0
	j	.Lret

.Lset:	/* Atomic set */

TRY	l32i	a7, a3, 0		# read old value as return value
	s32i	a4, a3, 0		# write new value

.Lret:	mov	a0, a2			# a0 = pt_regs; result goes in a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7
	l32i	a3, a0, PT_AREG3
	l32i	a0, a0, PT_AREG0
	rfe

CATCH
.Leac:	movi	a7, -EFAULT		# faulted on the user pointer
	j	.Lret
1147
1148
1149
/* fast_syscall_spill_registers.
 *
 * Spill all live register windows to the stack on behalf of user space.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, WINDOWBASE
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, SAR
	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
	s32i	a3, a2, PT_AREG3

	/* The spill routine might clobber a7, a11, and a15.
	 * (Park them in the otherwise-unused PT_AREG5..7 slots.)
	 */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7

	call0	_spill_registers	# destroys a3, DEPC, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG4	# saved SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, SAR
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7

	movi	a2, 0			# return value 0
	rfe
1204
/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
	xsr	a0, DEPC	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, EXCSAVE_1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, WINDOWSTART	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 *	a0, a1, a2	same
	 *	a3:		trashed (saved in excsave_1)
	 *	depc:		depc (we have to return to that address)
	 *	excsave_1:	a3
	 */

	wsr	a3, WINDOWBASE
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *	a0: return address
	 *	a1: used, stack pointer
	 *	a2: kernel stack pointer
	 *	a3: available, saved in EXCSAVE_1
	 *	depc: exception address
	 *	excsave: a3
	 * Note: This frame might be the same as above.
	 */

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Restore registers we precautiously saved.
	 * We have the value of the 'right' a3
	 */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7
#endif

	/* Setup stack pointer: carve a fresh pt_regs area on the
	 * kernel stack and save the return address there.
	 */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler for the original fault. */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0
1286
fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC).
	 * Re-arm the fixup and retry the spill from where it faulted.
	 */

	wsr	a2, DEPC		# exception address

	/* Restore fixup handler. */

	xsr	a3, EXCSAVE_1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, WINDOWBASE
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Save registers again that might be clobbered. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7
#endif

	/* Load WB at the time the exception occurred.
	 * NOTE(review): this assumes _spill_registers left 32-WB in SAR
	 * (via ssl/ssr), hence the negation -- confirm against the spill
	 * routine before changing either side.
	 */

	rsr	a3, SAR			# WB is still in SAR
	neg	a3, a3
	wsr	a3, WINDOWBASE
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, EXCSAVE_1

	rfde
1323
1324
/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses DEPC, a3 and SAR.
 *  - the last 'valid' register of each frame are clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a2 = yyxxxwww1.
	 */

	wsr	a2, DEPC		# preserve a2
	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2			# holds WB
	slli	a2, a3, WSBITS
	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
	movi	a2, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a2		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, WINDOWSTART		# save shifted windowstart
	neg	a2, a3
	and	a3, a2, a3		# first bit set from right: 000010000

	ffs_ws	a2, a3			# a2: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
	ssr	a2			# save in SAR for later.

	rsr	a3, WINDOWBASE
	add	a3, a3, a2
	rsr	a2, DEPC		# restore a2
	wsr	a3, WINDOWBASE
	rsync

	rsr	a3, WINDOWSTART
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending what call was used (call4, call8, call12),
	 * we have to save 4,8. or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16	# call8 frame: spill a4..a11
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2	# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, WINDOWBASE
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, WINDOWSTART

.Lnospill:
	jx	a0

.Lc4:	s32e	a4, a9, -16	# call4 frame: spill a4..a7
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application,
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, PS
	_bbci.l	a0, PS_UM_SHIFT, 1f

	/* User space: Setup a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, EXCSAVE_1

	movi	a4, PS_WOE_MASK | 1
	wsr	a4, PS
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4			# does not return

1:	/* Kernel space: PANIC! */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b
1527
/*
 * We should never get here (a 2nd-level TLB miss inside a 2nd-level
 * TLB miss in kernel mode is unrecoverable). Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b
1537
/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a1)
	l32i	a0, a0, 0		# read pmdval
	//beqi	a0, _PAGE_USER, 2f
	beqz	a0, 2f			# no pmd: default exception handling

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 * 	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
	 */

	movi	a1, -PAGE_OFFSET
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1


	movi	a1, PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	rsr	a1, PTEVADDR
	srli	a1, a1, PAGE_SHIFT
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a1, a1, DTLB_WAY_PGTABLE	# ... + way_number

	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, EXCSAVE_1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, DEPC
	rfe

	/* Return from double exception. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* Invalid PGD, default exception handling */

	rsr	a1, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception
1660
1661
/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 * NOTE(review): the code below probes and rewrites the *dtlb*
 * (pdtlb/idtlb/wdtlb); the 'itlb' wording above looks stale -- confirm.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
	beqz	a0, 2f			# no pmd: handle fault in C

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	movi	a1, _PAGE_VALID | _PAGE_RW
	bnall	a4, a1, 2f		# not a valid, writable pte: fault in C

	/* Mark the pte accessed/dirty/write-enabled and write it back. */

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
	or	a4, a4, a1
	rsr	a1, EXCVADDR
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1			# probe dtlb for the fault address
	beqz	a0, 1f			# no matching entry: nothing to update
	idtlb	a0			// FIXME do we need this?
	wdtlb	a4, a0
1:

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, EXCSAVE_1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, DEPC
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, DEPC	# still holds a2
	xsr	a3, EXCSAVE_1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception
1759
1760
#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Lazy coprocessor-state switching (only compiled in when the core has
 * coprocessor save areas).
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)

	/* A coprocessor exception during a double exception is fatal. */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0			# should not return
1783
ENTRY(fast_coprocessor)

	/* Fatal if we are in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

	/* Save some registers a1, a3, a4, SAR */

	xsr	a3, EXCSAVE_1
	s32i	a3, a2, PT_AREG3
	rsr	a3, SAR
	s32i	a4, a2, PT_AREG4
	s32i	a1, a2, PT_AREG1
	/* NOTE(review): at this point a1 is still the interrupted stack
	 * pointer (the pt_regs base is a2), yet .Ldone reloads PT_AREG5
	 * relative to a1 == a2.  This store looks like it should use a2;
	 * confirm (the whole block is flagged '#warning untested' above).
	 */
	s32i	a5, a1, PT_AREG5
	s32i	a3, a2, PT_SAR
	mov	a1, a2

	/* Currently, the HAL macros only guarantee saving a0 and a1.
	 * These can and will be refined in the future, but for now,
	 * just save the remaining registers of a2...a15.
	 */
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a0, EXCCAUSE
	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit */

	movi	a4, 1
	ssl	a3			# SAR: 32 - coprocessor_number
	rsr	a5, CPENABLE
	sll	a4, a4
	or	a4, a5, a4
	wsr	a4, CPENABLE
	rsync
	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	/* NOTE(review): a4 here is the updated CPENABLE mask, not the
	 * coprocessor number (a3) -- indexing the table by the mask looks
	 * wrong; confirm against the coprocessor_info layout.
	 */
	addx8	a0, a4, a5		# entry for CP

	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use

	/* Now compare the current task with the owner of the coprocessor.
	 * If they are the same, there is no reason to save or restore any
	 * coprocessor state.  Having already enabled the coprocessor,
	 * branch ahead to return.
	 */
	GET_CURRENT(a5,a1)
	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
	beq	a4, a5, .Ldone

	/* Find location to dump current coprocessor state:
	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number,
	 *	 a4 current owner of coprocessor.
	 */
	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	/* Store current coprocessor states. (a5 still has CP number) */

	xchal_cpi_store_funcbody

	/* The macro might have destroyed a3 (coprocessor number), but
	 * SAR still has 32 - coprocessor_number!
	 */
	movi	a3, 32
	rsr	a4, SAR
	sub	a3, a3, a4

.Lload:	/* A new task now owns the coprocessors. Save its TCB pointer into
	 * the coprocessor owner table.
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number.
	 */
	GET_CURRENT(a4,a1)
	s32i	a4, a0, 0

	/* Find location from where to restore the current coprocessor state.*/

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	/* NOTE(review): the save-area offset just loaded is in a5, but a4
	 * (the task pointer) is added here -- compare with the store path
	 * above, which adds a5; confirm.
	 */
	add	a2, a2, a4

	xchal_cpi_load_funcbody

	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
	 * registers a2..a15.
	 */

.Ldone:	l32i	a15, a1, PT_AREG15
	l32i	a14, a1, PT_AREG14
	l32i	a13, a1, PT_AREG13
	l32i	a12, a1, PT_AREG12
	l32i	a11, a1, PT_AREG11
	l32i	a10, a1, PT_AREG10
	l32i	a9, a1, PT_AREG9
	l32i	a8, a1, PT_AREG8
	l32i	a7, a1, PT_AREG7
	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

#endif /* XCHAL_EXTRA_SA_SIZE */
1908
/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2              a2                 a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a4, a3			# preserve a3 ('next') across the spill

	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer; spill regs. */

	movi	a5, PS_EXCM_MASK | LOCKLEVEL
	xsr	a5, PS			# a5: previous PS, restored below
	rsr	a3, EXCSAVE_1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP /* enter critical section (non-zero
					   fixup marks it active) */

	call0	_spill_registers

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	l32i	a0, a4, TASK_THREAD_INFO
	rsr	a3, EXCSAVE_1		# exc_table
	movi	a1, 0
	addi	a0, a0, PT_REGS_OFFSET
	s32i	a1, a3, EXC_TABLE_FIXUP	# clear fixup: leave critical section
	s32i	a0, a3, EXC_TABLE_KSTK

	/* restore context of the task that 'next' addresses */

	l32i	a0, a4, THREAD_RA	/* restore return address */
	l32i	a1, a4, THREAD_SP	/* restore stack pointer */

	wsr	a5, PS			# re-enable interrupts (previous PS)
	rsync

	retw
1957
1958
ENTRY(ret_from_fork)

	/* First schedule off the previous task:
	 *   void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	/* Notify a tracer of the syscall exit, then take the common
	 * exception-return path back to the new task.
	 */
	movi	a4, do_syscall_trace
	callx4	a4

	j	common_exception_return
1971
1972
1973
/*
 * Table of syscalls: one .word (handler address) per entry, generated
 * by expanding the SYSCALL() entries in syscalls.h.
 */

.data
.align  4
.global sys_call_table
sys_call_table:

#define SYSCALL(call, narg) .word call
#include "syscalls.h"

/*
 * Number of arguments of each syscall: one .byte per entry, same
 * order as sys_call_table (syscalls.h is included a second time with
 * SYSCALL() redefined).
 */

.global sys_narg_table
sys_narg_table:

#undef SYSCALL
#define SYSCALL(call, narg) .byte narg
#include "syscalls.h"
1996
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 000000000000..6e9b5225b8f6
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,237 @@
1/*
2 * arch/xtensa/kernel/head.S
3 *
4 * Xtensa Processor startup code.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 * Kevin Chea
16 */
17
18#include <xtensa/cacheasm.h>
19#include <linux/config.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22
23/*
24 * This module contains the entry code for kernel images. It performs the
25 * minimal setup needed to call the generic C routines.
26 *
27 * Prerequisites:
28 *
29 * - The kernel image has been loaded to the actual address where it was
30 * compiled to.
31 * - a2 contains either 0 or a pointer to a list of boot parameters.
32 * (see setup.c for more details)
33 *
34 */
35
	/* iterate from, to, cmd: expand '\cmd \from' for every integer in
	 * [from, to].  Recursion stops when from > to (the difference goes
	 * negative and the & ~0xfff test becomes non-zero); the same test
	 * caps the range at 4096 expansions.
	 */
	.macro	iterate	from, to , cmd
	.ifeq	((\to - \from) & ~0xfff)
	\cmd	\from
	iterate	"(\from+1)", \to, \cmd
	.endif
	.endm
42
/*
 * _start
 *
 * The bootloader passes a pointer to a list of boot parameters in a2.
 */

	/* The first bytes of the kernel image must be an instruction, so we
	 * manually allocate and define the literal constant we need for a jx
	 * instruction.  (_j skips over the literal; l32r then loads the
	 * address of _startup and we jump there.)
	 */

	.section .head.text, "ax"
	.globl _start
_start:	_j	2f
	.align	4
1:	.word	_startup
2:	l32r	a0, 1b
	jx	a0
61
62 .text
63 .align 4
64_startup:
65
66 /* Disable interrupts and exceptions. */
67
68 movi a0, XCHAL_PS_EXCM_MASK
69 wsr a0, PS
70
71 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
72
73 wsr a2, EXCSAVE_1
74
75 /* Start with a fresh windowbase and windowstart. */
76
77 movi a1, 1
78 movi a0, 0
79 wsr a1, WINDOWSTART
80 wsr a0, WINDOWBASE
81 rsync
82
83 /* Set a0 to 0 for the remaining initialization. */
84
85 movi a0, 0
86
87 /* Clear debugging registers. */
88
89#if XCHAL_HAVE_DEBUG
90 wsr a0, IBREAKENABLE
91 wsr a0, ICOUNT
92 movi a1, 15
93 wsr a0, ICOUNTLEVEL
94
95 .macro reset_dbreak num
96 wsr a0, DBREAKC + \num
97 .endm
98
99 iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
100#endif
101
102 /* Clear CCOUNT (not really necessary, but nice) */
103
104 wsr a0, CCOUNT # not really necessary, but nice
105
106 /* Disable zero-loops. */
107
108#if XCHAL_HAVE_LOOPS
109 wsr a0, LCOUNT
110#endif
111
112 /* Disable all timers. */
113
114 .macro reset_timer num
115 wsr a0, CCOMPARE_0 + \num
116 .endm
117 iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
118
119 /* Interrupt initialization. */
120
121 movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
122 wsr a0, INTENABLE
123 wsr a2, INTCLEAR
124
125 /* Disable coprocessors. */
126
127#if XCHAL_CP_NUM > 0
128 wsr a0, CPENABLE
129#endif
130
131 /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
132 *
133 * Note: PS.EXCM must be cleared before using any loop
134 * instructions; otherwise, they are silently disabled, and
135 * at most one iteration of the loop is executed.
136 */
137
138 movi a1, 1
139 wsr a1, PS
140 rsync
141
142 /* Initialize the caches.
143 * Does not include flushing writeback d-cache.
144 * a6, a7 are just working registers (clobbered).
145 */
146
147 icache_reset a2, a3
148 dcache_reset a2, a3
149
150 /* Unpack data sections
151 *
152 * The linker script used to build the Linux kernel image
153 * creates a table located at __boot_reloc_table_start
154 * that contans the information what data needs to be unpacked.
155 *
156 * Uses a2-a7.
157 */
158
159 movi a2, __boot_reloc_table_start
160 movi a3, __boot_reloc_table_end
161
1621: beq a2, a3, 3f # no more entries?
163 l32i a4, a2, 0 # start destination (in RAM)
164 l32i a5, a2, 4 # end desination (in RAM)
165 l32i a6, a2, 8 # start source (in ROM)
166 addi a2, a2, 12 # next entry
167 beq a4, a5, 1b # skip, empty entry
168 beq a4, a6, 1b # skip, source and dest. are the same
169
1702: l32i a7, a6, 0 # load word
171 addi a6, a6, 4
172 s32i a7, a4, 0 # store word
173 addi a4, a4, 4
174 bltu a4, a5, 2b
175 j 1b
176
1773:
178 /* All code and initialized data segments have been copied.
179 * Now clear the BSS segment.
180 */
181
182 movi a2, _bss_start # start of BSS
183 movi a3, _bss_end # end of BSS
184
1851: addi a2, a2, 4
186 s32i a0, a2, 0
187 blt a2, a3, 1b
188
189#if XCHAL_DCACHE_IS_WRITEBACK
190
191 /* After unpacking, flush the writeback cache to memory so the
192 * instructions/data are available.
193 */
194
195 dcache_writeback_all a2, a3
196#endif
197
198 /* Setup stack and enable window exceptions (keep irqs disabled) */
199
200 movi a1, init_thread_union
201 addi a1, a1, KERNEL_STACK_SIZE
202
203 movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
204 wsr a2, PS # (enable reg-windows; progmode stack)
205 rsync
206
207 /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
208
209 movi a2, debug_exception
210 wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
211
212 /* Set up EXCSAVE[1] to point to the exc_table. */
213
214 movi a6, exc_table
215 xsr a6, EXCSAVE_1
216
217 /* init_arch kick-starts the linux kernel */
218
219 movi a4, init_arch
220 callx4 a4
221
222 movi a4, start_kernel
223 callx4 a4
224
225should_never_return:
226 j should_never_return
227
	/* Define some common data structures here.  We define them
	 * here in this assembly file due to their unusual alignment
	 * requirements: each is one page in size and page-aligned
	 * (size and alignment are both PAGE_SIZE).
	 */

	.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
	.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
	.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
	.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
237
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 000000000000..4cbf6d91571f
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,192 @@
1/*
2 * linux/arch/xtensa/kernel/irq.c
3 *
4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386.
6 *
7 * Copyright (C) 2002 - 2005 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 *
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Kevin Chea
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/seq_file.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/kernel_stat.h>
21
22#include <asm/uaccess.h>
23#include <asm/platform.h>
24
25static void enable_xtensa_irq(unsigned int irq);
26static void disable_xtensa_irq(unsigned int irq);
27static void mask_and_ack_xtensa(unsigned int irq);
28static void end_xtensa_irq(unsigned int irq);
29
30static unsigned int cached_irq_mask;
31
32atomic_t irq_err_count;
33
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	/* Nothing to acknowledge in hardware; just report the stray vector. */
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
42
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Returns 1 unconditionally (callers only need to know an IRQ was taken).
 */
unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		unsigned long sp;

		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
		sp &= THREAD_SIZE - 1;

		/* Fix: 'sizeof(thread_info)' lacked the 'struct' keyword and
		 * did not compile; also print an unsigned quantity with %lu.
		 */
		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
			printk("Stack overflow in do_IRQ: %lu\n",
			       sp - sizeof(struct thread_info));
	}
#endif

	__do_IRQ(irq, regs);

	irq_exit();

	return 1;
}
73
74/*
75 * Generic, controller-independent functions:
76 */
77
78int show_interrupts(struct seq_file *p, void *v)
79{
80 int i = *(loff_t *) v, j;
81 struct irqaction * action;
82 unsigned long flags;
83
84 if (i == 0) {
85 seq_printf(p, " ");
86 for (j=0; j<NR_CPUS; j++)
87 if (cpu_online(j))
88 seq_printf(p, "CPU%d ",j);
89 seq_putc(p, '\n');
90 }
91
92 if (i < NR_IRQS) {
93 spin_lock_irqsave(&irq_desc[i].lock, flags);
94 action = irq_desc[i].action;
95 if (!action)
96 goto skip;
97 seq_printf(p, "%3d: ",i);
98#ifndef CONFIG_SMP
99 seq_printf(p, "%10u ", kstat_irqs(i));
100#else
101 for (j = 0; j < NR_CPUS; j++)
102 if (cpu_online(j))
103 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
104#endif
105 seq_printf(p, " %14s", irq_desc[i].handler->typename);
106 seq_printf(p, " %s", action->name);
107
108 for (action=action->next; action; action = action->next)
109 seq_printf(p, ", %s", action->name);
110
111 seq_putc(p, '\n');
112skip:
113 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
114 } else if (i == NR_IRQS) {
115 seq_printf(p, "NMI: ");
116 for (j = 0; j < NR_CPUS; j++)
117 if (cpu_online(j))
118 seq_printf(p, "%10u ", nmi_count(j));
119 seq_putc(p, '\n');
120 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
121 }
122 return 0;
123}
/* shutdown is same as "disable" */
#define shutdown_xtensa_irq disable_xtensa_irq

/* Enable the line; report that no event is latched across startup. */
static unsigned int startup_xtensa_irq(unsigned int irq)
{
	enable_xtensa_irq(irq);
	return 0;		/* never anything pending */
}
132
133static struct hw_interrupt_type xtensa_irq_type = {
134 "Xtensa-IRQ",
135 startup_xtensa_irq,
136 shutdown_xtensa_irq,
137 enable_xtensa_irq,
138 disable_xtensa_irq,
139 mask_and_ack_xtensa,
140 end_xtensa_irq
141};
142
143static inline void mask_irq(unsigned int irq)
144{
145 cached_irq_mask &= ~(1 << irq);
146 set_sr (cached_irq_mask, INTENABLE);
147}
148
149static inline void unmask_irq(unsigned int irq)
150{
151 cached_irq_mask |= 1 << irq;
152 set_sr (cached_irq_mask, INTENABLE);
153}
154
/* Mask one interrupt line with interrupts disabled.
 *
 * Fix: local_save_flags() only recorded PS without masking interrupts,
 * so the read-modify-write of cached_irq_mask in mask_irq() could race
 * with an interrupt handler.  local_irq_save() saves AND disables.
 */
static void disable_xtensa_irq(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	mask_irq(irq);
	local_irq_restore(flags);
}
162
/* Unmask one interrupt line with interrupts disabled.
 *
 * Fix: local_save_flags() did not actually disable interrupts, leaving
 * the cached_irq_mask update in unmask_irq() racy; use local_irq_save().
 */
static void enable_xtensa_irq(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	unmask_irq(irq);
	local_irq_restore(flags);
}
170
/* Acknowledge an interrupt by masking its line. */
static void mask_and_ack_xtensa(unsigned int irq)
{
	disable_xtensa_irq(irq);
}
175
/* Re-enable the line once the handler has finished. */
static void end_xtensa_irq(unsigned int irq)
{
	enable_xtensa_irq(irq);
}
180
181
182void __init init_IRQ(void)
183{
184 int i;
185
186 for (i=0; i < XTENSA_NR_IRQS; i++)
187 irq_desc[i].handler = &xtensa_irq_type;
188
189 cached_irq_mask = 0;
190
191 platform_init_irq();
192}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 000000000000..d1683cfa19a2
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,78 @@
1/*
2 * arch/xtensa/kernel/module.c
3 *
4 * Module support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/moduleloader.h>
18#include <linux/elf.h>
19#include <linux/vmalloc.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/kernel.h>
23#include <linux/cache.h>
24
25LIST_HEAD(module_buf_list);
26
/* Module loading is not implemented for Xtensa yet. */
void *module_alloc(unsigned long size)
{
	panic("module_alloc not implemented");
}
31
/* Counterpart to module_alloc(); also unimplemented. */
void module_free(struct module *mod, void *module_region)
{
	panic("module_free not implemented");
}
36
37int module_frob_arch_sections(Elf32_Ehdr *hdr,
38 Elf32_Shdr *sechdrs,
39 char *secstrings,
40 struct module *me)
41{
42 panic("module_frob_arch_sections not implemented");
43}
44
45int apply_relocate(Elf32_Shdr *sechdrs,
46 const char *strtab,
47 unsigned int symindex,
48 unsigned int relsec,
49 struct module *module)
50{
51 panic ("apply_relocate not implemented");
52}
53
54int apply_relocate_add(Elf32_Shdr *sechdrs,
55 const char *strtab,
56 unsigned int symindex,
57 unsigned int relsec,
58 struct module *module)
59{
60 panic("apply_relocate_add not implemented");
61}
62
63int module_finalize(const Elf_Ehdr *hdr,
64 const Elf_Shdr *sechdrs,
65 struct module *me)
66{
67 panic ("module_finalize not implemented");
68}
69
/* Arch teardown on module unload; unimplemented (panics). */
void module_arch_cleanup(struct module *mod)
{
	panic("module_arch_cleanup not implemented");
}
74
/* BUG-table lookup for module text; unimplemented (panics). */
struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	panic("module_find_bug not implemented");
}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 000000000000..84fde258cf85
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,73 @@
1/*
2 * arch/xtensa/pci-dma.c
3 *
4 * DMA coherent memory allocation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2002 - 2005 Tensilica Inc.
12 *
13 * Based on version for i386.
14 *
15 * Chris Zankel <chris@zankel.net>
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 */
18
19#include <linux/types.h>
20#include <linux/mm.h>
21#include <linux/string.h>
22#include <linux/pci.h>
23#include <asm/io.h>
24#include <asm/cacheflush.h>
25
26/*
27 * Note: We assume that the full memory space is always mapped to 'kseg'
28 * Otherwise we have to use page attributes (not implemented).
29 */
30
31void *
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
33{
34 void *ret;
35
36 /* ignore region speicifiers */
37 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
38
39 if (dev == NULL || (*dev->dma_mask < 0xffffffff))
40 gfp |= GFP_DMA;
41 ret = (void *)__get_free_pages(gfp, get_order(size));
42
43 if (ret != NULL) {
44 memset(ret, 0, size);
45 *handle = virt_to_bus(ret);
46 }
47 return (void*) BYPASS_ADDR((unsigned long)ret);
48}
49
50void dma_free_coherent(struct device *hwdev, size_t size,
51 void *vaddr, dma_addr_t dma_handle)
52{
53 free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
54}
55
56
57void consistent_sync(void *vaddr, size_t size, int direction)
58{
59 switch (direction) {
60 case PCI_DMA_NONE:
61 BUG();
62 case PCI_DMA_FROMDEVICE: /* invalidate only */
63 __invalidate_dcache_range((unsigned long)vaddr,
64 (unsigned long)size);
65 break;
66
67 case PCI_DMA_TODEVICE: /* writeback only */
68 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
69 __flush_invalidate_dcache_range((unsigned long)vaddr,
70 (unsigned long)size);
71 break;
72 }
73}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 000000000000..d29a81648637
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,563 @@
1/*
2 * arch/xtensa/pcibios.c
3 *
4 * PCI bios-type initialisation for PCI machines
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2001-2005 Tensilica Inc.
12 *
13 * Based largely on work from Cort (ppc/kernel/pci.c)
14 * IO functions copied from sparc.
15 *
16 * Chris Zankel <chris@zankel.net>
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/string.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/bootmem.h>
29
30#include <asm/pci-bridge.h>
31#include <asm/platform.h>
32
33#undef DEBUG
34
35#ifdef DEBUG
36#define DBG(x...) printk(x)
37#else
38#define DBG(x...)
39#endif
40
41/* PCI Controller */
42
43
44/*
45 * pcibios_alloc_controller
46 * pcibios_enable_device
47 * pcibios_fixups
48 * pcibios_align_resource
49 * pcibios_fixup_bus
50 * pcibios_setup
51 * pci_bus_add_device
52 * pci_mmap_page_range
53 */
54
55struct pci_controller* pci_ctrl_head;
56struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
57
58static int pci_bus_count;
59
60static void pcibios_fixup_resources(struct pci_dev* dev);
61
62#if 0 // FIXME
63struct pci_fixup pcibios_fixups[] = {
64 { DECLARE_PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
65 { 0 }
66};
67#endif
68
69void
70pcibios_update_resource(struct pci_dev *dev, struct resource *root,
71 struct resource *res, int resource)
72{
73 u32 new, check, mask;
74 int reg;
75 struct pci_controller* pci_ctrl = dev->sysdata;
76
77 new = res->start;
78 if (pci_ctrl && res->flags & IORESOURCE_IO) {
79 new -= pci_ctrl->io_space.base;
80 }
81 new |= (res->flags & PCI_REGION_FLAG_MASK);
82 if (resource < 6) {
83 reg = PCI_BASE_ADDRESS_0 + 4*resource;
84 } else if (resource == PCI_ROM_RESOURCE) {
85 res->flags |= PCI_ROM_ADDRESS_ENABLE;
86 reg = dev->rom_base_reg;
87 } else {
88 /* Somebody might have asked allocation of a non-standard resource */
89 return;
90 }
91
92 pci_write_config_dword(dev, reg, new);
93 pci_read_config_dword(dev, reg, &check);
94 mask = (new & PCI_BASE_ADDRESS_SPACE_IO) ?
95 PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
96
97 if ((new ^ check) & mask) {
98 printk(KERN_ERR "PCI: Error while updating region "
99 "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
100 new, check);
101 }
102}
103
104/*
105 * We need to avoid collisions with `mirrored' VGA ports
106 * and other strange ISA hardware, so we always want the
107 * addresses to be allocated in the 0x000-0x0ff region
108 * modulo 0x400.
109 *
110 * Why? Because some silly external IO cards only decode
111 * the low 10 bits of the IO address. The 0x00-0xff region
112 * is reserved for motherboard devices that decode all 16
113 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
114 * but we want to try to avoid allocating at 0x2900-0x2bff
115 * which might have be mirrored at 0x0100-0x03ff..
116 */
117void
118pcibios_align_resource(void *data, struct resource *res, unsigned long size,
119 unsigned long align)
120{
121 struct pci_dev *dev = data;
122
123 if (res->flags & IORESOURCE_IO) {
124 unsigned long start = res->start;
125
126 if (size > 0x100) {
127 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
128 " (%ld bytes)\n", dev->slot_name,
129 dev->resource - res, size);
130 }
131
132 if (start & 0x300) {
133 start = (start + 0x3ff) & ~0x3ff;
134 res->start = start;
135 }
136 }
137}
138
139int
140pcibios_enable_resources(struct pci_dev *dev, int mask)
141{
142 u16 cmd, old_cmd;
143 int idx;
144 struct resource *r;
145
146 pci_read_config_word(dev, PCI_COMMAND, &cmd);
147 old_cmd = cmd;
148 for(idx=0; idx<6; idx++) {
149 r = &dev->resource[idx];
150 if (!r->start && r->end) {
151 printk (KERN_ERR "PCI: Device %s not available because "
152 "of resource collisions\n", dev->slot_name);
153 return -EINVAL;
154 }
155 if (r->flags & IORESOURCE_IO)
156 cmd |= PCI_COMMAND_IO;
157 if (r->flags & IORESOURCE_MEM)
158 cmd |= PCI_COMMAND_MEMORY;
159 }
160 if (dev->resource[PCI_ROM_RESOURCE].start)
161 cmd |= PCI_COMMAND_MEMORY;
162 if (cmd != old_cmd) {
163 printk("PCI: Enabling device %s (%04x -> %04x)\n",
164 dev->slot_name, old_cmd, cmd);
165 pci_write_config_word(dev, PCI_COMMAND, cmd);
166 }
167 return 0;
168}
169
170struct pci_controller * __init pcibios_alloc_controller(void)
171{
172 struct pci_controller *pci_ctrl;
173
174 pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
175 memset(pci_ctrl, 0, sizeof(struct pci_controller));
176
177 *pci_ctrl_tail = pci_ctrl;
178 pci_ctrl_tail = &pci_ctrl->next;
179
180 return pci_ctrl;
181}
182
183static int __init pcibios_init(void)
184{
185 struct pci_controller *pci_ctrl;
186 struct pci_bus *bus;
187 int next_busno = 0, i;
188
189 printk("PCI: Probing PCI hardware\n");
190
191 /* Scan all of the recorded PCI controllers. */
192 for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
193 pci_ctrl->last_busno = 0xff;
194 bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
195 pci_ctrl);
196 if (pci_ctrl->io_resource.flags) {
197 unsigned long offs;
198
199 offs = (unsigned long)pci_ctrl->io_space.base;
200 pci_ctrl->io_resource.start += offs;
201 pci_ctrl->io_resource.end += offs;
202 bus->resource[0] = &pci_ctrl->io_resource;
203 }
204 for (i = 0; i < 3; ++i)
205 if (pci_ctrl->mem_resources[i].flags)
206 bus->resource[i+1] =&pci_ctrl->mem_resources[i];
207 pci_ctrl->bus = bus;
208 pci_ctrl->last_busno = bus->subordinate;
209 if (next_busno <= pci_ctrl->last_busno)
210 next_busno = pci_ctrl->last_busno+1;
211 }
212 pci_bus_count = next_busno;
213
214 return platform_pcibios_fixup();
215}
216
217subsys_initcall(pcibios_init);
218
219void __init pcibios_fixup_bus(struct pci_bus *bus)
220{
221 struct pci_controller *pci_ctrl = bus->sysdata;
222 struct resource *res;
223 unsigned long io_offset;
224 int i;
225
226 io_offset = (unsigned long)pci_ctrl->io_space.base;
227 if (bus->parent == NULL) {
228 /* this is a host bridge - fill in its resources */
229 pci_ctrl->bus = bus;
230
231 bus->resource[0] = res = &pci_ctrl->io_resource;
232 if (!res->flags) {
233 if (io_offset)
234 printk (KERN_ERR "I/O resource not set for host"
235 " bridge %d\n", pci_ctrl->index);
236 res->start = 0;
237 res->end = IO_SPACE_LIMIT;
238 res->flags = IORESOURCE_IO;
239 }
240 res->start += io_offset;
241 res->end += io_offset;
242
243 for (i = 0; i < 3; i++) {
244 res = &pci_ctrl->mem_resources[i];
245 if (!res->flags) {
246 if (i > 0)
247 continue;
248 printk(KERN_ERR "Memory resource not set for "
249 "host bridge %d\n", pci_ctrl->index);
250 res->start = 0;
251 res->end = ~0U;
252 res->flags = IORESOURCE_MEM;
253 }
254 bus->resource[i+1] = res;
255 }
256 } else {
257 /* This is a subordinate bridge */
258 pci_read_bridge_bases(bus);
259
260 for (i = 0; i < 4; i++) {
261 if ((res = bus->resource[i]) == NULL || !res->flags)
262 continue;
263 if (io_offset && (res->flags & IORESOURCE_IO)) {
264 res->start += io_offset;
265 res->end += io_offset;
266 }
267 }
268 }
269}
270
271char __init *pcibios_setup(char *str)
272{
273 return str;
274}
275
276/* the next one is stolen from the alpha port... */
277
278void __init
279pcibios_update_irq(struct pci_dev *dev, int irq)
280{
281 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
282}
283
284int pcibios_enable_device(struct pci_dev *dev, int mask)
285{
286 u16 cmd, old_cmd;
287 int idx;
288 struct resource *r;
289
290 pci_read_config_word(dev, PCI_COMMAND, &cmd);
291 old_cmd = cmd;
292 for (idx=0; idx<6; idx++) {
293 r = &dev->resource[idx];
294 if (!r->start && r->end) {
295 printk(KERN_ERR "PCI: Device %s not available because "
296 "of resource collisions\n", dev->slot_name);
297 return -EINVAL;
298 }
299 if (r->flags & IORESOURCE_IO)
300 cmd |= PCI_COMMAND_IO;
301 if (r->flags & IORESOURCE_MEM)
302 cmd |= PCI_COMMAND_MEMORY;
303 }
304 if (cmd != old_cmd) {
305 printk("PCI: Enabling device %s (%04x -> %04x)\n",
306 dev->slot_name, old_cmd, cmd);
307 pci_write_config_word(dev, PCI_COMMAND, cmd);
308 }
309
310 return 0;
311}
312
313#ifdef CONFIG_PROC_FS
314
315/*
316 * Return the index of the PCI controller for device pdev.
317 */
318
319int
320pci_controller_num(struct pci_dev *dev)
321{
322 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
323 return pci_ctrl->index;
324}
325
326#endif /* CONFIG_PROC_FS */
327
328
329static void
330pcibios_fixup_resources(struct pci_dev *dev)
331{
332 struct pci_controller* pci_ctrl = (struct pci_controller *)dev->sysdata;
333 int i;
334 unsigned long offset;
335
336 if (!pci_ctrl) {
337 printk(KERN_ERR "No pci_ctrl for PCI dev %s!\n",dev->slot_name);
338 return;
339 }
340 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
341 struct resource *res = dev->resource + i;
342 if (!res->start || !res->flags)
343 continue;
344 if (res->end == 0xffffffff) {
345 DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
346 dev->slot_name, i, res->start, res->end);
347 res->end -= res->start;
348 res->start = 0;
349 continue;
350 }
351 offset = 0;
352 if (res->flags & IORESOURCE_IO)
353 offset = (unsigned long) pci_ctrl->io_space.base;
354 else if (res->flags & IORESOURCE_MEM)
355 offset = (unsigned long) pci_ctrl->mem_space.base;
356
357 if (offset != 0) {
358 res->start += offset;
359 res->end += offset;
360#ifdef DEBUG
361 printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
362 i, res->flags, dev->slot_name,
363 res->start - offset, res->start);
364#endif
365 }
366 }
367}
368
369/*
370 * Platform support for /proc/bus/pci/X/Y mmap()s,
371 * modelled on the sparc64 implementation by Dave Miller.
372 * -- paulus.
373 */
374
375/*
376 * Adjust vm_pgoff of VMA such that it is the physical page offset
377 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
378 *
379 * Basically, the user finds the base address for his device which he wishes
380 * to mmap. They read the 32-bit value from the config space base register,
381 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
382 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
383 *
384 * Returns negative error code on failure, zero on success.
385 */
386static __inline__ int
387__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
388 enum pci_mmap_state mmap_state)
389{
390 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
391 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
392 unsigned long io_offset = 0;
393 int i, res_bit;
394
395 if (pci_ctrl == 0)
396 return -EINVAL; /* should never happen */
397
398 /* If memory, add on the PCI bridge address offset */
399 if (mmap_state == pci_mmap_mem) {
400 res_bit = IORESOURCE_MEM;
401 } else {
402 io_offset = (unsigned long)pci_ctrl->io_space.base;
403 offset += io_offset;
404 res_bit = IORESOURCE_IO;
405 }
406
407 /*
408 * Check that the offset requested corresponds to one of the
409 * resources of the device.
410 */
411 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
412 struct resource *rp = &dev->resource[i];
413 int flags = rp->flags;
414
415 /* treat ROM as memory (should be already) */
416 if (i == PCI_ROM_RESOURCE)
417 flags |= IORESOURCE_MEM;
418
419 /* Active and same type? */
420 if ((flags & res_bit) == 0)
421 continue;
422
423 /* In the range of this resource? */
424 if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
425 continue;
426
427 /* found it! construct the final physical address */
428 if (mmap_state == pci_mmap_io)
429 offset += pci_ctrl->io_space.start - io_offset;
430 vma->vm_pgoff = offset >> PAGE_SHIFT;
431 return 0;
432 }
433
434 return -EINVAL;
435}
436
437/*
438 * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
439 * mapping.
440 */
441static __inline__ void
442__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
443 enum pci_mmap_state mmap_state)
444{
445 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
446}
447
448/*
449 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
450 * device mapping.
451 */
452static __inline__ void
453__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
454 enum pci_mmap_state mmap_state, int write_combine)
455{
456 int prot = pgprot_val(vma->vm_page_prot);
457
458 /* Set to write-through */
459 prot &= ~_PAGE_NO_CACHE;
460#if 0
461 if (!write_combine)
462 prot |= _PAGE_WRITETHRU;
463#endif
464 vma->vm_page_prot = __pgprot(prot);
465}
466
467/*
468 * Perform the actual remap of the pages for a PCI device mapping, as
469 * appropriate for this architecture. The region in the process to map
470 * is described by vm_start and vm_end members of VMA, the base physical
471 * address is found in vm_pgoff.
472 * The pci device structure is provided so that architectures may make mapping
473 * decisions on a per-device or per-bus basis.
474 *
475 * Returns a negative error code on failure, zero on success.
476 */
477int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
478 enum pci_mmap_state mmap_state,
479 int write_combine)
480{
481 int ret;
482
483 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
484 if (ret < 0)
485 return ret;
486
487 __pci_mmap_set_flags(dev, vma, mmap_state);
488 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
489
490 ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
491 vma->vm_end - vma->vm_start, vma->vm_page_prot);
492
493 return ret;
494}
495
496/*
497 * This probably belongs here rather than ioport.c because
498 * we do not want this crud linked into SBus kernels.
499 * Also, think for a moment about likes of floppy.c that
500 * include architecture specific parts. They may want to redefine ins/outs.
501 *
502 * We do not use horrible macros here because we want to
503 * advance pointer by sizeof(size).
504 */
/* Write 'count' bytes from 'src' to successive byte addresses at 'addr'. */
void outsb(unsigned long addr, const void *src, unsigned long count)
{
	while (count--) {
		writeb(*(const char *)src, addr);
		src += 1;
		addr += 1;
	}
}
513
/* Write 'count' 16-bit values from 'src' to successive addresses at 'addr'.
 *
 * Fix: per the kernel outsw() contract, 'count' is the number of 16-bit
 * elements, not bytes; the original decremented it by 2 per iteration
 * and therefore transferred only half the requested data.
 */
void outsw(unsigned long addr, const void *src, unsigned long count)
{
	while (count--) {
		writew(*(const short *)src, addr);
		src += 2;
		addr += 2;
	}
}
522
/* Write 'count' 32-bit values from 'src' to successive addresses at 'addr'.
 *
 * Fix: 'count' is the number of 32-bit elements (kernel outsl() contract),
 * not bytes; the original decremented it by 4 per iteration and
 * transferred only a quarter of the requested data.
 */
void outsl(unsigned long addr, const void *src, unsigned long count)
{
	while (count--) {
		writel(*(const long *)src, addr);
		src += 4;
		addr += 4;
	}
}
531
/* Read 'count' bytes from successive byte addresses at 'addr' into 'dst'. */
void insb(unsigned long addr, void *dst, unsigned long count)
{
	while (count--) {
		*(unsigned char *)dst = readb(addr);
		dst += 1;
		addr += 1;
	}
}
540
/* Read 'count' 16-bit values from successive addresses at 'addr' into 'dst'.
 *
 * Fix: 'count' is the number of 16-bit elements (kernel insw() contract),
 * not bytes; the original decremented it by 2 per iteration and read
 * only half the requested data.
 */
void insw(unsigned long addr, void *dst, unsigned long count)
{
	while (count--) {
		*(unsigned short *)dst = readw(addr);
		dst += 2;
		addr += 2;
	}
}
549
/* Read 'count' 32-bit values from successive addresses at 'addr' into 'dst'.
 *
 * Fix: 'count' is the number of 32-bit elements (kernel insl() contract),
 * not bytes; the original decremented it by 4 per iteration.
 */
void insl(unsigned long addr, void *dst, unsigned long count)
{
	while (count--) {
		/*
		 * XXX I am sure we are in for an unaligned trap here.
		 */
		*(unsigned long *)dst = readl(addr);
		dst += 4;
		addr += 4;
	}
}
561
562
563
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 000000000000..cf1362784443
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,49 @@
1/*
2 * arch/xtensa/kernel/platform.c
3 *
4 * Default platform functions.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/types.h>
17#include <linux/pci.h>
18#include <linux/time.h>
19#include <asm/platform.h>
20#include <asm/timex.h>
21
/* _F(ret, name, args, body) emits a default implementation
 * __platform_<name> plus a weak alias platform_<name>; a platform port
 * overrides the weak symbol with its own strong definition.
 */
#define _F(r,f,a,b) \
	r __platform_##f a b; \
	r platform_##f a __attribute__((weak, alias("__platform_"#f)))
25
26/*
27 * Default functions that are used if no platform specific function is defined.
28 * (Please, refer to include/asm-xtensa/platform.h for more information)
29 */
30
31_F(void, setup, (char** cmd), { });
32_F(void, init_irq, (void), { });
33_F(void, restart, (void), { while(1); });
34_F(void, halt, (void), { while(1); });
35_F(void, power_off, (void), { while(1); });
36_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
37_F(void, heartbeat, (void), { });
38_F(int, pcibios_fixup, (void), { return 0; });
39_F(int, get_rtc_time, (time_t* t), { return 0; });
40_F(int, set_rtc_time, (time_t t), { return 0; });
41
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
/* Default calibration stub: warn and assume a 100 MHz core clock.
 * Fix: '#if CONFIG_...' relied on the undefined macro evaluating to 0
 * and triggers -Wundef; '#ifdef' is the kernel idiom for boolean
 * config options.
 */
_F(void, calibrate_ccount, (void),
{
	printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
	ccount_per_jiffy = 100 * (1000000UL/HZ);
});
#endif
49
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 000000000000..4099703b14be
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,482 @@
1// TODO verify coprocessor handling
2/*
3 * arch/xtensa/kernel/process.c
4 *
5 * Xtensa Processor version.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/elf.h>
31#include <linux/init.h>
32#include <linux/prctl.h>
33#include <linux/init_task.h>
34#include <linux/module.h>
35#include <linux/mqueue.h>
36
37#include <asm/pgtable.h>
38#include <asm/uaccess.h>
39#include <asm/system.h>
40#include <asm/io.h>
41#include <asm/processor.h>
42#include <asm/platform.h>
43#include <asm/mmu.h>
44#include <asm/irq.h>
45#include <asm/atomic.h>
46#include <asm/offsets.h>
47#include <asm/coprocessor.h>
48
49extern void ret_from_fork(void);
50
/* Statically-initialized state for the very first task ("swapper"). */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* Initial thread structure/stack; placed in its own section so the
 * linker script can give it the required THREAD_SIZE alignment. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* Per-CPU current task; only CPU 0 is populated here. */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
66
67
#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 *
 * One entry per possible coprocessor: the owning task (initially none)
 * and the offset of that coprocessor's save area within a task's
 * coprocessor save buffer.
 */

coprocessor_info_t coprocessor_info[] = {
	{ 0, XTENSA_CPE_CP0_OFFSET },
	{ 0, XTENSA_CPE_CP1_OFFSET },
	{ 0, XTENSA_CPE_CP2_OFFSET },
	{ 0, XTENSA_CPE_CP3_OFFSET },
	{ 0, XTENSA_CPE_CP4_OFFSET },
	{ 0, XTENSA_CPE_CP5_OFFSET },
	{ 0, XTENSA_CPE_CP6_OFFSET },
	{ 0, XTENSA_CPE_CP7_OFFSET },
};

#endif
86
87/*
88 * Powermanagement idle function, if any is provided by the platform.
89 */
90
/*
 * Idle loop: spin in the platform idle hook until a reschedule is needed.
 *
 * Note: the original code called preempt_enable() on every loop iteration
 * without a matching preempt_disable(), which unbalances the preempt count
 * on CONFIG_PREEMPT kernels.  Use the standard idle-loop pairing instead:
 * enable (without rescheduling), schedule, then disable again before
 * re-entering the inner idle loop.
 */
void cpu_idle(void)
{
	local_irq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			platform_idle();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
103
104/*
105 * Free current thread data structures etc..
106 */
107
/* Drop any coprocessor ownership held by the exiting task. */
void exit_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
112
/* Called on exec: discard coprocessor state inherited from the old image. */
void flush_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
117
118/*
119 * Copy thread.
120 *
121 * The stack layout for the new thread looks like this:
122 *
123 * +------------------------+ <- sp in childregs (= tos)
124 * | childregs |
125 * +------------------------+ <- thread.sp = sp in dummy-frame
126 * | dummy-frame | (saved in dummy-frame spill-area)
127 * +------------------------+
128 *
129 * We create a dummy frame to return to ret_from_fork:
130 * a0 points to ret_from_fork (simulating a call4)
131 * sp points to itself (thread.sp)
132 * a2, a3 are unused.
133 *
134 * Note: This is a pristine frame, so we don't need any spill region on top of
135 * childregs.
136 */
137
/*
 * Set up the child's kernel stack and saved register state for fork/clone.
 * (See the stack-layout diagram in the comment above this function.)
 *
 * Returns 0 (no failure paths here).
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	unsigned long tos;
	/* Shadows the user_mode() macro; the macro still expands below
	 * wherever it is followed by '(' — confusing but functional. */
	int user_mode = user_mode(regs);

	/* Set up new TSS. */
	tos = (unsigned long)p->thread_info + THREAD_SIZE;
	if (user_mode)
		/* User task: leave room for the extra user register area. */
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	/* Child starts with a copy of the parent's register frame. */
	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

	childregs->areg[1] = tos;
	childregs->areg[2] = 0;		/* child's fork() return value */
	p->set_child_tid = p->clear_child_tid = NULL;
	/* Resume the child in ret_from_fork via the dummy call4 frame. */
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;
	if (user_mode(regs)) {

		/* Copy the caller-saved register frames indicated by the
		 * upper bits of wmask (len is a byte count). */
		int len = childregs->wmask & ~0xf;
		childregs->areg[1] = usp;
		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

		/* areg[6] carries the TLS pointer on clone — TODO confirm
		 * against the clone syscall entry convention. */
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];

	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
	}
	return 0;
}
180
181
182/*
183 * Create a kernel thread
184 */
185
/*
 * Create a kernel thread.
 *
 * Issues the clone syscall directly; the parent is detected by its stack
 * pointer being unchanged across the syscall (the child runs on a new
 * stack).  The child calls fn(arg) and then exits with its return value.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;
	__asm__ __volatile__
		("mov           a5, %4\n\t" /* preserve fn in a5 */
		 "mov           a6, %3\n\t" /* preserve and setup arg in a6 */
		 "movi          a2, %1\n\t" /* load __NR_clone for syscall*/
		 "mov           a3, sp\n\t" /* sp check and sys_clone */
		 "mov           a4, %5\n\t" /* load flags for syscall */
		 "syscall\n\t"
		 "beq           a3, sp, 1f\n\t" /* branch if parent */
		 "callx4        a5\n\t"         /* call fn */
		 "movi          a2, %2\n\t"     /* load __NR_exit for syscall */
		 "mov           a3, a6\n\t"     /* load fn return value */
		 "syscall\n"
		 "1:\n\t"
		 "mov           %0, a2\n\t"     /* parent returns zero */
		 :"=r" (retval)
		 :"i" (__NR_clone), "i" (__NR_exit),
		  "r" (arg), "r" (fn),
		  "r" (flags | CLONE_VM)
		 : "a2", "a3", "a4", "a5", "a6" );
	return retval;
}
210
211
212/*
213 * These bracket the sleeping functions..
214 */
215
216unsigned long get_wchan(struct task_struct *p)
217{
218 unsigned long sp, pc;
219 unsigned long stack_page = (unsigned long) p->thread_info;
220 int count = 0;
221
222 if (!p || p == current || p->state == TASK_RUNNING)
223 return 0;
224
225 sp = p->thread.sp;
226 pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
227
228 do {
229 if (sp < stack_page + sizeof(struct task_struct) ||
230 sp >= (stack_page + THREAD_SIZE) ||
231 pc == 0)
232 return 0;
233 if (!in_sched_functions(pc))
234 return pc;
235
236 /* Stack layout: sp-4: ra, sp-3: sp' */
237
238 pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
239 sp = *(unsigned long *)sp - 3;
240 } while (count++ < 16);
241 return 0;
242}
243
244/*
245 * do_copy_regs() gathers information from 'struct pt_regs' and
246 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
247 * structure.
248 *
249 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
250 * of processor registers. Besides different ordering,
251 * xtensa_gregset_t contains non-live register information that
252 * 'struct pt_regs' does not. Exception handling (primarily) uses
253 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
254 *
255 */
256
/*
 * Convert a live 'struct pt_regs' into the xtensa_gregset_t layout used
 * by ptrace and core dumps (see the block comment above).
 */
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		   struct task_struct *tsk)
{
	int i, n, wb_offset;

	elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
	elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

	/* Read special registers 176/208 directly — presumably
	 * implementation-specific state; TODO confirm register names. */
	__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
	elfregs->cpux = i;
	__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
	elfregs->cpuy = i;

	/* Note:  PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc		= regs->pc;
	elfregs->ps		= (regs->ps & ~XCHAL_PS_EXCM_MASK);
	elfregs->exccause	= regs->exccause;
	elfregs->excvaddr	= regs->excvaddr;
	elfregs->windowbase	= regs->windowbase;
	elfregs->windowstart	= regs->windowstart;
	elfregs->lbeg		= regs->lbeg;
	elfregs->lend		= regs->lend;
	elfregs->lcount		= regs->lcount;
	elfregs->sar		= regs->sar;
	elfregs->syscall	= regs->syscall;

	/* Copy register file.
	 * The layout looks like this:
	 *
	 *   |  a0 ... a15  | Z ... Z |  arX ... arY  |
	 *    current window  unused    saved frames
	 */

	memset (elfregs->ar, 0, sizeof(elfregs->ar));

	wb_offset = regs->windowbase * 4;
	/* Number of live registers in the current window (wmask low bits
	 * encode 4/8/12/16 valid aregs). */
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

	/* wmask >> 4 is the number of 4-register frames saved at the top
	 * of the areg array. */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
306
307void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
308{
309 do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
310}
311
312
313/* The inverse of do_copy_regs(). No error or sanity checking. */
314
/* The inverse of do_copy_regs():  rebuild 'struct pt_regs' from an
 * xtensa_gregset_t.  No error or sanity checking. */

void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		      struct task_struct *tsk)
{
	int i, n, wb_offset;

	/* Note:  PS.EXCM is not set while user task is running; it
	 * needs to be set in regs->ps is for exception handling convenience.
	 */

	regs->pc		= elfregs->pc;
	regs->ps		= (elfregs->ps | XCHAL_PS_EXCM_MASK);
	regs->exccause		= elfregs->exccause;
	regs->excvaddr		= elfregs->excvaddr;
	regs->windowbase	= elfregs->windowbase;
	regs->windowstart	= elfregs->windowstart;
	regs->lbeg		= elfregs->lbeg;
	regs->lend		= elfregs->lend;
	regs->lcount		= elfregs->lcount;
	regs->sar		= elfregs->sar;
	regs->syscall		= elfregs->syscall;

	/* Clear everything. */

	memset (regs->areg, 0, sizeof(regs->areg));

	/* Copy regs from live window frame.
	 * NOTE(review): n is derived from regs->wmask, i.e. the wmask
	 * already present in regs, not from elfregs — confirm intended. */

	wb_offset = regs->windowbase * 4;
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];

	/* Restore the saved frames at the top of the areg array. */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}
353
354/*
355 * do_save_fpregs() gathers information from 'struct pt_regs' and
356 * 'current->thread' to fill in the elf_fpregset_t structure.
357 *
358 * Core files and ptrace use elf_fpregset_t.
359 */
360
/*
 * Dump coprocessor/extra state into the elf_fpregset_t layout:
 * first the reginfo description tables, then the raw save area.
 * No-op on configurations without coprocessors (XCHAL_HAVE_CP unset).
 */
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char	_xtensa_reginfo_tables[];
	extern unsigned		_xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Before dumping coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first saved to memory:
	 */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now dump coprocessor & extra state: */
	memcpy((unsigned char*)fpregs,
		_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
	memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
		tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
395
396/*
397 * The inverse of do_save_fpregs().
398 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
399 * Returns 0 on success, non-zero if layout doesn't match.
400 */
401
/*
 * The inverse of do_save_fpregs().
 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
 * Returns 0 on success, non-zero if layout doesn't match.
 */

int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char	_xtensa_reginfo_tables[];
	extern unsigned		_xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Make sure save area layouts match.
	 * FIXME:  in the future we could allow restoring from
	 * a different layout of the same registers, by comparing
	 * fpregs' table with _xtensa_reginfo_tables and matching
	 * entries and copying registers one at a time.
	 * Not too sure yet whether that's very useful.
	 */

	if( memcmp((unsigned char*)fpregs,
		   _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
		return -1;
	}

	/* Before restoring coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first invalidated.
	 */

	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			/* Save first so the stale live contents don't get
			 * written back later, then drop ownership. */
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			coprocessor_info[i].owner = 0;
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now restore coprocessor & extra state: */

	memcpy(tsk->thread.cp_save,
	       (unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       XTENSA_CP_EXTRA_SIZE);
#endif
	return 0;
}
452/*
453 * Fill in the CP structure for a core dump for a particular task.
454 */
455
/*
 * Fill in the CP structure for a core dump for a particular task.
 * Returns a mask describing the dumped state, or 0 if there is nothing
 * beyond the minimal save area to dump.
 */
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if TOTAL_CPEXTRA_SIZE > 16
	do_save_fpregs (r, regs, task);

	/* For now, bit 16 means some extra state may be present: */
// FIXME!! need to track to return more accurate mask
	return 0x10000 | XCHAL_CP_MASK;
#else
	return 0;	/* no coprocessors active on this processor */
#endif
}
470
471/*
472 * Fill in the CP structure for a core dump.
473 * This includes any FPU coprocessor.
474 * Here, we dump all coprocessors, and other ("extra") custom state.
475 *
476 * This function is called by elf_core_dump() in fs/binfmt_elf.c
477 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
478 */
/* Core-dump entry point for the current task; see comment above. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	return dump_task_fpu(regs, current, r);
}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 000000000000..9ef07a4dd2a2
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,407 @@
1// TODO some minor issues
2/*
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (C) 2001 - 2005 Tensilica Inc.
8 *
9 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
10 * Chris Zankel <chris@zankel.net>
11 * Scott Foehner<sfoehner@yahoo.com>,
12 * Kevin Chea
13 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
14 */
15
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/errno.h>
21#include <linux/ptrace.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/security.h>
25
26#include <asm/pgtable.h>
27#include <asm/page.h>
28#include <asm/system.h>
29#include <asm/uaccess.h>
30#include <asm/ptrace.h>
31#include <asm/elf.h>
32
33#define TEST_KERNEL // verify kernel operations FIXME: remove
34
35
36/*
37 * Called by kernel/ptrace.c when detaching..
38 *
39 * Make sure single step bits etc are not set.
40 */
41
/* Called by kernel/ptrace.c when detaching: this port keeps no
 * single-step hardware state, so there is nothing to clear. */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}
46
47int sys_ptrace(long request, long pid, long addr, long data)
48{
49 struct task_struct *child;
50 int ret = -EPERM;
51
52 lock_kernel();
53
54#if 0
55 if ((int)request != 1)
56 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
57 (int) request, (int) pid, (unsigned long) addr,
58 (unsigned long) data);
59#endif
60
61 if (request == PTRACE_TRACEME) {
62
63 /* Are we already being traced? */
64
65 if (current->ptrace & PT_PTRACED)
66 goto out;
67
68 if ((ret = security_ptrace(current->parent, current)))
69 goto out;
70
71 /* Set the ptrace bit in the process flags. */
72
73 current->ptrace |= PT_PTRACED;
74 ret = 0;
75 goto out;
76 }
77
78 ret = -ESRCH;
79 read_lock(&tasklist_lock);
80 child = find_task_by_pid(pid);
81 if (child)
82 get_task_struct(child);
83 read_unlock(&tasklist_lock);
84 if (!child)
85 goto out;
86
87 ret = -EPERM;
88 if (pid == 1) /* you may not mess with init */
89 goto out;
90
91 if (request == PTRACE_ATTACH) {
92 ret = ptrace_attach(child);
93 goto out_tsk;
94 }
95
96 if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
97 goto out_tsk;
98
99 switch (request) {
100 case PTRACE_PEEKTEXT: /* read word at location addr. */
101 case PTRACE_PEEKDATA:
102 {
103 unsigned long tmp;
104 int copied;
105
106 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
107 ret = -EIO;
108 if (copied != sizeof(tmp))
109 break;
110 ret = put_user(tmp,(unsigned long *) data);
111
112 goto out;
113 }
114
115 /* Read the word at location addr in the USER area. */
116
117 case PTRACE_PEEKUSR:
118 {
119 struct pt_regs *regs;
120 unsigned long tmp;
121
122 regs = xtensa_pt_regs(child);
123 tmp = 0; /* Default return value. */
124
125 switch(addr) {
126
127 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
128 {
129 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
130 ar &= (XCHAL_NUM_AREGS - 1);
131 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
132 tmp = regs->areg[ar];
133 else
134 ret = -EIO;
135 break;
136 }
137 case REG_A_BASE ... REG_A_BASE + 15:
138 tmp = regs->areg[addr - REG_A_BASE];
139 break;
140 case REG_PC:
141 tmp = regs->pc;
142 break;
143 case REG_PS:
144 /* Note: PS.EXCM is not set while user task is running;
145 * its being set in regs is for exception handling
146 * convenience. */
147 tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
148 break;
149 case REG_WB:
150 tmp = regs->windowbase;
151 break;
152 case REG_WS:
153 tmp = regs->windowstart;
154 break;
155 case REG_LBEG:
156 tmp = regs->lbeg;
157 break;
158 case REG_LEND:
159 tmp = regs->lend;
160 break;
161 case REG_LCOUNT:
162 tmp = regs->lcount;
163 break;
164 case REG_SAR:
165 tmp = regs->sar;
166 break;
167 case REG_DEPC:
168 tmp = regs->depc;
169 break;
170 case REG_EXCCAUSE:
171 tmp = regs->exccause;
172 break;
173 case REG_EXCVADDR:
174 tmp = regs->excvaddr;
175 break;
176 case SYSCALL_NR:
177 tmp = regs->syscall;
178 break;
179 default:
180 tmp = 0;
181 ret = -EIO;
182 goto out;
183 }
184 ret = put_user(tmp, (unsigned long *) data);
185 goto out;
186 }
187
188 case PTRACE_POKETEXT: /* write the word at location addr. */
189 case PTRACE_POKEDATA:
190 if (access_process_vm(child, addr, &data, sizeof(data), 1)
191 == sizeof(data))
192 break;
193 ret = -EIO;
194 goto out;
195
196 case PTRACE_POKEUSR:
197 {
198 struct pt_regs *regs;
199 regs = xtensa_pt_regs(child);
200
201 switch (addr) {
202 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
203 {
204 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
205 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
206 regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
207 else
208 ret = -EIO;
209 break;
210 }
211 case REG_A_BASE ... REG_A_BASE + 15:
212 regs->areg[addr - REG_A_BASE] = data;
213 break;
214 case REG_PC:
215 regs->pc = data;
216 break;
217 case SYSCALL_NR:
218 regs->syscall = data;
219 break;
220#ifdef TEST_KERNEL
221 case REG_WB:
222 regs->windowbase = data;
223 break;
224 case REG_WS:
225 regs->windowstart = data;
226 break;
227#endif
228
229 default:
230 /* The rest are not allowed. */
231 ret = -EIO;
232 break;
233 }
234 break;
235 }
236
237 /* continue and stop at next (return from) syscall */
238 case PTRACE_SYSCALL:
239 case PTRACE_CONT: /* restart after signal. */
240 {
241 ret = -EIO;
242 if ((unsigned long) data > _NSIG)
243 break;
244 if (request == PTRACE_SYSCALL)
245 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
246 else
247 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
248 child->exit_code = data;
249 /* Make sure the single step bit is not set. */
250 child->ptrace &= ~PT_SINGLESTEP;
251 wake_up_process(child);
252 ret = 0;
253 break;
254 }
255
256 /*
257 * make the child exit. Best I can do is send it a sigkill.
258 * perhaps it should be put in the status that it wants to
259 * exit.
260 */
261 case PTRACE_KILL:
262 ret = 0;
263 if (child->state == EXIT_ZOMBIE) /* already dead */
264 break;
265 child->exit_code = SIGKILL;
266 child->ptrace &= ~PT_SINGLESTEP;
267 wake_up_process(child);
268 break;
269
270 case PTRACE_SINGLESTEP:
271 ret = -EIO;
272 if ((unsigned long) data > _NSIG)
273 break;
274 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
275 child->ptrace |= PT_SINGLESTEP;
276 child->exit_code = data;
277 wake_up_process(child);
278 ret = 0;
279 break;
280
281 case PTRACE_GETREGS:
282 {
283 /* 'data' points to user memory in which to write.
284 * Mainly due to the non-live register values, we
285 * reformat the register values into something more
286 * standard. For convenience, we use the handy
287 * elf_gregset_t format. */
288
289 xtensa_gregset_t format;
290 struct pt_regs *regs = xtensa_pt_regs(child);
291
292 do_copy_regs (&format, regs, child);
293
294 /* Now, copy to user space nice and easy... */
295 ret = 0;
296 if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
297 ret = -EFAULT;
298 break;
299 }
300
301 case PTRACE_SETREGS:
302 {
303 /* 'data' points to user memory that contains the new
304 * values in the elf_gregset_t format. */
305
306 xtensa_gregset_t format;
307 struct pt_regs *regs = xtensa_pt_regs(child);
308
309 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
310 ret = -EFAULT;
311 break;
312 }
313
314 /* FIXME: Perhaps we want some sanity checks on
315 * these user-space values? See ARM version. Are
316 * debuggers a security concern? */
317
318 do_restore_regs (&format, regs, child);
319
320 ret = 0;
321 break;
322 }
323
324 case PTRACE_GETFPREGS:
325 {
326 /* 'data' points to user memory in which to write.
327 * For convenience, we use the handy
328 * elf_fpregset_t format. */
329
330 elf_fpregset_t fpregs;
331 struct pt_regs *regs = xtensa_pt_regs(child);
332
333 do_save_fpregs (&fpregs, regs, child);
334
335 /* Now, copy to user space nice and easy... */
336 ret = 0;
337 if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
338 ret = -EFAULT;
339
340 break;
341 }
342
343 case PTRACE_SETFPREGS:
344 {
345 /* 'data' points to user memory that contains the new
346 * values in the elf_fpregset_t format.
347 */
348 elf_fpregset_t fpregs;
349 struct pt_regs *regs = xtensa_pt_regs(child);
350
351 ret = 0;
352 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
353 ret = -EFAULT;
354 break;
355 }
356
357 if (do_restore_fpregs (&fpregs, regs, child))
358 ret = -EIO;
359 break;
360 }
361
362 case PTRACE_GETFPREGSIZE:
363 /* 'data' points to 'unsigned long' set to the size
364 * of elf_fpregset_t
365 */
366 ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
367 break;
368
369 case PTRACE_DETACH: /* detach a process that was attached. */
370 ret = ptrace_detach(child, data);
371 break;
372
373 default:
374 ret = ptrace_request(child, request, addr, data);
375 goto out;
376 }
377out_tsk:
378 put_task_struct(child);
379out:
380 unlock_kernel();
381 return ret;
382}
383
384void do_syscall_trace(void)
385{
386 if (!test_thread_flag(TIF_SYSCALL_TRACE))
387 return;
388
389 if (!(current->ptrace & PT_PTRACED))
390 return;
391
392 /*
393 * The 0x80 provides a way for the tracing parent to distinguish
394 * between a syscall stop and SIGTRAP delivery
395 */
396 ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
397
398 /*
399 * this isn't the same as continuing with a signal, but it will do
400 * for normal use. strace only continues with a signal if the
401 * stopping signal is not SIGTRAP. -brl
402 */
403 if (current->exit_code) {
404 send_sig(current->exit_code, current, 1);
405 current->exit_code = 0;
406 }
407}
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
new file mode 100644
index 000000000000..d40f4b1b75ac
--- /dev/null
+++ b/arch/xtensa/kernel/semaphore.c
@@ -0,0 +1,226 @@
1/*
2 * arch/xtensa/kernel/semaphore.c
3 *
4 * Generic semaphore code. Buyer beware. Do your own specific changes
5 * in <asm/semaphore-helper.h>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * These two _must_ execute atomically wrt each other.
27 */
28
/* Record one pending wakeup.  NOTE(review): casts the plain-int
 * 'sleepers' field to atomic_t* — assumes atomic_t wraps a single int
 * with compatible layout; verify against asm/atomic.h. */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock in order to make atomic this atomic_inc() with the
56 * atomic_read() in wake_one_more(), otherwise we can race. -arca
57 */
58
/* Like waking_non_zero(), but also checks for pending signals (see the
 * return-value contract in the comment above). */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						    struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Undo the down_interruptible() decrement while still
		 * holding the lock (see comment above). */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
76
77/*
78 * waking_non_zero_trylock:
79 * 1 failed to lock
80 * 0 got the lock
81 *
82 * We must undo the sem->count down_trylock() increment while we are
83 * protected by the spinlock in order to make atomic this atomic_inc() with the
84 * atomic_read() in wake_one_more(), otherwise we can race. -arca
85 */
86
87static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
88{
89 unsigned long flags;
90 int ret = 1;
91
92 spin_lock_irqsave(&semaphore_wake_lock, flags);
93 if (sem->sleepers <= 0)
94 atomic_inc(&sem->count);
95 else {
96 sem->sleepers--;
97 ret = 0;
98 }
99 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 return ret;
101}
102
103spinlock_t semaphore_wake_lock;
104
105/*
106 * Semaphores are implemented using a two-way counter:
107 * The "count" variable is decremented for each process
108 * that tries to sleep, while the "waking" variable is
109 * incremented when the "up()" code goes to wake up waiting
110 * processes.
111 *
112 * Notably, the inline "up()" and "down()" functions can
113 * efficiently test if they need to do any extra work (up
114 * needs to do something only if count was negative before
115 * the increment operation.
116 *
117 * waking_non_zero() (from asm/semaphore.h) must execute
118 * atomically.
119 *
120 * When __up() is called, the count was negative before
121 * incrementing it, and we need to wake up somebody.
122 *
123 * This routine adds one to the count of processes that need to
124 * wake up and exit. ALL waiting processes actually wake up but
125 * only the one that gets to the "waking" field first will gate
126 * through and acquire the semaphore. The others will go back
127 * to sleep.
128 *
129 * Note that these functions are only called when there is
130 * contention on the lock, and as such all this is the
131 * "non-critical" part of the whole semaphore business. The
132 * critical part is the inline stuff in <asm/semaphore.h>
133 * where we want to avoid any extra jumps and calls.
134 */
135
/* Slow path of up(): record one wakeup and rouse all waiters; the first
 * to claim the wakeup wins (see the long comment above). */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
141
142/*
143 * Perform the "down" function. Return zero for semaphore acquired,
144 * return negative for signalled out of the function.
145 *
146 * If called from __down, the return is ignored and the wait loop is
147 * not interruptible. This means that a task waiting on a semaphore
148 * using "down()" cannot be killed until someone does an "up()" on
149 * the semaphore.
150 *
151 * If called from __down_interruptible, the return value gets checked
152 * upon return. If the return value is negative then the task continues
153 * with the negative value in the return register (it can be tested by
154 * the caller).
155 *
156 * Either form may be used in conjunction with "up()".
157 *
158 */
159
/* Declare and initialize the wait-queue entry used while sleeping. */
#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

/* Enter the sleep loop: queue ourselves and open the retry loop that
 * DOWN_TAIL closes. */
#define DOWN_HEAD(task_state)				\
							\
							\
	tsk->state = (task_state);			\
	add_wait_queue(&sem->wait, &wait);		\
							\
	/*						\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.				\
	 *						\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.			\
	 *						\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *						\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */						\
	for (;;) {

/* Close the retry loop and dequeue ourselves again. */
#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
193
/* Uninterruptible down() slow path: sleep until a wakeup is claimed. */
void __sched __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
203
/* Interruptible down() slow path: returns 0 when acquired, -EINTR when
 * interrupted by a signal. */
int __sched __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
222
/* Non-blocking down(): 0 when acquired, 1 on failure. */
int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 000000000000..1f5bf5d624e4
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,520 @@
1/*
 * arch/xtensa/kernel/setup.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995 Linus Torvalds
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Kevin Chea
14 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/tty.h>
22#include <linux/bootmem.h>
23#include <linux/kernel.h>
24
25#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
26# include <linux/console.h>
27#endif
28
29#ifdef CONFIG_RTC
30# include <linux/timex.h>
31#endif
32
33#ifdef CONFIG_PROC_FS
34# include <linux/seq_file.h>
35#endif
36
37#include <asm/system.h>
38#include <asm/bootparam.h>
39#include <asm/pgtable.h>
40#include <asm/processor.h>
41#include <asm/timex.h>
42#include <asm/platform.h>
43#include <asm/page.h>
44#include <asm/setup.h>
45
46#include <xtensa/config/system.h>
47
48#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
49struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
50#endif
51
52#ifdef CONFIG_BLK_DEV_FD
53extern struct fd_ops no_fd_ops;
54struct fd_ops *fd_ops;
55#endif
56
57#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
58extern struct ide_ops no_ide_ops;
59struct ide_ops *ide_ops;
60#endif
61
62extern struct rtc_ops no_rtc_ops;
63struct rtc_ops *rtc_ops;
64
65#ifdef CONFIG_PC_KEYB
66extern struct kbd_ops no_kbd_ops;
67struct kbd_ops *kbd_ops;
68#endif
69
70#ifdef CONFIG_BLK_DEV_INITRD
71extern void *initrd_start;
72extern void *initrd_end;
73extern void *__initrd_start;
74extern void *__initrd_end;
75int initrd_is_mapped = 0;
76extern int initrd_below_start_ok;
77#endif
78
79unsigned char aux_device_present;
80extern unsigned long loops_per_jiffy;
81
82/* Command line specified as configuration option. */
83
84static char command_line[COMMAND_LINE_SIZE];
85
86#ifdef CONFIG_CMDLINE_BOOL
87static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
88#endif
89
90sysmem_info_t __initdata sysmem;
91
92#ifdef CONFIG_BLK_DEV_INITRD
93int initrd_is_mapped;
94#endif
95
96extern void init_mmu(void);
97
98/*
99 * Boot parameter parsing.
100 *
101 * The Xtensa port uses a list of variable-sized tags to pass data to
102 * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
103 * to be recognised. The list is terminated with a zero-sized
104 * BP_TAG_LAST tag.
105 */
106
/* A boot-parameter tag parser: 'parse' is invoked for every tag whose
 * id matches 'tag'.  Instances live in the .taglist section so that
 * parse_bootparam() can iterate them between __tagtable_begin and
 * __tagtable_end. */
typedef struct tagtable {
	u32 tag;
	int (*parse)(const bp_tag_t*);
} tagtable_t;

/* Register a tag parser by placing a tagtable_t into .taglist. */
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
	__attribute__((unused, __section__(".taglist"))) = { tag, fn }
114
115/* parse current tag */
116
117static int __init parse_tag_mem(const bp_tag_t *tag)
118{
119 meminfo_t *mi = (meminfo_t*)(tag->data);
120
121 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
122 return -1;
123
124 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
125 printk(KERN_WARNING
126 "Ignoring memory bank 0x%08lx size %ldKB\n",
127 (unsigned long)mi->start,
128 (unsigned long)mi->end - (unsigned long)mi->start);
129 return -EINVAL;
130 }
131 sysmem.bank[sysmem.nr_banks].type = mi->type;
132 sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
133 sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE;
134 sysmem.nr_banks++;
135
136 return 0;
137}
138
139__tagtable(BP_TAG_MEMORY, parse_tag_mem);
140
141#ifdef CONFIG_BLK_DEV_INITRD
142
143static int __init parse_tag_initrd(const bp_tag_t* tag)
144{
145 meminfo_t* mi;
146 mi = (meminfo_t*)(tag->data);
147 initrd_start = (void*)(mi->start);
148 initrd_end = (void*)(mi->end);
149
150 return 0;
151}
152
153__tagtable(BP_TAG_INITRD, parse_tag_initrd);
154
155#endif /* CONFIG_BLK_DEV_INITRD */
156
157static int __init parse_tag_cmdline(const bp_tag_t* tag)
158{
159 strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
160 command_line[COMMAND_LINE_SIZE - 1] = '\0';
161 return 0;
162}
163
164__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
165
/*
 * Walk the boot-parameter tag list and dispatch each tag to its parser.
 *
 * The list must begin with BP_TAG_FIRST and is terminated by
 * BP_TAG_LAST.  Parsers are collected in the .taglist section between
 * __tagtable_begin and __tagtable_end (see the __tagtable() macro).
 * Unknown tags are skipped with a warning.  Each tag header is
 * followed by 'size' bytes of payload; the next tag starts right after
 * that payload.
 */
static int __init parse_bootparam(const bp_tag_t* tag)
{
	extern tagtable_t __tagtable_begin, __tagtable_end;
	tagtable_t *t;

	/* Boot parameters must start with a BP_TAG_FIRST tag. */

	if (tag->id != BP_TAG_FIRST) {
		printk(KERN_WARNING "Invalid boot parameters!\n");
		return 0;
	}

	/* Skip over the BP_TAG_FIRST header and its payload. */
	tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);

	/* Parse all tags. */

	while (tag != NULL && tag->id != BP_TAG_LAST) {
		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
			if (tag->id == t->tag) {
				t->parse(tag);
				break;
			}
		}
		/* Loop ran off the end: no parser claimed this tag. */
		if (t == &__tagtable_end)
			printk(KERN_WARNING "Ignoring tag "
			       "0x%08x\n", tag->id);
		tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
	}

	return 0;
}
197
198/*
199 * Initialize architecture. (Early stage)
200 */
201
/*
 * Early architecture initialization.
 *
 * Establishes the initrd defaults from the linker-provided image,
 * parses the boot-parameter list (which may override memory layout,
 * initrd location and command line), falls back to the platform's
 * default memory bank when the boot loader supplied none, then calls
 * the platform early hook and sets up the MMU.
 */
void __init init_arch(bp_tag_t *bp_start)
{

#ifdef CONFIG_BLK_DEV_INITRD
	/* Default to the initrd linked into the kernel image, if any. */
	initrd_start = &__initrd_start;
	initrd_end = &__initrd_end;
#endif

	sysmem.nr_banks = 0;

#ifdef CONFIG_CMDLINE_BOOL
	/* Compile-time default; a BP_TAG_COMMAND_LINE tag overrides it. */
	strcpy(command_line, default_command_line);
#endif

	/* Parse boot parameters */

	if (bp_start)
		parse_bootparam(bp_start);

	if (sysmem.nr_banks == 0) {
		/* No memory tag seen: fall back to the platform default. */
		sysmem.nr_banks = 1;
		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
			+ PLATFORM_DEFAULT_MEM_SIZE;
	}

	/* Early hook for platforms */

	platform_init(bp_start);

	/* Initialize MMU. */

	init_mmu();
}
236
237/*
238 * Initialize system. Setup memory and reserve regions.
239 */
240
241extern char _end;
242extern char _stext;
243extern char _WindowVectors_text_start;
244extern char _WindowVectors_text_end;
245extern char _DebugInterruptVector_literal_start;
246extern char _DebugInterruptVector_text_end;
247extern char _KernelExceptionVector_literal_start;
248extern char _KernelExceptionVector_text_end;
249extern char _UserExceptionVector_literal_start;
250extern char _UserExceptionVector_text_end;
251extern char _DoubleExceptionVector_literal_start;
252extern char _DoubleExceptionVector_text_end;
253
254void __init setup_arch(char **cmdline_p)
255{
256 extern int mem_reserve(unsigned long, unsigned long, int);
257 extern void bootmem_init(void);
258
259 memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
260 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
261 *cmdline_p = command_line;
262
263 /* Reserve some memory regions */
264
265#ifdef CONFIG_BLK_DEV_INITRD
266 if (initrd_start < initrd_end) {
267 initrd_is_mapped = mem_reserve(__pa(initrd_start),
268 __pa(initrd_end), 0);
269 initrd_below_start_ok = 1;
270 } else {
271 initrd_start = 0;
272 }
273#endif
274
275 mem_reserve(__pa(&_stext),__pa(&_end), 1);
276
277 mem_reserve(__pa(&_WindowVectors_text_start),
278 __pa(&_WindowVectors_text_end), 0);
279
280 mem_reserve(__pa(&_DebugInterruptVector_literal_start),
281 __pa(&_DebugInterruptVector_text_end), 0);
282
283 mem_reserve(__pa(&_KernelExceptionVector_literal_start),
284 __pa(&_KernelExceptionVector_text_end), 0);
285
286 mem_reserve(__pa(&_UserExceptionVector_literal_start),
287 __pa(&_UserExceptionVector_text_end), 0);
288
289 mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
290 __pa(&_DoubleExceptionVector_text_end), 0);
291
292 bootmem_init();
293
294 platform_setup(cmdline_p);
295
296
297 paging_init();
298
299#ifdef CONFIG_VT
300# if defined(CONFIG_VGA_CONSOLE)
301 conswitchp = &vga_con;
302# elif defined(CONFIG_DUMMY_CONSOLE)
303 conswitchp = &dummy_con;
304# endif
305#endif
306
307#if CONFIG_PCI
308 platform_pcibios_init();
309#endif
310}
311
/* Reboot via the platform hook.  'cmd' is part of the generic
 * machine_restart() interface but is not used by this port. */
void machine_restart(char * cmd)
{
	platform_restart();
}
316
/* Halt: let the platform stop the machine, then spin forever in case
 * the hook returns. */
void machine_halt(void)
{
	platform_halt();
	for (;;)
		;
}
322
/* Power off via the platform hook; never returns. */
void machine_power_off(void)
{
	platform_power_off();
	for (;;)
		;
}
328#ifdef CONFIG_PROC_FS
329
330/*
331 * Display some core information through /proc/cpuinfo.
332 */
333
/*
 * /proc/cpuinfo show routine.  Prints the single CPU's configuration.
 * On Xtensa the configuration is fixed at processor-build time through
 * the XCHAL_* constants; only the clock rate and bogomips lines carry
 * runtime values.
 */
static int
c_show(struct seq_file *f, void *slot)
{
	/* high-level stuff */
	seq_printf(f,"processor\t: 0\n"
		     "vendor_id\t: Tensilica\n"
		     "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
		     "core ID\t\t: " XCHAL_CORE_ID "\n"
		     "build ID\t: 0x%x\n"
		     "byte order\t: %s\n"
		     "cpu MHz\t\t: %lu.%02lu\n"
		     "bogomips\t: %lu.%02lu\n",
		     XCHAL_BUILD_UNIQUE_ID,
		     XCHAL_HAVE_BE ? "big" : "little",
		     CCOUNT_PER_JIFFY/(1000000/HZ),
		     (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
		     loops_per_jiffy/(500000/HZ),
		     (loops_per_jiffy/(5000/HZ)) % 100);

	/* Optional ISA features; each XCHAL_HAVE_* pastes one word into
	 * the flags line at compile time. */
	seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI
		     "nmi "
#endif
#if XCHAL_HAVE_DEBUG
		     "debug "
# if XCHAL_HAVE_OCD
		     "ocd "
# endif
#endif
#if XCHAL_HAVE_DENSITY
		     "density "
#endif
#if XCHAL_HAVE_BOOLEANS
		     "boolean "
#endif
#if XCHAL_HAVE_LOOPS
		     "loop "
#endif
#if XCHAL_HAVE_NSA
		     "nsa "
#endif
#if XCHAL_HAVE_MINMAX
		     "minmax "
#endif
#if XCHAL_HAVE_SEXT
		     "sext "
#endif
#if XCHAL_HAVE_CLAMPS
		     "clamps "
#endif
#if XCHAL_HAVE_MAC16
		     "mac16 "
#endif
#if XCHAL_HAVE_MUL16
		     "mul16 "
#endif
#if XCHAL_HAVE_MUL32
		     "mul32 "
#endif
#if XCHAL_HAVE_MUL32_HIGH
		     "mul32h "
#endif
#if XCHAL_HAVE_FP
		     "fpu "
#endif
		     "\n");

	/* Registers. */
	seq_printf(f,"physical aregs\t: %d\n"
		     "misc regs\t: %d\n"
		     "ibreak\t\t: %d\n"
		     "dbreak\t\t: %d\n",
		     XCHAL_NUM_AREGS,
		     XCHAL_NUM_MISC_REGS,
		     XCHAL_NUM_IBREAK,
		     XCHAL_NUM_DBREAK);


	/* Interrupt. */
	seq_printf(f,"num ints\t: %d\n"
		     "ext ints\t: %d\n"
		     "int levels\t: %d\n"
		     "timers\t\t: %d\n"
		     "debug level\t: %d\n",
		     XCHAL_NUM_INTERRUPTS,
		     XCHAL_NUM_EXTINTERRUPTS,
		     XCHAL_NUM_INTLEVELS,
		     XCHAL_NUM_TIMERS,
		     XCHAL_DEBUGLEVEL);

	/* Coprocessors */
#if XCHAL_HAVE_CP
	seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
#else
	seq_printf(f, "coprocessors\t: none\n");
#endif

	/* {I,D}{RAM,ROM} and XLMI */
	seq_printf(f,"inst ROMs\t: %d\n"
		     "inst RAMs\t: %d\n"
		     "data ROMs\t: %d\n"
		     "data RAMs\t: %d\n"
		     "XLMI ports\t: %d\n",
		     XCHAL_NUM_IROM,
		     XCHAL_NUM_IRAM,
		     XCHAL_NUM_DROM,
		     XCHAL_NUM_DRAM,
		     XCHAL_NUM_XLMI);

	/* Cache */
	seq_printf(f,"icache line size: %d\n"
		     "icache ways\t: %d\n"
		     "icache size\t: %d\n"
		     "icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
		     "lock"
#endif
		     "\n"
		     "dcache line size: %d\n"
		     "dcache ways\t: %d\n"
		     "dcache size\t: %d\n"
		     "dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
		     "writeback"
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
		     "lock"
#endif
		     "\n",
		     XCHAL_ICACHE_LINESIZE,
		     XCHAL_ICACHE_WAYS,
		     XCHAL_ICACHE_SIZE,
		     XCHAL_DCACHE_LINESIZE,
		     XCHAL_DCACHE_WAYS,
		     XCHAL_DCACHE_SIZE);

	/* MMU */
	seq_printf(f,"ASID bits\t: %d\n"
		     "ASID invalid\t: %d\n"
		     "ASID kernel\t: %d\n"
		     "rings\t\t: %d\n"
		     "itlb ways\t: %d\n"
		     "itlb AR ways\t: %d\n"
		     "dtlb ways\t: %d\n"
		     "dtlb AR ways\t: %d\n",
		     XCHAL_MMU_ASID_BITS,
		     XCHAL_MMU_ASID_INVALID,
		     XCHAL_MMU_ASID_KERNEL,
		     XCHAL_MMU_RINGS,
		     XCHAL_ITLB_WAYS,
		     XCHAL_ITLB_ARF_WAYS,
		     XCHAL_DTLB_WAYS,
		     XCHAL_DTLB_ARF_WAYS);

	return 0;
}
490
491/*
492 * We show only CPU #0 info.
493 */
494static void *
495c_start(struct seq_file *f, loff_t *pos)
496{
497 return (void *) ((*pos == 0) ? (void *)1 : NULL);
498}
499
/* Only a single CPU entry is reported, so there is never a next one. */
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}
505
/* Nothing to release after iterating /proc/cpuinfo. */
static void
c_stop(struct seq_file *f, void *v)
{
}
510
511struct seq_operations cpuinfo_op =
512{
513 start: c_start,
514 next: c_next,
515 stop: c_stop,
516 show: c_show
517};
518
519#endif /* CONFIG_PROC_FS */
520
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 000000000000..df6e1e17b096
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,713 @@
1// TODO coprocessor stuff
2/*
3 * linux/arch/xtensa/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * Joe Taylor <joe@tensilica.com>
9 * Chris Zankel <chris@zankel.net>
10 *
11 *
12 *
13 */
14
15#include <xtensa/config/core.h>
16#include <xtensa/hal.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/personality.h>
29#include <asm/ucontext.h>
30#include <asm/uaccess.h>
31#include <asm/pgtable.h>
32#include <asm/cacheflush.h>
33
34#define DEBUG_SIG 0
35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
39 struct rusage * ru);
40asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
41
42extern struct task_struct *coproc_owners[];
43
44
45/*
46 * Atomically swap in the new signal mask, and wait for a signal.
47 */
48
int sys_sigsuspend(struct pt_regs *regs)
{
	/* Old-style mask is a single word, passed in register a3. */
	old_sigset_t mask = (old_sigset_t) regs->areg[3];
	sigset_t saveset;

	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP can never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Preset the return value: the syscall always fails with -EINTR. */
	regs->areg[2] = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/* The saved mask is handed to do_signal() so the frame
		 * it builds restores it on sigreturn. */
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}
69
/*
 * RT variant of sigsuspend: full sigset_t taken from user memory
 * (pointer in a4, size in a3).  Sleeps with the temporary mask
 * installed until do_signal() delivers a signal; always returns -EINTR.
 */
asmlinkage int
sys_rt_sigsuspend(struct pt_regs *regs)
{
	sigset_t *unewset = (sigset_t *) regs->areg[4];
	size_t sigsetsize = (size_t) regs->areg[3];
	sigset_t saveset, newset;
	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&newset, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Preset the return value: the syscall always fails with -EINTR. */
	regs->areg[2] = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}
97
/*
 * Old-style sigaction: convert between the user's struct old_sigaction
 * (single-word mask) and the kernel's struct k_sigaction, delegating
 * the actual work to do_sigaction().
 *
 * NOTE(review): the __get_user/__put_user calls for sa_flags and
 * sa_mask ignore their return values; the preceding verify_area()
 * covers the whole structure, but a fault there would go unnoticed —
 * confirm this matches other ports of the era.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
	      struct old_sigaction *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
129
130asmlinkage int
131sys_sigaltstack(struct pt_regs *regs)
132{
133 const stack_t *uss = (stack_t *) regs->areg[4];
134 stack_t *uoss = (stack_t *) regs->areg[3];
135
136 if (regs->depc > 64)
137 panic ("Double exception sys_sigreturn\n");
138
139
140 return do_sigaltstack(uss, uoss, regs->areg[1]);
141}
142
143
144/*
145 * Do a signal return; undo the signal stack.
146 */
147
/*
 * Signal frame pushed on the user stack for a non-RT handler.
 * 'retcode' receives the generated MOVI+SYSCALL trampoline that
 * invokes sys_sigreturn (see gen_return_code()).
 */
struct sigframe
{
	struct sigcontext sc;
	struct _cpstate cpstate;	/* coprocessor/extra state save area */
	unsigned long extramask[_NSIG_WORDS-1];
	unsigned char retcode[6];	/* sigreturn trampoline code */
	unsigned int reserved[4]; /* Reserved area for chaining */
	unsigned int window[4]; /* Window of 4 registers for initial context */
};
157
/*
 * Signal frame for an RT (SA_SIGINFO) handler: carries the full
 * siginfo and ucontext.  'retcode' receives the generated trampoline
 * that invokes sys_rt_sigreturn (see gen_return_code()).
 */
struct rt_sigframe
{
	struct siginfo info;
	struct ucontext uc;
	struct _cpstate cpstate;	/* coprocessor/extra state save area */
	unsigned char retcode[6];	/* rt_sigreturn trampoline code */
	unsigned int reserved[4]; /* Reserved area for chaining */
	unsigned int window[4]; /* Window of 4 registers for initial context */
};
167
168extern void release_all_cp (struct task_struct *);
169
170
// FIXME restore_cpextra
/*
 * Restore coprocessor/extra state from the signal frame.  The real
 * implementation is disabled (#if 0) pending coprocessor support;
 * currently this is a no-op that always reports success.
 */
static inline int
restore_cpextra (struct _cpstate *buf)
{
#if 0
	/* The signal handler may have used coprocessors in which
	 * case they are still enabled.  We disable them to force a
	 * reloading of the original task's CP state by the lazy
	 * context-switching mechanisms of CP exception handling.
	 * Also, we essentially discard any coprocessor state that the
	 * signal handler created. */

	struct task_struct *tsk = current;
	release_all_cp(tsk);
	return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE);
#endif
	return 0;
}
189
/* Note: we do not copy the double-exception 'tregs'; the double
 * exception must finish before we return to the signal handler.  That
 * handler might itself cause another double exception, but the
 * situation is then the same as if we had returned to the signal
 * handler and taken an interrupt immediately...
 */
192
193
/*
 * Restore the register state saved in a signal frame's sigcontext.
 *
 * Copies back PC, loop registers, SAR, the window state and all
 * physical address registers, restores only the CALLINC field of PS,
 * sanity-checks the window and loop registers against corruption, and
 * restores any saved coprocessor state.  Returns non-zero on any
 * fault or failed consistency check.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
	struct thread_struct *thread;
	unsigned int err = 0;
	unsigned long ps;
	struct _cpstate *buf;

#define COPY(x)	err |= __get_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(depc);
	COPY(wmask);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
	COPY(windowbase);
	COPY(windowstart);
#undef COPY

	/* For PS, restore only PS.CALLINC.
	 * Assume that all other bits are either the same as for the signal
	 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
	 */
	err |= __get_user(ps, &sc->sc_ps);
	regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
		| (ps & XCHAL_PS_CALLINC_MASK);

	/* Additional corruption checks */

	if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
	    || ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
		err = 1;
	if ((regs->lcount > 0)
	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
		err = 1;

	/* Restore extended register state.
	 * See struct thread_struct in processor.h.
	 */
	thread = &current->thread;

	err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
	err |= __get_user(buf, &sc->sc_cpstate);
	if (buf) {
		if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
			goto badframe;
		err |= restore_cpextra(buf);
	}

	regs->syscall = -1;	/* disable syscall checks */
	return err;

badframe:
	return 1;
}
250
/*
 * Flush any live, task-owned coprocessor state into the task_struct.
 * The implementation is disabled (#if 0, FIXME) pending coprocessor
 * support; currently only the irq save/restore pair executes.
 */
static inline void
flush_my_cpstate(struct task_struct *tsk)
{
	unsigned long flags;
	local_irq_save(flags);

#if 0 // FIXME
	for (i = 0; i < XCHAL_CP_NUM; i++) {
		if (tsk == coproc_owners[i]) {
			xthal_validate_cp(i);
			xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);

			/* Invalidate and "disown" the cp to allow
			 * callers the chance to reset cp state in the
			 * task_struct. */

			xthal_invalidate_cp(i);
			coproc_owners[i] = 0;
		}
	}
#endif
	local_irq_restore(flags);
}
274
/* Save coprocessor/extra state into the signal frame.
 *
 * Return codes:
 *	0: nothing saved
 *	1: stuff to save, successful
 *	-1: stuff to save, error happened
 */
static int
save_cpextra (struct _cpstate *buf)
{
#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
	/* Core configured without coprocessors or extra state. */
	return 0;
#else

	/* FIXME: If a task has never used a coprocessor, there is
	 * no need to save and restore anything.  Tracking this
	 * information would allow us to optimize this section.
	 * Perhaps we can use current->used_math or (current->flags &
	 * PF_USEDFPU) or define a new field in the thread
	 * structure. */

	/* We flush any live, task-owned cp state to the task_struct,
	 * then copy it all to the sigframe.  Then we clear all
	 * cp/extra state in the task_struct, effectively
	 * clearing/resetting all cp/extra state for the signal
	 * handler (cp-exception handling will load these new values
	 * into the cp/extra registers.)  This step is important for
	 * things like a floating-point cp, where the OS must reset
	 * the FCR to the default rounding mode. */

	int err = 0;
	struct task_struct *tsk = current;

	flush_my_cpstate(tsk);
	/* Note that we just copy everything: 'extra' and 'cp' state together.*/
	err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
	memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);

#if (XTENSA_CP_EXTRA_SIZE == 0)
#error Sanity check on memset above, cpextra_size should not be zero.
#endif

	return err ? -1 : 1;
#endif
}
318
/*
 * Fill in a sigcontext on the user stack from the current register
 * state: PC, PS, loop registers, SAR, window state, all physical
 * address registers, and the coprocessor/extra state (saved into
 * 'cpstate', with a pointer stored in the sigcontext only if saving
 * succeeded).  'mask' is the old signal mask to record.  Returns
 * non-zero on any user-copy fault.
 */
static int
setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
		 struct pt_regs *regs, unsigned long mask)
{
	struct thread_struct *thread;
	int err = 0;

#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(ps);
	COPY(depc);
	COPY(wmask);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
	COPY(windowbase);
	COPY(windowstart);
#undef COPY

	/* Save extended register state.
	 * See struct thread_struct in processor.h.
	 */
	thread = &current->thread;
	err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
	err |= save_cpextra(cpstate);
	/* Record the cpstate pointer only when the save succeeded. */
	err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);

	return err;
}
352
/*
 * Return from a non-RT signal handler: the frame built by
 * setup_frame() sits at the user stack pointer (a1).  Restores the
 * blocked mask and register state from it; a corrupt frame kills the
 * task with SIGSEGV.  Returns the original syscall return value (a2).
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe *frame = (struct sigframe *)regs->areg[1];
	sigset_t set;
	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");

	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc))
		goto badframe;
	return regs->areg[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
384
/*
 * Return from an RT signal handler: restore the blocked mask, the
 * register state and the alternate-stack settings from the
 * rt_sigframe at the user stack pointer (a1).  A corrupt frame kills
 * the task with SIGSEGV.
 */
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
	sigset_t set;
	stack_t st;
	int ret;
	if (regs->depc > 64)
	{
		printk("!!!!!!! DEPC !!!!!!!\n");
		return 0;
	}

	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;
	/* Capture the return value before the frame is reused. */
	ret = regs->areg[2];

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	do_sigaltstack(&st, NULL, regs->areg[1]);

	return ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
425
426/*
427 * Set up a signal frame.
428 */
429
430/*
431 * Determine which stack to use..
432 */
433static inline void *
434get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
435{
436 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
437 sp = current->sas_ss_sp + current->sas_ss_size;
438
439 return (void *)((sp - frame_size) & -16ul);
440}
441
442#define USE_SIGRETURN 0
443#define USE_RT_SIGRETURN 1
444
/*
 * Write a 6-byte sigreturn trampoline into 'codemem' on the user
 * stack: MOVI a2, <syscall number> followed by SYSCALL.  The syscall
 * number is __NR_rt_sigreturn or __NR_sigreturn depending on
 * 'use_rt_sigreturn'.  The generated bytes differ between big- and
 * little-endian cores.  The instruction cache is kept coherent by
 * flushing the range from the data cache.  Returns non-zero on a
 * user-copy fault.
 */
static int
gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
{
	unsigned int retcall;
	int err = 0;

#if 0
	/* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
	 * and the xtensa glibc doesn't use it.
	 */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else
#endif /* 0 */
	{

#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)

/* The 12-bit immediate is really split up within the 24-bit MOVI
 * instruction.  As long as the above system call numbers fit within
 * 8-bits, the following code works fine.  See the Xtensa ISA for
 * details.
 */

#error Generating the MOVI instruction below breaks!
#endif

		retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;

#ifdef __XTENSA_EB__   /* Big Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0x0a, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x05, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);

#elif defined __XTENSA_EL__   /* Little Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0xa0, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x50, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);
#else
#error Must use compiler for Xtensa processors.
#endif
	}

	/* Flush generated code out of the data cache */

	if (err == 0)
		__flush_invalidate_cache_range((unsigned long)codemem, 6UL);

	return err;
}
505
506static void
507set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
508 void *handler, unsigned long arg1, void *arg2, void *arg3)
509{
510 /* Set up registers for signal handler */
511 start_thread(regs, (unsigned long) handler, (unsigned long) stack);
512
513 /* Set up a stack frame for a call4
514 * Note: PS.CALLINC is set to one by start_thread
515 */
516 regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
517 regs->areg[6] = arg1;
518 regs->areg[7] = (unsigned long) arg2;
519 regs->areg[8] = (unsigned long) arg3;
520}
521
/*
 * Build a non-RT signal frame on the user stack and redirect the task
 * to the handler.  Any fault while writing the frame converts the
 * signal into SIGSEGV.
 */
static void setup_frame(int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
	if (regs->depc > 64)
	{
		/* NOTE(review): bailing out here silently drops the
		 * signal — confirm this is intended. */
		printk("!!!!!!! DEPC !!!!!!!\n");
		return;
	}


	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	/* Create sys_sigreturn syscall in stack frame */
	err |= gen_return_code(frame->retcode, USE_SIGRETURN);

	if (err)
		goto give_sigsegv;

	/* Create signal handler execution context.
	 * Return context not modified until this point.
	 */
	set_thread_state(regs, frame, frame->retcode,
		ka->sa.sa_handler, signal, &frame->sc, NULL);

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
	 */
	set_fs(USER_DS);


#if DEBUG_SIG
	printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
		current->comm, current->pid, signal, frame, regs->pc);
#endif

	return;

give_sigsegv:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	force_sig(SIGSEGV, current);
}
583
/*
 * Build an RT (SA_SIGINFO) signal frame — siginfo plus ucontext — on
 * the user stack and redirect the task to the handler.  Any fault
 * while writing the frame converts the signal into SIGSEGV.
 */
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->areg[1]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Create sys_rt_sigreturn syscall in stack frame */
	err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);

	if (err)
		goto give_sigsegv;

	/* Create signal handler execution context.
	 * Return context not modified until this point.
	 */
	set_thread_state(regs, frame, frame->retcode,
		ka->sa.sa_handler, signal, &frame->info, &frame->uc);

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
	 */
	set_fs(USER_DS);

#if DEBUG_SIG
	printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
		current->comm, current->pid, signal, frame, regs->pc);
#endif

	return;

give_sigsegv:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	force_sig(SIGSEGV, current);
}
647
648
649
650/*
651 * Note that 'init' is a special process: it doesn't get signals it doesn't
652 * want to handle. Thus you cannot kill init even with a SIGKILL even by
653 * mistake.
654 *
655 * Note that we go through the signals twice: once to check the signals that
656 * the kernel can handle, and then we build all the user-level signal handling
657 * stack-frames in one go after that.
658 */
/*
 * Deliver one pending signal, if any: decide syscall restart behavior,
 * build the (RT or non-RT) frame and update the blocked mask.
 * Returns 1 when a signal was delivered, 0 otherwise.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* Are we from a system call? */
	if (regs->syscall >= 0) {
		/* If so, check system call restarting..
		 * NOTE(review): areg[2] is compared against the positive
		 * ERESTART* constants although it is assigned negative
		 * values elsewhere (e.g. -EINTR) — verify this matches
		 * the xtensa syscall return convention.
		 */
		switch (regs->areg[2]) {
			case ERESTARTNOHAND:
			case ERESTART_RESTARTBLOCK:
				regs->areg[2] = -EINTR;
				break;

			case ERESTARTSYS:
				if (!(ka.sa.sa_flags & SA_RESTART)) {
					regs->areg[2] = -EINTR;
					break;
				}
			/* fallthrough */
			case ERESTARTNOINTR:
				/* Re-arm the syscall: restore its number
				 * and back the PC up over the 3-byte
				 * syscall instruction. */
				regs->areg[2] = regs->syscall;
				regs->pc -= 3;
		}
	}

	if (signr == 0)
		return 0;		/* no signals delivered */

	/* Whee!  Actually deliver the signal.  */

	/* Set up the stack frame */
	if (ka.sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(signr, &ka, &info, oldset, regs);
	else
		setup_frame(signr, &ka, oldset, regs);

	if (ka.sa.sa_flags & SA_ONESHOT)
		ka.sa.sa_handler = SIG_DFL;

	if (!(ka.sa.sa_flags & SA_NODEFER)) {
		/* Block the delivered signal (and the handler's mask)
		 * for the duration of the handler. */
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
		sigaddset(&current->blocked, signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	return 1;
}
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
new file mode 100644
index 000000000000..abc8ed6c7026
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.c
@@ -0,0 +1,418 @@
1/*
 * arch/xtensa/kernel/syscalls.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 * Copyright (C) 2000 Silicon Graphics, Inc.
10 * Copyright (C) 1995 - 2000 by Ralf Baechle
11 *
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Chris Zankel <chris@zankel.net>
15 * Kevin Chea
16 *
17 */
18
19#define DEBUG 0
20
21#include <linux/config.h>
22#include <linux/linkage.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/mman.h>
27#include <linux/sched.h>
28#include <linux/file.h>
29#include <linux/slab.h>
30#include <linux/utsname.h>
31#include <linux/unistd.h>
32#include <linux/stringify.h>
33#include <linux/syscalls.h>
34#include <linux/sem.h>
35#include <linux/msg.h>
36#include <linux/shm.h>
37#include <linux/errno.h>
38#include <asm/ptrace.h>
39#include <asm/signal.h>
40#include <asm/uaccess.h>
41#include <asm/hardirq.h>
42#include <asm/mman.h>
43#include <asm/shmparam.h>
44#include <asm/page.h>
45#include <asm/ipc.h>
46
47extern void do_syscall_trace(void);
48typedef int (*syscall_t)(void *a0,...);
49extern int (*do_syscalls)(struct pt_regs *regs, syscall_t fun,
50 int narg);
51extern syscall_t sys_call_table[];
52extern unsigned char sys_narg_table[];
53
54/*
55 * sys_pipe() is the normal C calling standard for creating a pipe. It's not
56 * the way unix traditional does this, though.
57 */
58
59int sys_pipe(int __user *userfds)
60{
61 int fd[2];
62 int error;
63
64 error = do_pipe(fd);
65 if (!error) {
66 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
67 error = -EFAULT;
68 }
69 return error;
70}
71
72/*
73 * Common code for old and new mmaps.
74 */
75
76static inline long do_mmap2(unsigned long addr, unsigned long len,
77 unsigned long prot, unsigned long flags,
78 unsigned long fd, unsigned long pgoff)
79{
80 int error = -EBADF;
81 struct file * file = NULL;
82
83 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
84 if (!(flags & MAP_ANONYMOUS)) {
85 file = fget(fd);
86 if (!file)
87 goto out;
88 }
89
90 down_write(&current->mm->mmap_sem);
91 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
92 up_write(&current->mm->mmap_sem);
93
94 if (file)
95 fput(file);
96out:
97 return error;
98}
99
100unsigned long old_mmap(unsigned long addr, size_t len, int prot,
101 int flags, int fd, off_t offset)
102{
103 return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
104}
105
long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	       unsigned long flags, unsigned long fd, unsigned long pgoff)
{
	/* mmap2(2) already supplies the offset in pages — pass it through. */
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
111
112int sys_fork(struct pt_regs *regs)
113{
114 return do_fork(SIGCHLD, regs->areg[1], regs, 0, NULL, NULL);
115}
116
117int sys_vfork(struct pt_regs *regs)
118{
119 return do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, regs->areg[1],
120 regs, 0, NULL, NULL);
121}
122
123int sys_clone(struct pt_regs *regs)
124{
125 unsigned long clone_flags;
126 unsigned long newsp;
127 int __user *parent_tidptr, *child_tidptr;
128 clone_flags = regs->areg[4];
129 newsp = regs->areg[3];
130 parent_tidptr = (int __user *)regs->areg[5];
131 child_tidptr = (int __user *)regs->areg[6];
132 if (!newsp)
133 newsp = regs->areg[1];
134 return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
135}
136
137/*
138 * sys_execve() executes a new program.
139 */
140
141int sys_execve(struct pt_regs *regs)
142{
143 int error;
144 char * filename;
145
146 filename = getname((char *) (long)regs->areg[5]);
147 error = PTR_ERR(filename);
148 if (IS_ERR(filename))
149 goto out;
150 error = do_execve(filename, (char **) (long)regs->areg[3],
151 (char **) (long)regs->areg[4], regs);
152 putname(filename);
153
154out:
155 return error;
156}
157
158int sys_uname(struct old_utsname * name)
159{
160 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
161 return 0;
162 return -EFAULT;
163}
164
165int sys_olduname(struct oldold_utsname * name)
166{
167 int error;
168
169 if (!name)
170 return -EFAULT;
171 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
172 return -EFAULT;
173
174 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
175 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
176 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
177 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
178 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
179 error -= __put_user(0,name->release+__OLD_UTS_LEN);
180 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
181 error -= __put_user(0,name->version+__OLD_UTS_LEN);
182 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
183 error -= __put_user(0,name->machine+__OLD_UTS_LEN);
184
185 return error ? -EFAULT : 0;
186}
187
188
/*
 * Build the string table for the builtin "poor man's strace".
 */

#if DEBUG
#define SYSCALL(fun, narg) #fun,
static char *sfnames[] = {
#include "syscalls.h"
};
/* Was '#undef SYS': SYS is never defined, and leaving SYSCALL defined
 * would break any later expansion of syscalls.h in this unit. */
#undef SYSCALL
#endif
200
/*
 * C-level system-call dispatcher, invoked from the syscall exception
 * vector.  Reads the syscall number from a2, marshals up to six
 * register arguments, calls through sys_call_table[], and stores the
 * (negative-errno) result back into a2.
 */
void system_call (struct pt_regs *regs)
{
	syscall_t syscall;
	unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
	int nargs, res;
	unsigned int syscallnr;
	int ps;

#if DEBUG
	int i;
	unsigned long parms[6];
	char *sysname;
#endif

	/* Record the requested syscall number for the tracer's benefit. */
	regs->syscall = regs->areg[2];

	do_syscall_trace();

	/* Have to load after syscall_trace because strace
	 * sometimes changes regs->syscall.
	 */
	syscallnr = regs->syscall;

	parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;

	/* Restore interrupt level to syscall invoker's.
	 * If this were in assembly, we wouldn't disable
	 * interrupts in the first place:
	 */
	local_save_flags (ps);
	local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
			  (regs->ps & XCHAL_PS_INTLEVEL_MASK) );

	/* NOTE(review): bounds check uses '>'.  If sys_call_table has
	 * exactly __NR_Linux_syscalls entries (valid indices 0..N-1)
	 * this is off by one — verify against the table definition. */
	if (syscallnr > __NR_Linux_syscalls) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	syscall = sys_call_table[syscallnr];
	nargs = sys_narg_table[syscallnr];

	/* Holes in the table (e.g. entry 0) are NULL pointers. */
	if (syscall == NULL) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	/* There shouldn't be more than six arguments in the table! */

	if (nargs > 6)
		panic("Internal error - too many syscall arguments (%d)!\n",
		      nargs);

	/* Linux takes system-call arguments in registers.  The ABI
	 * and Xtensa software conventions require the system-call
	 * number in a2.  If an argument exists in a2, we move it to
	 * the next available register.  Note that for improved
	 * efficiency, we do NOT shift all parameters down one
	 * register to maintain the original order.
	 *
	 * At best case (zero arguments), we just write the syscall
	 * number to a2.  At worst case (1 to 6 arguments), we move
	 * the argument in a2 to the next available register, then
	 * write the syscall number to a2.
	 *
	 * For clarity, the following truth table enumerates all
	 * possibilities.
	 *
	 * arguments	syscall number	arg0, arg1, arg2, arg3, arg4, arg5
	 * ---------	--------------	----------------------------------
	 *	0	      a2
	 *	1	      a2	a3
	 *	2	      a2	a4,   a3
	 *	3	      a2	a5,   a3,   a4
	 *	4	      a2	a6,   a3,   a4,   a5
	 *	5	      a2	a7,   a3,   a4,   a5,   a6
	 *	6	      a2	a8,   a3,   a4,   a5,   a6,   a7
	 */
	if (nargs) {
		/* First argument was displaced out of a2 by the syscall
		 * number; the entry code parked it in areg[nargs+2]. */
		parm0 = regs->areg[nargs+2];
		parm1 = regs->areg[3];
		parm2 = regs->areg[4];
		parm3 = regs->areg[5];
		parm4 = regs->areg[6];
		parm5 = regs->areg[7];
	} else /* nargs == 0 */
		/* Zero-arg entries (fork, clone, execve, ...) receive the
		 * pt_regs pointer itself as their only argument. */
		parm0 = (unsigned long) regs;

#if DEBUG
	parms[0] = parm0;
	parms[1] = parm1;
	parms[2] = parm2;
	parms[3] = parm3;
	parms[4] = parm4;
	parms[5] = parm5;

	sysname = sfnames[syscallnr];
	if (strncmp(sysname, "sys_", 4) == 0)
		sysname = sysname + 4;

	printk("\017SYSCALL:I:%x:%d:%s %s(", regs->pc, current->pid,
	       current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	printk(")\n");
#endif

	/* Calling through syscall_t with a mismatched prototype is the
	 * established pattern here; extra args are simply ignored. */
	res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);

#if DEBUG
	printk("\017SYSCALL:O:%d:%s %s(",current->pid, current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	if (res < 4096)
		printk(") = %d\n", res);
	else
		printk(") = %#x\n", res);
#endif /* DEBUG */

	/* Result (negative errno on failure) goes back in a2. */
	regs->areg[2] = res;
	do_syscall_trace();
}
322
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */

int sys_ipc (uint call, int first, int second,
	     int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;
	ret = -ENOSYS;	/* unknown sub-calls fall through to this */

	switch (call) {
	case SEMOP:
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, NULL);
		break;

	case SEMTIMEDOP:
		/* NOTE(review): the timespec cast lacks __user — the
		 * pointer does come from userspace; sparse would flag it. */
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, (const struct timespec *) fifth);
		break;

	case SEMGET:
		ret = sys_semget (first, second, third);
		break;

	case SEMCTL: {
		union semun fourth;

		/* A NULL ptr or faulting get_user() leaves ret = -ENOSYS. */
		if (ptr && !get_user(fourth.__pad, (void *__user *) ptr))
			ret = sys_semctl (first, second, third, fourth);
		break;
	}

	case MSGSND:
		ret = sys_msgsnd (first, (struct msgbuf __user*) ptr,
				  second, third);
		break;

	case MSGRCV:
		switch (version) {
		case 0: {
			/* Old-style msgrcv packs msgp/msgtyp in a kludge
			 * struct pointed to by ptr. */
			struct ipc_kludge tmp;

			if (ptr && !copy_from_user(&tmp,
						   (struct ipc_kludge *) ptr,
						   sizeof (tmp)))
				ret = sys_msgrcv (first, tmp.msgp, second,
						  tmp.msgtyp, third);
			break;
		}

		default:
			ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
					  second, 0, third);
			break;
		}
		break;

	case MSGGET:
		ret = sys_msgget ((key_t) first, second);
		break;

	case MSGCTL:
		ret = sys_msgctl (first, second, (struct msqid_ds __user*) ptr);
		break;

	case SHMAT: {
		/* 'third' doubles as a user pointer to receive the attach
		 * address on success. */
		ulong raddr;
		ret = do_shmat (first, (char __user *) ptr, second, &raddr);

		if (!ret)
			ret = put_user (raddr, (ulong __user *) third);

		break;
	}

	case SHMDT:
		ret = sys_shmdt ((char __user *)ptr);
		break;

	case SHMGET:
		ret = sys_shmget (first, second, third);
		break;

	case SHMCTL:
		ret = sys_shmctl (first, second, (struct shmid_ds __user*) ptr);
		break;
	}
	return ret;
}
418
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
new file mode 100644
index 000000000000..5b3f75f50feb
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.h
@@ -0,0 +1,248 @@
1/*
2 * arch/xtensa/kernel/syscalls.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Changes by Joe Taylor <joe@tensilica.com>
12 */
13
14/*
15 * This file is being included twice - once to build a list of all
16 * syscalls and once to build a table of how many arguments each syscall
17 * accepts. Syscalls that receive a pointer to the saved registers are
18 * marked as having zero arguments.
19 *
20 * The binary compatibility calls are in a separate list.
21 *
22 * Entry '0' used to be system_call. It's removed to disable indirect
23 * system calls for now so user tasks can't recurse. See mips'
24 * sys_syscall for a comparable example.
25 */
26
/* ABI table: index == syscall number; the second argument is the
 * argument count (0 == handler takes the pt_regs pointer instead).
 * Order must never change. */
SYSCALL(0, 0)		                /* 00 */

SYSCALL(sys_exit, 1)
SYSCALL(sys_fork, 0)
SYSCALL(sys_read, 3)
SYSCALL(sys_write, 3)
SYSCALL(sys_open, 3)			/* 05 */
SYSCALL(sys_close, 1)
SYSCALL(sys_waitpid, 3)
SYSCALL(sys_creat, 2)
SYSCALL(sys_link, 2)
SYSCALL(sys_unlink, 1)			/* 10 */
SYSCALL(sys_execve, 0)
SYSCALL(sys_chdir, 1)
SYSCALL(sys_time, 1)
SYSCALL(sys_mknod, 3)
SYSCALL(sys_chmod, 2)			/* 15 */
SYSCALL(sys_lchown, 3)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_stat, 2)
SYSCALL(sys_lseek, 3)
SYSCALL(sys_getpid, 0)			/* 20 */
SYSCALL(sys_mount, 5)
SYSCALL(sys_oldumount, 1)
SYSCALL(sys_setuid, 1)
SYSCALL(sys_getuid, 0)
SYSCALL(sys_stime, 1)			/* 25 */
SYSCALL(sys_ptrace, 4)
SYSCALL(sys_alarm, 1)
SYSCALL(sys_fstat, 2)
SYSCALL(sys_pause, 0)
SYSCALL(sys_utime, 2)			/* 30 */
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_access, 2)
SYSCALL(sys_nice, 1)
SYSCALL(sys_ni_syscall, 0)		/* 35 */
SYSCALL(sys_sync, 0)
SYSCALL(sys_kill, 2)
SYSCALL(sys_rename, 2)
SYSCALL(sys_mkdir, 2)
SYSCALL(sys_rmdir, 1)			/* 40 */
SYSCALL(sys_dup, 1)
SYSCALL(sys_pipe, 1)
SYSCALL(sys_times, 1)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_brk, 1)			/* 45 */
SYSCALL(sys_setgid, 1)
SYSCALL(sys_getgid, 0)
SYSCALL(sys_ni_syscall, 0)	/* was signal(2) */
SYSCALL(sys_geteuid, 0)
SYSCALL(sys_getegid, 0)			/* 50 */
SYSCALL(sys_acct, 1)
SYSCALL(sys_umount, 2)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ioctl, 3)
SYSCALL(sys_fcntl, 3)			/* 55 */
SYSCALL(sys_ni_syscall, 2)
SYSCALL(sys_setpgid, 2)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_olduname, 1)
SYSCALL(sys_umask, 1)			/* 60 */
SYSCALL(sys_chroot, 1)
SYSCALL(sys_ustat, 2)
SYSCALL(sys_dup2, 2)
SYSCALL(sys_getppid, 0)
SYSCALL(sys_getpgrp, 0)			/* 65 */
SYSCALL(sys_setsid, 0)
SYSCALL(sys_sigaction, 3)
SYSCALL(sys_sgetmask, 0)
SYSCALL(sys_ssetmask, 1)
SYSCALL(sys_setreuid, 2)		/* 70 */
SYSCALL(sys_setregid, 2)
SYSCALL(sys_sigsuspend, 0)
SYSCALL(sys_sigpending, 1)
SYSCALL(sys_sethostname, 2)
SYSCALL(sys_setrlimit, 2)		/* 75 */
SYSCALL(sys_getrlimit, 2)
SYSCALL(sys_getrusage, 2)
SYSCALL(sys_gettimeofday, 2)
SYSCALL(sys_settimeofday, 2)
SYSCALL(sys_getgroups, 2)		/* 80 */
SYSCALL(sys_setgroups, 2)
SYSCALL(sys_ni_syscall, 0)	/* old_select */
SYSCALL(sys_symlink, 2)
SYSCALL(sys_lstat, 2)
SYSCALL(sys_readlink, 3)		/* 85 */
SYSCALL(sys_uselib, 1)
SYSCALL(sys_swapon, 2)
SYSCALL(sys_reboot, 3)
SYSCALL(old_readdir, 3)
SYSCALL(old_mmap, 6)			/* 90 */
SYSCALL(sys_munmap, 2)
SYSCALL(sys_truncate, 2)
SYSCALL(sys_ftruncate, 2)
SYSCALL(sys_fchmod, 2)
SYSCALL(sys_fchown, 3)			/* 95 */
SYSCALL(sys_getpriority, 2)
SYSCALL(sys_setpriority, 3)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_statfs, 2)
SYSCALL(sys_fstatfs, 2)			/* 100 */
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_socketcall, 2)
SYSCALL(sys_syslog, 3)
SYSCALL(sys_setitimer, 3)
SYSCALL(sys_getitimer, 2)		/* 105 */
SYSCALL(sys_newstat, 2)
SYSCALL(sys_newlstat, 2)
SYSCALL(sys_newfstat, 2)
SYSCALL(sys_uname, 1)
SYSCALL(sys_ni_syscall, 0)		/* 110 */
SYSCALL(sys_vhangup, 0)
SYSCALL(sys_ni_syscall, 0)	/* was sys_idle() */
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_wait4, 4)
SYSCALL(sys_swapoff, 1)			/* 115 */
SYSCALL(sys_sysinfo, 1)
SYSCALL(sys_ipc, 5)	/* 6 really, but glibc uses only 5 */
SYSCALL(sys_fsync, 1)
SYSCALL(sys_sigreturn, 0)
SYSCALL(sys_clone, 0)			/* 120 */
SYSCALL(sys_setdomainname, 2)
SYSCALL(sys_newuname, 1)
SYSCALL(sys_ni_syscall, 0)	/* sys_modify_ldt */
SYSCALL(sys_adjtimex, 1)
SYSCALL(sys_mprotect, 3)		/* 125 */
SYSCALL(sys_sigprocmask, 3)
SYSCALL(sys_ni_syscall, 2)	/* old sys_create_module */
SYSCALL(sys_init_module, 2)
SYSCALL(sys_delete_module, 1)
SYSCALL(sys_ni_syscall, 1)	/* old sys_get_kernel_sysm */ /* 130 */
SYSCALL(sys_quotactl, 0)
SYSCALL(sys_getpgid, 1)
SYSCALL(sys_fchdir, 1)
SYSCALL(sys_bdflush, 2)
SYSCALL(sys_sysfs, 3)			/* 135 */
SYSCALL(sys_personality, 1)
SYSCALL(sys_ni_syscall, 0)	/* for afs_syscall */
SYSCALL(sys_setfsuid, 1)
SYSCALL(sys_setfsgid, 1)
SYSCALL(sys_llseek, 5)			/* 140 */
SYSCALL(sys_getdents, 3)
SYSCALL(sys_select, 5)
SYSCALL(sys_flock, 2)
SYSCALL(sys_msync, 3)
SYSCALL(sys_readv, 3)			/* 145 */
SYSCALL(sys_writev, 3)
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_ni_syscall, 4)	/* handled in fast syscall handler. */
SYSCALL(sys_ni_syscall, 0)		/* 150 */
SYSCALL(sys_getsid, 1)
SYSCALL(sys_fdatasync, 1)
SYSCALL(sys_sysctl, 1)
SYSCALL(sys_mlock, 2)
SYSCALL(sys_munlock, 2)			/* 155 */
SYSCALL(sys_mlockall, 1)
SYSCALL(sys_munlockall, 0)
SYSCALL(sys_sched_setparam,2)
SYSCALL(sys_sched_getparam,2)
SYSCALL(sys_sched_setscheduler,3)	/* 160 */
SYSCALL(sys_sched_getscheduler,1)
SYSCALL(sys_sched_yield,0)
SYSCALL(sys_sched_get_priority_max,1)
SYSCALL(sys_sched_get_priority_min,1)
SYSCALL(sys_sched_rr_get_interval,2)	/* 165 */
SYSCALL(sys_nanosleep,2)
SYSCALL(sys_mremap,4)
SYSCALL(sys_accept, 3)
SYSCALL(sys_bind, 3)
SYSCALL(sys_connect, 3)			/* 170 */
SYSCALL(sys_getpeername, 3)
SYSCALL(sys_getsockname, 3)
SYSCALL(sys_getsockopt, 5)
SYSCALL(sys_listen, 2)
SYSCALL(sys_recv, 4)			/* 175 */
SYSCALL(sys_recvfrom, 6)
SYSCALL(sys_recvmsg, 3)
SYSCALL(sys_send, 4)
SYSCALL(sys_sendmsg, 3)
SYSCALL(sys_sendto, 6)			/* 180 */
SYSCALL(sys_setsockopt, 5)
SYSCALL(sys_shutdown, 2)
SYSCALL(sys_socket, 3)
SYSCALL(sys_socketpair, 4)
SYSCALL(sys_setresuid, 3)		/* 185 */
SYSCALL(sys_getresuid, 3)
SYSCALL(sys_ni_syscall, 5)	/* old sys_query_module */
SYSCALL(sys_poll, 3)
SYSCALL(sys_nfsservctl, 3)
SYSCALL(sys_setresgid, 3)		/* 190 */
SYSCALL(sys_getresgid, 3)
SYSCALL(sys_prctl, 5)
SYSCALL(sys_rt_sigreturn, 0)
SYSCALL(sys_rt_sigaction, 4)
SYSCALL(sys_rt_sigprocmask, 4)		/* 195 */
SYSCALL(sys_rt_sigpending, 2)
SYSCALL(sys_rt_sigtimedwait, 4)
SYSCALL(sys_rt_sigqueueinfo, 3)
SYSCALL(sys_rt_sigsuspend, 0)
SYSCALL(sys_pread64, 5)			/* 200 */
SYSCALL(sys_pwrite64, 5)
SYSCALL(sys_chown, 3)
SYSCALL(sys_getcwd, 2)
SYSCALL(sys_capget, 2)
SYSCALL(sys_capset, 2)			/* 205 */
SYSCALL(sys_sigaltstack, 0)
SYSCALL(sys_sendfile, 4)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_mmap2, 6)			/* 210 */
SYSCALL(sys_truncate64, 2)
SYSCALL(sys_ftruncate64, 2)
SYSCALL(sys_stat64, 2)
SYSCALL(sys_lstat64, 2)
SYSCALL(sys_fstat64, 2)			/* 215 */
SYSCALL(sys_pivot_root, 2)
SYSCALL(sys_mincore, 3)
SYSCALL(sys_madvise, 3)
SYSCALL(sys_getdents64, 3)
SYSCALL(sys_vfork, 0)			/* 220 */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 000000000000..e07287db5a40
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,227 @@
1/*
2 * arch/xtensa/kernel/time.c
3 *
4 * Timer and clock support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/time.h>
18#include <linux/timex.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/profile.h>
24#include <linux/delay.h>
25
26#include <asm/timex.h>
27#include <asm/platform.h>
28
29
extern volatile unsigned long wall_jiffies;

u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL(rtc_lock);


#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy;		/* per 1/HZ */
unsigned long ccount_nsec;		/* nsec per ccount increment */
#endif

/* CCOUNT value latched at the most recent timer tick; used by
 * do_gettimeofday()/do_settimeofday() for sub-jiffy interpolation. */
unsigned int last_ccount_stamp;
/* Wall-clock second when the RTC was last written back. */
static long last_rtc_update = 0;
46
47/*
48 * Scheduler clock - returns current tim in nanosec units.
49 */
50
51unsigned long long sched_clock(void)
52{
53 return (unsigned long long)jiffies * (1000000000 / HZ);
54}
55
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Tick interrupt; SA_INTERRUPT runs the handler with interrupts disabled. */
static struct irqaction timer_irqaction = {
	.handler = timer_interrupt,
	.flags = SA_INTERRUPT,
	.name = "timer",
};
62
63void __init time_init(void)
64{
65 time_t sec_o, sec_n = 0;
66
67 /* The platform must provide a function to calibrate the processor
68 * speed for the CALIBRATE.
69 */
70
71#if CONFIG_XTENSA_CALIBRATE_CCOUNT
72 printk("Calibrating CPU frequency ");
73 platform_calibrate_ccount();
74 printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
75 (int)(ccount_per_jiffy/(10000/HZ))%100);
76#endif
77
78 /* Set time from RTC (if provided) */
79
80 if (platform_get_rtc_time(&sec_o) == 0)
81 while (platform_get_rtc_time(&sec_n))
82 if (sec_o != sec_n)
83 break;
84
85 xtime.tv_nsec = 0;
86 last_rtc_update = xtime.tv_sec = sec_n;
87 last_ccount_stamp = get_ccount();
88
89 set_normalized_timespec(&wall_to_monotonic,
90 -xtime.tv_sec, -xtime.tv_nsec);
91
92 /* Initialize the linux timer interrupt. */
93
94 setup_irq(LINUX_TIMER_INT, &timer_irqaction);
95 set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
96}
97
98
/*
 * Set the wall clock, compensating for the time that has elapsed since
 * the last tick so gettimeofday() stays consistent.  Returns 0 on
 * success, -EINVAL for an out-of-range nanosecond value.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long ccount;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	ccount = get_ccount();
	/* Sub-jiffy cycles since the last tick... */
	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
	/* ...plus whole ticks not yet folded into xtime. */
	nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;

	/* Shift wall_to_monotonic oppositely so the monotonic clock
	 * is unaffected by this wall-clock step. */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Mark the clock unsynchronized for NTP. */
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	return 0;
}
132
133EXPORT_SYMBOL(do_settimeofday);
134
135
136void do_gettimeofday(struct timeval *tv)
137{
138 unsigned long flags;
139 unsigned long sec, usec, delta, lost, seq;
140
141 do {
142 seq = read_seqbegin_irqsave(&xtime_lock, flags);
143
144 delta = get_ccount() - last_ccount_stamp;
145 sec = xtime.tv_sec;
146 usec = (xtime.tv_nsec / NSEC_PER_USEC);
147
148 lost = jiffies - wall_jiffies;
149
150 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
151
152 usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
153 for (; usec >= 1000000; sec++, usec -= 1000000)
154 ;
155
156 tv->tv_sec = sec;
157 tv->tv_usec = usec;
158}
159
160EXPORT_SYMBOL(do_gettimeofday);
161
/*
 * The timer interrupt is called HZ times per second.
 */

irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{

	unsigned long next;

	next = get_linux_timer();

again:
	/* Account every whole jiffy that has elapsed; the loop catches up
	 * if more than one tick passed since the compare was armed. */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING, regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif

		write_seqlock(&xtime_lock);

		last_ccount_stamp = next;
		next += CCOUNT_PER_JIFFY;
		do_timer (regs); /* Linux handler in kernel/timer.c */

		/* Write the time back to the RTC roughly every 11 minutes,
		 * but only when NTP says we are synchronized and we are
		 * close to the top of the second. */
		if ((time_status & STA_UNSYNC) == 0 &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
		    jiffies - wall_jiffies == 1) {

			if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Do it again in 60 s */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}

	/* NOTE: writing CCOMPAREn clears the interrupt. */

	set_linux_timer (next);

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	return IRQ_HANDLED;
}
216
217#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
218void __devinit calibrate_delay(void)
219{
220 loops_per_jiffy = CCOUNT_PER_JIFFY;
221 printk("Calibrating delay loop (skipped)... "
222 "%lu.%02lu BogoMIPS preset\n",
223 loops_per_jiffy/(1000000/HZ),
224 (loops_per_jiffy/(10000/HZ)) % 100);
225}
226#endif
227
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 000000000000..804246e743b1
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,498 @@
1/*
2 * arch/xtensa/kernel/traps.c
3 *
4 * Exception handling.
5 *
6 * Derived from code with the following copyrights:
7 * Copyright (C) 1994 - 1999 by Ralf Baechle
8 * Modified for R3000 by Paul M. Antoine, 1995, 1996
9 * Complete output from die() by Ulf Carlsson, 1998
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 *
12 * Essentially rewritten for the Xtensa architecture port.
13 *
14 * Copyright (C) 2001 - 2005 Tensilica Inc.
15 *
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 * Chris Zankel <chris@zankel.net>
18 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
19 * Kevin Chea
20 *
21 * This file is subject to the terms and conditions of the GNU General Public
22 * License. See the file "COPYING" in the main directory of this archive
23 * for more details.
24 */
25
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/stringify.h>
31#include <linux/kallsyms.h>
32
33#include <asm/ptrace.h>
34#include <asm/timex.h>
35#include <asm/uaccess.h>
36#include <asm/pgtable.h>
37#include <asm/processor.h>
38
39#ifdef CONFIG_KGDB
40extern int gdb_enter;
41extern int return_from_debug_flag;
42#endif
43
44/*
45 * Machine specific interrupt handlers
46 */
47
48extern void kernel_exception(void);
49extern void user_exception(void);
50
51extern void fast_syscall_kernel(void);
52extern void fast_syscall_user(void);
53extern void fast_alloca(void);
54extern void fast_unaligned(void);
55extern void fast_second_level_miss(void);
56extern void fast_store_prohibited(void);
57extern void fast_coprocessor(void);
58
59extern void do_illegal_instruction (struct pt_regs*);
60extern void do_interrupt (struct pt_regs*);
61extern void do_unaligned_user (struct pt_regs*);
62extern void do_multihit (struct pt_regs*, unsigned long);
63extern void do_page_fault (struct pt_regs*, unsigned long);
64extern void do_debug (struct pt_regs*);
65extern void system_call (struct pt_regs*);
66
67/*
68 * The vector table must be preceded by a save area (which
69 * implies it must be in RAM, unless one places RAM immediately
70 * before a ROM and puts the vector at the start of the ROM (!))
71 */
72
/* Selectors for the fast (first-level) dispatch tables; an entry whose
 * 'fast' field is 0 instead registers a default (second-level) C handler. */

#define KRNL		0x01
#define USER		0x02

/* Shorthand for a "coprocessor x disabled" fast-handler entry. */
#define COPROCESSOR(x)							\
{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }

typedef struct {
	int cause;	/* EXCCAUSE value this entry applies to */
	int fast;	/* 0 = default C handler; else USER/KRNL fast tables */
	void* handler;
} dispatch_init_table_t;

/* Compressed handler table; trap_init() expands it into exc_table.
 * Entries are applied in order, so a later entry for the same cause and
 * table overwrites an earlier one. */
dispatch_init_table_t __init dispatch_init_table[] = {

{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
{ XCHAL_EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ XCHAL_EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#else
{ XCHAL_EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#endif
{ XCHAL_EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ XCHAL_EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
#endif
#if (XCHAL_CP_MASK & 2)
COPROCESSOR(1),
#endif
#if (XCHAL_CP_MASK & 4)
COPROCESSOR(2),
#endif
#if (XCHAL_CP_MASK & 8)
COPROCESSOR(3),
#endif
#if (XCHAL_CP_MASK & 16)
COPROCESSOR(4),
#endif
#if (XCHAL_CP_MASK & 32)
COPROCESSOR(5),
#endif
#if (XCHAL_CP_MASK & 64)
COPROCESSOR(6),
#endif
#if (XCHAL_CP_MASK & 128)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,	   do_debug },
{ -1, -1, 0 }

};
148
149/* The exception table <exc_table> serves two functions:
150 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
151 * 2. it is a temporary memory buffer for the exception handlers.
152 */
153
154unsigned long exc_table[EXC_TABLE_SIZE/4];
155
156void die(const char*, struct pt_regs*, long);
157
/* Like die(), but only when the trap came from kernel mode; returns
 * silently for user-mode faults so the caller can signal the task. */
static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
164
165/*
166 * Unhandled Exceptions. Kill user task or panic if in kernel space.
167 */
168
void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
	/* Fatal in kernel mode; die() does not return. */
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	printk("Caught unhandled exception in '%s' "
	       "(pid = %d, pc = %#010lx) - should not happen\n"
	       "\tEXCCAUSE is %ld\n",
	       current->comm, current->pid, regs->pc, exccause);
	force_sig(SIGILL, current);
}
181
182/*
183 * Multi-hit exception. This is fatal!
184 */
185
void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
	/* Registered for the ITLB/DTLB multihit causes (see
	 * dispatch_init_table).  Always fatal, even from user mode;
	 * die() does not return.  exccause is unused. */
	die("Caught multihit exception", regs, SIGKILL);
}
190
191/*
192 * Level-1 interrupt.
193 * We currently have no priority encoding.
194 */
195
196unsigned long ignored_level1_interrupts;
197extern void do_IRQ(int, struct pt_regs *);
198
199void do_interrupt (struct pt_regs *regs)
200{
201 unsigned long intread = get_sr (INTREAD);
202 unsigned long intenable = get_sr (INTENABLE);
203 int i, mask;
204
205 /* Handle all interrupts (no priorities).
206 * (Clear the interrupt before processing, in case it's
207 * edge-triggered or software-generated)
208 */
209
210 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
211 if (mask & (intread & intenable)) {
212 set_sr (mask, INTCLEAR);
213 do_IRQ (i,regs);
214 }
215 }
216}
217
218/*
219 * Illegal instruction. Fatal if in kernel space.
220 */
221
void
do_illegal_instruction(struct pt_regs *regs)
{
	/* Fatal in kernel mode; die() does not return. */
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
	    current->comm, current->pid, regs->pc);
	force_sig(SIGILL, current);
}
233
234
235/*
236 * Handle unaligned memory accesses from user space. Kill task.
237 *
238 * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
239 * accesses from user space.
240 */
241
242#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
243#ifndef CONFIG_UNALIGNED_USER
244void
245do_unaligned_user (struct pt_regs *regs)
246{
247 siginfo_t info;
248
249 __die_if_kernel("Unhandled unaligned exception in kernel",
250 regs, SIGKILL);
251
252 current->thread.bad_vaddr = regs->excvaddr;
253 current->thread.error_code = -3;
254 printk("Unaligned memory access to %08lx in '%s' "
255 "(pid = %d, pc = %#010lx)\n",
256 regs->excvaddr, current->comm, current->pid, regs->pc);
257 info.si_signo = SIGBUS;
258 info.si_errno = 0;
259 info.si_code = BUS_ADRALN;
260 info.si_addr = (void *) regs->excvaddr;
261 force_sig_info(SIGSEGV, &info, current);
262
263}
264#endif
265#endif
266
/* Debug exception handler, registered for EXCCAUSE_MAPPED_DEBUG in
 * dispatch_init_table.  Hands control to kgdb when configured and
 * enabled; otherwise kernel-mode breakpoints are fatal and user-mode
 * breakpoints raise SIGTRAP. */
void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_KGDB
	/* If remote debugging is configured AND enabled, we give control to
	 * kgdb.  Otherwise, we fall through, perhaps giving control to the
	 * native debugger.
	 */

	if (gdb_enter) {
		extern void gdb_handle_exception(struct pt_regs *);
		gdb_handle_exception(regs);
		return_from_debug_flag = 1;
		return;
	}
#endif

	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP, current);
}
290
291
292/*
293 * Initialize dispatch tables.
294 *
295 * The exception vectors are stored compressed in the __init section in the
296 * dispatch_init_table. This function initializes the following three tables
297 * from that compressed table:
298 * - fast user first dispatch table for user exceptions
299 * - fast kernel first dispatch table for kernel exceptions
300 * - default C-handler C-handler called by the default fast handler.
301 *
302 * See vectors.S for more details.
303 */
304
305#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
306
307void trap_init(void)
308{
309 int i;
310
311 /* Setup default vectors. */
312
313 for(i = 0; i < 64; i++) {
314 set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
315 set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
316 set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
317 }
318
319 /* Setup specific handlers. */
320
321 for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
322
323 int fast = dispatch_init_table[i].fast;
324 int cause = dispatch_init_table[i].cause;
325 void *handler = dispatch_init_table[i].handler;
326
327 if (fast == 0)
328 set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
329 if (fast && fast & USER)
330 set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
331 if (fast && fast & KRNL)
332 set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
333 }
334
335 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
336
337 i = (unsigned long)exc_table;
338 __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
339}
340
341/*
342 * This function dumps the current valid window frame and other base registers.
343 */
344
void show_regs(struct pt_regs * regs)
{
	int i, wmask;

	/* Bit 0 of wmask is ignored here; the remaining bits refer to
	 * 4-register groups of the saved window. */
	wmask = regs->wmask & ~1;

	for (i = 0; i < 32; i++) {
		/* NOTE(review): the loop stops at the first 4-register group
		 * whose wmask bit is set - confirm against how entry.S
		 * encodes which areg groups are valid on the kernel stack. */
		if (wmask & (1 << (i / 4)))
			break;
		if ((i % 8) == 0)
			printk ("\n" KERN_INFO "a%02d: ", i);
		printk("%08lx ", regs->areg[i]);
	}
	printk("\n");

	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
	/* Window state and the syscall number are printed only for
	 * user-mode frames. */
	if (user_mode(regs))
		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
		       regs->windowbase, regs->windowstart, regs->wmask,
		       regs->syscall);
}
369
/*
 * Walk the kernel call stack starting at sp (or at the live stack
 * pointer when sp is NULL) and print the return addresses that fall
 * within kernel text.  The task argument is currently unused; the walk
 * always follows the frame chain reachable from sp/a1.
 */
void show_trace(struct task_struct *task, unsigned long *sp)
{
	unsigned long a0, a1, pc;
	unsigned long sp_start, sp_end;

	a1 = (unsigned long)sp;

	/* No stack supplied: use this CPU's current stack pointer (a1). */
	if (a1 == 0)
		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));


	/* Constrain the walk to the THREAD_SIZE-aligned stack region. */
	sp_start = a1 & ~(THREAD_SIZE-1);
	sp_end = sp_start + THREAD_SIZE;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	/* NOTE(review): spill_registers() presumably flushes the live
	 * register windows to the stack so the frames below can be read
	 * from memory - confirm against its definition. */
	spill_registers();

	while (a1 > sp_start && a1 < sp_end) {
		sp = (unsigned long*)a1;

		/* The caller's a0 (return address) and a1 (stack pointer)
		 * live 16 and 12 bytes below the frame's stack pointer -
		 * the same save-area layout the window overflow handlers
		 * in vectors.S use. */
		a0 = *(sp - 4);
		a1 = *(sp - 3);

		/* Stacks grow downward: a non-increasing saved sp means
		 * the frame chain is finished or corrupt. */
		if (a1 <= (unsigned long) sp)
			break;

		pc = MAKE_PC_FROM_RA(a0, a1);

		if (kernel_text_address(pc)) {
			printk(" [<%08lx>] ", pc);
			print_symbol("%s\n", pc);
		}
	}
	printk("\n");
}
408
409/*
410 * This routine abuses get_user()/put_user() to reference pointers
411 * with at least a bit of error checking ...
412 */
413
414static int kstack_depth_to_print = 24;
415
416void show_stack(struct task_struct *task, unsigned long *sp)
417{
418 int i = 0;
419 unsigned long *stack;
420
421 if (sp == 0)
422 __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
423
424 stack = sp;
425
426 printk("\nStack: ");
427
428 for (i = 0; i < kstack_depth_to_print; i++) {
429 if (kstack_end(sp))
430 break;
431 if (i && ((i % 8) == 0))
432 printk("\n ");
433 printk("%08lx ", *sp++);
434 }
435 printk("\n");
436 show_trace(task, stack);
437}
438
/* Generic entry point: dump the current task's stack and backtrace. */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
445
446
/* Dump nine 32-bit words of code around the faulting pc: three before
 * and six after, with the word at pc itself bracketed by '<' and '>'.
 * __get_user() guards against an unmapped pc. */
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for(i = -3 ; i < 6 ; i++) {
		unsigned long insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in pc)\n");
			break;
		}
		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
	}
}
462
/* Serializes concurrent oopses so their console output doesn't interleave. */
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

/*
 * Print an oops report (message, registers and - for kernel-mode
 * faults - the stack), then terminate the current task.  Panics
 * instead when dying in interrupt context or when panic_on_oops is
 * set.  Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes successive oopses ([#n]) */
	int nl = 0;

	console_verbose();
	spin_lock_irq(&die_lock);

	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long*)regs->areg[1]);

	spin_unlock_irq(&die_lock);

	/* Dying inside an interrupt handler cannot be recovered from. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(5 * HZ);
		panic("Fatal exception");
	}
	do_exit(err);
}
497
498
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 000000000000..81808f0c6742
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,464 @@
1/*
2 * arch/xtensa/kernel/vectors.S
3 *
4 * This file contains all exception vectors (user, kernel, and double),
5 * as well as the window vectors (overflow and underflow), and the debug
6 * vector. These are the primary vectors executed by the processor if an
7 * exception occurs.
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file "COPYING" in the main directory of
11 * this archive for more details.
12 *
13 * Copyright (C) 2005 Tensilica, Inc.
14 *
15 * Chris Zankel <chris@zankel.net>
16 *
17 */
18
19/*
20 * We use a two-level table approach. The user and kernel exception vectors
21 * use a first-level dispatch table to dispatch the exception to a registered
22 * fast handler or the default handler, if no fast handler was registered.
23 * The default handler sets up a C-stack and dispatches the exception to a
24 * registered C handler in the second-level dispatch table.
25 *
26 * Fast handler entry condition:
27 *
28 * a0: trashed, original value saved on stack (PT_AREG0)
29 * a1: a1
30 * a2: new stack pointer, original value in depc
31 * a3: dispatch table
32 * depc: a2, original value saved on stack (PT_DEPC)
33 * excsave_1: a3
34 *
35 * The value for PT_DEPC saved to stack also functions as a boolean to
36 * indicate that the exception is either a double or a regular exception:
37 *
38 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
39 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
40 *
41 * Note: Neither the kernel nor the user exception handler generate literals.
42 *
43 */
44
45#include <linux/linkage.h>
46#include <asm/ptrace.h>
47#include <asm/ptrace.h>
48#include <asm/current.h>
49#include <asm/offsets.h>
50#include <asm/pgtable.h>
51#include <asm/processor.h>
52#include <asm/page.h>
53#include <asm/thread_info.h>
54#include <asm/processor.h>
55
56
57/*
58 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
59 *
60 * We get here when an exception occurred while we were in userland.
61 * We switch to the kernel stack and jump to the first level handler
62 * associated to the exception cause.
63 *
64 * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
65 * decremented by PT_USER_SIZE.
66 */
67
68 .section .UserExceptionVector.text, "ax"
69
ENTRY(_UserExceptionVector)

	xsr	a3, EXCSAVE_1		# save a3 and get dispatch table
	wsr	a2, DEPC		# save a2
	l32i	a2, a3, EXC_TABLE_KSTK	# load kernel stack to a2
	s32i	a0, a2, PT_AREG0	# save a0 to ESF
	rsr	a0, EXCCAUSE		# retrieve exception cause
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
					#  (cause codes lie below
					#  VALID_DOUBLE_EXCEPTION_ADDRESS)
	addx4	a0, a0, a3		# find entry in table (4 bytes/cause)
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0			# enter first-level (fast) handler
81
82/*
83 * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
84 *
85 * We get this exception when we were already in kernel space.
86 * We decrement the current stack pointer (kernel) by PT_SIZE and
87 * jump to the first-level handler associated with the exception cause.
88 *
89 * Note: we need to preserve space for the spill region.
90 */
91
92 .section .KernelExceptionVector.text, "ax"
93
ENTRY(_KernelExceptionVector)

	xsr	a3, EXCSAVE_1		# save a3, and get dispatch table
	wsr	a2, DEPC		# save a2
	addi	a2, a1, -16-PT_SIZE	# adjust stack pointer
					#  (extra 16 bytes preserve the
					#  caller's register spill region)
	s32i	a0, a2, PT_AREG0	# save a0 to ESF
	rsr	a0, EXCCAUSE		# retrieve exception cause
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	addx4	a0, a0, a3		# find entry in table (4 bytes/cause)
	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
	jx	a0
105
106
107/*
108 * Double exception vector (Exceptions with PS.EXCM == 1)
109 * We get this exception when another exception occurs while we are
110 * already in an exception, such as window overflow/underflow exception,
111 * or 'expected' exceptions, for example memory exception when we were trying
112 * to read data from an invalid address in user space.
113 *
114 * Note that this vector is never invoked for level-1 interrupts, because such
115 * interrupts are disabled (masked) when PS.EXCM is set.
116 *
117 * We decode the exception and take the appropriate action. However, the
118 * double exception vector is much more careful, because a lot more error
119 * cases go through the double exception vector than through the user and
120 * kernel exception vectors.
121 *
122 * Occasionally, the kernel expects a double exception to occur. This usually
123 * happens when accessing user-space memory with the user's permissions
124 * (l32e/s32e instructions). The kernel state, though, is not always suitable
125 * for immediate transfer of control to handle_double, where "normal" exception
126 * processing occurs. Also in kernel mode, TLB misses can occur if accessing
127 * vmalloc memory, possibly requiring repair in a double exception handler.
128 *
129 * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
130 * a boolean variable and a pointer to a fixup routine. If the variable
131 * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
132 * zero indicates to use the default kernel/user exception handler.
133 * There is only one exception, when the value is identical to the exc_table
134 * label, the kernel is in trouble. This mechanism is used to protect critical
135 * sections, mainly when the handler writes to the stack to assert the stack
136 * pointer is valid. Once the fixup/default handler leaves that area, the
137 * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
138 *
139 * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
140 * nonzero address of a fixup routine before it could cause a double exception
141 * and reset it before it returns.
142 *
143 * Some other things to take care of when a fast exception handler doesn't
144 * specify a particular fixup handler but wants to use the default handlers:
145 *
146 * - The original stack pointer (in a1) must not be modified. The fast
147 * exception handler should only use a2 as the stack pointer.
148 *
149 * - If the fast handler manipulates the stack pointer (in a2), it has to
150 * register a valid fixup handler and cannot use the default handlers.
151 *
152 * - The handler can use any other generic register from a3 to a15, but it
153 * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
154 *
155 * - These registers must be saved before a double exception can occur.
156 *
157 * - If we ever implement handling signals while in double exceptions, the
158 * number of registers a fast handler has saved (excluding a0 and a1) must
159 * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. )
160 *
161 * The fixup handlers are special handlers:
162 *
163 * - Fixup entry conditions differ from regular exceptions:
164 *
165 * a0: DEPC
166 * a1: a1
167 * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
168 * a3: exctable
169 * depc: a0
170 * excsave_1: a3
171 *
172 * - When the kernel enters the fixup handler, it still assumes it is in a
173 * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
174 * The fixup handler, therefore, has to re-register itself as the fixup
175 * handler before it returns from the double exception.
176 *
177 * - Fixup handler can share the same exception frame with the fast handler.
178 * The kernel stack pointer is not changed when entering the fixup handler.
179 *
180 * - Fixup handlers can jump to the default kernel and user exception
181 * handlers. Before it jumps, though, it has to setup a exception frame
182 * on stack. Because the default handler resets the register fixup handler
183 * the fixup handler must make sure that the default handler returns to
184 * it instead of the exception address, so it can re-register itself as
185 * the fixup handler.
186 *
187 * In case of a critical condition where the kernel cannot recover, we jump
188 * to unrecoverable_exception with the following entry conditions.
189 * All registers a0...a15 are unchanged from the last exception, except:
190 *
191 * a0: last address before we jumped to the unrecoverable_exception.
192 * excsave_1: a0
193 *
194 *
195 * See the handle_alloca_user and spill_registers routines for example clients.
196 *
197 * FIXME: Note: we currently don't allow signal handling coming from a double
198 * exception, so the item marked with (*) is not required.
199 */
200
201 .section .DoubleExceptionVector.text, "ax"
202 .begin literal_prefix .DoubleExceptionVector
203
ENTRY(_DoubleExceptionVector)

	/* Deliberately destroy excsave (don't assume its value was valid). */

	wsr	a3, EXCSAVE_1			# save a3

	/* Check for kernel double exception (usually fatal). */

	rsr	a3, PS
	_bbci.l	a3, PS_UM_SHIFT, .Lksp

	/* Check if we are currently handling a window exception. */
	/* Note: We don't need to indicate that we enter a critical section. */

	xsr	a0, DEPC			# get DEPC, save a0

	movi	a3, XCHAL_WINDOW_VECTORS_VADDR
	_bltu	a0, a3, .Lfixup
	addi	a3, a3, XSHAL_WINDOW_VECTORS_SIZE
	_bgeu	a0, a3, .Lfixup

	/* Window overflow/underflow exception. Get stack pointer. */

	mov	a3, a2
	movi	a2, exc_table
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Check for overflow/underflow exception, jump if overflow.
	 * (Bit 6 of the vector address is set only for the underflow
	 * handlers, given the 64-byte handler spacing in the
	 * .WindowVectors.text section.) */

	_bbci.l	a0, 6, .Lovfl

	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */

	/* Restart window underflow exception.
	 * We return to the instruction in user space that caused the window
	 * underflow exception. Therefore, we change window base to the value
	 * before we entered the window underflow exception and prepare the
	 * registers to return as if we were coming from a regular exception
	 * by changing depc (in a0).
	 * Note: We can trash the current window frame (a0...a3) and depc!
	 */

	wsr	a2, DEPC			# save stack pointer temporarily
	rsr	a0, PS
	extui	a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
	wsr	a0, WINDOWBASE			# return to the owning window
	rsync

	/* We are now in the previous window frame. Save registers again. */

	xsr	a2, DEPC			# save a2 and get stack pointer
	s32i	a0, a2, PT_AREG0

	wsr	a3, EXCSAVE_1			# save a3
	movi	a3, exc_table

	rsr	a0, EXCCAUSE
	s32i	a0, a2, PT_DEPC			# mark it as a regular exception
	addx4	a0, a0, a3
	l32i	a0, a0, EXC_TABLE_FAST_USER
	jx	a0

.Lfixup:/* Check for a fixup handler or if we were in a critical section. */

	/* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */

	movi	a3, exc_table
	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# temporary variable

	/* Enter critical section.  Finding EXC_TABLE_FIXUP already equal
	 * to exc_table means we double-faulted inside a critical section:
	 * unrecoverable. */

	l32i	a2, a3, EXC_TABLE_FIXUP
	s32i	a3, a3, EXC_TABLE_FIXUP
	beq	a2, a3, .Lunrecoverable_fixup	# critical!
	beqz	a2, .Ldflt			# no handler was registered

	/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */

	jx	a2				# jump to the fixup handler

.Ldflt:	/* Get stack pointer. */

	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	addi	a2, a3, -PT_USER_SIZE

.Lovfl:	/* Jump to default handlers. */

	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */

	xsr	a3, DEPC
	s32i	a0, a2, PT_DEPC
	s32i	a3, a2, PT_AREG0

	/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3
	l32i	a0, a0, EXC_TABLE_FAST_USER
	jx	a0

	/*
	 * We only allow the ITLB and DTLB miss exceptions if we are in
	 * kernel space.  All other exceptions are unexpected and thus
	 * unrecoverable!
	 */

	.extern fast_second_level_miss_double_kernel

.Lksp:	/* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */

	rsr	a3, EXCCAUSE
	beqi	a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
	addi	a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
	bnez	a3, .Lunrecoverable
1:	movi	a3, fast_second_level_miss_double_kernel
	jx	a3

	/* Critical! We can't handle this situation. PANIC! */

	.extern unrecoverable_exception

.Lunrecoverable_fixup:
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
	xsr	a0, DEPC

.Lunrecoverable:
	rsr	a3, EXCSAVE_1
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

	.end literal_prefix
336
337
338/*
339 * Debug interrupt vector
340 *
341 * There is not much space here, so simply jump to another handler.
342 * EXCSAVE[DEBUGLEVEL] has been set to that handler.
343 */
344
345 .section .DebugInterruptVector.text, "ax"
346
ENTRY(_DebugInterruptVector)
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL	# fetch handler address; the
						#  swap leaves the original a0
						#  in EXCSAVE[DEBUGLEVEL] for
						#  the handler to recover
	jx	a0
350
351
352
353/* Window overflow and underflow handlers.
354 * The handlers must be 64 bytes apart, first starting with the underflow
355 * handlers underflow-4 to underflow-12, then the overflow handlers
356 * overflow-4 to overflow-12.
357 *
358 * Note: We rerun the underflow handlers if we hit an exception, so
359 * we try to access any page that would cause a page fault early.
360 */
361
362 .section .WindowVectors.text, "ax"
363
364
365/* 4-Register Window Overflow Vector (Handler) */
366
/* 4-Register Window Overflow Vector (Handler) */

	.align 64
.global _WindowOverflow4
_WindowOverflow4:
	s32e	a0, a5, -16		# spill a0..a3 into the 16-byte
	s32e	a1, a5, -12		#  save area below a5
	s32e	a2, a5, -8
	s32e	a3, a5, -4
	rfwo


/* 4-Register Window Underflow Vector (Handler) */

	.align 64
.global _WindowUnderflow4
_WindowUnderflow4:
	l32e	a0, a5, -16		# reload a0..a3 from the save area
	l32e	a1, a5, -12		#  below a5
	l32e	a2, a5, -8
	l32e	a3, a5, -4
	rfwu


/* 8-Register Window Overflow Vector (Handler) */

	.align 64
.global _WindowOverflow8
_WindowOverflow8:
	s32e	a0, a9, -16
	l32e	a0, a1, -12		# a0 <- saved a1 (sp) of the frame
					#  below, to reach its save area
	s32e	a2, a9, -8
	s32e	a1, a9, -12
	s32e	a3, a9, -4
	s32e	a4, a0, -32		# spill a4..a7 below that frame's sp
	s32e	a5, a0, -28
	s32e	a6, a0, -24
	s32e	a7, a0, -20
	rfwo

/* 8-Register Window Underflow Vector (Handler) */

	.align 64
.global _WindowUnderflow8
_WindowUnderflow8:
	l32e	a1, a9, -12
	l32e	a0, a9, -16
	l32e	a7, a1, -12		# a7 temporarily holds the sp of the
					#  frame below (reloaded last)
	l32e	a2, a9, -8
	l32e	a4, a7, -32
	l32e	a3, a9, -4
	l32e	a5, a7, -28
	l32e	a6, a7, -24
	l32e	a7, a7, -20
	rfwu


/* 12-Register Window Overflow Vector (Handler) */

	.align 64
.global _WindowOverflow12
_WindowOverflow12:
	s32e	a0, a13, -16
	l32e	a0, a1, -12		# a0 <- saved a1 (sp) of the frame
					#  below, to reach its save area
	s32e	a1, a13, -12
	s32e	a2, a13, -8
	s32e	a3, a13, -4
	s32e	a4, a0, -48		# spill a4..a11 below that frame's sp
	s32e	a5, a0, -44
	s32e	a6, a0, -40
	s32e	a7, a0, -36
	s32e	a8, a0, -32
	s32e	a9, a0, -28
	s32e	a10, a0, -24
	s32e	a11, a0, -20
	rfwo

/* 12-Register Window Underflow Vector (Handler) */

	.align 64
.global _WindowUnderflow12
_WindowUnderflow12:
	l32e	a1, a13, -12
	l32e	a0, a13, -16
	l32e	a11, a1, -12		# a11 temporarily holds the sp of the
					#  frame below (reloaded last)
	l32e	a2, a13, -8
	l32e	a4, a11, -48
	l32e	a8, a11, -32
	l32e	a3, a13, -4
	l32e	a5, a11, -44
	l32e	a6, a11, -40
	l32e	a7, a11, -36
	l32e	a9, a11, -28
	l32e	a10, a11, -24
	l32e	a11, a11, -20
	rfwu
461
462 .text
463
464
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..476b2b53cd01
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,341 @@
1/*
2 * arch/xtensa/kernel/vmlinux.lds.S
3 *
4 * Xtensa linker script
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 */
16
17#include <asm-generic/vmlinux.lds.h>
18
19#include <linux/config.h>
20#define _NOCLANGUAGE
21#include <xtensa/config/core.h>
22#include <xtensa/config/system.h>
23OUTPUT_ARCH(xtensa)
24ENTRY(_start)
25
26#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
27jiffies = jiffies_64 + 4;
28#else
29jiffies = jiffies_64;
30#endif
31
32#define KERNELOFFSET 0x1000
33
34/* Note: In the following macros, it would be nice to specify only the
35 vector name and section kind and construct "sym" and "section" using
36 CPP concatenation, but that does not work reliably. Concatenating a
37 string with "." produces an invalid token. CPP will not print a
38 warning because it thinks this is an assembly file, but it leaves
39 them as multiple tokens and there may or may not be whitespace
40 between them. */
41
42/* Macro for a relocation entry */
43
/* Emits one 12-byte relocation-table record: the run-time start and
 * end virtual addresses of the vector, followed by its load (link-time
 * image) address, so startup code can copy the vector into place. */
 44#define RELOCATE_ENTRY(sym, section) \
 45	LONG(sym ## _start); \
 46	LONG(sym ## _end); \
 47	LONG(LOADADDR(section))
48
49/* Macro to define a section for a vector.
50 *
51 * Use of the MIN function catches the types of errors illustrated in
52 * the following example:
53 *
54 * Assume the section .DoubleExceptionVector.literal is completely
55 * full. Then a programmer adds code to .DoubleExceptionVector.text
56 * that produces another literal. The final literal position will
57 * overlay onto the first word of the adjacent code section
58 * .DoubleExceptionVector.text. (In practice, the literals will
59 * overwrite the code, and the first few instructions will be
60 * garbage.)
61 */
62
/* Defines a vector output section placed at run-time address 'addr'
 * but loaded (AT) immediately after the previous section's load image,
 * rounded up to 4 bytes.  The MIN() clamps against max_prevsec_size so
 * an over-full previous section is caught (see comment above).  The
 * sym_start/sym_end symbols bound the data for RELOCATE_ENTRY. */
 63#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \
 64  section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size,	\
 65		         LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
 66  {									\
 67    . = ALIGN(4);							\
 68    sym ## _start = ABSOLUTE(.);		 			\
 69    *(section)								\
 70    sym ## _end = ABSOLUTE(.);						\
 71  }
72
73/*
74 * Mapping of input sections to output sections when linking.
75 */
76
 77SECTIONS
 78{
/* Kernel image starts KERNELOFFSET (0x1000) bytes into the cached
 * kernel segment. */
 79  . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
 80  /* .text section */
 81
 82  _text = .;
 83  _stext = .;
 84  _ftext = .;
 85
 86  .text :
 87  {
 88    /* The .head.text section must be the first section! */
 89    *(.head.text)
 90    *(.literal .text)
 91    *(.srom.text)
 92    VMLINUX_SYMBOL(__sched_text_start) = .;
 93    *(.sched.text.literal .sched.text)
 94    VMLINUX_SYMBOL(__sched_text_end) = .;
 95    VMLINUX_SYMBOL(__lock_text_start) = .;
 96    *(.spinlock.text.literal .spinlock.text)
 97    VMLINUX_SYMBOL(__lock_text_end) = .;
 98
 99  }
 100  _etext = .;
 101
 102  . = ALIGN(16);
 103
 104  RODATA
 105
 106  /* Relocation table */
 107
/* Table of RELOCATE_ENTRY records consumed at boot to copy each vector
 * from its load address to its run-time address; bounded by the
 * __boot_reloc_table_{start,end} symbols. */
 108  . = ALIGN(16);
 109  __boot_reloc_table_start = ABSOLUTE(.);
 110
 111  __relocate : {
 112
 113    RELOCATE_ENTRY(_WindowVectors_text,
 114		   .WindowVectors.text);
 115#if 0
 116    RELOCATE_ENTRY(_KernelExceptionVector_literal,
 117		   .KernelExceptionVector.literal);
 118#endif
 119    RELOCATE_ENTRY(_KernelExceptionVector_text,
 120		   .KernelExceptionVector.text);
 121#if 0
 122    RELOCATE_ENTRY(_UserExceptionVector_literal,
 123		   .UserExceptionVector.literal);
 124#endif
 125    RELOCATE_ENTRY(_UserExceptionVector_text,
 126		   .UserExceptionVector.text);
 127    RELOCATE_ENTRY(_DoubleExceptionVector_literal,
 128		   .DoubleExceptionVector.literal);
 129    RELOCATE_ENTRY(_DoubleExceptionVector_text,
 130		   .DoubleExceptionVector.text);
 131  }
 132  __boot_reloc_table_end = ABSOLUTE(.) ;
 133
 134  .fixup   : { *(.fixup) }
 135
 136  . = ALIGN(16);
 137
 138  __ex_table : {
 139    __start___ex_table = .;
 140    *(__ex_table)
 141    __stop___ex_table = .;
 142  }
 143
 144  /* Data section */
 145
 146  . = ALIGN(XCHAL_ICACHE_LINESIZE);
 147  _fdata = .;
 148  .data :
 149  {
 150    *(.data) CONSTRUCTORS
 151    . = ALIGN(XCHAL_ICACHE_LINESIZE);
 152    *(.data.cacheline_aligned)
 153  }
 154
 155  _edata = .;
 156
 157  /* The initial task */
 158  . = ALIGN(8192);
 159  .data.init_task : { *(.data.init_task) }
 160
 161  /* Initialization code and data: */
 162
 163  . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
 164  __init_begin = .;
 165  .init.text : {
 166  	_sinittext = .;
 167    *(.init.text.literal) *(.init.text)
 168    _einittext = .;
 169  }
 170
 171  .init.data :
 172  {
 173    *(.init.data)
 174    . = ALIGN(0x4);
 175    __tagtable_begin = .;
 176    *(.taglist)
 177    __tagtable_end = .;
 178  }
 179
 180  . = ALIGN(XCHAL_ICACHE_LINESIZE);
 181
 182  __setup_start = .;
 183  .init.setup : { *(.init.setup) }
 184  __setup_end = .;
 185
/* Initcalls are grouped and run in level order (1..7). */
 186  __initcall_start = .;
 187  .initcall.init : {
 188  	*(.initcall1.init)
 189  	*(.initcall2.init)
 190  	*(.initcall3.init)
 191  	*(.initcall4.init)
 192  	*(.initcall5.init)
 193  	*(.initcall6.init)
 194  	*(.initcall7.init)
 195  }
 196  __initcall_end = .;
 197
 198  __con_initcall_start = .;
 199  .con_initcall.init : { *(.con_initcall.init) }
 200  __con_initcall_end = .;
 201
 202  SECURITY_INIT
 203
 204  . = ALIGN(4);
 205
 206  __start___ftr_fixup = .;
 207  __ftr_fixup : { *(__ftr_fixup) }
 208  __stop___ftr_fixup = .;
 209
 210  . = ALIGN(32);
 211  __per_cpu_start = .;
 212  .data.percpu  : { *(.data.percpu) }
 213  __per_cpu_end = .;
 214
 215  . = ALIGN(4096);
 216  __initramfs_start =.;
 217  .init.ramfs : { *(.init.ramfs) }
 218  __initramfs_end = .;
 219
 220  /* We need this dummy segment here */
 221
/* .dummy provides a previous section with a defined LOADADDR so the
 * SECTION_VECTOR chain below has a valid starting anchor. */
 222  . = ALIGN(4);
 223  .dummy : { LONG(0) }
 224
 225  /* The vectors are relocated to the real position at startup time */
 226
/* Each vector links at its architecture-defined run-time address but
 * is loaded right after the preceding section; literals sit 4 bytes
 * (16 for the double-exception vector) before their text. */
 227  SECTION_VECTOR (_WindowVectors_text,
 228		  .WindowVectors.text,
 229		  XCHAL_WINDOW_VECTORS_VADDR, 4,
 230		  .dummy)
 231  SECTION_VECTOR (_DebugInterruptVector_literal,
 232		  .DebugInterruptVector.literal,
 233		  XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
 234		  SIZEOF(.WindowVectors.text),
 235		  .WindowVectors.text)
 236  SECTION_VECTOR (_DebugInterruptVector_text,
 237		  .DebugInterruptVector.text,
 238		  XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
 239		  4,
 240		  .DebugInterruptVector.literal)
 241  SECTION_VECTOR (_KernelExceptionVector_literal,
 242		  .KernelExceptionVector.literal,
 243		  XCHAL_KERNELEXC_VECTOR_VADDR - 4,
 244		  SIZEOF(.DebugInterruptVector.text),
 245		  .DebugInterruptVector.text)
 246  SECTION_VECTOR (_KernelExceptionVector_text,
 247		  .KernelExceptionVector.text,
 248		  XCHAL_KERNELEXC_VECTOR_VADDR,
 249		  4,
 250		  .KernelExceptionVector.literal)
 251  SECTION_VECTOR (_UserExceptionVector_literal,
 252		  .UserExceptionVector.literal,
 253		  XCHAL_USEREXC_VECTOR_VADDR - 4,
 254		  SIZEOF(.KernelExceptionVector.text),
 255		  .KernelExceptionVector.text)
 256  SECTION_VECTOR (_UserExceptionVector_text,
 257		  .UserExceptionVector.text,
 258		  XCHAL_USEREXC_VECTOR_VADDR,
 259		  4,
 260		  .UserExceptionVector.literal)
 261  SECTION_VECTOR (_DoubleExceptionVector_literal,
 262		  .DoubleExceptionVector.literal,
 263		  XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
 264		  SIZEOF(.UserExceptionVector.text),
 265		  .UserExceptionVector.text)
 266  SECTION_VECTOR (_DoubleExceptionVector_text,
 267		  .DoubleExceptionVector.text,
 268		  XCHAL_DOUBLEEXC_VECTOR_VADDR,
 269		  32,
 270		  .DoubleExceptionVector.literal)
 271
/* Move the location counter past the last vector's load image before
 * closing the init region. */
 272  . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
 273  . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
 274
 275  __init_end = .;
 276
 277  . = ALIGN(8192);
 278
 279  /* BSS section */
 280  _bss_start = .;
 281  .sbss : { *(.sbss) *(.scommon) }
 282  .bss : { *(COMMON) *(.bss) }
 283  _bss_end = .;
 284  _end = .;
 285
 286  /* only used by the boot loader  */
 287
 288  . = ALIGN(0x10);
 289  .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
 290
 291  . = ALIGN(0x1000);
 292  __initrd_start = .;
 293  .initrd : { *(.initrd) }
 294  __initrd_end = .;
 295
 296  .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
 297  {
 298    *(.ResetVector.text)
 299  }
 300
 301
 302  /* Sections to be discarded */
 303  /DISCARD/ :
 304  {
 305	*(.text.exit)
 306	*(.text.exit.literal)
 307	*(.data.exit)
 308	*(.exitcall.exit)
 309  }
 310
 311
/* Debug sections: kept at vaddr 0, not loaded. */
 312  .debug  0 :  { *(.debug) }
 313  .line  0 :  { *(.line) }
 314  .debug_srcinfo  0 :  { *(.debug_srcinfo) }
 315  .debug_sfnames  0 :  { *(.debug_sfnames) }
 316  .debug_aranges  0 :  { *(.debug_aranges) }
 317  .debug_pubnames  0 :  { *(.debug_pubnames) }
 318  .debug_info  0 :  { *(.debug_info) }
 319  .debug_abbrev  0 :  { *(.debug_abbrev) }
 320  .debug_line  0 :  { *(.debug_line) }
 321  .debug_frame  0 :  { *(.debug_frame) }
 322  .debug_str  0 :  { *(.debug_str) }
 323  .debug_loc  0 :  { *(.debug_loc) }
 324  .debug_macinfo  0 :  { *(.debug_macinfo) }
 325  .debug_weaknames  0 :  { *(.debug_weaknames) }
 326  .debug_funcnames  0 :  { *(.debug_funcnames) }
 327  .debug_typenames  0 :  { *(.debug_typenames) }
 328  .debug_varnames  0 :  { *(.debug_varnames) }
 329
/* Xtensa-specific property sections (instruction/literal tables);
 * NOTE(review): presumably consumed by Tensilica tools -- not loaded. */
 330  .xt.insn 0 :
 331  {
 332    *(.xt.insn)
 333    *(.gnu.linkonce.x*)
 334  }
 335
 336  .xt.lit 0 :
 337  {
 338    *(.xt.lit)
 339    *(.gnu.linkonce.p*)
 340  }
 341}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 000000000000..efae56a51475
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,123 @@
1/*
2 * arch/xtensa/kernel/xtensa_ksyms.c
3 *
4 * Export Xtensa-specific functions for loadable modules.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Joe Taylor <joe@tensilica.com>
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <asm/irq.h>
21#include <linux/in6.h>
22#include <linux/pci.h>
23#include <linux/ide.h>
24
25#include <asm/uaccess.h>
26#include <asm/checksum.h>
27#include <asm/dma.h>
28#include <asm/io.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/semaphore.h>
32#ifdef CONFIG_BLK_DEV_FD
33#include <asm/floppy.h>
34#endif
35#ifdef CONFIG_NET
36#include <net/checksum.h>
37#endif /* CONFIG_NET */
38
39
 40/*
 41 * String functions
 42 */
 43EXPORT_SYMBOL(memcmp);
 44EXPORT_SYMBOL(memset);
 45EXPORT_SYMBOL(memcpy);
 46EXPORT_SYMBOL(memmove);
 47EXPORT_SYMBOL(memchr);
 48EXPORT_SYMBOL(strcat);
 49EXPORT_SYMBOL(strchr);
 50EXPORT_SYMBOL(strlen);
 51EXPORT_SYMBOL(strpbrk);
 52EXPORT_SYMBOL(strncat);
 53EXPORT_SYMBOL(strnlen);
 54EXPORT_SYMBOL(strrchr);
 55EXPORT_SYMBOL(strstr);
 56
 57EXPORT_SYMBOL(enable_irq);
 58EXPORT_SYMBOL(disable_irq);
 59EXPORT_SYMBOL(kernel_thread);
 60
 61/*
 62 * gcc internal math functions
 63 */
/* Prototypes for the libgcc-style arithmetic helpers the compiler
 * emits calls to; exported so modules can link against the in-kernel
 * implementations.  NOTE(review): implementations are assumed to live
 * elsewhere in this arch tree -- verify before changing. */
 64extern long long __ashrdi3(long long, int);
 65extern long long __ashldi3(long long, int);
 66extern long long __lshrdi3(long long, int);
 67extern int __divsi3(int, int);
 68extern int __modsi3(int, int);
 69extern long long __muldi3(long long, long long);
 70extern int __mulsi3(int, int);
 71extern unsigned int __udivsi3(unsigned int, unsigned int);
 72extern unsigned int __umodsi3(unsigned int, unsigned int);
 73extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
 74extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
 75
 76EXPORT_SYMBOL(__ashldi3);
 77EXPORT_SYMBOL(__ashrdi3);
 78EXPORT_SYMBOL(__lshrdi3);
 79EXPORT_SYMBOL(__divsi3);
 80EXPORT_SYMBOL(__modsi3);
 81EXPORT_SYMBOL(__muldi3);
 82EXPORT_SYMBOL(__mulsi3);
 83EXPORT_SYMBOL(__udivsi3);
 84EXPORT_SYMBOL(__umodsi3);
 85EXPORT_SYMBOL(__udivdi3);
 86EXPORT_SYMBOL(__umoddi3);
 87
 88/*
 89 * Semaphore operations
 90 */
 91EXPORT_SYMBOL(__down);
 92EXPORT_SYMBOL(__down_interruptible);
 93EXPORT_SYMBOL(__down_trylock);
 94EXPORT_SYMBOL(__up);
 95
 96#ifdef CONFIG_NET
 97/*
 98 * Networking support
 99 */
 100EXPORT_SYMBOL(csum_partial_copy_generic);
 101#endif /* CONFIG_NET */
 102
 103/*
 104 * Architecture-specific symbols
 105 */
 106EXPORT_SYMBOL(__xtensa_copy_user);
 107
 108/*
 109 * Kernel hacking ...
 110 */
 111
 112#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
 113// FIXME EXPORT_SYMBOL(screen_info);
 114#endif
 115
 116EXPORT_SYMBOL(get_wchan);
 117
/* Port-space string I/O accessors. */
 118EXPORT_SYMBOL(outsb);
 119EXPORT_SYMBOL(outsw);
 120EXPORT_SYMBOL(outsl);
 121EXPORT_SYMBOL(insb);
 122EXPORT_SYMBOL(insw);
 123EXPORT_SYMBOL(insl);
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
new file mode 100644
index 000000000000..ed935b58e8a4
--- /dev/null
+++ b/arch/xtensa/lib/Makefile
@@ -0,0 +1,7 @@
#
# Makefile for Xtensa-specific library files.
#

# Always-built helper routines, one object per line for easy diffing.
lib-y += memcopy.o
lib-y += memset.o
lib-y += checksum.o
lib-y += strcasecmp.o
lib-y += usercopy.o
lib-y += strncpy_user.o
lib-y += strnlen_user.o

# PCI autoconfiguration is only needed when PCI support is configured.
lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
new file mode 100644
index 000000000000..e2d64dfd530c
--- /dev/null
+++ b/arch/xtensa/lib/checksum.S
@@ -0,0 +1,410 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Xtensa version: Copyright (C) 2001 Tensilica, Inc. by Kevin Chea
9 * Optimized by Joe Taylor
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <asm/errno.h>
18#include <linux/linkage.h>
19#define _ASMLANGUAGE
20#include <xtensa/config/core.h>
21
22/*
23 * computes a partial checksum, e.g. for TCP/UDP fragments
24 */
25
26/*
27 * unsigned int csum_partial(const unsigned char *buf, int len,
28 * unsigned int sum);
29 * a2 = buf
30 * a3 = len
31 * a4 = sum
32 *
33 * This function assumes 2- or 4-byte alignment. Other alignments will fail!
34 */
35
36/* ONES_ADD converts twos-complement math to ones-complement. */
/* Add 'val' into 'sum' with end-around carry: if the 32-bit add
 * wrapped (sum < val, unsigned), fold the carry back in by adding 1.
 * Uses the numeric local label 99 for the skip branch. */
 37#define ONES_ADD(sum, val)	  \
 38	add	sum, sum, val	; \
 39	bgeu	sum, val, 99f	; \
 40	addi	sum, sum, 1	; \
 4199:				;
42
 43.text
 44ENTRY(csum_partial)
	  /*
	   * Experiments with Ethernet and SLIP connections show that buf
	   * is aligned on either a 2-byte or 4-byte boundary.
	   */
 49	entry	sp, 32
 50	extui	a5, a2, 0, 2
 51	bnez	a5, 8f		/* branch if 2-byte aligned */
	  /* Fall-through on common case, 4-byte alignment */
 531:
 54	srli	a5, a3, 5	/* 32-byte chunks */
/* With the Xtensa loop option, loopgtz replaces the manual end-pointer
 * bookkeeping used in the #else branches throughout this file. */
 55#if XCHAL_HAVE_LOOPS
 56	loopgtz	a5, 2f
 57#else
 58	beqz	a5, 2f
 59	slli	a5, a5, 5
 60	add	a5, a5, a2	/* a5 = end of last 32-byte chunk */
 61.Loop1:
 62#endif
 63	l32i	a6, a2, 0
 64	l32i	a7, a2, 4
 65	ONES_ADD(a4, a6)
 66	ONES_ADD(a4, a7)
 67	l32i	a6, a2, 8
 68	l32i	a7, a2, 12
 69	ONES_ADD(a4, a6)
 70	ONES_ADD(a4, a7)
 71	l32i	a6, a2, 16
 72	l32i	a7, a2, 20
 73	ONES_ADD(a4, a6)
 74	ONES_ADD(a4, a7)
 75	l32i	a6, a2, 24
 76	l32i	a7, a2, 28
 77	ONES_ADD(a4, a6)
 78	ONES_ADD(a4, a7)
 79	addi	a2, a2, 4*8
 80#if !XCHAL_HAVE_LOOPS
 81	blt	a2, a5, .Loop1
 82#endif
 832:
 84	extui	a5, a3, 2, 3	/* remaining 4-byte chunks */
 85#if XCHAL_HAVE_LOOPS
 86	loopgtz	a5, 3f
 87#else
 88	beqz	a5, 3f
 89	slli	a5, a5, 2
 90	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
 91.Loop2:
 92#endif
 93	l32i	a6, a2, 0
 94	ONES_ADD(a4, a6)
 95	addi	a2, a2, 4
 96#if !XCHAL_HAVE_LOOPS
 97	blt	a2, a5, .Loop2
 98#endif
 993:
 100	_bbci.l	a3, 1, 5f	/* remaining 2-byte chunk */
 101	l16ui	a6, a2, 0
 102	ONES_ADD(a4, a6)
 103	addi	a2, a2, 2
 1045:
 105	_bbci.l	a3, 0, 7f	/* remaining 1-byte chunk */
 1066:	l8ui	a6, a2, 0
 107#ifdef __XTENSA_EB__
 108	slli	a6, a6, 8	/* load byte into bits 8..15 */
 109#endif
 110	ONES_ADD(a4, a6)
 1117:
 112	mov	a2, a4
 113	retw
 114
	  /* uncommon case, buf is 2-byte aligned */
 1168:
 117	beqz	a3, 7b		/* branch if len == 0 */
 118	beqi	a3, 1, 6b	/* branch if len == 1 */
 119
 120	extui	a5, a2, 0, 1
 121	bnez	a5, 8f		/* branch if 1-byte aligned */
 122
 123	l16ui	a6, a2, 0	/* common case, len >= 2 */
 124	ONES_ADD(a4, a6)
 125	addi	a2, a2, 2	/* adjust buf */
 126	addi	a3, a3, -2	/* adjust len */
 127	j	1b		/* now buf is 4-byte aligned */
 128
	  /* case: odd-byte aligned, len > 1
	   * This case is dog slow, so don't give us an odd address.
	   * (I don't think this ever happens, but just in case.)
	   */
 1338:
 134	srli	a5, a3, 2	/* 4-byte chunks */
 135#if XCHAL_HAVE_LOOPS
 136	loopgtz	a5, 2f
 137#else
 138	beqz	a5, 2f
 139	slli	a5, a5, 2
 140	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
 141.Loop3:
 142#endif
/* Reassemble an unaligned word from a byte, a halfword and a byte so
 * only aligned-friendly accesses are issued; the shift direction
 * depends on endianness. */
 143	l8ui	a6, a2, 0	/* bits 24..31 */
 144	l16ui	a7, a2, 1	/* bits  8..23 */
 145	l8ui	a8, a2, 3	/* bits  0.. 8 */
 146#ifdef	__XTENSA_EB__
 147	slli	a6, a6, 24
 148#else
 149	slli	a8, a8, 24
 150#endif
 151	slli	a7, a7, 8
 152	or	a7, a7, a6
 153	or	a7, a7, a8
 154	ONES_ADD(a4, a7)
 155	addi	a2, a2, 4
 156#if !XCHAL_HAVE_LOOPS
 157	blt	a2, a5, .Loop3
 158#endif
 1592:
 160	_bbci.l	a3, 1, 3f	/* remaining 2-byte chunk, still odd addr */
 161	l8ui	a6, a2, 0
 162	l8ui	a7, a2, 1
 163#ifdef	__XTENSA_EB__
 164	slli	a6, a6, 8
 165#else
 166	slli	a7, a7, 8
 167#endif
 168	or	a7, a7, a6
 169	ONES_ADD(a4, a7)
 170	addi	a2, a2, 2
 1713:
 172	j	5b		/* branch to handle the remaining byte */
173
174
175
176/*
177 * Copy from ds while checksumming, otherwise like csum_partial
178 *
179 * The macros SRC and DST specify the type of access for the instruction.
180 * thus we can call a custom exception handler for each access type.
181 */
182
/* Wrap a source-side load with an __ex_table entry so a user-space
 * fault on it vectors to the fixup code at local label 6001. */
 183#define SRC(y...)	\
 184	9999: y;	\
 185	.section __ex_table, "a"; \
 186	.long 9999b, 6001f	; \
 187	.previous
188
/* Wrap a destination-side store with an __ex_table entry so a fault on
 * it vectors to the fixup code at local label 6002. */
 189#define DST(y...)	\
 190	9999: y;	\
 191	.section __ex_table, "a"; \
 192	.long 9999b, 6002f	; \
 193	.previous
194
195/*
196unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
197 int sum, int *src_err_ptr, int *dst_err_ptr)
198 a2 = src
199 a3 = dst
200 a4 = len
201 a5 = sum
202 a6 = src_err_ptr
203 a7 = dst_err_ptr
204 a8 = temp
205 a9 = temp
206 a10 = temp
207 a11 = original len for exception handling
208 a12 = original dst for exception handling
209
210 This function is optimized for 4-byte aligned addresses. Other
211 alignments work, but not nearly as efficiently.
212 */
213
 214ENTRY(csum_partial_copy_generic)
 215	entry	sp, 32
 216	mov	a12, a3		/* preserve dst for the fault-fixup path */
 217	mov	a11, a4		/* preserve len for the fault-fixup path */
 218	or	a10, a2, a3
	  /* We optimize the following alignment tests for the 4-byte
	     aligned case.  Two bbsi.l instructions might seem more optimal
	     (commented out below).  However, both labels 5: and 3: are out
	     of the imm8 range, so the assembler relaxes them into
	     equivalent bbci.l, j combinations, which is actually
	     slower. */
 227	extui	a9, a10, 0, 2
 228	beqz	a9, 1f		/* branch if both are 4-byte aligned */
 229	bbsi.l	a10, 0, 5f	/* branch if one address is odd */
 230	j	3f		/* one address is 2-byte aligned */
 231
 232/*	_bbsi.l	a10, 0, 5f */	/* branch if odd address */
 233/*	_bbsi.l	a10, 1, 3f */	/* branch if 2-byte-aligned address */
 234
 2351:
	  /* src and dst are both 4-byte aligned */
 237	srli	a10, a4, 5	/* 32-byte chunks */
 238#if XCHAL_HAVE_LOOPS
 239	loopgtz	a10, 2f
 240#else
 241	beqz	a10, 2f
 242	slli	a10, a10, 5
 243	add	a10, a10, a2	/* a10 = end of last 32-byte src chunk */
 244.Loop5:
 245#endif
 246SRC(	l32i	a9, a2, 0	)
 247SRC(	l32i	a8, a2, 4	)
 248DST(	s32i	a9, a3, 0	)
 249DST(	s32i	a8, a3, 4	)
 250	ONES_ADD(a5, a9)
 251	ONES_ADD(a5, a8)
 252SRC(	l32i	a9, a2, 8	)
 253SRC(	l32i	a8, a2, 12	)
 254DST(	s32i	a9, a3, 8	)
 255DST(	s32i	a8, a3, 12	)
 256	ONES_ADD(a5, a9)
 257	ONES_ADD(a5, a8)
 258SRC(	l32i	a9, a2, 16	)
 259SRC(	l32i	a8, a2, 20	)
 260DST(	s32i	a9, a3, 16	)
 261DST(	s32i	a8, a3, 20	)
 262	ONES_ADD(a5, a9)
 263	ONES_ADD(a5, a8)
 264SRC(	l32i	a9, a2, 24	)
 265SRC(	l32i	a8, a2, 28	)
 266DST(	s32i	a9, a3, 24	)
 267DST(	s32i	a8, a3, 28	)
 268	ONES_ADD(a5, a9)
 269	ONES_ADD(a5, a8)
 270	addi	a2, a2, 32
 271	addi	a3, a3, 32
 272#if !XCHAL_HAVE_LOOPS
 273	blt	a2, a10, .Loop5
 274#endif
 2752:
 276	extui	a10, a4, 2, 3	/* remaining 4-byte chunks */
 277	extui	a4, a4, 0, 2	/* reset len for general-case, 2-byte chunks */
 278#if XCHAL_HAVE_LOOPS
 279	loopgtz	a10, 3f
 280#else
 281	beqz	a10, 3f
 282	slli	a10, a10, 2
 283	add	a10, a10, a2	/* a10 = end of last 4-byte src chunk */
 284.Loop6:
 285#endif
 286SRC(	l32i	a9, a2, 0	)
 287DST(	s32i	a9, a3, 0	)
 288	ONES_ADD(a5, a9)
 289	addi	a2, a2, 4
 290	addi	a3, a3, 4
 291#if !XCHAL_HAVE_LOOPS
 292	blt	a2, a10, .Loop6
 293#endif
 2943:
	  /*
	     Control comes to here in two cases: (1) It may fall through
	     to here from the 4-byte alignment case to process, at most,
	     one 2-byte chunk.  (2) It branches to here from above if
	     either src or dst is 2-byte aligned, and we process all bytes
	     here, except for perhaps a trailing odd byte.  It's
	     inefficient, so align your addresses to 4-byte boundaries.

	     a2 = src
	     a3 = dst
	     a4 = len
	     a5 = sum
	  */
 308	srli	a10, a4, 1	/* 2-byte chunks */
 309#if XCHAL_HAVE_LOOPS
 310	loopgtz	a10, 4f
 311#else
 312	beqz	a10, 4f
 313	slli	a10, a10, 1
 314	add	a10, a10, a2	/* a10 = end of last 2-byte src chunk */
 315.Loop7:
 316#endif
 317SRC(	l16ui	a9, a2, 0	)
 318DST(	s16i	a9, a3, 0	)
 319	ONES_ADD(a5, a9)
 320	addi	a2, a2, 2
 321	addi	a3, a3, 2
 322#if !XCHAL_HAVE_LOOPS
 323	blt	a2, a10, .Loop7
 324#endif
 3254:
	  /* This section processes a possible trailing odd byte. */
 327	_bbci.l	a4, 0, 8f	/* 1-byte chunk */
 328SRC(	l8ui	a9, a2, 0	)
 329DST(	s8i	a9, a3, 0	)
 330#ifdef __XTENSA_EB__
 331	slli	a9, a9, 8	/* shift byte to bits 8..15 */
 332#endif
 333	ONES_ADD(a5, a9)
 3348:
 335	mov	a2, a5
 336	retw
 337
 3385:
	  /* Control branch to here when either src or dst is odd.  We
	     process all bytes using 8-bit accesses.  Grossly inefficient,
	     so don't feed us an odd address. */

 343	srli	a10, a4, 1	/* handle in pairs for 16-bit csum */
 344#if XCHAL_HAVE_LOOPS
 345	loopgtz	a10, 6f
 346#else
 347	beqz	a10, 6f
 348	slli	a10, a10, 1
 349	add	a10, a10, a2	/* a10 = end of last odd-aligned, 2-byte src chunk */
 350.Loop8:
 351#endif
 352SRC(	l8ui	a9, a2, 0	)
 353SRC(	l8ui	a8, a2, 1	)
 354DST(	s8i	a9, a3, 0	)
 355DST(	s8i	a8, a3, 1	)
 356#ifdef __XTENSA_EB__
 357	slli	a9, a9, 8	/* combine into a single 16-bit value */
 358#else				/* for checksum computation */
 359	slli	a8, a8, 8
 360#endif
 361	or	a9, a9, a8
 362	ONES_ADD(a5, a9)
 363	addi	a2, a2, 2
 364	addi	a3, a3, 2
 365#if !XCHAL_HAVE_LOOPS
 366	blt	a2, a10, .Loop8
 367#endif
 3686:
 369	j	4b		/* process the possible trailing odd byte */
 370
 371
 372# Exception handler:
 373.section .fixup, "ax"
	  /*
	     a6  = src_err_ptr
	     a7  = dst_err_ptr
	     a11 = original len for exception handling
	     a12 = original dst for exception handling
	  */

/* Source fault: record -EFAULT in *src_err_ptr, then zero the whole
 * destination buffer (computing a partial result is too much work). */
 3816001:
 382	_movi	a2, -EFAULT
 383	s32i	a2, a6, 0	/* src_err_ptr */

	  # clear the complete destination - computing the rest
	  # is too much work
 387	movi	a2, 0
 388#if XCHAL_HAVE_LOOPS
 389	loopgtz	a11, 2f
 390#else
 391	beqz	a11, 2f
 392	add	a11, a11, a12	/* a11 = ending address */
 393.Leloop:
 394#endif
 395	s8i	a2, a12, 0
 396	addi	a12, a12, 1
 397#if !XCHAL_HAVE_LOOPS
 398	blt	a12, a11, .Leloop
 399#endif
 4002:
 401	retw
 402
/* Destination fault: record -EFAULT in *dst_err_ptr and return 0. */
 4036002:
 404	movi	a2, -EFAULT
 405	s32i	a2, a7, 0	/* dst_err_ptr */
 406	movi	a2, 0
 407	retw

 409.previous
410
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
new file mode 100644
index 000000000000..e8f6d7eb7222
--- /dev/null
+++ b/arch/xtensa/lib/memcopy.S
@@ -0,0 +1,315 @@
1/*
2 * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions
3 * xthal_memcpy and xthal_bcopy
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 2002 - 2005 Tensilica Inc.
10 */
11
12#include <xtensa/coreasm.h>
13
# src_b: endian-neutral funnel shift -- combines two words into one
# using the SAR amount set up by the ssa8 macro below; operand order
# is swapped for little-endian so byte order comes out right.
 14	.macro	src_b	r, w0, w1
 15#ifdef __XTENSA_EB__
 16	src	\r, \w0, \w1
 17#else
 18	src	\r, \w1, \w0
 19#endif
 20	.endm
21
# ssa8: set the shift amount register from the low bits of \r, choosing
# the big-endian (ssa8b) or little-endian (ssa8l) variant to match the
# src operand order used in src_b.
 22	.macro	ssa8	r
 23#ifdef __XTENSA_EB__
 24	ssa8b	\r
 25#else
 26	ssa8l	\r
 27#endif
 28	.endm
29
30
31/*
32 * void *memcpy(void *dst, const void *src, size_t len);
33 * void *memmove(void *dst, const void *src, size_t len);
34 * void *bcopy(const void *src, void *dst, size_t len);
35 *
36 * This function is intended to do the same thing as the standard
37 * library function memcpy() (or bcopy()) for most cases.
38 * However, where the source and/or destination references
39 * an instruction RAM or ROM or a data RAM or ROM, that
40 * source and/or destination will always be accessed with
41 * 32-bit load and store instructions (as required for these
42 * types of devices).
43 *
44 * !!!!!!! XTFIXME:
45 * !!!!!!! Handling of IRAM/IROM has not yet
46 * !!!!!!! been implemented.
47 *
48 * The bcopy version is provided here to avoid the overhead
49 * of an extra call, for callers that require this convention.
50 *
51 * The (general case) algorithm is as follows:
52 * If destination is unaligned, align it by conditionally
53 * copying 1 and 2 bytes.
54 * If source is aligned,
55 * do 16 bytes with a loop, and then finish up with
56 * 8, 4, 2, and 1 byte copies conditional on the length;
57 * else (if source is unaligned),
58 * do the same, but use SRC to align the source data.
59 * This code tries to use fall-through branches for the common
60 * case of aligned source and destination and multiple
61 * of 4 (or 8) length.
62 *
63 * Register use:
64 * a0/ return address
65 * a1/ stack pointer
66 * a2/ return value
67 * a3/ src
68 * a4/ length
69 * a5/ dst
70 * a6/ tmp
71 * a7/ tmp
72 * a8/ tmp
73 * a9/ tmp
74 * a10/ tmp
75 * a11/ tmp
76 */
77
 78	.text
 79	.align	4
 80	.global	bcopy
 81	.type   bcopy,@function
 82bcopy:
 83	entry	sp, 16		# minimal stack frame
	# a2=src, a3=dst, a4=len
# bcopy's (src, dst) argument order is the reverse of memcpy's, so swap
# a2/a3 before falling into the shared implementation.
 85	mov	a5, a3		# copy dst so that a2 is return value
 86	mov	a3, a2
 87	mov	a2, a5
 88	j	.Lcommon	# go to common code for memcpy+bcopy


/*
 * Byte by byte copy
 */
 94	.align	4
 95	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lbytecopy:
 98#if XCHAL_HAVE_LOOPS
 99	loopnez	a4, .Lbytecopydone
 100#else /* !XCHAL_HAVE_LOOPS */
 101	beqz	a4, .Lbytecopydone
 102	add	a7, a3, a4	# a7 = end address for source
 103#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
 105	l8ui	a6, a3, 0
 106	addi	a3, a3, 1
 107	s8i	a6, a5, 0
 108	addi	a5, a5, 1
 109#if !XCHAL_HAVE_LOOPS
 110	blt	a3, a7, .Lnextbyte
 111#endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone:
 113	retw

/*
 * Destination is unaligned
 */

 119	.align	4
.Ldst1mod2:	# dst is only byte aligned
 121	_bltui	a4, 7, .Lbytecopy	# do short copies byte by byte

	# copy 1 byte
 124	l8ui	a6, a3,  0
 125	addi	a3, a3,  1
 126	addi	a4, a4, -1
 127	s8i	a6, a5,  0
 128	addi	a5, a5,  1
 129	_bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
					# return to main algorithm
.Ldst2mod4:	# dst 16-bit aligned
	# copy 2 bytes
 133	_bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
 134	l8ui	a6, a3,  0
 135	l8ui	a7, a3,  1
 136	addi	a3, a3,  2
 137	addi	a4, a4, -2
 138	s8i	a6, a5,  0
 139	s8i	a7, a5,  1
 140	addi	a5, a5,  2
 141	j	.Ldstaligned	# dst is now aligned, return to main algorithm

 143	.align	4
 144	.global	memcpy
 145	.type   memcpy,@function
memcpy:
 147	.global	memmove
 148	.type   memmove,@function
memmove:

 151	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
 153	mov	a5, a2		# copy dst so that a2 is return value
.Lcommon:
 155	_bbsi.l	a2, 0, .Ldst1mod2	# if dst is 1 mod 2
 156	_bbsi.l	a2, 1, .Ldst2mod4	# if dst is 2 mod 4
.Ldstaligned:	# return here from .Ldst?mod? once dst is aligned
 158	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
 160	movi	a8, 3		# if source is not aligned,
 161	_bany	a3, a8, .Lsrcunaligned	# then use shifting copy
	/*
	 * Destination and source are word-aligned, use word copy.
	 */
	# copy 16 bytes per iteration for word-aligned dst and word-aligned src
 166#if XCHAL_HAVE_LOOPS
 167	loopnez	a7, .Loop1done
 168#else /* !XCHAL_HAVE_LOOPS */
 169	beqz	a7, .Loop1done
 170	slli	a8, a7, 4
 171	add	a8, a8, a3	# a8 = end of last 16B source chunk
 172#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
# Loads and stores are interleaved (two words in flight) to hide load
# latency.
 174	l32i	a6, a3,  0
 175	l32i	a7, a3,  4
 176	s32i	a6, a5,  0
 177	l32i	a6, a3,  8
 178	s32i	a7, a5,  4
 179	l32i	a7, a3, 12
 180	s32i	a6, a5,  8
 181	addi	a3, a3, 16
 182	s32i	a7, a5, 12
 183	addi	a5, a5, 16
 184#if !XCHAL_HAVE_LOOPS
 185	blt	a3, a8, .Loop1
 186#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
 188	bbci.l	a4, 3, .L2
	# copy 8 bytes
 190	l32i	a6, a3,  0
 191	l32i	a7, a3,  4
 192	addi	a3, a3,  8
 193	s32i	a6, a5,  0
 194	s32i	a7, a5,  4
 195	addi	a5, a5,  8
.L2:
 197	bbsi.l	a4, 2, .L3
 198	bbsi.l	a4, 1, .L4
 199	bbsi.l	a4, 0, .L5
 200	retw
.L3:
	# copy 4 bytes
 203	l32i	a6, a3,  0
 204	addi	a3, a3,  4
 205	s32i	a6, a5,  0
 206	addi	a5, a5,  4
 207	bbsi.l	a4, 1, .L4
 208	bbsi.l	a4, 0, .L5
 209	retw
.L4:
	# copy 2 bytes
 212	l16ui	a6, a3,  0
 213	addi	a3, a3,  2
 214	s16i	a6, a5,  0
 215	addi	a5, a5,  2
 216	bbsi.l	a4, 0, .L5
 217	retw
.L5:
	# copy 1 byte
 220	l8ui	a6, a3,  0
 221	s8i	a6, a5,  0
 222	retw

/*
 * Destination is aligned, Source is unaligned
 */

 228	.align	4
.Lsrcunaligned:
 230	_beqz	a4, .Ldone	# avoid loading anything for zero-length copies
	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 232	ssa8	a3		# set shift amount from byte offset
 233#define SIM_CHECKS_ALIGNMENT	1	/* set to 1 when running on ISS (simulator) with the
					   lint or ferret client, or 0 to save a few cycles */
 235#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
 236	and	a11, a3, a8	# save unalignment offset for below
 237	sub	a3, a3, a11	# align a3
 238#endif
 239	l32i	a6, a3, 0	# load first word
 240#if XCHAL_HAVE_LOOPS
 241	loopnez	a7, .Loop2done
 242#else /* !XCHAL_HAVE_LOOPS */
 243	beqz	a7, .Loop2done
 244	slli	a10, a7, 4
 245	add	a10, a10, a3	# a10 = end of last 16B source chunk
 246#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
# Funnel-shift copy: each output word is assembled from two adjacent
# aligned source words via src_b, carrying the last word in a6.
 248	l32i	a7, a3,  4
 249	l32i	a8, a3,  8
 250	src_b	a6, a6, a7
 251	s32i	a6, a5,  0
 252	l32i	a9, a3, 12
 253	src_b	a7, a7, a8
 254	s32i	a7, a5,  4
 255	l32i	a6, a3, 16
 256	src_b	a8, a8, a9
 257	s32i	a8, a5,  8
 258	addi	a3, a3, 16
 259	src_b	a9, a9, a6
 260	s32i	a9, a5, 12
 261	addi	a5, a5, 16
 262#if !XCHAL_HAVE_LOOPS
 263	blt	a3, a10, .Loop2
 264#endif /* !XCHAL_HAVE_LOOPS */
.Loop2done:
 266	bbci.l	a4, 3, .L12
	# copy 8 bytes
 268	l32i	a7, a3,  4
 269	l32i	a8, a3,  8
 270	src_b	a6, a6, a7
 271	s32i	a6, a5,  0
 272	addi	a3, a3,  8
 273	src_b	a7, a7, a8
 274	s32i	a7, a5,  4
 275	addi	a5, a5,  8
 276	mov	a6, a8
.L12:
 278	bbci.l	a4, 2, .L13
	# copy 4 bytes
 280	l32i	a7, a3,  4
 281	addi	a3, a3,  4
 282	src_b	a6, a6, a7
 283	s32i	a6, a5,  0
 284	addi	a5, a5,  4
 285	mov	a6, a7
.L13:
 287#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
 288	add	a3, a3, a11	# readjust a3 with correct misalignment
 289#endif
 290	bbsi.l	a4, 1, .L14
 291	bbsi.l	a4, 0, .L15
.Ldone:	retw
.L14:
	# copy 2 bytes
 295	l8ui	a6, a3,  0
 296	l8ui	a7, a3,  1
 297	addi	a3, a3,  2
 298	s8i	a6, a5,  0
 299	s8i	a7, a5,  1
 300	addi	a5, a5,  2
 301	bbsi.l	a4, 0, .L15
 302	retw
.L15:
	# copy 1 byte
 305	l8ui	a6, a3,  0
 306	s8i	a6, a5,  0
 307	retw
308
309/*
310 * Local Variables:
311 * mode:fundamental
312 * comment-start: "# "
313 * comment-start-skip: "# *"
314 * End:
315 */
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
new file mode 100644
index 000000000000..4de25134bc62
--- /dev/null
+++ b/arch/xtensa/lib/memset.S
@@ -0,0 +1,160 @@
1/*
2 * arch/xtensa/lib/memset.S
3 *
4 * ANSI C standard library function memset
5 * (Well, almost. .fixup code might return zero.)
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file "COPYING" in the main directory of
9 * this archive for more details.
10 *
11 * Copyright (C) 2002 Tensilica Inc.
12 */
13
14#include <xtensa/coreasm.h>
15
16/*
17 * void *memset(void *dst, int c, size_t length)
18 *
19 * The algorithm is as follows:
20 * Create a word with c in all byte positions
21 * If the destination is aligned,
22 * do 16B chunks with a loop, and then finish up with
23 * 8B, 4B, 2B, and 1B stores conditional on the length.
24 * If destination is unaligned, align it by conditionally
25 * setting 1B and 2B and then go to aligned case.
26 * This code tries to use fall-through branches for the common
27 * case of an aligned destination (except for the branches to
28 * the alignment labels).
29 */
30
31/* Load or store instructions that may cause exceptions use the EX macro. */
32
33#define EX(insn,reg1,reg2,offset,handler) \
349: insn reg1, reg2, offset; \
35 .section __ex_table, "a"; \
36 .word 9b, handler; \
37 .previous
38
39
40.text
41.align 4
42.global memset
43.type memset,@function
44memset:
/* Entry (windowed ABI): a2 = dst, a3 = c, a4 = length.  a2 is kept
 * untouched as the return value; a5 walks the destination.  Every
 * store goes through EX() so a faulting store lands in memset_fixup
 * below, which returns 0 instead of dst. */
45 entry sp, 16 # minimal stack frame
46 # a2/ dst, a3/ c, a4/ length
47 extui a3, a3, 0, 8 # mask to just 8 bits
48 slli a7, a3, 8 # duplicate character in all bytes of word
49 or a3, a3, a7 # ...
50 slli a7, a3, 16 # ...
51 or a3, a3, a7 # ...
52 mov a5, a2 # copy dst so that a2 is return value
53 movi a6, 3 # for alignment tests
54 bany a2, a6, .Ldstunaligned # if dst is unaligned
55.L0: # return here from .Ldstunaligned when dst is aligned
56 srli a7, a4, 4 # number of loop iterations with 16B
57 # per iteration
58 bnez a4, .Laligned
59 retw
60
61/*
62 * Destination is word-aligned.
63 */
64 # set 16 bytes per iteration for word-aligned dst
65 .align 4 # 1 mod 4 alignment for LOOPNEZ
66 .byte 0 # (0 mod 4 alignment for LBEG)
67.Laligned:
68#if XCHAL_HAVE_LOOPS
69 loopnez a7, .Loop1done
70#else /* !XCHAL_HAVE_LOOPS */
71 beqz a7, .Loop1done
72 slli a6, a7, 4
73 add a6, a6, a5 # a6 = end of last 16B chunk
74#endif /* !XCHAL_HAVE_LOOPS */
75.Loop1:
76 EX(s32i, a3, a5, 0, memset_fixup)
77 EX(s32i, a3, a5, 4, memset_fixup)
78 EX(s32i, a3, a5, 8, memset_fixup)
79 EX(s32i, a3, a5, 12, memset_fixup)
80 addi a5, a5, 16
81#if !XCHAL_HAVE_LOOPS
82 blt a5, a6, .Loop1
83#endif /* !XCHAL_HAVE_LOOPS */
84.Loop1done:
/* Tail: bits 3..0 of the (possibly adjusted) length a4 select the
 * remaining 8B/4B/2B/1B stores. */
85 bbci.l a4, 3, .L2
86 # set 8 bytes
87 EX(s32i, a3, a5, 0, memset_fixup)
88 EX(s32i, a3, a5, 4, memset_fixup)
89 addi a5, a5, 8
90.L2:
91 bbci.l a4, 2, .L3
92 # set 4 bytes
93 EX(s32i, a3, a5, 0, memset_fixup)
94 addi a5, a5, 4
95.L3:
96 bbci.l a4, 1, .L4
97 # set 2 bytes
98 EX(s16i, a3, a5, 0, memset_fixup)
99 addi a5, a5, 2
100.L4:
101 bbci.l a4, 0, .L5
102 # set 1 byte
103 EX(s8i, a3, a5, 0, memset_fixup)
104.L5:
105.Lret1:
106 retw
107
108/*
109 * Destination is unaligned
110 */
111
112.Ldstunaligned:
113 bltui a4, 8, .Lbyteset # do short copies byte by byte
114 bbci.l a5, 0, .L20 # branch if dst alignment half-aligned
115 # dst is only byte aligned
116 # set 1 byte
117 EX(s8i, a3, a5, 0, memset_fixup)
118 addi a5, a5, 1
119 addi a4, a4, -1
120 # now retest if dst aligned
121 bbci.l a5, 1, .L0 # if now aligned, return to main algorithm
122.L20:
123 # dst half-aligned
124 # set 2 bytes
125 EX(s16i, a3, a5, 0, memset_fixup)
126 addi a5, a5, 2
127 addi a4, a4, -2
128 j .L0 # dst is now aligned, return to main algorithm
129
130/*
131 * Byte by byte set
132 */
133 .align 4
134 .byte 0 # 1 mod 4 alignment for LOOPNEZ
135 # (0 mod 4 alignment for LBEG)
136.Lbyteset:
137#if XCHAL_HAVE_LOOPS
138 loopnez a4, .Lbytesetdone
139#else /* !XCHAL_HAVE_LOOPS */
140 beqz a4, .Lbytesetdone
141 add a6, a5, a4 # a6 = ending address
142#endif /* !XCHAL_HAVE_LOOPS */
143.Lbyteloop:
144 EX(s8i, a3, a5, 0, memset_fixup)
145 addi a5, a5, 1
146#if !XCHAL_HAVE_LOOPS
147 blt a5, a6, .Lbyteloop
148#endif /* !XCHAL_HAVE_LOOPS */
149.Lbytesetdone:
150 retw
151
152
153 .section .fixup, "ax"
154 .align 4
155
156/* We return zero if a failure occurred. */
157
/* Fault handler reached via the __ex_table entries emitted by EX():
 * returns NULL (0) in a2 instead of the original dst. */
158memset_fixup:
159 movi a2, 0
160 retw
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
new file mode 100644
index 000000000000..90c790f6123b
--- /dev/null
+++ b/arch/xtensa/lib/pci-auto.c
@@ -0,0 +1,352 @@
1/*
2 * arch/xtensa/kernel/pci-auto.c
3 *
4 * PCI autoconfiguration library
5 *
6 * Copyright (C) 2001 - 2005 Tensilica Inc.
7 *
8 * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
9 *
10 * Based on work from Matt Porter <mporter@mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/pci.h>
21
22#include <asm/pci-bridge.h>
23
24
25/*
26 *
27 * Setting up a PCI
28 *
29 * pci_ctrl->first_busno = <first bus number (0)>
30 * pci_ctrl->last_busno = <last bus number (0xff)>
31 * pci_ctrl->ops = <PCI config operations>
32 * pci_ctrl->map_irq = <function to return the interrupt number for a device>
33 *
34 * pci_ctrl->io_space.start = <IO space start address (PCI view)>
35 * pci_ctrl->io_space.end = <IO space end address (PCI view)>
36 * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
37 * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
38 * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
39 * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
40 *
41 * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
42 * <IO space end>, IORESOURCE_IO, "PCI host bridge");
43 * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
44 * <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
45 *
46 * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
47 *
48 * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
49 *
50 */
51
52
53/* define DEBUG to print some debugging messages. */
54
55#undef DEBUG
56
57#ifdef DEBUG
58# define DBG(x...) printk(x)
59#else
60# define DBG(x...)
61#endif
62
63static int pciauto_upper_iospc;
64static int pciauto_upper_memspc;
65
66static struct pci_dev pciauto_dev;
67static struct pci_bus pciauto_bus;
68
69/*
70 * Helper functions
71 */
72
73/* Initialize the bars of a PCI device. */
74
75static void __init
76pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
77{
78 int bar_size;
79 int bar, bar_nr;
80 int *upper_limit;
81 int found_mem64 = 0;
82
83 for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
84 bar <= bar_limit;
85 bar+=4, bar_nr++)
86 {
87 /* Tickle the BAR and get the size */
88 pci_write_config_dword(dev, bar, 0xffffffff);
89 pci_read_config_dword(dev, bar, &bar_size);
90
91 /* If BAR is not implemented go to the next BAR */
92 if (!bar_size)
93 continue;
94
95 /* Check the BAR type and set our address mask */
96 if (bar_size & PCI_BASE_ADDRESS_SPACE_IO)
97 {
98 bar_size &= PCI_BASE_ADDRESS_IO_MASK;
99 upper_limit = &pciauto_upper_iospc;
100 DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
101 }
102 else
103 {
104 if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
105 PCI_BASE_ADDRESS_MEM_TYPE_64)
106 found_mem64 = 1;
107
108 bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
109 upper_limit = &pciauto_upper_memspc;
110 DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
111 }
112
113 /* Allocate a base address (bar_size is negative!) */
114 *upper_limit = (*upper_limit + bar_size) & bar_size;
115
116 /* Write it out and update our limit */
117 pci_write_config_dword(dev, bar, *upper_limit);
118
119 /*
120 * If we are a 64-bit decoder then increment to the
121 * upper 32 bits of the bar and force it to locate
122 * in the lower 4GB of memory.
123 */
124
125 if (found_mem64)
126 pci_write_config_dword(dev, (bar+=4), 0x00000000);
127
128 DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
129 }
130}
131
132/* Initialize the interrupt number. */
133
134static void __init
135pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
136{
137 u8 pin;
138 int irq = 0;
139
140 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
141
142 /* Fix illegal pin numbers. */
143
144 if (pin == 0 || pin > 4)
145 pin = 1;
146
147 if (pci_ctrl->map_irq)
148 irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);
149
150 if (irq == -1)
151 irq = 0;
152
153 DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
154
155 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
156}
157
158
/*
 * Prepare a PCI-PCI bridge before scanning the bus behind it.
 *
 * Programs the primary/secondary bus numbers and temporarily opens the
 * subordinate range to 0xff so config cycles reach any downstream bus;
 * pciauto_postscan_setup_bridge() writes the final subordinate number.
 * Rounds both allocators down to their window granularity (1MB memory,
 * 4KB I/O) and snapshots them in *memsave / *iosave so the postscan
 * pass can tell whether the downstream scan consumed any space.  Only
 * the window LIMIT registers are written here; the matching BASEs are
 * written after the scan.
 */
159static void __init
160pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
161 int sub_bus, int *iosave, int *memsave)
162{
163 /* Configure bus number registers */
164 pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
165 pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
166 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);
167
168 /* Round memory allocator to 1MB boundary */
169 pciauto_upper_memspc &= ~(0x100000 - 1);
170 *memsave = pciauto_upper_memspc;
171
172 /* Round I/O allocator to 4KB boundary */
173 pciauto_upper_iospc &= ~(0x1000 - 1);
174 *iosave = pciauto_upper_iospc;
175
176 /* Set up memory and I/O filter limits, assume 32-bit I/O space */
177 pci_write_config_word(dev, PCI_MEMORY_LIMIT,
178 ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16)
179 pci_write_config_byte(dev, PCI_IO_LIMIT,
180 ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
181 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
182 ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
183}
184
/*
 * Finish a PCI-PCI bridge after the downstream bus has been scanned.
 *
 * Writes the final subordinate bus number, programs the memory and I/O
 * window BASE registers around the space the scan consumed (backing
 * the allocator off by one minimum window - 1MB memory, 4KB I/O - when
 * the scan used nothing), carves an additional 1MB prefetchable memory
 * window below the non-prefetchable one, and finally enables I/O,
 * memory and bus-master in the bridge's command register.
 */
185static void __init
186pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
187 int *iosave, int *memsave)
188{
189 int cmdstat;
190
191 /* Configure bus number registers */
192 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);
193
194 /*
195 * Round memory allocator to 1MB boundary.
196 * If no space used, allocate minimum.
197 */
198 pciauto_upper_memspc &= ~(0x100000 - 1);
199 if (*memsave == pciauto_upper_memspc)
200 pciauto_upper_memspc -= 0x00100000;
201
202 pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);
203
204 /* Allocate 1MB for prefetch */
205 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
206 ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
207
208 pciauto_upper_memspc -= 0x100000;
209
210 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
211 pciauto_upper_memspc >> 16);
212
213 /* Round I/O allocator to 4KB boundary */
214 pciauto_upper_iospc &= ~(0x1000 - 1);
215 if (*iosave == pciauto_upper_iospc)
216 pciauto_upper_iospc -= 0x1000;
217
218 pci_write_config_byte(dev, PCI_IO_BASE,
219 (pciauto_upper_iospc & 0x0000f000) >> 8);
220 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
221 pciauto_upper_iospc >> 16);
222
223 /* Enable memory and I/O accesses, enable bus master */
224 pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
225 pci_write_config_dword(dev, PCI_COMMAND,
226 cmdstat |
227 PCI_COMMAND_IO |
228 PCI_COMMAND_MEMORY |
229 PCI_COMMAND_MASTER);
230}
231
232/*
233 * Scan the current PCI bus.
234 */
235
236
/*
 * Depth-first autoconfiguration scan of one PCI bus.
 *
 * Walks devfn 0x00..0xfe on @current_bus, skipping the host-bridge
 * slot on the root bus and absent functions (vendor ID 0x0000/0xffff).
 * Ordinary devices get all six BARs sized and assigned, the interrupt
 * line programmed, and I/O/memory/bus-master enabled.  PCI-PCI bridges
 * get BAR0/BAR1 set up and the bus behind them scanned recursively,
 * bracketed by the pre-/postscan bridge fixups.  Returns the highest
 * bus number encountered (suitable for pci_ctrl->last_busno).
 *
 * NOTE(review): uses the file-static pciauto_dev/pciauto_bus scratch
 * objects as a roving "cursor", so this is not reentrant; the
 * recursion works only because pciauto_bus.number is rewritten after
 * each nested scan (see below).
 */
237int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
238{
239 int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
240 unsigned short vid;
241 unsigned char header_type;
242 struct pci_dev *dev = &pciauto_dev;
243
244 pciauto_dev.bus = &pciauto_bus;
245 pciauto_dev.sysdata = pci_ctrl;
246 pciauto_bus.ops = pci_ctrl->ops;
247
248 /*
249 * Fetch our I/O and memory space upper boundaries used
250 * to allocate base addresses on this pci_controller.
251 */
252
253 if (current_bus == pci_ctrl->first_busno)
254 {
255 pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
256 pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
257 }
258
259 sub_bus = current_bus;
260
261 for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
262 {
263 /* Skip our host bridge */
264 if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
265 continue;
266
/* Only probe functions 1..7 when function 0 declared itself
 * multi-function (header_type bit 7). */
267 if (PCI_FUNC(pci_devfn) && !found_multi)
268 continue;
269
270 pciauto_bus.number = current_bus;
271 pciauto_dev.devfn = pci_devfn;
272
273 /* If config space read fails from this device, move on */
274 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
275 continue;
276
277 if (!PCI_FUNC(pci_devfn))
278 found_multi = header_type & 0x80;
279 pci_read_config_word(dev, PCI_VENDOR_ID, &vid);
280
281 if (vid == 0xffff || vid == 0x0000) {
282 found_multi = 0;
283 continue;
284 }
285
286 pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);
287
288 if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {
289
290 int iosave, memsave;
291
292 DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
293 PCI_SLOT(pci_devfn));
294
295 /* Allocate PCI I/O and/or memory space */
296 pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
297
298 pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
299 &iosave, &memsave);
300 sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1);
301 pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
302 &iosave, &memsave);
/* Restore the shared scratch bus number that the recursive
 * call above just overwrote. */
303 pciauto_bus.number = current_bus;
304
305 continue;
306
307 }
308
309
310#if 0
311 /* Skip legacy mode IDE controller */
312
313 if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
314
315 unsigned char prg_iface;
316 pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
317
318 if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
319 DBG("PCI Autoconfig: Skipping legacy mode "
320 "IDE controller\n");
321 continue;
322 }
323 }
324#endif
325
326 /*
327 * Found a peripheral, enable some standard
328 * settings
329 */
330
331 pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
332 pci_write_config_dword(dev, PCI_COMMAND,
333 cmdstat |
334 PCI_COMMAND_IO |
335 PCI_COMMAND_MEMORY |
336 PCI_COMMAND_MASTER);
337 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
338
339 /* Allocate PCI I/O and/or memory space */
340 DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
341 current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );
342
343 pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
344 pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
345 }
346 return sub_bus;
347}
348
349
350
351
352
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c
new file mode 100644
index 000000000000..165b2d6effa5
--- /dev/null
+++ b/arch/xtensa/lib/strcasecmp.c
@@ -0,0 +1,32 @@
1/*
2 * linux/arch/xtensa/lib/strcasecmp.c
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Copyright (C) 2002 Tensilica Inc.
9 */
10
11#include <linux/string.h>
12
13
14/* We handle nothing here except the C locale. Since this is used in
15 only one place, on strings known to contain only 7 bit ASCII, this
16 is ok. */
17
/*
 * Case-insensitive string comparison (C locale, ASCII only).
 * Returns <0, 0 or >0 as in strcmp, comparing bytes after folding
 * 'A'..'Z' to lower case.
 */
int strcasecmp(const char *a, const char *b)
{
	int x, y;

	for (;;) {
		x = (unsigned char)*a++;
		y = (unsigned char)*b++;
		if (x >= 'A' && x <= 'Z')
			x += 'a' - 'A';
		if (y >= 'A' && y <= 'Z')
			y += 'a' - 'A';
		if (x != y || x == '\0')
			break;
	}

	return x - y;
}
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
new file mode 100644
index 000000000000..71d55df43893
--- /dev/null
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -0,0 +1,224 @@
1/*
2 * arch/xtensa/lib/strncpy_user.S
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Returns: -EFAULT if exception before terminator, N if the entire
9 * buffer filled, else strlen.
10 *
11 * Copyright (C) 2002 Tensilica Inc.
12 */
13
14#include <xtensa/coreasm.h>
15#include <linux/errno.h>
16
17/* Load or store instructions that may cause exceptions use the EX macro. */
18
19#define EX(insn,reg1,reg2,offset,handler) \
209: insn reg1, reg2, offset; \
21 .section __ex_table, "a"; \
22 .word 9b, handler; \
23 .previous
24
25/*
26 * char *__strncpy_user(char *dst, const char *src, size_t len)
27 */
28.text
29.begin literal
30.align 4
/* Byte masks in memory order: .LmaskN selects the byte at offset N of a
 * loaded word, regardless of endianness. */
31.Lmask0:
32 .byte 0xff, 0x00, 0x00, 0x00
33.Lmask1:
34 .byte 0x00, 0xff, 0x00, 0x00
35.Lmask2:
36 .byte 0x00, 0x00, 0xff, 0x00
37.Lmask3:
38 .byte 0x00, 0x00, 0x00, 0xff
39.end literal
40
41# Register use
42# a0/ return address
43# a1/ stack pointer
44# a2/ return value
45# a3/ src
46# a4/ len
47# a5/ mask0
48# a6/ mask1
49# a7/ mask2
50# a8/ mask3
51# a9/ tmp
52# a10/ tmp
53# a11/ dst
54# a12/ tmp
55
56.align 4
57.global __strncpy_user
58.type __strncpy_user,@function
59__strncpy_user:
/* Entry: a2 = dst, a3 = src, a4 = len.  Returns the number of bytes
 * stored (strlen of the copy, or len if the buffer filled), computed as
 * final dst pointer minus original dst; a faulting access returns
 * -EFAULT via the fixup handlers at the bottom. */
60 entry sp, 16 # minimal stack frame
61 # a2/ dst, a3/ src, a4/ len
62 mov a11, a2 # leave dst in return value register
63 beqz a4, .Lret # if len is zero
64 l32r a5, .Lmask0 # mask for byte 0
65 l32r a6, .Lmask1 # mask for byte 1
66 l32r a7, .Lmask2 # mask for byte 2
67 l32r a8, .Lmask3 # mask for byte 3
68 bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned
69 bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned
70.Lsrcaligned: # return here when src is word-aligned
71 srli a12, a4, 2 # number of loop iterations with 4B per loop
72 movi a9, 3
73 bnone a11, a9, .Laligned
74 j .Ldstunaligned
75
76.Lsrc1mod2: # src address is odd
77 EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
78 addi a3, a3, 1 # advance src pointer
79 EX(s8i, a9, a11, 0, fixup_s) # store byte 0
80 beqz a9, .Lret # if byte 0 is zero
81 addi a11, a11, 1 # advance dst pointer
82 addi a4, a4, -1 # decrement len
83 beqz a4, .Lret # if len is zero
84 bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
85
86.Lsrc2mod4: # src address is 2 mod 4
87 EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
88 /* 1-cycle interlock */
89 EX(s8i, a9, a11, 0, fixup_s) # store byte 0
90 beqz a9, .Lret # if byte 0 is zero
91 addi a11, a11, 1 # advance dst pointer
92 addi a4, a4, -1 # decrement len
93 beqz a4, .Lret # if len is zero
94 EX(l8ui, a9, a3, 1, fixup_l) # get byte 1
95 addi a3, a3, 2 # advance src pointer
96 EX(s8i, a9, a11, 0, fixup_s) # store byte 1
97 beqz a9, .Lret # if byte 1 is zero
98 addi a11, a11, 1 # advance dst pointer
99 addi a4, a4, -1 # decrement len
100 bnez a4, .Lsrcaligned # if len is nonzero
101.Lret:
102 sub a2, a11, a2 # compute strlen
103 retw
104
105/*
106 * dst is word-aligned, src is word-aligned
107 */
108 .align 4 # 1 mod 4 alignment for LOOPNEZ
109 .byte 0 # (0 mod 4 alignment for LBEG)
110.Laligned:
111#if XCHAL_HAVE_LOOPS
112 loopnez a12, .Loop1done
113#else
114 beqz a12, .Loop1done
115 slli a12, a12, 2
116 add a12, a12, a11 # a12 = end of last 4B chunk
117#endif
118.Loop1:
119 EX(l32i, a9, a3, 0, fixup_l) # get word from src
120 addi a3, a3, 4 # advance src pointer
121 bnone a9, a5, .Lz0 # if byte 0 is zero
122 bnone a9, a6, .Lz1 # if byte 1 is zero
123 bnone a9, a7, .Lz2 # if byte 2 is zero
124 EX(s32i, a9, a11, 0, fixup_s) # store word to dst
125 bnone a9, a8, .Lz3 # if byte 3 is zero
126 addi a11, a11, 4 # advance dst pointer
127#if !XCHAL_HAVE_LOOPS
128 blt a11, a12, .Loop1
129#endif
130
131.Loop1done:
132 bbci.l a4, 1, .L100
133 # copy 2 bytes
134 EX(l16ui, a9, a3, 0, fixup_l)
135 addi a3, a3, 2 # advance src pointer
136#ifdef __XTENSA_EB__
137 bnone a9, a7, .Lz0 # if byte 2 is zero
138 bnone a9, a8, .Lz1 # if byte 3 is zero
139#else
140 bnone a9, a5, .Lz0 # if byte 0 is zero
141 bnone a9, a6, .Lz1 # if byte 1 is zero
142#endif
143 EX(s16i, a9, a11, 0, fixup_s)
144 addi a11, a11, 2 # advance dst pointer
145.L100:
146 bbci.l a4, 0, .Lret
147 EX(l8ui, a9, a3, 0, fixup_l)
148 /* slot */
149 EX(s8i, a9, a11, 0, fixup_s)
150 beqz a9, .Lret # if byte is zero
151 addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
152 # the effect of adding 3 in .Lz3 code
153 /* fall thru to .Lz3 and "retw" */
154
155.Lz3: # byte 3 is zero
156 addi a11, a11, 3 # advance dst pointer
157 sub a2, a11, a2 # compute strlen
158 retw
159.Lz0: # byte 0 is zero
160#ifdef __XTENSA_EB__
161 movi a9, 0
162#endif /* __XTENSA_EB__ */
163 EX(s8i, a9, a11, 0, fixup_s)
164 sub a2, a11, a2 # compute strlen
165 retw
166.Lz1: # byte 1 is zero
167#ifdef __XTENSA_EB__
168 extui a9, a9, 16, 16
169#endif /* __XTENSA_EB__ */
170 EX(s16i, a9, a11, 0, fixup_s)
171 addi a11, a11, 1 # advance dst pointer
172 sub a2, a11, a2 # compute strlen
173 retw
174.Lz2: # byte 2 is zero
175#ifdef __XTENSA_EB__
176 extui a9, a9, 16, 16
177#endif /* __XTENSA_EB__ */
178 EX(s16i, a9, a11, 0, fixup_s)
179 movi a9, 0
180 EX(s8i, a9, a11, 2, fixup_s)
181 addi a11, a11, 2 # advance dst pointer
182 sub a2, a11, a2 # compute strlen
183 retw
184
185 .align 4 # 1 mod 4 alignment for LOOPNEZ
186 .byte 0 # (0 mod 4 alignment for LBEG)
187.Ldstunaligned:
188/*
189 * for now just use byte copy loop
190 */
191#if XCHAL_HAVE_LOOPS
192 loopnez a4, .Lunalignedend
193#else
194 beqz a4, .Lunalignedend
195 add a12, a11, a4 # a12 = ending address
196#endif /* XCHAL_HAVE_LOOPS */
197.Lnextbyte:
198 EX(l8ui, a9, a3, 0, fixup_l)
199 addi a3, a3, 1
200 EX(s8i, a9, a11, 0, fixup_s)
201 beqz a9, .Lunalignedend
202 addi a11, a11, 1
203#if !XCHAL_HAVE_LOOPS
204 blt a11, a12, .Lnextbyte
205#endif
206
207.Lunalignedend:
208 sub a2, a11, a2 # compute strlen
209 retw
210
211
212 .section .fixup, "ax"
213 .align 4
214
215 /* For now, just return -EFAULT. Future implementations might
216 * like to clear remaining kernel space, like the fixup
217 * implementation in memset(). Thus, we differentiate between
218 * load/store fixups. */
219
220fixup_s:
221fixup_l:
222 movi a2, -EFAULT
223 retw
224
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
new file mode 100644
index 000000000000..cdff4d670f3b
--- /dev/null
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -0,0 +1,147 @@
1/*
2 * arch/xtensa/lib/strnlen_user.S
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Returns strnlen, including trailing zero terminator.
9 * Zero indicates error.
10 *
11 * Copyright (C) 2002 Tensilica Inc.
12 */
13
14#include <xtensa/coreasm.h>
15
16/* Load or store instructions that may cause exceptions use the EX macro. */
17
18#define EX(insn,reg1,reg2,offset,handler) \
199: insn reg1, reg2, offset; \
20 .section __ex_table, "a"; \
21 .word 9b, handler; \
22 .previous
23
24/*
25 * size_t __strnlen_user(const char *s, size_t len)
26 */
27.text
28.begin literal
29.align 4
/* Byte masks in memory order: .LmaskN selects the byte at offset N of a
 * loaded word, regardless of endianness. */
30.Lmask0:
31 .byte 0xff, 0x00, 0x00, 0x00
32.Lmask1:
33 .byte 0x00, 0xff, 0x00, 0x00
34.Lmask2:
35 .byte 0x00, 0x00, 0xff, 0x00
36.Lmask3:
37 .byte 0x00, 0x00, 0x00, 0xff
38.end literal
39
40# Register use:
41# a2/ src
42# a3/ len
43# a4/ tmp
44# a5/ mask0
45# a6/ mask1
46# a7/ mask2
47# a8/ mask3
48# a9/ tmp
49# a10/ tmp
50
51.align 4
52.global __strnlen_user
53.type __strnlen_user,@function
54__strnlen_user:
/* Entry: a2 = s, a3 = len.  Returns the string length INCLUDING the
 * NUL terminator, or 0 if a load faulted (see lenfixup below).  a4 is
 * pre-biased to s-4 so all word loads use offset 4. */
55 entry sp, 16 # minimal stack frame
56 # a2/ s, a3/ len
57 addi a4, a2, -4 # because we overincrement at the end;
58 # we compensate with load offsets of 4
59 l32r a5, .Lmask0 # mask for byte 0
60 l32r a6, .Lmask1 # mask for byte 1
61 l32r a7, .Lmask2 # mask for byte 2
62 l32r a8, .Lmask3 # mask for byte 3
63 bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned
64 bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned
65
66/*
67 * String is word-aligned.
68 */
69.Laligned:
70 srli a10, a3, 2 # number of loop iterations with 4B per loop
71#if XCHAL_HAVE_LOOPS
72 loopnez a10, .Ldone
73#else
74 beqz a10, .Ldone
75 slli a10, a10, 2
76 add a10, a10, a4 # a10 = end of last 4B chunk
77#endif /* XCHAL_HAVE_LOOPS */
78.Loop:
79 EX(l32i, a9, a4, 4, lenfixup) # get next word of string
80 addi a4, a4, 4 # advance string pointer
81 bnone a9, a5, .Lz0 # if byte 0 is zero
82 bnone a9, a6, .Lz1 # if byte 1 is zero
83 bnone a9, a7, .Lz2 # if byte 2 is zero
84 bnone a9, a8, .Lz3 # if byte 3 is zero
85#if !XCHAL_HAVE_LOOPS
86 blt a4, a10, .Loop
87#endif
88
89.Ldone:
90 EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks
91
92 bbci.l a3, 1, .L100
93 # check two more bytes (bytes 0, 1 of word)
94 addi a4, a4, 2 # advance string pointer
95 bnone a9, a5, .Lz0 # if byte 0 is zero
96 bnone a9, a6, .Lz1 # if byte 1 is zero
97.L100:
98 bbci.l a3, 0, .L101
99 # check one more byte (byte 2 of word)
100 # Actually, we don't need to check. Zero or nonzero, we'll add one.
101 # Do not add an extra one for the NULL terminator since we have
102 # exhausted the original len parameter.
103 addi a4, a4, 1 # advance string pointer
104.L101:
105 sub a2, a4, a2 # compute length
106 retw
107
108# NOTE that in several places below, we point to the byte just after
109# the zero byte in order to include the NULL terminator in the count.
110
111.Lz3: # byte 3 is zero
112 addi a4, a4, 3 # point to zero byte
113.Lz0: # byte 0 is zero
114 addi a4, a4, 1 # point just beyond zero byte
115 sub a2, a4, a2 # subtract to get length
116 retw
117.Lz1: # byte 1 is zero
118 addi a4, a4, 1+1 # point just beyond zero byte
119 sub a2, a4, a2 # subtract to get length
120 retw
121.Lz2: # byte 2 is zero
122 addi a4, a4, 2+1 # point just beyond zero byte
123 sub a2, a4, a2 # subtract to get length
124 retw
125
126.L1mod2: # address is odd
127 EX(l8ui, a9, a4, 4, lenfixup) # get byte 0
128 addi a4, a4, 1 # advance string pointer
/* a4 is s-3 here, so routing through .Lz3 (+3, then +1) yields
 * length 1: just the terminator. */
129 beqz a9, .Lz3 # if byte 0 is zero
130 bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
131
132.L2mod4: # address is 2 mod 4
133 addi a4, a4, 2 # advance ptr for aligned access
134 EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string
135 bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
136 bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
137 # byte 3 is zero
138 addi a4, a4, 3+1 # point just beyond zero byte
139 sub a2, a4, a2 # subtract to get length
140 retw
141
142 .section .fixup, "ax"
143 .align 4
/* Fault handler: report length 0 on any faulting user load. */
144lenfixup:
145 movi a2, 0
146 retw
147
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
new file mode 100644
index 000000000000..265db2693cbd
--- /dev/null
+++ b/arch/xtensa/lib/usercopy.S
@@ -0,0 +1,321 @@
1/*
2 * arch/xtensa/lib/usercopy.S
3 *
4 * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
5 *
6 * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
7 * It needs to remain separate and distinct. The hal files are part
8 * of the Xtensa link-time HAL, and those files may differ per
9 * processor configuration. Patching the kernel for another
10 * processor configuration includes replacing the hal files, and we
11 * could lose the special functionality for accessing user-space
12 * memory during such a patch. We sacrifice a little code space here
13 * in favor of simplifying code maintenance.
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file "COPYING" in the main directory of
17 * this archive for more details.
18 *
19 * Copyright (C) 2002 Tensilica Inc.
20 */
21
22
23/*
24 * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
25 *
26 * The returned value is the number of bytes not copied. Implies zero
27 * is success.
28 *
29 * The general case algorithm is as follows:
30 * If the destination and source are both aligned,
31 * do 16B chunks with a loop, and then finish up with
32 * 8B, 4B, 2B, and 1B copies conditional on the length.
33 * If destination is aligned and source unaligned,
34 * do the same, but use SRC to align the source data.
35 * If destination is unaligned, align it by conditionally
36 * copying 1B and 2B and then retest.
37 * This code tries to use fall-through branches for the common
38 * case of aligned destinations (except for the branches to
39 * the alignment label).
40 *
41 * Register use:
42 * a0/ return address
43 * a1/ stack pointer
44 * a2/ return value
45 * a3/ src
46 * a4/ length
47 * a5/ dst
48 * a6/ tmp
49 * a7/ tmp
50 * a8/ tmp
51 * a9/ tmp
52 * a10/ tmp
53 * a11/ original length
54 */
55
56#include <xtensa/coreasm.h>
57
58#ifdef __XTENSA_EB__
59#define ALIGN(R, W0, W1) src R, W0, W1
60#define SSA8(R) ssa8b R
61#else
62#define ALIGN(R, W0, W1) src R, W1, W0
63#define SSA8(R) ssa8l R
64#endif
65
66/* Load or store instructions that may cause exceptions use the EX macro. */
67
68#define EX(insn,reg1,reg2,offset,handler) \
699: insn reg1, reg2, offset; \
70 .section __ex_table, "a"; \
71 .word 9b, handler; \
72 .previous
73
74
75 .text
76 .align 4
77 .global __xtensa_copy_user
78 .type __xtensa_copy_user,@function
79__xtensa_copy_user:
/* Entry: a2 = dst, a3 = src, a4 = len.  Returns the number of bytes
 * NOT copied (0 on success).  a11 preserves the original len so the
 * .fixup handlers at the bottom can compute that residue; l_fixup
 * additionally zero-fills the uncopied destination tail via memset. */
80 entry sp, 16 # minimal stack frame
81 # a2/ dst, a3/ src, a4/ len
82 mov a5, a2 # copy dst so that a2 is return value
83 mov a11, a4 # preserve original len for error case
84.Lcommon:
85 bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
86 bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
87.Ldstaligned: # return here from .Ldstunaligned when dst is aligned
88 srli a7, a4, 4 # number of loop iterations with 16B
89 # per iteration
90 movi a8, 3 # if source is also aligned,
91 bnone a3, a8, .Laligned # then use word copy
92 SSA8( a3) # set shift amount from byte offset
93 bnez a4, .Lsrcunaligned
94 movi a2, 0 # return success for len==0
95 retw
96
97/*
98 * Destination is unaligned
99 */
100
101.Ldst1mod2: # dst is only byte aligned
102 bltui a4, 7, .Lbytecopy # do short copies byte by byte
103
104 # copy 1 byte
105 EX(l8ui, a6, a3, 0, l_fixup)
106 addi a3, a3, 1
107 EX(s8i, a6, a5, 0, s_fixup)
108 addi a5, a5, 1
109 addi a4, a4, -1
110 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
111 # return to main algorithm
112.Ldst2mod4: # dst 16-bit aligned
113 # copy 2 bytes
114 bltui a4, 6, .Lbytecopy # do short copies byte by byte
115 EX(l8ui, a6, a3, 0, l_fixup)
116 EX(l8ui, a7, a3, 1, l_fixup)
117 addi a3, a3, 2
118 EX(s8i, a6, a5, 0, s_fixup)
119 EX(s8i, a7, a5, 1, s_fixup)
120 addi a5, a5, 2
121 addi a4, a4, -2
122 j .Ldstaligned # dst is now aligned, return to main algorithm
123
124/*
125 * Byte by byte copy
126 */
127 .align 4
128 .byte 0 # 1 mod 4 alignment for LOOPNEZ
129 # (0 mod 4 alignment for LBEG)
130.Lbytecopy:
131#if XCHAL_HAVE_LOOPS
132 loopnez a4, .Lbytecopydone
133#else /* !XCHAL_HAVE_LOOPS */
134 beqz a4, .Lbytecopydone
135 add a7, a3, a4 # a7 = end address for source
136#endif /* !XCHAL_HAVE_LOOPS */
137.Lnextbyte:
138 EX(l8ui, a6, a3, 0, l_fixup)
139 addi a3, a3, 1
140 EX(s8i, a6, a5, 0, s_fixup)
141 addi a5, a5, 1
142#if !XCHAL_HAVE_LOOPS
143 blt a3, a7, .Lnextbyte
144#endif /* !XCHAL_HAVE_LOOPS */
145.Lbytecopydone:
146 movi a2, 0 # return success for len bytes copied
147 retw
148
149/*
150 * Destination and source are word-aligned.
151 */
152 # copy 16 bytes per iteration for word-aligned dst and word-aligned src
153 .align 4 # 1 mod 4 alignment for LOOPNEZ
154 .byte 0 # (0 mod 4 alignment for LBEG)
155.Laligned:
156#if XCHAL_HAVE_LOOPS
157 loopnez a7, .Loop1done
158#else /* !XCHAL_HAVE_LOOPS */
159 beqz a7, .Loop1done
160 slli a8, a7, 4
161 add a8, a8, a3 # a8 = end of last 16B source chunk
162#endif /* !XCHAL_HAVE_LOOPS */
163.Loop1:
164 EX(l32i, a6, a3, 0, l_fixup)
165 EX(l32i, a7, a3, 4, l_fixup)
166 EX(s32i, a6, a5, 0, s_fixup)
167 EX(l32i, a6, a3, 8, l_fixup)
168 EX(s32i, a7, a5, 4, s_fixup)
169 EX(l32i, a7, a3, 12, l_fixup)
170 EX(s32i, a6, a5, 8, s_fixup)
171 addi a3, a3, 16
172 EX(s32i, a7, a5, 12, s_fixup)
173 addi a5, a5, 16
174#if !XCHAL_HAVE_LOOPS
175 blt a3, a8, .Loop1
176#endif /* !XCHAL_HAVE_LOOPS */
177.Loop1done:
178 bbci.l a4, 3, .L2
179 # copy 8 bytes
180 EX(l32i, a6, a3, 0, l_fixup)
181 EX(l32i, a7, a3, 4, l_fixup)
182 addi a3, a3, 8
183 EX(s32i, a6, a5, 0, s_fixup)
184 EX(s32i, a7, a5, 4, s_fixup)
185 addi a5, a5, 8
186.L2:
187 bbci.l a4, 2, .L3
188 # copy 4 bytes
189 EX(l32i, a6, a3, 0, l_fixup)
190 addi a3, a3, 4
191 EX(s32i, a6, a5, 0, s_fixup)
192 addi a5, a5, 4
193.L3:
194 bbci.l a4, 1, .L4
195 # copy 2 bytes
196 EX(l16ui, a6, a3, 0, l_fixup)
197 addi a3, a3, 2
198 EX(s16i, a6, a5, 0, s_fixup)
199 addi a5, a5, 2
200.L4:
201 bbci.l a4, 0, .L5
202 # copy 1 byte
203 EX(l8ui, a6, a3, 0, l_fixup)
204 EX(s8i, a6, a5, 0, s_fixup)
205.L5:
206 movi a2, 0 # return success for len bytes copied
207 retw
208
209/*
210 * Destination is aligned, Source is unaligned
211 */
212
213 .align 4
214 .byte 0 # 1 mod 4 alignment for LOOPNEZ
215 # (0 mod 4 alignment for LBEG)
216.Lsrcunaligned:
217 # copy 16 bytes per iteration for word-aligned dst and unaligned src
218 and a10, a3, a8 # save unalignment offset for below
219 sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
220 EX(l32i, a6, a3, 0, l_fixup) # load first word
221#if XCHAL_HAVE_LOOPS
222 loopnez a7, .Loop2done
223#else /* !XCHAL_HAVE_LOOPS */
224 beqz a7, .Loop2done
/* NOTE(review): these two instructions clobber a10, which still holds
 * the source-misalignment offset saved at .Lsrcunaligned and which is
 * consumed at .L13 below ("add a3, a3, a10").  The sibling code in
 * memcopy.S keeps the offset in a separate register (a11) for exactly
 * this reason; a11 is unavailable here (original len for the fixups),
 * so the !XCHAL_HAVE_LOOPS build needs a different scratch register or
 * a rework - TODO confirm and fix. */
225 slli a10, a7, 4
226 add a10, a10, a3 # a10 = end of last 16B source chunk
227#endif /* !XCHAL_HAVE_LOOPS */
228.Loop2:
229 EX(l32i, a7, a3, 4, l_fixup)
230 EX(l32i, a8, a3, 8, l_fixup)
231 ALIGN( a6, a6, a7)
232 EX(s32i, a6, a5, 0, s_fixup)
233 EX(l32i, a9, a3, 12, l_fixup)
234 ALIGN( a7, a7, a8)
235 EX(s32i, a7, a5, 4, s_fixup)
236 EX(l32i, a6, a3, 16, l_fixup)
237 ALIGN( a8, a8, a9)
238 EX(s32i, a8, a5, 8, s_fixup)
239 addi a3, a3, 16
240 ALIGN( a9, a9, a6)
241 EX(s32i, a9, a5, 12, s_fixup)
242 addi a5, a5, 16
243#if !XCHAL_HAVE_LOOPS
244 blt a3, a10, .Loop2
245#endif /* !XCHAL_HAVE_LOOPS */
246.Loop2done:
247 bbci.l a4, 3, .L12
248 # copy 8 bytes
249 EX(l32i, a7, a3, 4, l_fixup)
250 EX(l32i, a8, a3, 8, l_fixup)
251 ALIGN( a6, a6, a7)
252 EX(s32i, a6, a5, 0, s_fixup)
253 addi a3, a3, 8
254 ALIGN( a7, a7, a8)
255 EX(s32i, a7, a5, 4, s_fixup)
256 addi a5, a5, 8
257 mov a6, a8
258.L12:
259 bbci.l a4, 2, .L13
260 # copy 4 bytes
261 EX(l32i, a7, a3, 4, l_fixup)
262 addi a3, a3, 4
263 ALIGN( a6, a6, a7)
264 EX(s32i, a6, a5, 0, s_fixup)
265 addi a5, a5, 4
266 mov a6, a7
267.L13:
268 add a3, a3, a10 # readjust a3 with correct misalignment
269 bbci.l a4, 1, .L14
270 # copy 2 bytes
271 EX(l8ui, a6, a3, 0, l_fixup)
272 EX(l8ui, a7, a3, 1, l_fixup)
273 addi a3, a3, 2
274 EX(s8i, a6, a5, 0, s_fixup)
275 EX(s8i, a7, a5, 1, s_fixup)
276 addi a5, a5, 2
277.L14:
278 bbci.l a4, 0, .L15
279 # copy 1 byte
280 EX(l8ui, a6, a3, 0, l_fixup)
281 EX(s8i, a6, a5, 0, s_fixup)
282.L15:
283 movi a2, 0 # return success for len bytes copied
284 retw
285
286
287 .section .fixup, "ax"
288 .align 4
289
290/* a2 = original dst; a5 = current dst; a11= original len
291 * bytes_copied = a5 - a2
292 * retval = bytes_not_copied = original len - bytes_copied
293 * retval = a11 - (a5 - a2)
294 *
295 * Clearing the remaining pieces of kernel memory plugs security
296 * holes. This functionality is the equivalent of the *_zeroing
297 * functions that some architectures provide.
298 */
299
300.Lmemset:
301 .word memset
302
/* Store fault: just report how many bytes were left. */
303s_fixup:
304 sub a2, a5, a2 /* a2 <-- bytes copied */
305 sub a2, a11, a2 /* a2 <-- bytes not copied */
306 retw
307
/* Load fault: report the residue AND zero the uncopied destination
 * tail (see comment above). */
308l_fixup:
309 sub a2, a5, a2 /* a2 <-- bytes copied */
310 sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
311
312 /* void *memset(void *s, int c, size_t n); */
313 mov a6, a5 /* s */
314 movi a7, 0 /* c */
315 mov a8, a2 /* n */
316 l32r a4, .Lmemset
317 callx4 a4
318 /* Ignore memset return value in a6. */
319 /* a2 still contains bytes not copied. */
320 retw
321
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 000000000000..a5aed5932d7b
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,13 @@
#
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...

obj-y	 := init.o fault.o tlb.o misc.o
# The lists below are intentionally empty; nothing in this directory is
# built as a module.
obj-m	 :=
obj-n	 :=
obj-	 :=
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 000000000000..a945a33e85a1
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,241 @@
1// TODO VM_EXEC flag work-around, cache aliasing
2/*
3 * arch/xtensa/mm/fault.c
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 */
14
15#include <linux/mm.h>
16#include <linux/module.h>
17#include <asm/mmu_context.h>
18#include <asm/cacheflush.h>
19#include <asm/hardirq.h>
20#include <asm/uaccess.h>
21#include <asm/system.h>
22#include <asm/pgalloc.h>
23
24unsigned long asid_cache = ASID_FIRST_VERSION;
25void bad_page_fault(struct pt_regs*, unsigned long, int);
26
27/*
28 * This routine handles page faults. It determines the address,
29 * and the problem, and then passes it off to one of the appropriate
30 * routines.
31 *
32 * Note: does not handle Miss and MultiHit.
33 */
34
35void do_page_fault(struct pt_regs *regs)
36{
37 struct vm_area_struct * vma;
38 struct mm_struct *mm = current->mm;
39 unsigned int exccause = regs->exccause;
40 unsigned int address = regs->excvaddr;
41 siginfo_t info;
42
43 int is_write, is_exec;
44
45 info.si_code = SEGV_MAPERR;
46
47 /* We fault-in kernel-space virtual memory on-demand. The
48 * 'reference' page table is init_mm.pgd.
49 */
50 if (address >= TASK_SIZE && !user_mode(regs))
51 goto vmalloc_fault;
52
53 /* If we're in an interrupt or have no user
54 * context, we must not take the fault..
55 */
56 if (in_atomic() || !mm) {
57 bad_page_fault(regs, address, SIGSEGV);
58 return;
59 }
60
61 is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
62 is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
63 exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
64 exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
65
66#if 0
67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
68 address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
69#endif
70
71 down_read(&mm->mmap_sem);
72 vma = find_vma(mm, address);
73
74 if (!vma)
75 goto bad_area;
76 if (vma->vm_start <= address)
77 goto good_area;
78 if (!(vma->vm_flags & VM_GROWSDOWN))
79 goto bad_area;
80 if (expand_stack(vma, address))
81 goto bad_area;
82
83 /* Ok, we have a good vm_area for this memory access, so
84 * we can handle it..
85 */
86
87good_area:
88 info.si_code = SEGV_ACCERR;
89
90 if (is_write) {
91 if (!(vma->vm_flags & VM_WRITE))
92 goto bad_area;
93 } else if (is_exec) {
94 if (!(vma->vm_flags & VM_EXEC))
95 goto bad_area;
96 } else /* Allow read even from write-only pages. */
97 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
98 goto bad_area;
99
100 /* If for any reason at all we couldn't handle the fault,
101 * make sure we exit gracefully rather than endlessly redo
102 * the fault.
103 */
104survive:
105 switch (handle_mm_fault(mm, vma, address, is_write)) {
106 case VM_FAULT_MINOR:
107 current->min_flt++;
108 break;
109 case VM_FAULT_MAJOR:
110 current->maj_flt++;
111 break;
112 case VM_FAULT_SIGBUS:
113 goto do_sigbus;
114 case VM_FAULT_OOM:
115 goto out_of_memory;
116 default:
117 BUG();
118 }
119
120 up_read(&mm->mmap_sem);
121 return;
122
123 /* Something tried to access memory that isn't in our memory map..
124 * Fix it, but check if it's kernel or user first..
125 */
126bad_area:
127 up_read(&mm->mmap_sem);
128 if (user_mode(regs)) {
129 current->thread.bad_vaddr = address;
130 current->thread.error_code = is_write;
131 info.si_signo = SIGSEGV;
132 info.si_errno = 0;
133 /* info.si_code has been set above */
134 info.si_addr = (void *) address;
135 force_sig_info(SIGSEGV, &info, current);
136 return;
137 }
138 bad_page_fault(regs, address, SIGSEGV);
139 return;
140
141
142 /* We ran out of memory, or some other thing happened to us that made
143 * us unable to handle the page fault gracefully.
144 */
145out_of_memory:
146 up_read(&mm->mmap_sem);
147 if (current->pid == 1) {
148 yield();
149 down_read(&mm->mmap_sem);
150 goto survive;
151 }
152 printk("VM: killing process %s\n", current->comm);
153 if (user_mode(regs))
154 do_exit(SIGKILL);
155 bad_page_fault(regs, address, SIGKILL);
156 return;
157
158do_sigbus:
159 up_read(&mm->mmap_sem);
160
161 /* Send a sigbus, regardless of whether we were in kernel
162 * or user mode.
163 */
164 current->thread.bad_vaddr = address;
165 info.si_code = SIGBUS;
166 info.si_errno = 0;
167 info.si_code = BUS_ADRERR;
168 info.si_addr = (void *) address;
169 force_sig_info(SIGBUS, &info, current);
170
171 /* Kernel mode? Handle exceptions or die */
172 if (!user_mode(regs))
173 bad_page_fault(regs, address, SIGBUS);
174
175vmalloc_fault:
176 {
177 /* Synchronize this task's top level page-table
178 * with the 'reference' page table.
179 */
180 struct mm_struct *act_mm = current->active_mm;
181 int index = pgd_index(address);
182 pgd_t *pgd, *pgd_k;
183 pmd_t *pmd, *pmd_k;
184 pte_t *pte_k;
185
186 if (act_mm == NULL)
187 goto bad_page_fault;
188
189 pgd = act_mm->pgd + index;
190 pgd_k = init_mm.pgd + index;
191
192 if (!pgd_present(*pgd_k))
193 goto bad_page_fault;
194
195 pgd_val(*pgd) = pgd_val(*pgd_k);
196
197 pmd = pmd_offset(pgd, address);
198 pmd_k = pmd_offset(pgd_k, address);
199 if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
200 goto bad_page_fault;
201
202 pmd_val(*pmd) = pmd_val(*pmd_k);
203 pte_k = pte_offset_kernel(pmd_k, address);
204
205 if (!pte_present(*pte_k))
206 goto bad_page_fault;
207 return;
208 }
209bad_page_fault:
210 bad_page_fault(regs, address, SIGKILL);
211 return;
212}
213
214
/*
 * Handle a fault the page-fault handler could not resolve on behalf of
 * the kernel: either redirect execution to an exception-table fixup, or
 * oops and terminate with signal 'sig'.
 */
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#if 1
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
				current->comm, regs->pc, entry->fixup);
#endif
		/* Record the faulting address and resume at the fixup. */
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
241
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000000..56aace84aaeb
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,551 @@
1/*
2 * arch/xtensa/mm/init.c
3 *
4 * Derived from MIPS, PPC.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Marc Gauthier
15 * Kevin Chea
16 */
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include <linux/signal.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/errno.h>
24#include <linux/string.h>
25#include <linux/types.h>
26#include <linux/ptrace.h>
27#include <linux/bootmem.h>
28#include <linux/swap.h>
29
30#include <asm/pgtable.h>
31#include <asm/bootparam.h>
32#include <asm/mmu_context.h>
33#include <asm/tlb.h>
34#include <asm/tlbflush.h>
35#include <asm/page.h>
36#include <asm/pgalloc.h>
37#include <asm/pgtable.h>
38
39
40#define DEBUG 0
41
42DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
43//static DEFINE_SPINLOCK(tlb_lock);
44
45/*
46 * This flag is used to indicate that the page was mapped and modified in
47 * kernel space, so the cache is probably dirty at that address.
48 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
49 * synchronizes the caches if this bit is set.
50 */
51
52#define PG_cache_clean PG_arch_1
53
54/* References to section boundaries */
55
56extern char _ftext, _etext, _fdata, _edata, _rodata_end;
57extern char __init_begin, __init_end;
58
59/*
60 * mem_reserve(start, end, must_exist)
61 *
62 * Reserve some memory from the memory pool.
63 *
64 * Parameters:
65 * start Start of region,
66 * end End of region,
67 * must_exist Must exist in memory pool.
68 *
69 * Returns:
70 * 0 (memory area couldn't be mapped)
71 * -1 (success)
72 */
73
/* Carve [start, end) out of the sysmem bank table so the region is never
 * handed to the bootmem allocator.  Note the inverted return convention
 * documented above: 0 = nothing reserved, -1 = success.
 * NOTE(review): only the first overlapping bank is adjusted — confirm
 * callers never reserve a range spanning two banks.
 */
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
	int i;

	if (start == end)
		return 0;

	/* Round the region out to whole pages. */
	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	/* Find the first bank that overlaps the region. */
	for (i = 0; i < sysmem.nr_banks; i++)
		if (start < sysmem.bank[i].end
		    && end >= sysmem.bank[i].start)
			break;

	if (i == sysmem.nr_banks) {
		if (must_exist)
			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
				"not in any region!\n", start, end);
		return 0;
	}

	if (start > sysmem.bank[i].start) {
		if (end < sysmem.bank[i].end) {
			/* split entry */
			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
				panic("meminfo overflow\n");
			sysmem.bank[sysmem.nr_banks].start = end;
			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
			sysmem.nr_banks++;
		}
		/* Truncate the bank below the reserved region. */
		sysmem.bank[i].end = start;
	} else {
		if (end < sysmem.bank[i].end)
			/* Reservation covers only the head of the bank. */
			sysmem.bank[i].start = end;
		else {
			/* remove entry: move the last bank into this slot */
			sysmem.nr_banks--;
			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
			sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
		}
	}
	return -1;
}
118
119
120/*
121 * Initialize the bootmem system and give it all the memory we have available.
122 */
123
void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	/* Scan the bank table for the lowest and highest page frames. */
	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

	for (i=0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		/* NOTE(review): bank end is treated as exclusive here;
		 * confirm the rounding of (end - 1) matches that.
		 */
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

	/* Cap the low-memory pfn at MAX_LOW_MEMORY. */
	max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
		max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	/* First-fit: take the first bank large enough for the bitmap. */
	for (i=0; i<sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
					 bootmap_start >> PAGE_SHIFT,
					 max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map */

	for (i=0; i<sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);

}
176
177
/* Set up the zone sizes and the kernel page directory. */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int i;

	/* All pages are DMA-able, so we put them all in the DMA zone. */

	zones_size[ZONE_DMA] = max_low_pfn;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	/* Initialize the kernel's page tables. */

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	free_area_init(zones_size);
}
199
200/*
201 * Flush the mmu and reset associated register to default values.
202 */
203
void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensure
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_ALL_RESERVED);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}
228
229/*
230 * Initialize memory pages.
231 */
232
233void __init mem_init(void)
234{
235 unsigned long codesize, reservedpages, datasize, initsize;
236 unsigned long highmemsize, tmp, ram;
237
238 max_mapnr = num_physpages = max_low_pfn;
239 high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
240 highmemsize = 0;
241
242#if CONFIG_HIGHMEM
243#error HIGHGMEM not implemented in init.c
244#endif
245
246 totalram_pages += free_all_bootmem();
247
248 reservedpages = ram = 0;
249 for (tmp = 0; tmp < max_low_pfn; tmp++) {
250 ram++;
251 if (PageReserved(mem_map+tmp))
252 reservedpages++;
253 }
254
255 codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
256 datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
257 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
258
259 printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
260 "%ldk data, %ldk init %ldk highmem)\n",
261 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
262 ram << (PAGE_SHIFT-10),
263 codesize >> 10,
264 reservedpages << (PAGE_SHIFT-10),
265 datasize >> 10,
266 initsize >> 10,
267 highmemsize >> 10);
268}
269
270void
271free_reserved_mem(void *start, void *end)
272{
273 for (; start < end; start += PAGE_SIZE) {
274 ClearPageReserved(virt_to_page(start));
275 set_page_count(virt_to_page(start), 1);
276 free_page((unsigned long)start);
277 totalram_pages++;
278 }
279}
280
281#ifdef CONFIG_BLK_DEV_INITRD
282extern int initrd_is_mapped;
283
/* Free the initrd pages once the ramdisk contents are no longer needed;
 * skipped when the bootloader placed the initrd outside mapped RAM.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (initrd_is_mapped) {
		free_reserved_mem((void*)start, (void*)end);
		printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
	}
}
291#endif
292
/* Release the __init text/data section after boot completes. */
void free_initmem(void)
{
	free_reserved_mem(&__init_begin, &__init_end);
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
299
300void show_mem(void)
301{
302 int i, free = 0, total = 0, reserved = 0;
303 int shared = 0, cached = 0;
304
305 printk("Mem-info:\n");
306 show_free_areas();
307 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
308 i = max_mapnr;
309 while (i-- > 0) {
310 total++;
311 if (PageReserved(mem_map+i))
312 reserved++;
313 else if (PageSwapCache(mem_map+i))
314 cached++;
315 else if (!page_count(mem_map + i))
316 free++;
317 else
318 shared += page_count(mem_map + i) - 1;
319 }
320 printk("%d pages of RAM\n", total);
321 printk("%d reserved pages\n", reserved);
322 printk("%d pages shared\n", shared);
323 printk("%d pages swap cached\n",cached);
324 printk("%d free pages\n", free);
325}
326
327/* ------------------------------------------------------------------------- */
328
329#if (DCACHE_WAY_SIZE > PAGE_SIZE)
330
331/*
332 * With cache aliasing, the page color of the page in kernel space and user
333 * space might mismatch. We temporarily map the page to a different virtual
334 * address with the same color and clear the page there.
335 */
336
/* Clear a page destined for user address 'vaddr'.  If the user-space
 * color differs from the kernel mapping's color, clear the page through
 * a temporary color-matching DTLB mapping instead, so the dirty cache
 * lines land at the color user space will use.
 */
void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{

	/* There shouldn't be any entries for this page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));

	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
		unsigned long v, p;

		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */

		spin_lock(&tlb_lock);

		/* Build a kernel PTE for the page and install it at a
		 * virtual address with the user page's color.
		 */
		p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
		kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__("wdtlb  %0,%1; dsync" : :"a" (p), "a" (v));

		clear_page(kaddr);

		spin_unlock(&tlb_lock);
	} else {
		clear_page(kaddr);
	}

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &page->flags);
}
367
368/*
369 * With cache aliasing, we have to make sure that the page color of the page
370 * in kernel space matches that of the virtual user address before we read
 * the page. If the page colors differ, we create a temporary DTLB entry with
 * the correct page color and use this 'temporary' address as the source.
373 * We then use the same approach as in clear_user_page and copy the data
374 * to the kernel space and clear the PG_cache_clean bit to synchronize caches
375 * later.
376 *
377 * Note:
378 * Instead of using another 'way' for the temporary DTLB entry, we could
379 * probably use the same entry that points to the kernel address (after
380 * saving the original value and restoring it when we are done).
381 */
382
/* Copy a page to a user mapping at 'vaddr', using temporary DTLB
 * aliases (ALIAS0 for the source, ALIAS1 for the destination) whenever
 * a kernel address's color mismatches the user color.  See the comment
 * block above for the rationale.
 */
void copy_user_page(void* to, void* from, unsigned long vaddr,
    struct page* to_page)
{
	/* There shouldn't be any entries for the new page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

	spin_lock(&tlb_lock);

	if (!PAGE_COLOR_EQ(vaddr, from)) {
		unsigned long v, p, t;

		/* Probe the existing DTLB entry for 'from' to recover its
		 * PTE, then re-map it at a color-matching address.
		 */
		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
				      : "=a"(p), "=a"(t) : "a"(from));
		from = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}

	if (!PAGE_COLOR_EQ(vaddr, to)) {
		unsigned long v, p;

		p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
		to = (void*)PAGE_COLOR_MAP1(vaddr);
		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}
	copy_page(to, from);

	spin_unlock(&tlb_lock);

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &to_page->flags);
}
418
419
420
421/*
422 * Any time the kernel writes to a user page cache page, or it is about to
423 * read from a page cache page this routine is called.
424 *
425 * Note:
426 * The kernel currently only provides one architecture bit in the page
427 * flags that we use for I$/D$ coherency. Maybe, in future, we can
 *       use a separate bit for deferred dcache aliasing:
429 * If the page is not mapped yet, we only need to set a flag,
430 * if mapped, we need to invalidate the page.
431 */
432// FIXME: we probably need this for WB caches not only for Page Coloring..
433
/* Write back and invalidate the dcache lines for a page-cache page, and
 * invalidate the icache copy unless the page is already marked clean.
 * NOTE(review): 'mapping' is only referenced by the #if 0 branch, so it
 * currently triggers a set-but-unused warning.
 */
void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page);

	__flush_invalidate_dcache_page_phys(addr);

	/* Nothing more to do if I$/D$ were already synchronized. */
	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
		__invalidate_icache_page_phys(addr);
}
452
/* Coarse implementation: flush and invalidate both caches entirely
 * instead of just the [s, e) range.
 */
void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
		       unsigned long e)
{
	__flush_invalidate_cache_all();
}
458
/* Remove cache lines for one page of a user mapping before it changes.
 * NOTE(review): BUG()s if called for a non-current mm — confirm all
 * callers only flush the active address space.
 */
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
		      unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Remove any entry for the old mapping. */

	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = __pa(page_address(page));
		__flush_invalidate_dcache_page_phys(addr);
		/* Executable mappings also need the icache copy dropped. */
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_phys(addr);
	} else {
		BUG();
	}
}
475
476#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
477
478
479pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
480{
481 pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
482 if (likely(pte)) {
483 pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
484 int i;
485 for (i = 0; i < 1024; i++, ptep++)
486 pte_clear(mm, addr, ptep);
487 }
488 return pte;
489}
490
491struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
492{
493 struct page *page;
494
495 page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
496
497 if (likely(page)) {
498 pte_t* ptep = kmap_atomic(page, KM_USER0);
499 int i;
500
501 for (i = 0; i < 1024; i++, ptep++)
502 pte_clear(mm, addr, ptep);
503
504 kunmap_atomic(ptep, KM_USER0);
505 }
506 return page;
507}
508
509
510/*
511 * Handle D$/I$ coherency.
512 *
513 * Note:
514 * We only have one architecture bit for the page flags, so we cannot handle
515 * cache aliasing, yet.
516 */
517
/* Install the new PTE in the hardware TLBs and, for executable
 * mappings, synchronize the data and instruction caches (tracked via
 * the PG_cache_clean page flag).
 */
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	unsigned long vaddr = addr & PAGE_MASK;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Drop any stale TLB entries for this address. */
	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

	/* We have a new mapping. Use it. */

	write_dtlb_entry(pte, dtlb_probe(addr));

	/* If the processor can execute from this page, synchronize D$/I$.*/

	if ((vma->vm_flags & VM_EXEC) != 0) {

		write_itlb_entry(pte, itlb_probe(addr));

		/* Synchronize caches, if not clean. */

		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
			__flush_dcache_page(vaddr);
			__invalidate_icache_page(vaddr);
		}
	}
}
551
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 000000000000..327c0f17187c
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,374 @@
1/*
2 * arch/xtensa/mm/misc.S
3 *
4 * Miscellaneous assembly functions.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15/* Note: we might want to implement some of the loops as zero-overhead-loops,
16 * where applicable and if supported by the processor.
17 */
18
19#include <linux/linkage.h>
20#include <asm/page.h>
21#include <asm/pgtable.h>
22
23#include <xtensa/cacheasm.h>
24#include <xtensa/cacheattrasm.h>
25
26/* clear_page (page) */
27
/* Zero one page at a2, 32 bytes per iteration (relies on PAGE_SIZE
 * being a multiple of 32).
 */
ENTRY(clear_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE	/* a4 = one past the end of the page */
	movi	a3, 0			/* zero pattern */

1:	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	addi	a2, a2, 32
	blt	a2, a4, 1b

	retw
45
46/*
47 * copy_page (void *to, void *from)
48 * a2 a3
49 */
50
/* Copy one page from a3 to a2, 32 bytes per iteration using three
 * rotating scratch registers (a5-a7) to overlap loads and stores.
 */
ENTRY(copy_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE	/* a4 = end of destination page */

1:	l32i	a5, a3, 0
	l32i	a6, a3, 4
	l32i	a7, a3, 8
	s32i	a5, a2, 0
	s32i	a6, a2, 4
	s32i	a7, a2, 8
	l32i	a5, a3, 12
	l32i	a6, a3, 16
	l32i	a7, a3, 20
	s32i	a5, a2, 12
	s32i	a6, a2, 16
	s32i	a7, a2, 20
	l32i	a5, a3, 24
	l32i	a6, a3, 28
	s32i	a5, a2, 24
	s32i	a6, a2, 28
	addi	a2, a2, 32
	addi	a3, a3, 32
	blt	a2, a4, 1b

	retw
76
77
78/*
79 * void __flush_invalidate_cache_all(void)
80 */
81
/* The routines below are thin wrappers around the cacheasm macros from
 * <xtensa/cacheasm.h>.  The registers handed to the macros serve as
 * scratch registers for the macro expansions; only a2 (and a3 for the
 * region variants) carry the C-level arguments.
 */
ENTRY(__flush_invalidate_cache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	icache_invalidate_all a2, a3
	retw

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)
	entry	sp, 16
	icache_invalidate_all a2, a3
	retw

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	retw


/*
 * void __flush_invalidate_cache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_cache_range)
	entry	sp, 16
	/* Copy start/size first: presumably the region macros modify
	 * their register arguments — confirm against cacheasm.h.
	 */
	mov	a4, a2
	mov	a5, a3
	dcache_writeback_inv_region a4, a5, a6
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)
	entry	sp, 16
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)
	entry	sp, 16
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)
	entry	sp, 16
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)
	entry	sp, 16
	dcache_invalidate_all a2, a3
	retw
194
195/*
196 * void __flush_invalidate_dcache_page_phys(ulong start)
197 */
198
/* Scan every dcache line tag from the top of the cache down; any line
 * whose tag matches the given physical page is written back and
 * invalidated (diwbi).
 */
ENTRY(__flush_invalidate_dcache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_DCACHE_SIZE	/* a3 = cache-index cursor, counts down */
	movi	a4, PAGE_MASK | 1	/* tag mask: page-frame bits plus low bit
					 * (presumably the valid bit — confirm
					 * against the Xtensa ISA) */
	addi	a2, a2, 1		/* expected tag value for this page */

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3			/* read the cache-line tag at index a3 */
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f		/* line belongs to the target page */
	bgeui	a3, 2, 1b
	retw

2:	diwbi	a3, 0			/* write back + invalidate, resume scan */
	bgeui	a3, 2, 1b
	retw
218
/* Debug helpers: each scans one quarter of the dcache tags for a line
 * belonging to the given physical page.  check_dcache_low0 returns on a
 * hit; the other three spin forever at label 2 when a hit is found —
 * NOTE(review): the infinite loops look like deliberate debug traps so
 * the cache state can be inspected; confirm before reuse.
 */
ENTRY(check_dcache_low0)
	entry	sp, 16

	movi	a3, XCHAL_DCACHE_SIZE / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	j 2b

ENTRY(check_dcache_high0)
	entry	sp, 16

	/* a5 counts the number of lines still to scan; a3 is the index. */
	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE / 2
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j 2b

ENTRY(check_dcache_low1)
	entry	sp, 16

	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE * 3 / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j 2b

ENTRY(check_dcache_high1)
	entry	sp, 16

	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j 2b
296
297
298/*
299 * void __invalidate_icache_page_phys(ulong start)
300 */
301
/* Same tag-scan scheme as __flush_invalidate_dcache_page_phys, but for
 * the icache: lict reads the line tag, iii invalidates a matching line.
 */
ENTRY(__invalidate_icache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_SIZE	/* a3 = cache-index cursor, counts down */
	movi	a4, PAGE_MASK | 1	/* tag compare mask */
	addi	a2, a2, 1		/* expected tag value for this page */

1:	addi	a3, a3, -XCHAL_ICACHE_LINESIZE

	lict	a6, a3
	isync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	iii	a3, 0			/* invalidate line, resume the scan */
	bgeui	a3, 2, 1b
	retw
321
322
#if 0

	/* Dead code kept for reference: index-based (way-by-way) page
	 * invalidation variants.  The first fragment below has no ENTRY
	 * header and is not reachable.
	 */
	movi	a3, XCHAL_DCACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	diwbi	a5, 0
	diwbi	a5, XCHAL_DCACHE_LINESIZE
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 2
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_DCACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
	bgez	a3, 1b

	retw

ENTRY(__invalidate_icache_page_index)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	iii	a5, 0
	iii	a5, XCHAL_ICACHE_LINESIZE
	iii	a5, XCHAL_ICACHE_LINESIZE * 2
	iii	a5, XCHAL_ICACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_ICACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
	bgez	a3, 2b

	retw

#endif
369
370
371
372
373
374
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
new file mode 100644
index 000000000000..e5e119c820e4
--- /dev/null
+++ b/arch/xtensa/mm/pgtable.c
@@ -0,0 +1,76 @@
1/*
 * arch/xtensa/mm/pgtable.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 *
10 * Chris Zankel <chris@zankel.net>
11 */
12
13#if (DCACHE_SIZE > PAGE_SIZE)
14
15pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
16{
17 pte_t *pte, p;
18 int color = ADDR_COLOR(address);
19 int i;
20
21 p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
22
23 if (likely(p)) {
24 struct page *page;
25
26 for (i = 0; i < COLOR_SIZE; i++, p++) {
27 page = virt_to_page(pte);
28
29 set_page_count(page, 1);
30 ClearPageCompound(page);
31
32 if (ADDR_COLOR(p) == color)
33 pte = p;
34 else
35 free_page(p);
36 }
37 clear_page(pte);
38 }
39 return pte;
40}
41
42#ifdef PROFILING
43
44int mask;
45int hit;
46int flush;
47
48#endif
49
50struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
51{
52 struct page *page, p;
53 int color = ADDR_COLOR(address);
54
55 p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
56
57 if (likely(p)) {
58 for (i = 0; i < PAGE_ORDER; i++) {
59 set_page_count(p, 1);
60 ClearPageCompound(p);
61
62 if (PADDR_COLOR(page_address(pg)) == color)
63 page = p;
64 else
65 free_page(p);
66 }
67 clear_highpage(page);
68 }
69
70 return page;
71}
72
73#endif
74
75
76
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 000000000000..d3bd3bfc3b3b
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,545 @@
1/*
2 * arch/xtensa/mm/tlb.c
3 *
4 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2003 Tensilica Inc.
11 *
12 * Joe Taylor
13 * Chris Zankel <chris@zankel.net>
14 * Marc Gauthier
15 */
16
17#include <linux/mm.h>
18#include <asm/processor.h>
19#include <asm/mmu_context.h>
20#include <asm/tlbflush.h>
21#include <asm/system.h>
22#include <asm/cacheflush.h>
23
24
25static inline void __flush_itlb_all (void)
26{
27 int way, index;
28
29 for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
30 for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
31 int entry = way + (index << PAGE_SHIFT);
32 invalidate_itlb_entry_no_isync (entry);
33 }
34 }
35 asm volatile ("isync\n");
36}
37
38static inline void __flush_dtlb_all (void)
39{
40 int way, index;
41
42 for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
43 for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
44 int entry = way + (index << PAGE_SHIFT);
45 invalidate_dtlb_entry_no_isync (entry);
46 }
47 }
48 asm volatile ("isync\n");
49}
50
51
/* Invalidate every auto-refill entry in both the I- and D-TLB. */
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
57
58/* If mm is current, we simply assign the current task a new ASID, thus,
59 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 60 * we invalidate the context, thus, when that user mapping is swapped in,
61 * a new context will be assigned to it.
62 */
63
64void flush_tlb_mm(struct mm_struct *mm)
65{
66#if 0
67 printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
68#endif
69
70 if (mm == current->active_mm) {
71 int flags;
72 local_save_flags(flags);
73 get_new_mmu_context(mm, asid_cache);
74 set_rasid_register(ASID_INSERT(mm->context));
75 local_irq_restore(flags);
76 }
77 else
78 mm->context = 0;
79}
80
81void flush_tlb_range (struct vm_area_struct *vma,
82 unsigned long start, unsigned long end)
83{
84 struct mm_struct *mm = vma->vm_mm;
85 unsigned long flags;
86
87 if (mm->context == NO_CONTEXT)
88 return;
89
90#if 0
91 printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
92 (unsigned long)mm->context, start, end);
93#endif
94 local_save_flags(flags);
95
96 if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
97 int oldpid = get_rasid_register();
98 set_rasid_register (ASID_INSERT(mm->context));
99 start &= PAGE_MASK;
100 if (vma->vm_flags & VM_EXEC)
101 while(start < end) {
102 invalidate_itlb_mapping(start);
103 invalidate_dtlb_mapping(start);
104 start += PAGE_SIZE;
105 }
106 else
107 while(start < end) {
108 invalidate_dtlb_mapping(start);
109 start += PAGE_SIZE;
110 }
111
112 set_rasid_register(oldpid);
113 } else {
114 get_new_mmu_context(mm, asid_cache);
115 if (mm == current->active_mm)
116 set_rasid_register(ASID_INSERT(mm->context));
117 }
118 local_irq_restore(flags);
119}
120
121void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
122{
123 struct mm_struct* mm = vma->vm_mm;
124 unsigned long flags;
125 int oldpid;
126#if 0
127 printk("[tlbpage<%02lx,%08lx>]\n",
128 (unsigned long)mm->context, page);
129#endif
130
131 if(mm->context == NO_CONTEXT)
132 return;
133
134 local_save_flags(flags);
135
136 oldpid = get_rasid_register();
137
138 if (vma->vm_flags & VM_EXEC)
139 invalidate_itlb_mapping(page);
140 invalidate_dtlb_mapping(page);
141
142 set_rasid_register(oldpid);
143
144 local_irq_restore(flags);
145
146#if 0
147 flush_tlb_all();
148 return;
149#endif
150}
151
152
153#ifdef DEBUG_TLB
154
155#define USE_ITLB 0
156#define USE_DTLB 1
157
/* Geometry of one TLB way, filled in from the core's XCHAL_* macros. */
struct way_config_t {
	int indicies;		/* number of indices (entries) in this way */
	int indicies_log2;	/* log2 of the above */
	int pgsz_log2;		/* log2 of the way's minimum page size */
	int arf;		/* non-zero if the way is auto-refill */
};

/* Per-way geometry of the instruction TLB (7 ways). */
static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
	}
};

/* Per-way geometry of the data TLB (10 ways). */
static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
	}
};
257
/* Total number of entries across all ways.  The expansions are fully
 * parenthesized so the macros are safe inside larger expressions. */
#define ITLB_TOTAL_ENTRIES \
	(XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))
#define DTLB_TOTAL_ENTRIES \
	(XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))
278
279
/* One decoded TLB entry, used by the sorted dump routines. */
typedef struct {
	unsigned va;		/* effective virtual address of the entry */
	unsigned pa;		/* physical address */
	unsigned char asid;	/* address space ID (0 == invalid) */
	unsigned char ca;	/* cache attribute bits */
	unsigned char way;
	unsigned char index;
	unsigned char pgsz_log2;	/* 0 .. 32 */
	unsigned char type;	/* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;

/* Lexicographic ordering over (asid, va, pa, ca, way, index).
 * Return -1 if a precedes b, +1 if a follows b, 0 if same. */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
#define TLB_CMP_FIELD(f)					\
	do {							\
		if (a->f != b->f)				\
			return (a->f < b->f) ? -1 : 1;		\
	} while (0)

	TLB_CMP_FIELD(asid);
	TLB_CMP_FIELD(va);
	TLB_CMP_FIELD(pa);
	TLB_CMP_FIELD(ca);
	TLB_CMP_FIELD(way);
	TLB_CMP_FIELD(index);
#undef TLB_CMP_FIELD
	return 0;
}

/* Sort 'n' entries in place.  Simple O(n^2) exchange sort; the entry
 * counts are tiny, so no need for anything fancier. */
void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
	int lo, hi;

	for (lo = 0; lo + 1 < n; lo++) {
		for (hi = lo + 1; hi < n; hi++) {
			if (cmp_tlb_dump_info(&t[lo], &t[hi]) > 0) {
				tlb_dump_entry_t swap = t[lo];
				t[lo] = t[hi];
				t[hi] = swap;
			}
		}
	}
}
321
322
/* Scratch buffers for dump_tlbs2(), sized for every possible entry. */
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
325
326
/* Human-readable name for a way's auto-refill flag. */
static inline char *way_type (int type)
{
	if (type)
		return "autorefill";
	return "non-autorefill";
}
331
332void print_entry (struct way_config_t *way_info,
333 unsigned int way,
334 unsigned int index,
335 unsigned int virtual,
336 unsigned int translation)
337{
338 char valid_chr;
339 unsigned int va, pa, asid, ca;
340
341 va = virtual &
342 ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
343 asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
344 pa = translation & ~((1 << way_info->pgsz_log2) - 1);
345 ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
346 valid_chr = asid ? 'V' : 'I';
347
348 /* Compute and incorporate the effect of the index bits on the
349 * va. It's more useful for kernel debugging, since we always
350 * want to know the effective va anyway. */
351
352 va += index << way_info->pgsz_log2;
353
354 printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
355 way, index, valid_chr, va, pa, asid, ca);
356}
357
/* Read one ITLB entry (entry address encodes way + index) and print it. */
void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_itlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_itlb_translation (way + (index << way_info->pgsz_log2)));
}

/* Read one DTLB entry (entry address encodes way + index) and print it. */
void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}
371
/* Print every ITLB entry, way by way, in hardware order. */
void dump_itlb (void)
{
	int way, index;

	printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);

	for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, itlb[way].indicies,
			itlb[way].pgsz_log2, way_type(itlb[way].arf));
		for (index = 0; index < itlb[way].indicies; index++) {
			print_itlb_entry(&itlb[way], way, index);
		}
	}
}

/* Print every DTLB entry, way by way, in hardware order. */
void dump_dtlb (void)
{
	int way, index;

	printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);

	for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, dtlb[way].indicies,
			dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
		for (index = 0; index < dtlb[way].indicies; index++) {
			print_dtlb_entry(&dtlb[way], way, index);
		}
	}
}
403
/* Gather every entry of one TLB into tinfo, sort by (ASID, VA), print.
 * @tinfo:        scratch array with room for 'entries' elements
 * @config:       per-way geometry table (itlb[] or dtlb[])
 * @entries:      total entry count across all ways
 * @ways:         number of ways to walk
 * @type:         0 = ITLB, 1 = DTLB
 * @show_invalid: currently ignored -- the filter below is compiled out
 */
void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
	       int entries, int ways, int type, int show_invalid)
{
	tlb_dump_entry_t *e = tinfo;
	int way, i;

	/* Gather all info: */
	for (way = 0; way < ways; way++) {
		struct way_config_t *cfg = config + way;
		for (i = 0; i < cfg->indicies; i++) {
			unsigned wayindex = way + (i << cfg->pgsz_log2);
			unsigned vv = (type ? read_dtlb_virtual (wayindex)
					: read_itlb_virtual (wayindex));
			unsigned pp = (type ? read_dtlb_translation (wayindex)
					: read_itlb_translation (wayindex));

			/* Compute and incorporate the effect of the index bits on the
			 * va. It's more useful for kernel debugging, since we always
			 * want to know the effective va anyway. */

			e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
			e->va += (i << cfg->pgsz_log2);
			e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
			e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
			e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
			e->way = way;
			e->index = i;
			e->pgsz_log2 = cfg->pgsz_log2;
			e->type = type;
			e++;
		}
	}
#if 1
	/* Sort by ASID and VADDR: */
	sort_tlb_dump_info (tinfo, entries);
#endif

	/* Display all sorted info: */
	printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
	for (e = tinfo, i = 0; i < entries; i++, e++) {
#if 0
		if (e->asid == 0 && !show_invalid)
			continue;
#endif
		/* Page size is shown as (2^(log2 mod 10)) with a ' '/k/M/G suffix. */
		printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
			(e->type ? 'D' : 'I'), e->way, e->index,
			e->asid, e->va, e->pa, e->ca,
			(1 << (e->pgsz_log2 % 10)),
			" kMG"[e->pgsz_log2 / 10]
			);
	}
}
456
/* Dump both TLBs in sorted form; showinv is threaded through to
 * dump_tlb() (where the invalid-entry filter is currently disabled). */
void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}

/* Sorted dump including invalid entries. */
void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}

/* Sorted dump of valid entries only (see note in dump_tlb()). */
void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}


/* Unsorted, hardware-order dump of both TLBs. */
void dump_tlbs (void)
{
	dump_itlb();
	dump_dtlb();
}
479
/* Dump cache tags for every way of the I- or D-cache.
 * @dcache: 0 = instruction cache, 1 = data cache
 * @idx:    line index to dump, or negative for all lines
 */
void dump_cache_tag(int dcache, int idx)
{
	int w, i, s, e;
	unsigned long tag, index;
	unsigned long num_lines, num_ways, cache_size, line_size;

	num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
	cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
	line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

	/* Despite the name, num_lines is the byte size of one way; the
	 * loop below advances by line_size bytes. */
	num_lines = cache_size / num_ways;

	s = 0; e = num_lines;

	if (idx >= 0)
		e = (s = idx * line_size) + 1;	/* restrict to a single line */

	for (i = s; i < e; i+= line_size) {
		printk("\nline %#08x:", i);
		for (w = 0; w < num_ways; w++) {
			index = w * num_lines + i;
			/* ldct/lict read the cache-tag word for an index. */
			if (dcache)
				__asm__ __volatile__("ldct %0, %1\n\t"
						: "=a"(tag) : "a"(index));
			else
				__asm__ __volatile__("lict %0, %1\n\t"
						: "=a"(tag) : "a"(index));

			printk(" %#010lx", tag);
		}
	}
	printk ("\n");
}
513
/* Dump the data words of one I-cache line (given by 'index') from
 * every way, using the licw (load I-cache word) instruction. */
void dump_icache(int index)
{
	unsigned long data, addr;
	int w, i;

	const unsigned long num_ways = XCHAL_ICACHE_WAYS;
	const unsigned long cache_size = XCHAL_ICACHE_SIZE;
	const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
	const unsigned long num_lines = cache_size / num_ways / line_size;

	for (w = 0; w < num_ways; w++) {
		printk ("\nWay %d", w);

		for (i = 0; i < line_size; i+= 4) {
			/* NOTE(review): the way stride here is num_lines
			 * (a line count), while dump_cache_tag() strides
			 * by bytes per way -- presumably this should be
			 * w * num_lines * line_size; confirm against the
			 * licw index format. */
			addr = w * num_lines + index * line_size + i;
			__asm__ __volatile__("licw %0, %1\n\t"
					: "=a"(data) : "a"(addr));
			printk(" %#010lx", data);
		}
	}
	printk ("\n");
}
536
/* Dump all tag words of both caches. */
void dump_cache_tags(void)
{
	printk("Instruction cache\n");
	dump_cache_tag(0, -1);	/* 0 = I-cache, -1 = every line */
	printk("Data cache\n");
	dump_cache_tag(1, -1);	/* 1 = D-cache */
}
544
545#endif
diff --git a/arch/xtensa/platform-iss/Makefile b/arch/xtensa/platform-iss/Makefile
new file mode 100644
index 000000000000..5b394e9620e5
--- /dev/null
+++ b/arch/xtensa/platform-iss/Makefile
@@ -0,0 +1,13 @@
1# $Id: Makefile,v 1.1.1.1 2002/08/28 16:10:14 aroll Exp $
2#
3# Makefile for the Xtensa Instruction Set Simulator (ISS)
4# "prom monitor" library routines under Linux.
5#
6# Note! Dependencies are done automagically by 'make dep', which also
7# removes any old dependencies. DON'T put your own dependencies here
8# unless it's something special (ie not a .c file).
9#
10# Note 2! The CFLAGS definitions are in the main makefile...
11
12obj-y = io.o console.o setup.o network.o
13
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
new file mode 100644
index 000000000000..9e2b53f6a907
--- /dev/null
+++ b/arch/xtensa/platform-iss/console.c
@@ -0,0 +1,303 @@
1/*
2 * arch/xtensa/platform-iss/console.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001-2005 Tensilica Inc.
9 * Authors Christian Zankel, Joe Taylor
10 */
11
12#include <linux/module.h>
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/sched.h>
16#include <linux/console.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/mm.h>
20#include <linux/major.h>
21#include <linux/param.h>
22#include <linux/serial.h>
23#include <linux/serialP.h>
24#include <linux/console.h>
25
26#include <asm/uaccess.h>
27#include <asm/irq.h>
28
29#include <xtensa/simcall.h>
30
31#include <linux/tty.h>
32#include <linux/tty_flip.h>
33
#ifdef SERIAL_INLINE
#define _INLINE_ inline
#endif

/* The ISS provides a single simulated serial line. */
#define SERIAL_MAX_NUM_LINES 1
/* Input poll period in jiffies (20 seconds). */
#define SERIAL_TIMER_VALUE (20 * HZ)

static struct tty_driver *serial_driver;
static struct timer_list serial_timer;	/* pumps input via rs_poll() */

static DEFINE_SPINLOCK(timer_lock);	/* guards serial_timer setup */

/* errno value of the most recent simcall, written by __simc(). */
int errno;
47
/* Issue a "simcall" to the ISS host and return its result.
 * The six arguments land in registers a2..a7 per the xtensa windowed
 * calling convention, which is where the simcall instruction expects
 * them -- so the C parameters are never referenced explicitly.
 * NOTE(review): this relies on the function NOT being inlined;
 * presumably that is why it carries no 'inline' qualifier. */
static int __simc (int a, int b, int c, int d, int e, int f)
{
	int ret;
	__asm__ __volatile__ ("simcall\n"
			"mov %0, a2\n"
			"mov %1, a3\n" : "=a" (ret), "=a" (errno)
			: : "a2", "a3");
	return ret;
}

static char *serial_version = "0.1";
static char *serial_name = "ISS serial driver";
60
61/*
62 * This routine is called whenever a serial port is opened. It
63 * enables interrupts for a serial port, linking in its async structure into
64 * the IRQ chain. It also performs the serial-specific
65 * initialization for the tty structure.
66 */
67
68static void rs_poll(unsigned long);
69
/* Open a simulated serial line.
 * The first open arms the polling timer that pumps simulator input
 * into the tty (rs_poll()); later opens share it.
 * Returns 0 or -ENODEV for an out-of-range line. */
static int rs_open(struct tty_struct *tty, struct file * filp)
{
	int line = tty->index;

	if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES))
		return -ENODEV;

	spin_lock(&timer_lock);

	/* count == 1 means we are the first opener. */
	if (tty->count == 1) {
		init_timer(&serial_timer);
		serial_timer.data = (unsigned long) tty;
		serial_timer.function = rs_poll;
		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
	}
	spin_unlock(&timer_lock);

	return 0;
}
89
90
91/*
92 * ------------------------------------------------------------
93 * iss_serial_close()
94 *
95 * This routine is called when the serial port gets closed. First, we
96 * wait for the last remaining data to be sent. Then, we unlink its
97 * async structure from the interrupt chain if necessary, and we free
98 * that IRQ if nothing is left in the chain.
99 * ------------------------------------------------------------
100 */
101static void rs_close(struct tty_struct *tty, struct file * filp)
102{
103 spin_lock(&timer_lock);
104 if (tty->count == 1)
105 del_timer_sync(&serial_timer);
106 spin_unlock(&timer_lock);
107}
108
109
/* Write 'count' bytes to the host's stdout (fd 1) via a simcall.
 * The simulator accepts everything, so the full count is reported. */
static int rs_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	/* see drivers/char/serialX.c to reference original version */

	__simc (SYS_write, 1, (unsigned long)buf, count, 0, 0);
	return count;
}
118
/* Timer callback: poll the host's stdin and push any pending bytes
 * into the tty flip buffer, then re-arm the timer.
 * NOTE(review): bytes are stuffed into the flip buffer without a
 * bounds check -- presumably acceptable for the slow simulated line;
 * confirm against TTY_FLIPBUF_SIZE. */
static void rs_poll(unsigned long priv)
{
	struct tty_struct* tty = (struct tty_struct*) priv;

	/* Zero timeout: SYS_select_one returns immediately. */
	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
	int i = 0;
	unsigned char c;

	spin_lock(&timer_lock);

	while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
		__simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
		tty->flip.count++;
		*tty->flip.char_buf_ptr++ = c;
		*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
		i++;
	}

	/* Only wake the line discipline if something arrived. */
	if (i)
		tty_flip_buffer_push(tty);


	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
	spin_unlock(&timer_lock);
}
144
145
146static void rs_put_char(struct tty_struct *tty, unsigned char ch)
147{
148 char buf[2];
149
150 if (!tty)
151 return;
152
153 buf[0] = ch;
154 buf[1] = '\0'; /* Is this NULL necessary? */
155 __simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
156}
157
/* No buffering in the simulator, so nothing to flush. */
static void rs_flush_chars(struct tty_struct *tty)
{
}

/* Report write headroom; the simcall interface imposes no real limit. */
static int rs_write_room(struct tty_struct *tty)
{
	/* Let's say iss can always accept 2K characters.. */
	return 2 * 1024;
}

static int rs_chars_in_buffer(struct tty_struct *tty)
{
	/* the iss doesn't buffer characters */
	return 0;
}

/* Hangup is meaningless on the simulated line. */
static void rs_hangup(struct tty_struct *tty)
{
	/* Stub, once again.. */
}

/* Writes complete synchronously, so there is never anything to wait for. */
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
	/* Stub, once again.. */
}
183
/* /proc read hook: report the driver version string.
 * The off/begin arithmetic implements the legacy read_proc
 * partial-read protocol. */
static int rs_read_proc(char *page, char **start, off_t off, int count,
			int *eof, void *data)
{
	int len = 0;
	off_t begin = 0;

	len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version);
	*eof = 1;	/* everything fits in one read */

	if (off >= len + begin)
		return 0;

	*start = page + (off - begin);
	return ((count < begin + len - off) ? count : begin + len - off);
}
199
200
/* Legacy serial API prototypes; not defined in this file. */
int register_serial(struct serial_struct*);
void unregister_serial(int);

/* tty operations for the polled ISS console line. */
static struct tty_operations serial_ops = {
	.open = rs_open,
	.close = rs_close,
	.write = rs_write,
	.put_char = rs_put_char,
	.flush_chars = rs_flush_chars,
	.write_room = rs_write_room,
	.chars_in_buffer = rs_chars_in_buffer,
	.hangup = rs_hangup,
	.wait_until_sent = rs_wait_until_sent,
	.read_proc = rs_read_proc
};
216
217int __init rs_init(void)
218{
219 serial_driver = alloc_tty_driver(1);
220
221 printk ("%s %s\n", serial_name, serial_version);
222
223 /* Initialize the tty_driver structure */
224
225 serial_driver->owner = THIS_MODULE;
226 serial_driver->driver_name = "iss_serial";
227 serial_driver->name = "ttyS";
228 serial_driver->major = TTY_MAJOR;
229 serial_driver->minor_start = 64;
230 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
231 serial_driver->subtype = SERIAL_TYPE_NORMAL;
232 serial_driver->init_termios = tty_std_termios;
233 serial_driver->init_termios.c_cflag =
234 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
235 serial_driver->flags = TTY_DRIVER_REAL_RAW;
236
237 tty_set_operations(serial_driver, &serial_ops);
238
239 if (tty_register_driver(serial_driver))
240 panic("Couldn't register serial driver\n");
241 return 0;
242}
243
244
/* Unregister the driver and release the tty_driver structure. */
static __exit void rs_exit(void)
{
	int error;

	if ((error = tty_unregister_driver(serial_driver)))
		printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
		       error);
	put_tty_driver(serial_driver);
}
254
255
256/* We use `late_initcall' instead of just `__initcall' as a workaround for
257 * the fact that (1) simcons_tty_init can't be called before tty_init,
258 * (2) tty_init is called via `module_init', (3) if statically linked,
259 * module_init == device_init, and (4) there's no ordering of init lists.
260 * We can do this easily because simcons is always statically linked, but
261 * other tty drivers that depend on tty_init and which must use
262 * `module_init' to declare their init routines are likely to be broken.
263 */
264
265late_initcall(rs_init);
266
267
268#ifdef CONFIG_SERIAL_CONSOLE
269
270static void iss_console_write(struct console *co, const char *s, unsigned count)
271{
272 int len = strlen(s);
273
274 if (s != 0 && *s != 0)
275 __simc (SYS_write, 1, (unsigned long)s,
276 count < len ? count : len,0,0);
277}
278
/* Console-to-tty mapping: all console indices map onto the single
 * ISS serial driver. */
static struct tty_driver* iss_console_device(struct console *c, int *index)
{
	*index = c->index;
	return serial_driver;
}
284
285
/* Console descriptor for the simulated serial line (ttyS). */
static struct console sercons = {
	.name = "ttyS",
	.write = iss_console_write,
	.device = iss_console_device,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index = -1			/* bind to the first available line */
};

/* Register the ISS console early via console_initcall(). */
static int __init iss_console_init(void)
{
	register_console(&sercons);
	return 0;
}

console_initcall(iss_console_init);
301
302#endif /* CONFIG_SERIAL_CONSOLE */
303
diff --git a/arch/xtensa/platform-iss/io.c b/arch/xtensa/platform-iss/io.c
new file mode 100644
index 000000000000..5b161a5cb65f
--- /dev/null
+++ b/arch/xtensa/platform-iss/io.c
@@ -0,0 +1,32 @@
/* This file isn't really needed right now. */

/* Dead code below: minimal polled serial I/O over simcalls, kept for
 * reference only -- the whole block is compiled out. */
#if 0

#include <asm/io.h>
#include <xtensa/simcall.h>

extern int __simc ();


/* Blocking read of one character from the host's stdin. */
char iss_serial_getc()
{
	char c;
	__simc( SYS_read, 0, &c, 1 );
	return c;
}

/* Write one character to the host's stdout. */
void iss_serial_putc( char c )
{
	__simc( SYS_write, 1, &c, 1 );
}

/* Write a NUL-terminated string to the host's stdout. */
void iss_serial_puts( char *s )
{
	if( s != 0 && *s != 0 )
		__simc( SYS_write, 1, s, strlen(s) );
}

/*#error Need I/O ports to specific hardware!*/

#endif
32
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
new file mode 100644
index 000000000000..498d7dced1f4
--- /dev/null
+++ b/arch/xtensa/platform-iss/network.c
@@ -0,0 +1,855 @@
1/*
2 *
3 * arch/xtensa/platform-iss/network.c
4 *
5 * Platform specific initialization.
6 *
7 * Authors: Chris Zankel <chris@zankel.net>
 8 * Based on work from the UML team.
9 *
10 * Copyright 2005 Tensilica Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/list.h>
21#include <linux/irq.h>
22#include <linux/spinlock.h>
23#include <linux/slab.h>
24#include <linux/timer.h>
25#include <linux/if_ether.h>
26#include <linux/inetdevice.h>
27#include <linux/init.h>
28#include <linux/if_tun.h>
29#include <linux/etherdevice.h>
30#include <linux/interrupt.h>
31#include <linux/ioctl.h>
32#include <linux/bootmem.h>
33#include <linux/ethtool.h>
34#include <linux/rtnetlink.h>
35#include <linux/timer.h>
36
37#include <xtensa/simcall.h>
38
#define DRIVER_NAME "iss-netdev"
#define ETH_MAX_PACKET 1500		/* payload MTU of the simulated link */
#define ETH_HEADER_OTHER 14		/* Ethernet header size in bytes */
#define ISS_NET_TIMER_VALUE (2 * HZ)	/* receive-poll period in jiffies */


static DEFINE_SPINLOCK(opened_lock);	/* guards the 'opened' list */
static LIST_HEAD(opened);		/* devices currently opened */

static DEFINE_SPINLOCK(devices_lock);	/* guards the 'devices' list */
static LIST_HEAD(devices);		/* all probed devices */
50
51/* ------------------------------------------------------------------------- */
52
53/* We currently only support the TUNTAP transport protocol. */
54
55#define TRANSPORT_TUNTAP_NAME "tuntap"
56#define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET
57
/* Host-side state of a TUNTAP transport instance. */
struct tuntap_info {
	char dev_name[IFNAMSIZ];	/* host tap device name */
	int fixed_config;		/* non-zero: use the preconfigured device */
	unsigned char gw[ETH_ALEN];	/* gateway MAC address */
	int fd;				/* host file descriptor of the tap */
};
64
65/* ------------------------------------------------------------------------- */
66
67
68/* This structure contains out private information for the driver. */
69
/* Per-device private state of the ISS network driver. */
struct iss_net_private {

	struct list_head device_list;	/* link in the global 'devices' list */
	struct list_head opened_list;	/* link in the global 'opened' list */

	spinlock_t lock;		/* protects this structure */
	struct net_device *dev;
	struct platform_device pdev;
	struct timer_list tl;
	struct net_device_stats stats;

	struct timer_list timer;	/* receive-poll timer */
	unsigned int timer_val;		/* current poll period in jiffies */

	int index;			/* ethX unit number */
	int mtu;

	unsigned char mac[ETH_ALEN];
	int have_mac;			/* non-zero once 'mac' is valid */

	/* Transport abstraction; only TUNTAP is implemented. */
	struct {
		union {
			struct tuntap_info tuntap;
		} info;

		int (*open)(struct iss_net_private *lp);
		void (*close)(struct iss_net_private *lp);
		int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
		int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
		unsigned short (*protocol)(struct sk_buff *skb);
		int (*poll)(struct iss_net_private *lp);
	} tp;

};
104
105/* ======================= ISS SIMCALL INTERFACE =========================== */
106
107/* Note: __simc must _not_ be declared inline! */
108
/* errno of the most recent simcall, written by __simc().
 * NOTE(review): this file-local 'errno' shadows the conventional
 * meaning of the name; it also duplicates the copy in console.c --
 * confirm the shadowing is intentional. */
static int errno;

/* Issue a "simcall" to the ISS host; the six arguments are picked up
 * from registers a2..a7 per the xtensa calling convention, so the C
 * parameters are never referenced explicitly.  Must not be inlined
 * (see the note above). */
static int __simc (int a, int b, int c, int d, int e, int f)
{
	int ret;
	__asm__ __volatile__ ("simcall\n"
			"mov %0, a2\n"
			"mov %1, a3\n" : "=a" (ret), "=a" (errno)
			: : "a2", "a3");
	return ret;
}
120
121static int inline simc_open(char *file, int flags, int mode)
122{
123 return __simc(SYS_open, (int) file, flags, mode, 0, 0);
124}
125
126static int inline simc_close(int fd)
127{
128 return __simc(SYS_close, fd, 0, 0, 0, 0);
129}
130
131static int inline simc_ioctl(int fd, int request, void *arg)
132{
133 return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
134}
135
136static int inline simc_read(int fd, void *buf, size_t count)
137{
138 return __simc(SYS_read, fd, (int) buf, count, 0, 0);
139}
140
141static int inline simc_write(int fd, void *buf, size_t count)
142{
143 return __simc(SYS_write, fd, (int) buf, count, 0, 0);
144}
145
146static int inline simc_poll(int fd)
147{
148 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
149
150 return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,0,0);
151}
152
153/* ================================ HELPERS ================================ */
154
155
/* Split a comma-separated interface spec in place.
 * Each variadic argument is a char** that receives a pointer to the
 * next field; commas are overwritten with NUL terminators.  The
 * variadic list must end with a NULL pointer.  An empty field leaves
 * its slot untouched.  Returns the unparsed remainder once every slot
 * is filled, or NULL if the string runs out of separators first.
 */
static char *split_if_spec(char *str, ...)
{
	va_list args;
	char **slot;

	va_start(args, str);
	for (slot = va_arg(args, char **); slot != NULL;
	     slot = va_arg(args, char **)) {
		char *sep;

		if (*str == '\0') {
			va_end(args);
			return NULL;
		}
		sep = strchr(str, ',');
		if (sep != str)
			*slot = str;	/* non-empty field */
		if (sep == NULL) {
			va_end(args);
			return NULL;	/* no separator left */
		}
		*sep = '\0';
		str = sep + 1;
	}
	va_end(args);
	return str;
}
176
177
/* Dead code: grow an skb's tailroom by 'extra' bytes, copying the
 * buffer if necessary.  Compiled out; kept for reference. */
#if 0
/* Adjust SKB. */

struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
	if ((skb != NULL) && (skb_tailroom(skb) < extra)) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
		dev_kfree_skb(skb);
		skb = skb2;
	}
	if (skb != NULL)
		skb_put(skb, extra);

	return skb;
}
#endif
196
197/* Return the IP address as a string for a given device. */
198
/* Format the device's first IPv4 address into 'buf' as dotted quad,
 * and optionally store the four raw bytes into 'bin_buf'.
 * NOTE(review): the byte extraction assumes the address is stored in
 * network order on a little-endian host -- confirm on big-endian
 * configurations.  On a device without an address both buffers are
 * left untouched. */
static void dev_ip_addr(void *d, char *buf, char *bin_buf)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	u32 addr;

	if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) {
		printk(KERN_WARNING "Device not assigned an IP address!\n");
		return;
	}

	addr = in->ifa_address;
	sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
		(addr >> 16) & 0xff, addr >> 24);

	if (bin_buf) {
		bin_buf[0] = addr & 0xff;
		bin_buf[1] = (addr >> 8) & 0xff;
		bin_buf[2] = (addr >> 16) & 0xff;
		bin_buf[3] = addr >> 24;
	}
}
222
223/* Set Ethernet address of the specified device. */
224
/* Copy 'addr' (ETH_ALEN bytes) into the device's hardware address. */
static void inline set_ether_mac(void *d, unsigned char *addr)
{
	struct net_device *dev = d;
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}
230
231
232/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
233
234static int tuntap_open(struct iss_net_private *lp)
235{
236 struct ifreq ifr;
237 char *dev_name = lp->tp.info.tuntap.dev_name;
238 int err = -EINVAL;
239 int fd;
240
241 /* We currently only support a fixed configuration. */
242
243 if (!lp->tp.info.tuntap.fixed_config)
244 return -EINVAL;
245
246 if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */
247 printk("Failed to open /dev/net/tun, returned %d "
248 "(errno = %d)\n", fd, errno);
249 return fd;
250 }
251
252 memset(&ifr, 0, sizeof ifr);
253 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
254 strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name - 1);
255
256 if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) {
257 printk("Failed to set interface, returned %d "
258 "(errno = %d)\n", err, errno);
259 simc_close(fd);
260 return err;
261 }
262
263 lp->tp.info.tuntap.fd = fd;
264 return err;
265}
266
267static void tuntap_close(struct iss_net_private *lp)
268{
269#if 0
270 if (lp->tp.info.tuntap.fixed_config)
271 iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name);
272#endif
273 simc_close(lp->tp.info.tuntap.fd);
274 lp->tp.info.tuntap.fd = -1;
275}
276
/* Read one frame from the host tun/tap fd straight into the skb's data
 * area.  Returns the byte count read, or a negative error from the
 * simulator call.  Assumes the caller allocated the skb large enough
 * for dev->mtu + ETH_HEADER_OTHER bytes (iss_net_rx() does); the
 * disabled code below would instead grow the skb here. */
static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb)
{
#if 0
	*skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
	if (*skb == NULL)
		return -ENOMEM;
#endif

	return simc_read(lp->tp.info.tuntap.fd,
			 (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
}
288
289static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb)
290{
291 return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
292}
293
294unsigned short tuntap_protocol(struct sk_buff *skb)
295{
296 return eth_type_trans(skb, skb->dev);
297}
298
299static int tuntap_poll(struct iss_net_private *lp)
300{
301 return simc_poll(lp->tp.info.tuntap.fd);
302}
303
304/*
305 * Currently only a device name is supported.
306 * ethX=tuntap[,[mac address][,[device name]]]
307 */
308
309static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
310{
311 const int len = strlen(TRANSPORT_TUNTAP_NAME);
312 char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
313
314 /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
315
316 if (strncmp(init, TRANSPORT_TUNTAP_NAME, len))
317 return 0;
318
319 if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') {
320 if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) {
321 printk("Extra garbage on specification : '%s'\n", rem);
322 return 0;
323 }
324 } else if (*init != '\0') {
325 printk("Invalid argument: %s. Skipping device!\n", init);
326 return 0;
327 }
328
329 if (dev_name) {
330 strncpy(lp->tp.info.tuntap.dev_name, dev_name,
331 sizeof lp->tp.info.tuntap.dev_name);
332 lp->tp.info.tuntap.fixed_config = 1;
333 } else
334 strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME);
335
336
337#if 0
338 if (setup_etheraddr(mac_str, lp->mac))
339 lp->have_mac = 1;
340#endif
341 lp->mtu = TRANSPORT_TUNTAP_MTU;
342
343 //lp->info.tuntap.gate_addr = gate_addr;
344
345 lp->tp.info.tuntap.fd = -1;
346
347 lp->tp.open = tuntap_open;
348 lp->tp.close = tuntap_close;
349 lp->tp.read = tuntap_read;
350 lp->tp.write = tuntap_write;
351 lp->tp.protocol = tuntap_protocol;
352 lp->tp.poll = tuntap_poll;
353
354 printk("TUN/TAP backend - ");
355#if 0
356 if (lp->host.gate_addr != NULL)
357 printk("IP = %s", lp->host.gate_addr);
358#endif
359 printk("\n");
360
361 return 1;
362}
363
364/* ================================ ISS NET ================================ */
365
366static int iss_net_rx(struct net_device *dev)
367{
368 struct iss_net_private *lp = dev->priv;
369 int pkt_len;
370 struct sk_buff *skb;
371
372 /* Check if there is any new data. */
373
374 if (lp->tp.poll(lp) == 0)
375 return 0;
376
377 /* Try to allocate memory, if it fails, try again next round. */
378
379 if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) {
380 lp->stats.rx_dropped++;
381 return 0;
382 }
383
384 skb_reserve(skb, 2);
385
386 /* Setup skb */
387
388 skb->dev = dev;
389 skb->mac.raw = skb->data;
390 pkt_len = lp->tp.read(lp, &skb);
391 skb_put(skb, pkt_len);
392
393 if (pkt_len > 0) {
394 skb_trim(skb, pkt_len);
395 skb->protocol = lp->tp.protocol(skb);
396 // netif_rx(skb);
397 netif_rx_ni(skb);
398
399 lp->stats.rx_bytes += skb->len;
400 lp->stats.rx_packets++;
401 return pkt_len;
402 }
403 kfree_skb(skb);
404 return pkt_len;
405}
406
/*
 * Drain pending frames from every opened interface.  Returns the total
 * number of frames received across all devices.  Called from the
 * per-device polling timer.
 * NOTE(review): dev_close() can sleep but is invoked here while holding
 * opened_lock (a spinlock) — confirm this cannot trigger a sleeping-in-
 * atomic warning on this configuration.
 */
static int iss_net_poll(void)
{
	struct list_head *ele;
	int err, ret = 0;

	spin_lock(&opened_lock);

	list_for_each(ele, &opened) {
		struct iss_net_private *lp;

		lp = list_entry(ele, struct iss_net_private, opened_list);

		/* Stop at the first interface that is not up. */
		if (!netif_running(lp->dev))
			break;

		spin_lock(&lp->lock);

		/* Receive until the transport reports no more data (0)
		 * or an error (< 0). */
		while ((err = iss_net_rx(lp->dev)) > 0)
			ret++;

		spin_unlock(&lp->lock);

		if (err < 0) {
			printk(KERN_ERR "Device '%s' read returned %d, "
			       "shutting it down\n", lp->dev->name, err);
			dev_close(lp->dev);
		} else {
			// FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ);
		}
	}

	spin_unlock(&opened_lock);
	return ret;
}
441
442
443static void iss_net_timer(unsigned long priv)
444{
445 struct iss_net_private* lp = (struct iss_net_private*) priv;
446
447 spin_lock(&lp->lock);
448
449 iss_net_poll();
450
451 mod_timer(&lp->timer, jiffies + lp->timer_val);
452
453 spin_unlock(&lp->lock);
454}
455
456
/*
 * net_device->open hook: open the transport, derive a MAC address from
 * the interface's IP address when none was configured, drain frames
 * already queued on the host side, register the device on the 'opened'
 * list, and start the polling timer.
 */
static int iss_net_open(struct net_device *dev)
{
	struct iss_net_private *lp = dev->priv;
	/* big enough for a dotted quad plus terminator */
	char addr[sizeof "255.255.255.255\0"];
	int err;

	spin_lock(&lp->lock);

	if ((err = lp->tp.open(lp)) < 0)
		goto out;

	/* No configured MAC: fabricate one from the IPv4 address (placed
	 * in mac[2..5]; mac[0..1] were preset at configure time). */
	if (!lp->have_mac) {
		dev_ip_addr(dev, addr, &lp->mac[2]);
		set_ether_mac(dev, lp->mac);
	}

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 * NOTE(review): if this drain loop ends with err < 0, that negative
	 * value is returned even though the open itself succeeded — confirm
	 * this is intended. */
	while ((err = iss_net_rx(dev)) > 0)
		;

	spin_lock(&opened_lock);
	list_add(&lp->opened_list, &opened);
	spin_unlock(&opened_lock);

	/* Arm the polling timer that drives reception. */
	init_timer(&lp->timer);
	lp->timer_val = ISS_NET_TIMER_VALUE;
	lp->timer.data = (unsigned long) lp;
	lp->timer.function = iss_net_timer;
	mod_timer(&lp->timer, jiffies + lp->timer_val);

out:
	spin_unlock(&lp->lock);
	return err;
}
496
497static int iss_net_close(struct net_device *dev)
498{
499 struct iss_net_private *lp = dev->priv;
500printk("iss_net_close!\n");
501 netif_stop_queue(dev);
502 spin_lock(&lp->lock);
503
504 spin_lock(&opened_lock);
505 list_del(&opened);
506 spin_unlock(&opened_lock);
507
508 del_timer_sync(&lp->timer);
509
510 lp->tp.close(lp);
511
512 spin_unlock(&lp->lock);
513 return 0;
514}
515
516static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
517{
518 struct iss_net_private *lp = dev->priv;
519 unsigned long flags;
520 int len;
521
522 netif_stop_queue(dev);
523 spin_lock_irqsave(&lp->lock, flags);
524
525 len = lp->tp.write(lp, &skb);
526
527 if (len == skb->len) {
528 lp->stats.tx_packets++;
529 lp->stats.tx_bytes += skb->len;
530 dev->trans_start = jiffies;
531 netif_start_queue(dev);
532
533 /* this is normally done in the interrupt when tx finishes */
534 netif_wake_queue(dev);
535
536 } else if (len == 0) {
537 netif_start_queue(dev);
538 lp->stats.tx_dropped++;
539
540 } else {
541 netif_start_queue(dev);
542 printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len);
543 }
544
545 spin_unlock_irqrestore(&lp->lock, flags);
546
547 dev_kfree_skb(skb);
548 return 0;
549}
550
551
552static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
553{
554 struct iss_net_private *lp = dev->priv;
555 return &lp->stats;
556}
557
/* net_device->set_multicast_list hook.  Multicast filtering is not
 * implemented for the ISS transport; the disabled code below sketches
 * the usual ALLMULTI handling. */
static void iss_net_set_multicast_list(struct net_device *dev)
{
#if 0
	if (dev->flags & IFF_PROMISC)
		return;
	else if (dev->mc_count)
		dev->flags |= IFF_ALLMULTI;
	else
		dev->flags &= ~IFF_ALLMULTI;
#endif
}
569
/* net_device->tx_timeout hook.  Intentionally a no-op: transmits
 * complete synchronously in iss_net_start_xmit(), so the watchdog
 * recovery below is disabled. */
static void iss_net_tx_timeout(struct net_device *dev)
{
#if 0
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
#endif
}
577
/* net_device->set_mac_address hook.  Currently a stub that reports
 * success without changing anything; the disabled code shows the
 * intended locked copy into dev->dev_addr. */
static int iss_net_set_mac(struct net_device *dev, void *addr)
{
#if 0
	struct iss_net_private *lp = dev->priv;
	struct sockaddr *hwaddr = addr;

	spin_lock(&lp->lock);
	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
	spin_unlock(&lp->lock);
#endif

	return 0;
}
591
/* net_device->change_mtu hook.  MTU changes are currently rejected
 * (-EINVAL); the disabled code shows the intended implementation. */
static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{
#if 0
	struct iss_net_private *lp = dev->priv;
	int err = 0;

	spin_lock(&lp->lock);

	// FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user);

	if (new_mtu < 0)
		err = new_mtu;
	else
		dev->mtu = new_mtu;

	spin_unlock(&lp->lock);
	return err;
#endif
	return -EINVAL;
}
612
/* net_device->do_ioctl hook.  All ioctls are currently rejected
 * (-EINVAL); the disabled code sketches ETHTOOL_GDRVINFO support. */
static int iss_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
#if 0
	static const struct ethtool_drvinfo info = {
		.cmd     = ETHTOOL_GDRVINFO,
		.driver  = DRIVER_NAME,
		.version = "42",
	};
	void *useraddr;
	u32 ethcmd;

	switch (cmd) {
	case SIOCETHTOOL:
		useraddr = ifr->ifr_data;
		if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
			return -EFAULT;

		switch (ethcmd) {
		case ETHTOOL_GDRVINFO:
			if (copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	default:
		return -EINVAL;
	}
#endif
	return -EINVAL;
}
644
/* Handler for the per-device lp->tl timer set up in iss_net_configure().
 * Intentionally empty. */
void iss_net_user_timer_expire(unsigned long _conn)
{
}
648
649
/* Platform-bus driver object used only so the ISS net devices show up
 * in sysfs; registered lazily on the first configure. */
static struct device_driver iss_net_driver = {
	.name  = DRIVER_NAME,
	.bus   = &platform_bus_type,
};

/* Guards the one-time driver_register() call above. */
static int driver_registered;
656
657static int iss_net_configure(int index, char *init)
658{
659 struct net_device *dev;
660 struct iss_net_private *lp;
661 int err;
662
663 if ((dev = alloc_etherdev(sizeof *lp)) == NULL) {
664 printk(KERN_ERR "eth_configure: failed to allocate device\n");
665 return 1;
666 }
667
668 /* Initialize private element. */
669
670 lp = dev->priv;
671 *lp = ((struct iss_net_private) {
672 .device_list = LIST_HEAD_INIT(lp->device_list),
673 .opened_list = LIST_HEAD_INIT(lp->opened_list),
674 .lock = SPIN_LOCK_UNLOCKED,
675 .dev = dev,
676 .index = index,
677 //.fd = -1,
678 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
679 .have_mac = 0,
680 });
681
682 /*
683 * Try all transport protocols.
684 * Note: more protocols can be added by adding '&& !X_init(lp, eth)'.
685 */
686
687 if (!tuntap_probe(lp, index, init)) {
688 printk("Invalid arguments. Skipping device!\n");
689 goto errout;
690 }
691
692 printk(KERN_INFO "Netdevice %d ", index);
693 if (lp->have_mac)
694 printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
695 lp->mac[0], lp->mac[1],
696 lp->mac[2], lp->mac[3],
697 lp->mac[4], lp->mac[5]);
698 printk(": ");
699
700 /* sysfs register */
701
702 if (!driver_registered) {
703 driver_register(&iss_net_driver);
704 driver_registered = 1;
705 }
706
707 spin_lock(&devices_lock);
708 list_add(&lp->device_list, &devices);
709 spin_unlock(&devices_lock);
710
711 lp->pdev.id = index;
712 lp->pdev.name = DRIVER_NAME;
713 platform_device_register(&lp->pdev);
714 SET_NETDEV_DEV(dev,&lp->pdev.dev);
715
716 /*
717 * If this name ends up conflicting with an existing registered
718 * netdevice, that is OK, register_netdev{,ice}() will notice this
719 * and fail.
720 */
721 snprintf(dev->name, sizeof dev->name, "eth%d", index);
722
723 dev->mtu = lp->mtu;
724 dev->open = iss_net_open;
725 dev->hard_start_xmit = iss_net_start_xmit;
726 dev->stop = iss_net_close;
727 dev->get_stats = iss_net_get_stats;
728 dev->set_multicast_list = iss_net_set_multicast_list;
729 dev->tx_timeout = iss_net_tx_timeout;
730 dev->set_mac_address = iss_net_set_mac;
731 dev->change_mtu = iss_net_change_mtu;
732 dev->do_ioctl = iss_net_ioctl;
733 dev->watchdog_timeo = (HZ >> 1);
734 dev->irq = -1;
735
736 rtnl_lock();
737 err = register_netdevice(dev);
738 rtnl_unlock();
739
740 if (err) {
741 printk("Error registering net device!\n");
742 /* XXX: should we call ->remove() here? */
743 free_netdev(dev);
744 return 1;
745 }
746
747 init_timer(&lp->tl);
748 lp->tl.function = iss_net_user_timer_expire;
749
750#if 0
751 if (lp->have_mac)
752 set_ether_mac(dev, lp->mac);
753#endif
754 return 0;
755
756errout:
757 // FIXME: unregister; free, etc..
758 return -EIO;
759
760}
761
762/* ------------------------------------------------------------------------- */
763
764/* Filled in during early boot */
765
/* List of 'ethX=...' specifications collected during early boot by
 * iss_net_setup(); consumed later by iss_net_init(). */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);

/* One queued device specification from the kernel command line. */
struct iss_net_init {
	struct list_head list;
	char *init;		/* init string */
	int index;		/* device number parsed from 'ethX' */
};
773
774/*
775 * Parse the command line and look for 'ethX=...' fields, and register all
776 * those fields. They will be later initialized in iss_net_init.
777 */
778
779#define ERR KERN_ERR "iss_net_setup: "
780
781static int iss_net_setup(char *str)
782{
783 struct iss_net_private *device = NULL;
784 struct iss_net_init *new;
785 struct list_head *ele;
786 char *end;
787 int n;
788
789 n = simple_strtoul(str, &end, 0);
790 if (end == str) {
791 printk(ERR "Failed to parse '%s'\n", str);
792 return 1;
793 }
794 if (n < 0) {
795 printk(ERR "Device %d is negative\n", n);
796 return 1;
797 }
798 if (*(str = end) != '=') {
799 printk(ERR "Expected '=' after device number\n");
800 return 1;
801 }
802
803 spin_lock(&devices_lock);
804
805 list_for_each(ele, &devices) {
806 device = list_entry(ele, struct iss_net_private, device_list);
807 if (device->index == n)
808 break;
809 }
810
811 spin_unlock(&devices_lock);
812
813 if (device && device->index == n) {
814 printk(ERR "Device %d already configured\n", n);
815 return 1;
816 }
817
818 if ((new = alloc_bootmem(sizeof new)) == NULL) {
819 printk("Alloc_bootmem failed\n");
820 return 1;
821 }
822
823 INIT_LIST_HEAD(&new->list);
824 new->index = n;
825 new->init = str + 1;
826
827 list_add_tail(&new->list, &eth_cmd_line);
828 return 1;
829}
830
831#undef ERR
832
833__setup("eth", iss_net_setup);
834
835/*
836 * Initialize all ISS Ethernet devices previously registered in iss_net_setup.
837 */
838
839static int iss_net_init(void)
840{
841 struct list_head *ele, *next;
842
843 /* Walk through all Ethernet devices specified in the command line. */
844
845 list_for_each_safe(ele, next, &eth_cmd_line) {
846 struct iss_net_init *eth;
847 eth = list_entry(ele, struct iss_net_init, list);
848 iss_net_configure(eth->index, eth->init);
849 }
850
851 return 1;
852}
853
854module_init(iss_net_init);
855
diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c
new file mode 100644
index 000000000000..2e6dcbf0cc04
--- /dev/null
+++ b/arch/xtensa/platform-iss/setup.c
@@ -0,0 +1,112 @@
1/*
2 *
3 * arch/xtensa/platform-iss/setup.c
4 *
5 * Platform specific initialization.
6 *
7 * Authors: Chris Zankel <chris@zankel.net>
8 * Joe Taylor <joe@tensilica.com>
9 *
10 * Copyright 2001 - 2005 Tensilica Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#include <linux/config.h>
19#include <linux/stddef.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/errno.h>
23#include <linux/reboot.h>
24#include <linux/pci.h>
25#include <linux/kdev_t.h>
26#include <linux/types.h>
27#include <linux/major.h>
28#include <linux/blkdev.h>
29#include <linux/console.h>
30#include <linux/delay.h>
31#include <linux/stringify.h>
32#include <linux/notifier.h>
33
34#include <asm/platform.h>
35#include <asm/bootparam.h>
36
37
/* Early platform hook called with the boot-parameter tag list.  The ISS
 * platform needs no early setup; intentionally empty. */
void __init platform_init(bp_tag_t* bootparam)
{

}
42
/* Halt hook: there is no way to stop the simulated CPU from here, so
 * announce the halt and spin forever. */
void platform_halt(void)
{
	printk (" ** Called platform_halt(), looping forever! **\n");
	while (1);
}
48
/* Power-off hook: same as halt — announce and spin forever. */
void platform_power_off(void)
{
	printk (" ** Called platform_power_off(), looping forever! **\n");
	while (1);
}
/* Restart hook: simulate a processor reset by disabling instruction
 * counting/breakpoints and loop counting, forcing PS to a known value,
 * and jumping to the hardware reset vector.  Does not return. */
void platform_restart(void)
{
	/* Flush and reset the mmu, simulate a processor reset, and
	 * jump to the reset vector. */

	__asm__ __volatile__("movi a2, 15\n\t"
			     "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
			     "movi a2, 0\n\t"
			     "wsr a2, " __stringify(ICOUNT) "\n\t"
			     "wsr a2, " __stringify(IBREAKENABLE) "\n\t"
			     "wsr a2, " __stringify(LCOUNT) "\n\t"
			     "movi a2, 0x1f\n\t"
			     "wsr a2, " __stringify(PS) "\n\t"
			     "isync\n\t"
			     "jx %0\n\t"
			     :
			     : "a" (XCHAL_RESET_VECTOR_VADDR)
			     : "a2");

	/* control never gets here */
}
75
/* NOTE(review): network.c defines iss_net_poll() as 'static int', so
 * this 'extern void' declaration mismatches and cannot resolve to that
 * symbol; it also appears unused in this file — confirm and remove. */
extern void iss_net_poll(void);

/* Spinner characters for the (currently disabled) heartbeat below. */
const char twirl[]="|/-\\|/-\\";
79
/* Periodic heartbeat hook.  The console spinner below is disabled;
 * currently a no-op. */
void platform_heartbeat(void)
{
#if 0
	static int i = 0, j = 0;

	if (--i < 0) {
		i = 99;
		printk("\r%c\r", twirl[j++]);
		if (j == 8)
			j = 0;
	}
#endif
}
93
94
95
96static int
97iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
98{
99 __asm__ __volatile__("movi a2, -1; simcall\n");
100 return NOTIFY_DONE;
101}
102
103static struct notifier_block iss_panic_block = {
104 iss_panic_event,
105 NULL,
106 0
107};
108
/* Late platform hook: hook the panic notifier so a kernel panic makes
 * the simulator exit instead of spinning. */
void __init platform_setup(char **p_cmdline)
{
	notifier_chain_register(&panic_notifier_list, &iss_panic_block);
}