Diffstat (limited to 'arch/cris')
-rw-r--r--  arch/cris/arch-v32/Kconfig | 296
-rw-r--r--  arch/cris/arch-v32/boot/Makefile | 14
-rw-r--r--  arch/cris/arch-v32/boot/compressed/Makefile | 41
-rw-r--r--  arch/cris/arch-v32/boot/compressed/README | 25
-rw-r--r--  arch/cris/arch-v32/boot/compressed/decompress.ld | 30
-rw-r--r--  arch/cris/arch-v32/boot/compressed/head.S | 193
-rw-r--r--  arch/cris/arch-v32/boot/compressed/misc.c | 318
-rw-r--r--  arch/cris/arch-v32/boot/rescue/Makefile | 36
-rw-r--r--  arch/cris/arch-v32/boot/rescue/head.S | 39
-rw-r--r--  arch/cris/arch-v32/boot/rescue/rescue.ld | 20
-rw-r--r--  arch/cris/arch-v32/drivers/Kconfig | 625
-rw-r--r--  arch/cris/arch-v32/drivers/Makefile | 13
-rw-r--r--  arch/cris/arch-v32/drivers/axisflashmap.c | 455
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 3522
-rw-r--r--  arch/cris/arch-v32/drivers/gpio.c | 766
-rw-r--r--  arch/cris/arch-v32/drivers/i2c.c | 611
-rw-r--r--  arch/cris/arch-v32/drivers/i2c.h | 15
-rw-r--r--  arch/cris/arch-v32/drivers/iop_fw_load.c | 219
-rw-r--r--  arch/cris/arch-v32/drivers/nandflash.c | 157
-rw-r--r--  arch/cris/arch-v32/drivers/pcf8563.c | 341
-rw-r--r--  arch/cris/arch-v32/drivers/pci/Makefile | 5
-rw-r--r--  arch/cris/arch-v32/drivers/pci/bios.c | 131
-rw-r--r--  arch/cris/arch-v32/drivers/pci/dma.c | 149
-rw-r--r--  arch/cris/arch-v32/drivers/sync_serial.c | 1283
-rw-r--r--  arch/cris/arch-v32/kernel/Makefile | 21
-rw-r--r--  arch/cris/arch-v32/kernel/arbiter.c | 297
-rw-r--r--  arch/cris/arch-v32/kernel/asm-offsets.c | 49
-rw-r--r--  arch/cris/arch-v32/kernel/crisksyms.c | 24
-rw-r--r--  arch/cris/arch-v32/kernel/debugport.c | 461
-rw-r--r--  arch/cris/arch-v32/kernel/dma.c | 224
-rw-r--r--  arch/cris/arch-v32/kernel/entry.S | 820
-rw-r--r--  arch/cris/arch-v32/kernel/fasttimer.c | 996
-rw-r--r--  arch/cris/arch-v32/kernel/head.S | 448
-rw-r--r--  arch/cris/arch-v32/kernel/io.c | 154
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c | 413
-rw-r--r--  arch/cris/arch-v32/kernel/kgdb.c | 1660
-rw-r--r--  arch/cris/arch-v32/kernel/kgdb_asm.S | 552
-rw-r--r--  arch/cris/arch-v32/kernel/pinmux.c | 229
-rw-r--r--  arch/cris/arch-v32/kernel/process.c | 270
-rw-r--r--  arch/cris/arch-v32/kernel/ptrace.c | 597
-rw-r--r--  arch/cris/arch-v32/kernel/setup.c | 118
-rw-r--r--  arch/cris/arch-v32/kernel/signal.c | 708
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c | 348
-rw-r--r--  arch/cris/arch-v32/kernel/time.c | 341
-rw-r--r--  arch/cris/arch-v32/kernel/traps.c | 160
-rw-r--r--  arch/cris/arch-v32/kernel/vcs_hook.c | 96
-rw-r--r--  arch/cris/arch-v32/kernel/vcs_hook.h | 42
-rw-r--r--  arch/cris/arch-v32/lib/Makefile | 6
-rw-r--r--  arch/cris/arch-v32/lib/checksum.S | 111
-rw-r--r--  arch/cris/arch-v32/lib/checksumcopy.S | 120
-rw-r--r--  arch/cris/arch-v32/lib/csumcpfruser.S | 69
-rw-r--r--  arch/cris/arch-v32/lib/dram_init.S | 120
-rw-r--r--  arch/cris/arch-v32/lib/hw_settings.S | 73
-rw-r--r--  arch/cris/arch-v32/lib/memset.c | 253
-rw-r--r--  arch/cris/arch-v32/lib/nand_init.S | 179
-rw-r--r--  arch/cris/arch-v32/lib/spinlock.S | 33
-rw-r--r--  arch/cris/arch-v32/lib/string.c | 219
-rw-r--r--  arch/cris/arch-v32/lib/usercopy.c | 470
-rw-r--r--  arch/cris/arch-v32/mm/Makefile | 3
-rw-r--r--  arch/cris/arch-v32/mm/init.c | 174
-rw-r--r--  arch/cris/arch-v32/mm/intmem.c | 139
-rw-r--r--  arch/cris/arch-v32/mm/mmu.S | 141
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c | 208
-rw-r--r--  arch/cris/arch-v32/output_arch.ld | 2
-rw-r--r--  arch/cris/arch-v32/vmlinux.lds.S | 134
65 files changed, 20786 insertions, 0 deletions
diff --git a/arch/cris/arch-v32/Kconfig b/arch/cris/arch-v32/Kconfig
new file mode 100644
index 000000000000..22f0ddc04c50
--- /dev/null
+++ b/arch/cris/arch-v32/Kconfig
@@ -0,0 +1,296 @@
1config ETRAX_DRAM_VIRTUAL_BASE
2 hex
3 depends on ETRAX_ARCH_V32
4 default "c0000000"
5
6config ETRAX_LED1G
7 string "First green LED bit"
8 depends on ETRAX_ARCH_V32
9 default "PA3"
10 help
11 Bit to use for the first green LED (network LED).
12 Most Axis products use bit A3 here.
13
14config ETRAX_LED1R
15 string "First red LED bit"
16 depends on ETRAX_ARCH_V32
17 default "PA4"
18 help
19 Bit to use for the first red LED (network LED).
20 Most Axis products use bit A4 here.
21
22config ETRAX_LED2G
23 string "Second green LED bit"
24 depends on ETRAX_ARCH_V32
25 default "PA5"
26 help
27 Bit to use for the second green LED (status LED).
28 Most Axis products use bit A5 here.
29
30config ETRAX_LED2R
31 string "Second red LED bit"
32 depends on ETRAX_ARCH_V32
33 default "PA6"
34 help
35 Bit to use for the second red LED (status LED).
36 Most Axis products use bit A6 here.
37
38config ETRAX_LED3G
39 string "Third green LED bit"
40 depends on ETRAX_ARCH_V32
41 default "PA7"
42 help
43 Bit to use for the third green LED (drive/power LED).
44 Most Axis products use bit A7 here.
45
46config ETRAX_LED3R
47 string "Third red LED bit"
48 depends on ETRAX_ARCH_V32
49 default "PA7"
50 help
51 Bit to use for the third red LED (drive/power LED).
52 Most Axis products use bit A7 here.
53
54choice
55 prompt "Product debug-port"
56 depends on ETRAX_ARCH_V32
57 default ETRAX_DEBUG_PORT0
58
59config ETRAX_DEBUG_PORT0
60 bool "Serial-0"
61 help
62 Choose a serial port for the ETRAX debug console. Defaults to
63 port 0.
64
65config ETRAX_DEBUG_PORT1
66 bool "Serial-1"
67 help
68 Use serial port 1 for the console.
69
70config ETRAX_DEBUG_PORT2
71 bool "Serial-2"
72 help
73 Use serial port 2 for the console.
74
75config ETRAX_DEBUG_PORT3
76 bool "Serial-3"
77 help
78 Use serial port 3 for the console.
79
80config ETRAX_DEBUG_PORT_NULL
81 bool "disabled"
82 help
83 Disable serial-port debugging.
84
85endchoice
86
87choice
88 prompt "Kernel GDB port"
89 depends on ETRAX_KGDB
90 default ETRAX_KGDB_PORT0
91 help
92 Choose a serial port for kernel debugging. NOTE: This port should
93 not be enabled under Drivers for built-in interfaces (as it has its
94 own initialization code) and should not be the same as the debug port.
95
96config ETRAX_KGDB_PORT0
97 bool "Serial-0"
98 help
99 Use serial port 0 for kernel debugging.
100
101config ETRAX_KGDB_PORT1
102 bool "Serial-1"
103 help
104 Use serial port 1 for kernel debugging.
105
106config ETRAX_KGDB_PORT2
107 bool "Serial-2"
108 help
109 Use serial port 2 for kernel debugging.
110
111config ETRAX_KGDB_PORT3
112 bool "Serial-3"
113 help
114 Use serial port 3 for kernel debugging.
115
116endchoice
117
118config ETRAX_MEM_GRP1_CONFIG
119 hex "MEM_GRP1_CONFIG"
120 depends on ETRAX_ARCH_V32
121 default "4044a"
122 help
123 Waitstates for flash. The default value is suitable for the
124 standard flashes used in Axis products (120 ns).
125
126config ETRAX_MEM_GRP2_CONFIG
127 hex "MEM_GRP2_CONFIG"
128 depends on ETRAX_ARCH_V32
129 default "0"
130 help
131 Waitstates for SRAM. 0 is a good choice for most Axis products.
132
133config ETRAX_MEM_GRP3_CONFIG
134 hex "MEM_GRP3_CONFIG"
135 depends on ETRAX_ARCH_V32
136 default "0"
137 help
138 Waitstates for CSP0-3. 0 is a good choice for most Axis products.
139 It may need to be changed if external devices such as extra
140 register-mapped LEDs are used.
141
142config ETRAX_MEM_GRP4_CONFIG
143 hex "MEM_GRP4_CONFIG"
144 depends on ETRAX_ARCH_V32
145 default "0"
146 help
147 Waitstates for CSP4-6. 0 is a good choice for most Axis products.
148
149config ETRAX_SDRAM_GRP0_CONFIG
150 hex "SDRAM_GRP0_CONFIG"
151 depends on ETRAX_ARCH_V32
152 default "336"
153 help
154 SDRAM configuration for group 0. The value depends on the
155 hardware configuration. The default value is suitable
156 for 32 MB organized as two 16-bit chips (e.g. Axis
157 part number 18550) connected as one 32-bit device (i.e. in
158 the same group).
159
160config ETRAX_SDRAM_GRP1_CONFIG
161 hex "SDRAM_GRP1_CONFIG"
162 depends on ETRAX_ARCH_V32
163 default "0"
164 help
165 SDRAM configuration for group 1. The default value is 0
166 because group 1 is not used in the default configuration,
167 described in the help for SDRAM_GRP0_CONFIG.
168
169config ETRAX_SDRAM_TIMING
170 hex "SDRAM_TIMING"
171 depends on ETRAX_ARCH_V32
172 default "104a"
173 help
174 SDRAM timing parameters. The default value is OK for
175 most hardware, but large SDRAMs may require a faster
176 refresh (a.k.a. 8K refresh). The default value implies a
177 100 MHz clock and SDR mode.
178
179config ETRAX_SDRAM_COMMAND
180 hex "SDRAM_COMMAND"
181 depends on ETRAX_ARCH_V32
182 default "0"
183 help
184 SDRAM command. Should be 0 unless you really know what
185 you are doing (may be != 0 for unusual address line
186 mappings such as in an MCM).
187
188config ETRAX_DEF_GIO_PA_OE
189 hex "GIO_PA_OE"
190 depends on ETRAX_ARCH_V32
191 default "1c"
192 help
193 Configures the direction of general port A bits. 1 is out, 0 is in.
194 This is often totally different depending on the product used.
195 There are some guidelines, though - if you know that only LEDs are
196 connected to port PA, they are usually connected to bits 2-4
197 and you can therefore use 1c. On other boards which don't have
198 LEDs at the general ports, these bits are used for all kinds of
199 stuff. If you don't know what to use, it is always safe to configure
200 all bits as inputs, although floating inputs aren't good.
201
202config ETRAX_DEF_GIO_PA_OUT
203 hex "GIO_PA_OUT"
204 depends on ETRAX_ARCH_V32
205 default "00"
206 help
207 Configures the initial data for the general port A bits. Most
208 products should use 00 here.
209
210config ETRAX_DEF_GIO_PB_OE
211 hex "GIO_PB_OE"
212 depends on ETRAX_ARCH_V32
213 default "00000"
214 help
215 Configures the direction of general port B bits. 1 is out, 0 is in.
216 This is often totally different depending on the product used.
217 There are some guidelines, though - if you know that only LEDs are
218 connected to port PB, they are usually connected to bits 2-4
219 and you can therefore use 1c. On other boards which don't have
220 LEDs at the general ports, these bits are used for all kinds of
221 stuff. If you don't know what to use, it is always safe to configure
222 all bits as inputs, although floating inputs aren't good.
223
224config ETRAX_DEF_GIO_PB_OUT
225 hex "GIO_PB_OUT"
226 depends on ETRAX_ARCH_V32
227 default "00000"
228 help
229 Configures the initial data for the general port B bits. Most
230 products should use 00000 here.
231
232config ETRAX_DEF_GIO_PC_OE
233 hex "GIO_PC_OE"
234 depends on ETRAX_ARCH_V32
235 default "00000"
236 help
237 Configures the direction of general port C bits. 1 is out, 0 is in.
238 This is often totally different depending on the product used.
239 There are some guidelines, though - if you know that only LEDs are
240 connected to port PC, they are usually connected to bits 2-4
241 and you can therefore use 1c. On other boards which don't have
242 LEDs at the general ports, these bits are used for all kinds of
243 stuff. If you don't know what to use, it is always safe to configure
244 all bits as inputs, although floating inputs aren't good.
245
246config ETRAX_DEF_GIO_PC_OUT
247 hex "GIO_PC_OUT"
248 depends on ETRAX_ARCH_V32
249 default "00000"
250 help
251 Configures the initial data for the general port C bits. Most
252 products should use 00000 here.
253
254config ETRAX_DEF_GIO_PD_OE
255 hex "GIO_PD_OE"
256 depends on ETRAX_ARCH_V32
257 default "00000"
258 help
259 Configures the direction of general port D bits. 1 is out, 0 is in.
260 This is often totally different depending on the product used.
261 There are some guidelines, though - if you know that only LEDs are
262 connected to port PD, they are usually connected to bits 2-4
263 and you can therefore use 1c. On other boards which don't have
264 LEDs at the general ports, these bits are used for all kinds of
265 stuff. If you don't know what to use, it is always safe to configure
266 all bits as inputs, although floating inputs aren't good.
267
268config ETRAX_DEF_GIO_PD_OUT
269 hex "GIO_PD_OUT"
270 depends on ETRAX_ARCH_V32
271 default "00000"
272 help
273 Configures the initial data for the general port D bits. Most
274 products should use 00000 here.
275
276config ETRAX_DEF_GIO_PE_OE
277 hex "GIO_PE_OE"
278 depends on ETRAX_ARCH_V32
279 default "00000"
280 help
281 Configures the direction of general port E bits. 1 is out, 0 is in.
282 This is often totally different depending on the product used.
283 There are some guidelines, though - if you know that only LEDs are
284 connected to port PE, they are usually connected to bits 2-4
285 and you can therefore use 1c. On other boards which don't have
286 LEDs at the general ports, these bits are used for all kinds of
287 stuff. If you don't know what to use, it is always safe to configure
288 all bits as inputs, although floating inputs aren't good.
289
290config ETRAX_DEF_GIO_PE_OUT
291 hex "GIO_PE_OUT"
292 depends on ETRAX_ARCH_V32
293 default "00000"
294 help
295 Configures the initial data for the general port E bits. Most
296 products should use 00000 here.
diff --git a/arch/cris/arch-v32/boot/Makefile b/arch/cris/arch-v32/boot/Makefile
new file mode 100644
index 000000000000..26f293ab9617
--- /dev/null
+++ b/arch/cris/arch-v32/boot/Makefile
@@ -0,0 +1,14 @@
1#
2# arch/cris/arch-v32/boot/Makefile
3#
4target = $(target_boot_dir)
5src = $(src_boot_dir)
6
7zImage: compressed/vmlinuz
8
9compressed/vmlinuz: $(objtree)/vmlinux
10 @$(MAKE) -f $(src)/compressed/Makefile $(objtree)/vmlinuz
11
12clean:
13 rm -f zImage tools/build compressed/vmlinux.out
14 @$(MAKE) -f $(src)/compressed/Makefile clean
diff --git a/arch/cris/arch-v32/boot/compressed/Makefile b/arch/cris/arch-v32/boot/compressed/Makefile
new file mode 100644
index 000000000000..9f77eda914ba
--- /dev/null
+++ b/arch/cris/arch-v32/boot/compressed/Makefile
@@ -0,0 +1,41 @@
1#
2# lx25/arch/cris/arch-v32/boot/compressed/Makefile
3#
4# create a compressed vmlinux image from the original vmlinux files and romfs
5#
6
7target = $(target_compressed_dir)
8src = $(src_compressed_dir)
9
10CC = gcc-cris -mlinux -march=v32 -I $(TOPDIR)/include
11CFLAGS = -O2
12LD = gcc-cris -mlinux -march=v32 -nostdlib
13OBJCOPY = objcopy-cris
14OBJCOPYFLAGS = -O binary --remove-section=.bss
15OBJECTS = $(target)/head.o $(target)/misc.o
16
17# files to compress
18SYSTEM = $(objtree)/vmlinux.bin
19
20all: vmlinuz
21
22$(target)/decompress.bin: $(OBJECTS)
23 $(LD) -T $(src)/decompress.ld -o $(target)/decompress.o $(OBJECTS)
24 $(OBJCOPY) $(OBJCOPYFLAGS) $(target)/decompress.o $(target)/decompress.bin
25
26$(objtree)/vmlinuz: $(target) piggy.img $(target)/decompress.bin
27 cat $(target)/decompress.bin piggy.img > $(objtree)/vmlinuz
28 rm -f piggy.img
29 cp $(objtree)/vmlinuz $(src)
30
31$(target)/head.o: $(src)/head.S
32 $(CC) -D__ASSEMBLY__ -c $< -o $@
33
34# gzip the kernel image
35
36piggy.img: $(SYSTEM)
37 cat $(SYSTEM) | gzip -f -9 > piggy.img
38
39clean:
40 rm -f piggy.img $(objtree)/vmlinuz vmlinuz.o decompress.o decompress.bin $(OBJECTS)
41
diff --git a/arch/cris/arch-v32/boot/compressed/README b/arch/cris/arch-v32/boot/compressed/README
new file mode 100644
index 000000000000..e33691d15c57
--- /dev/null
+++ b/arch/cris/arch-v32/boot/compressed/README
@@ -0,0 +1,25 @@
1Creation of the self-extracting compressed kernel image (vmlinuz)
2-----------------------------------------------------------------
3$Id: README,v 1.1 2003/08/21 09:37:03 johana Exp $
4
5This can be slightly confusing because it's a process with many steps.
6
7The kernel object built by the arch/etrax100/Makefile, vmlinux, is split
8by that makefile into text and data binary files, vmlinux.text and
9vmlinux.data.
10
11Those files together with a ROM filesystem can be catted together and
12burned into a flash or executed directly at the DRAM origin.
13
14They can also be catted together and compressed with gzip, which is what
15happens in this makefile. Together they make up piggy.img.
16
17The decompressor is built into the file decompress.o. It is turned into
18the binary file decompress.bin, which is catted together with piggy.img
19into the file vmlinuz. It can be executed in an arbitrary place in flash.
20
21Be careful - it assumes some things about free locations in DRAM. It
22assumes the DRAM starts at 0x40000000 and that it is at least 8 MB,
23so it puts its code at 0x40700000, and initial stack at 0x40800000.
24
25-Bjorn
diff --git a/arch/cris/arch-v32/boot/compressed/decompress.ld b/arch/cris/arch-v32/boot/compressed/decompress.ld
new file mode 100644
index 000000000000..3c837feca3ac
--- /dev/null
+++ b/arch/cris/arch-v32/boot/compressed/decompress.ld
@@ -0,0 +1,30 @@
1/*#OUTPUT_FORMAT(elf32-us-cris) */
2OUTPUT_ARCH (crisv32)
3
4MEMORY
5 {
6 dram : ORIGIN = 0x40700000,
7 LENGTH = 0x00100000
8 }
9
10SECTIONS
11{
12 .text :
13 {
14 _stext = . ;
15 *(.text)
16 *(.rodata)
17 *(.rodata.*)
18 _etext = . ;
19 } > dram
20 .data :
21 {
22 *(.data)
23 _edata = . ;
24 } > dram
25 .bss :
26 {
27 *(.bss)
28 _end = ALIGN( 0x10 ) ;
29 } > dram
30}
diff --git a/arch/cris/arch-v32/boot/compressed/head.S b/arch/cris/arch-v32/boot/compressed/head.S
new file mode 100644
index 000000000000..0c55b83b8287
--- /dev/null
+++ b/arch/cris/arch-v32/boot/compressed/head.S
@@ -0,0 +1,193 @@
1/*
2 * Code that sets up the DRAM registers, calls the
3 * decompressor to unpack the piggybacked kernel, and jumps.
4 *
5 * Copyright (C) 1999 - 2003, Axis Communications AB
6 */
7
8#include <linux/config.h>
9#define ASSEMBLER_MACROS_ONLY
10#include <asm/arch/hwregs/asm/reg_map_asm.h>
11#include <asm/arch/hwregs/asm/gio_defs_asm.h>
12#include <asm/arch/hwregs/asm/config_defs_asm.h>
13
14#define RAM_INIT_MAGIC 0x56902387
15#define COMMAND_LINE_MAGIC 0x87109563
16
17 ;; Exported symbols
18
19 .globl input_data
20
21 .text
22start:
23 di
24
25 ;; Start clocks for used blocks.
26 move.d REG_ADDR(config, regi_config, rw_clk_ctrl), $r1
27 move.d [$r1], $r0
28 or.d REG_STATE(config, rw_clk_ctrl, cpu, yes) | \
29 REG_STATE(config, rw_clk_ctrl, bif, yes) | \
30 REG_STATE(config, rw_clk_ctrl, fix_io, yes), $r0
31 move.d $r0, [$r1]
32
33 ;; If booting from NAND flash we first have to copy some
34 ;; data from NAND flash to internal RAM to get the code
35 ;; that initializes the SDRAM. Let's copy 20 KB. This
36 ;; code executes at 0x38010000 if booting from NAND, and
37 ;; we are guaranteed that at least 0x200 bytes are good, so
38 ;; let's start from there. The first 8192 bytes in the NAND
39 ;; flash are spliced with zeroes and are thus 16384 bytes.
40 move.d 0x38010200, $r10
41 move.d 0x14200, $r11 ; Start offset in NAND flash 0x10200 + 16384
42 move.d 0x5000, $r12 ; Length of copy
43
44 ;; Before this code the tools add a partition table, so the PC
45 ;; has an offset from the linked address.
46offset1:
47 lapcq ., $r13 ; get PC
48 add.d first_copy_complete-offset1, $r13
49
50#include "../../lib/nand_init.S"
51
52first_copy_complete:
53 ;; Initialize the DRAM registers.
54 cmp.d RAM_INIT_MAGIC, $r8 ; Already initialized?
55 beq dram_init_finished
56 nop
57
58#include "../../lib/dram_init.S"
59
60dram_init_finished:
61 lapcq ., $r13 ; get PC
62 add.d second_copy_complete-dram_init_finished, $r13
63
64 move.d REG_ADDR(config, regi_config, r_bootsel), $r0
65 move.d [$r0], $r0
66 and.d REG_MASK(config, r_bootsel, boot_mode), $r0
67 cmp.d REG_STATE(config, r_bootsel, boot_mode, nand), $r0
68 bne second_copy_complete ; No NAND boot
69 nop
70
71 ;; Copy 2MB from NAND flash to SDRAM (at 2-4MB into the SDRAM)
72 move.d 0x40204000, $r10
73 move.d 0x8000, $r11
74 move.d 0x200000, $r12
75 ba copy_nand_to_ram
76 nop
77second_copy_complete:
78
79 ;; Initialize the PA port.
80 move.d CONFIG_ETRAX_DEF_GIO_PA_OUT, $r0
81 move.d REG_ADDR(gio, regi_gio, rw_pa_dout), $r1
82 move.d $r0, [$r1]
83
84 move.d CONFIG_ETRAX_DEF_GIO_PA_OE, $r0
85 move.d REG_ADDR(gio, regi_gio, rw_pa_oe), $r1
86 move.d $r0, [$r1]
87
88 ;; Setup the stack to a suitably high address.
89 ;; We assume 8 MB is the minimum DRAM and put
90 ;; the SP at the top for now.
91
92 move.d 0x40800000, $sp
93
94 ;; Figure out where the compressed piggyback image is
95 ;; in the flash (since we won't try to copy it to DRAM
96 ;; before unpacking). It is at _edata, but in flash.
97 ;; Use (_edata - hereami) as an offset from the current PC.
98
99 move.d REG_ADDR(config, regi_config, r_bootsel), $r0
100 move.d [$r0], $r0
101 and.d REG_MASK(config, r_bootsel, boot_mode), $r0
102 cmp.d REG_STATE(config, r_bootsel, boot_mode, nand), $r0
103 beq hereami2
104 nop
105hereami:
106 lapcq ., $r5 ; get PC
107 and.d 0x7fffffff, $r5 ; strip any non-cache bit
108 move.d $r5, $r0 ; save for later - flash address of 'hereami'
109 add.d _edata, $r5
110 sub.d hereami, $r5 ; r5 = flash address of '_edata'
111 move.d hereami, $r1 ; destination
112 ba 2f
113 nop
114hereami2:
115 lapcq ., $r5 ; get PC
116 and.d 0x00ffffff, $r5 ; strip any non-cache bit
117 move.d $r5, $r6
118 or.d 0x40200000, $r6
119 move.d $r6, $r0 ; save for later - flash address of 'hereami2'
120 add.d _edata, $r5
121 sub.d hereami2, $r5 ; r5 = flash address of '_edata'
122 add.d 0x40200000, $r5
123 move.d hereami2, $r1 ; destination
1242:
125 ;; Copy text+data to DRAM
126
127 move.d _edata, $r2 ; end destination
1281: move.w [$r0+], $r3
129 move.w $r3, [$r1+]
130 cmp.d $r2, $r1
131 bcs 1b
132 nop
133
134 move.d input_data, $r0 ; for the decompressor
135 move.d $r5, [$r0] ; for the decompressor
136
137 ;; Clear the decompressor's BSS (between _edata and _end)
138
139 moveq 0, $r0
140 move.d _edata, $r1
141 move.d _end, $r2
1421: move.w $r0, [$r1+]
143 cmp.d $r2, $r1
144 bcs 1b
145 nop
146
147 ;; Save command line magic and address.
148 move.d _cmd_line_magic, $r12
149 move.d $r10, [$r12]
150 move.d _cmd_line_addr, $r12
151 move.d $r11, [$r12]
152
153 ;; Do the decompression and save compressed size in _inptr
154
155 jsr decompress_kernel
156 nop
157
158 ;; Restore command line magic and address.
159 move.d _cmd_line_magic, $r10
160 move.d [$r10], $r10
161 move.d _cmd_line_addr, $r11
162 move.d [$r11], $r11
163
164 ;; Put start address of root partition in r9 so the kernel can use it
165 ;; when mounting from flash
166 move.d input_data, $r0
167 move.d [$r0], $r9 ; flash address of compressed kernel
168 move.d inptr, $r0
169 add.d [$r0], $r9 ; size of compressed kernel
170 cmp.d 0x40200000, $r9
171 blo enter_kernel
172 nop
173 sub.d 0x40200000, $r9
174 add.d 0x4000, $r9
175
176enter_kernel:
177 ;; Enter the decompressed kernel
178 move.d RAM_INIT_MAGIC, $r8 ; Tell kernel that DRAM is initialized
179 jump 0x40004000 ; kernel is linked to this address
180 nop
181
182 .data
183
184input_data:
185 .dword 0 ; used by the decompressor
186_cmd_line_magic:
187 .dword 0
188_cmd_line_addr:
189 .dword 0
190is_nand_boot:
191 .dword 0
192
193#include "../../lib/hw_settings.S"
diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/arch-v32/boot/compressed/misc.c
new file mode 100644
index 000000000000..54644238ed59
--- /dev/null
+++ b/arch/cris/arch-v32/boot/compressed/misc.c
@@ -0,0 +1,318 @@
1/*
2 * misc.c
3 *
4 * $Id: misc.c,v 1.8 2005/04/24 18:34:29 starvik Exp $
5 *
6 * This is a collection of several routines from gzip-1.0.3
7 * adapted for Linux.
8 *
9 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
10 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
11 * adaptation for Linux/CRIS, Axis Communications AB, 1999
12 *
13 */
14
15/* where the piggybacked kernel image expects itself to live.
16 * it is the same address we use when we network load an uncompressed
17 * image into DRAM, and it is the address the kernel is linked to live
18 * at by vmlinux.lds.S
19 */
20
21#define KERNEL_LOAD_ADR 0x40004000
22
23#include <linux/config.h>
24
25#include <linux/types.h>
26#include <asm/arch/hwregs/reg_rdwr.h>
27#include <asm/arch/hwregs/reg_map.h>
28#include <asm/arch/hwregs/ser_defs.h>
29
30/*
31 * gzip declarations
32 */
33
34#define OF(args) args
35#define STATIC static
36
37void* memset(void* s, int c, size_t n);
38void* memcpy(void* __dest, __const void* __src,
39 size_t __n);
40
41#define memzero(s, n) memset ((s), 0, (n))
42
43
44typedef unsigned char uch;
45typedef unsigned short ush;
46typedef unsigned long ulg;
47
48#define WSIZE 0x8000 /* Window size must be at least 32k, */
49 /* and a power of two */
50
51static uch *inbuf; /* input buffer */
52static uch window[WSIZE]; /* Sliding window buffer */
53
54unsigned inptr = 0; /* index of next byte to be processed in inbuf
55 * After decompression it will contain the
56 * compressed size, and head.S will read it.
57 */
58
59static unsigned outcnt = 0; /* bytes in output buffer */
60
61/* gzip flag byte */
62#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
63#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
64#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
65#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
66#define COMMENT 0x10 /* bit 4 set: file comment present */
67#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
68#define RESERVED 0xC0 /* bit 6,7: reserved */
69
70#define get_byte() inbuf[inptr++]
71
72/* Diagnostic functions */
73#ifdef DEBUG
74# define Assert(cond,msg) {if(!(cond)) error(msg);}
75# define Trace(x) fprintf x
76# define Tracev(x) {if (verbose) fprintf x ;}
77# define Tracevv(x) {if (verbose>1) fprintf x ;}
78# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
79# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
80#else
81# define Assert(cond,msg)
82# define Trace(x)
83# define Tracev(x)
84# define Tracevv(x)
85# define Tracec(c,x)
86# define Tracecv(c,x)
87#endif
88
89static int fill_inbuf(void);
90static void flush_window(void);
91static void error(char *m);
92static void gzip_mark(void **);
93static void gzip_release(void **);
94
95extern char *input_data; /* lives in head.S */
96
97static long bytes_out = 0;
98static uch *output_data;
99static unsigned long output_ptr = 0;
100
101static void *malloc(int size);
102static void free(void *where);
103static void error(char *m);
104static void gzip_mark(void **);
105static void gzip_release(void **);
106
107static void puts(const char *);
108
109/* the "heap" is put directly after the BSS ends, at end */
110
111extern int _end;
112static long free_mem_ptr = (long)&_end;
113
114#include "../../../../../lib/inflate.c"
115
116static void *malloc(int size)
117{
118 void *p;
119
120 if (size <0) error("Malloc error");
121
122 free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
123
124 p = (void *)free_mem_ptr;
125 free_mem_ptr += size;
126
127 return p;
128}
129
130static void free(void *where)
131{ /* Don't care */
132}
133
134static void gzip_mark(void **ptr)
135{
136 *ptr = (void *) free_mem_ptr;
137}
138
139static void gzip_release(void **ptr)
140{
141 free_mem_ptr = (long) *ptr;
142}
143
144/* decompressor info and error messages to serial console */
145
146static inline void
147serout(const char *s, reg_scope_instances regi_ser)
148{
149 reg_ser_rs_stat_din rs;
150 reg_ser_rw_dout dout = {.data = *s};
151
152 do {
153 rs = REG_RD(ser, regi_ser, rs_stat_din);
154 }
155 while (!rs.tr_rdy); /* Wait for the transmitter to be ready. */
156
157 REG_WR(ser, regi_ser, rw_dout, dout);
158}
159
160static void
161puts(const char *s)
162{
163#ifndef CONFIG_ETRAX_DEBUG_PORT_NULL
164 while (*s) {
165#ifdef CONFIG_ETRAX_DEBUG_PORT0
166 serout(s, regi_ser0);
167#endif
168#ifdef CONFIG_ETRAX_DEBUG_PORT1
169 serout(s, regi_ser1);
170#endif
171#ifdef CONFIG_ETRAX_DEBUG_PORT2
172 serout(s, regi_ser2);
173#endif
174#ifdef CONFIG_ETRAX_DEBUG_PORT3
175 serout(s, regi_ser3);
176#endif
177 s++;
178 }
179/* CONFIG_ETRAX_DEBUG_PORT_NULL */
180#endif
181}
182
183void*
184memset(void* s, int c, size_t n)
185{
186 int i;
187 char *ss = (char*)s;
188
189 for (i=0;i<n;i++) ss[i] = c; return s;
190}
191
192void*
193memcpy(void* __dest, __const void* __src,
194 size_t __n)
195{
196 int i;
197 char *d = (char *)__dest, *s = (char *)__src;
198
199 for (i=0;i<__n;i++) d[i] = s[i]; return __dest;
200}
201
202/* ===========================================================================
203 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
204 * (Used for the decompressed data only.)
205 */
206
207static void
208flush_window()
209{
210 ulg c = crc; /* temporary variable */
211 unsigned n;
212 uch *in, *out, ch;
213
214 in = window;
215 out = &output_data[output_ptr];
216 for (n = 0; n < outcnt; n++) {
217 ch = *out++ = *in++;
218 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
219 }
220 crc = c;
221 bytes_out += (ulg)outcnt;
222 output_ptr += (ulg)outcnt;
223 outcnt = 0;
224}
225
226static void
227error(char *x)
228{
229 puts("\n\n");
230 puts(x);
231 puts("\n\n -- System halted\n");
232
233 while(1); /* Halt */
234}
235
236void
237setup_normal_output_buffer()
238{
239 output_data = (char *)KERNEL_LOAD_ADR;
240}
241
242static inline void
243serial_setup(reg_scope_instances regi_ser)
244{
245 reg_ser_rw_xoff xoff;
246 reg_ser_rw_tr_ctrl tr_ctrl;
247 reg_ser_rw_rec_ctrl rec_ctrl;
248 reg_ser_rw_tr_baud_div tr_baud;
249 reg_ser_rw_rec_baud_div rec_baud;
250
251 /* Turn off XOFF. */
252 xoff = REG_RD(ser, regi_ser, rw_xoff);
253
254 xoff.chr = 0;
255 xoff.automatic = regk_ser_no;
256
257 REG_WR(ser, regi_ser, rw_xoff, xoff);
258
259 /* Set baudrate and stopbits. */
260 tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
261 rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
262 tr_baud = REG_RD(ser, regi_ser, rw_tr_baud_div);
263 rec_baud = REG_RD(ser, regi_ser, rw_rec_baud_div);
264
265 tr_ctrl.stop_bits = 1; /* 2 stop bits. */
266
267 /*
268 * The baudrate setup is a bit fishy, but in the end the transmitter is
269 * set to 4800 and the receiver to 115200. The magic value is
270 * 29.493 MHz.
271 */
272 tr_ctrl.base_freq = regk_ser_f29_493;
273 rec_ctrl.base_freq = regk_ser_f29_493;
274 tr_baud.div = (29493000 / 8) / 4800;
275 rec_baud.div = (29493000 / 8) / 115200;
276
277 REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
278 REG_WR(ser, regi_ser, rw_tr_baud_div, tr_baud);
279 REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
280 REG_WR(ser, regi_ser, rw_rec_baud_div, rec_baud);
281}
282
283void
284decompress_kernel()
285{
286 char revision;
287
288 /* input_data is set in head.S */
289 inbuf = input_data;
290
291#ifdef CONFIG_ETRAX_DEBUG_PORT0
292 serial_setup(regi_ser0);
293#endif
294#ifdef CONFIG_ETRAX_DEBUG_PORT1
295 serial_setup(regi_ser1);
296#endif
297#ifdef CONFIG_ETRAX_DEBUG_PORT2
298 serial_setup(regi_ser2);
299#endif
300#ifdef CONFIG_ETRAX_DEBUG_PORT3
301 serial_setup(regi_ser3);
302#endif
303
304 setup_normal_output_buffer();
305
306 makecrc();
307
308 __asm__ volatile ("move $vr,%0" : "=rm" (revision));
309 if (revision < 32)
310 {
311 puts("You need an ETRAX FS to run Linux 2.6/crisv32.\n");
312 while(1);
313 }
314
315 puts("Uncompressing Linux...\n");
316 gunzip();
317 puts("Done. Now booting the kernel.\n");
318}
diff --git a/arch/cris/arch-v32/boot/rescue/Makefile b/arch/cris/arch-v32/boot/rescue/Makefile
new file mode 100644
index 000000000000..f668a8198724
--- /dev/null
+++ b/arch/cris/arch-v32/boot/rescue/Makefile
@@ -0,0 +1,36 @@
1#
2# Makefile for rescue code
3#
4target = $(target_rescue_dir)
5src = $(src_rescue_dir)
6
7CC = gcc-cris -mlinux -march=v32 $(LINUXINCLUDE)
8CFLAGS = -O2
9LD = gcc-cris -mlinux -march=v32 -nostdlib
10OBJCOPY = objcopy-cris
11OBJCOPYFLAGS = -O binary --remove-section=.bss
12
13all: $(target)/rescue.bin
14
15rescue: rescue.bin
16 # do nothing
17
18$(target)/rescue.bin: $(target) $(target)/head.o
19 $(LD) -T $(src)/rescue.ld -o $(target)/rescue.o $(target)/head.o
20 $(OBJCOPY) $(OBJCOPYFLAGS) $(target)/rescue.o $(target)/rescue.bin
21 cp -p $(target)/rescue.bin $(objtree)
22
23$(target):
24 mkdir -p $(target)
25
26$(target)/head.o: $(src)/head.S
27 $(CC) -D__ASSEMBLY__ -c $< -o $*.o
28
29clean:
30 rm -f $(target)/*.o $(target)/*.bin
31
32fastdep:
33
34modules:
35
36modules-install:
diff --git a/arch/cris/arch-v32/boot/rescue/head.S b/arch/cris/arch-v32/boot/rescue/head.S
new file mode 100644
index 000000000000..61ede5f30f99
--- /dev/null
+++ b/arch/cris/arch-v32/boot/rescue/head.S
@@ -0,0 +1,39 @@
1/* $Id: head.S,v 1.4 2004/11/01 16:10:28 starvik Exp $
2 *
3 * This used to be the rescue code but now that is handled by the
4 * RedBoot based RFL instead. Nothing to see here, move along.
5 */
6
7#include <linux/config.h>
8#include <asm/arch/hwregs/reg_map_asm.h>
9#include <asm/arch/hwregs/config_defs_asm.h>
10
11 .text
12
13 ;; Start clocks for used blocks.
14 move.d REG_ADDR(config, regi_config, rw_clk_ctrl), $r1
15 move.d [$r1], $r0
16 or.d REG_STATE(config, rw_clk_ctrl, cpu, yes) | \
17 REG_STATE(config, rw_clk_ctrl, bif, yes) | \
18 REG_STATE(config, rw_clk_ctrl, fix_io, yes), $r0
19 move.d $r0, [$r1]
20
21 ;; Copy 68KB NAND flash to Internal RAM (if NAND boot)
22 move.d 0x38004000, $r10
23 move.d 0x8000, $r11
24 move.d 0x11000, $r12
25 move.d copy_complete, $r13
26 and.d 0x000fffff, $r13
27 or.d 0x38000000, $r13
28
29#include "../../lib/nand_init.S"
30
31 ;; No NAND found
32 move.d CONFIG_ETRAX_PTABLE_SECTOR, $r10
33 jump $r10 ; Jump to decompressor
34 nop
35
36copy_complete:
37 move.d 0x38000000 + CONFIG_ETRAX_PTABLE_SECTOR, $r10
38 jump $r10 ; Jump to decompressor
39 nop
diff --git a/arch/cris/arch-v32/boot/rescue/rescue.ld b/arch/cris/arch-v32/boot/rescue/rescue.ld
new file mode 100644
index 000000000000..42b11aa122b2
--- /dev/null
+++ b/arch/cris/arch-v32/boot/rescue/rescue.ld
@@ -0,0 +1,20 @@
1MEMORY
2 {
3 flash : ORIGIN = 0x00000000,
4 LENGTH = 0x00100000
5 }
6
7SECTIONS
8{
9 .text :
10 {
11 stext = . ;
12 *(.text)
13 etext = . ;
14 } > flash
15 .data :
16 {
17 *(.data)
18 edata = . ;
19 } > flash
20}
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
new file mode 100644
index 000000000000..a33097f95362
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -0,0 +1,625 @@
1config ETRAX_ETHERNET
2 bool "Ethernet support"
3 depends on ETRAX_ARCH_V32
4 select NET_ETHERNET
5 help
6 This option enables the ETRAX FS built-in 10/100Mbit Ethernet
7 controller.
8
9config ETRAX_ETHERNET_HW_CSUM
10 bool "Hardware accelerated ethernet checksum and scatter/gather"
11 depends on ETRAX_ETHERNET
12 depends on ETRAX_STREAMCOPROC
13 default y
14 help
15 Hardware acceleration of checksumming and scatter/gather
16
17config ETRAX_ETHERNET_IFACE0
18 depends on ETRAX_ETHERNET
19 bool "Enable network interface 0"
20
21config ETRAX_ETHERNET_IFACE1
22 depends on ETRAX_ETHERNET
23 bool "Enable network interface 1 (uses DMA6 and DMA7)"
24
25choice
26 prompt "Network LED behavior"
27 depends on ETRAX_ETHERNET
28 default ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY
29
30config ETRAX_NETWORK_LED_ON_WHEN_LINK
31 bool "LED_on_when_link"
32 help
33 Selecting LED_on_when_link will light the LED when there is a
34 connection and will flash off when there is activity.
35
36 Selecting LED_on_when_activity will light the LED only when
37 there is activity.
38
39 This setting will also affect the behaviour of other activity LEDs
40 e.g. Bluetooth.
41
42config ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY
43 bool "LED_on_when_activity"
44 help
45 Selecting LED_on_when_link will light the LED when there is a
46 connection and will flash off when there is activity.
47
48 Selecting LED_on_when_activity will light the LED only when
49 there is activity.
50
51 This setting will also affect the behaviour of other activity LEDs
52 e.g. Bluetooth.
53
54endchoice
55
56config ETRAXFS_SERIAL
57 bool "Serial-port support"
58 depends on ETRAX_ARCH_V32
59 help
60 Enables the ETRAX FS serial driver.
61 You probably want this enabled.
62
63config ETRAX_SERIAL_PORT0
64 bool "Serial port 0 enabled"
65 depends on ETRAXFS_SERIAL
66 help
67 Enables the ETRAX FS serial driver for ser0 (ttyS0).
68 Normally you want this on. You can control what DMA channels to use
69 if you do not need DMA to something else.
70 ser0 can use dma4 or dma6 for output and dma5 or dma7 for input.
71
72choice
73 prompt "Ser0 DMA in channel "
74 depends on ETRAX_SERIAL_PORT0
75 default ETRAX_SERIAL_PORT0_NO_DMA_IN
76 help
77 What DMA channel to use for ser0.
78
79
80config ETRAX_SERIAL_PORT0_NO_DMA_IN
81 bool "Ser0 uses no DMA for input"
82 help
83 Do not use DMA for ser0 input.
84
85config ETRAX_SERIAL_PORT0_DMA7_IN
86 bool "Ser0 uses DMA7 for input"
87 depends on ETRAX_SERIAL_PORT0
88 help
89 Enables the DMA7 input channel for ser0 (ttyS0).
90 If you do not enable DMA, an interrupt for each character will be
91 used when receiving data.
92 Normally you want to use DMA, unless you use the DMA channel for
93 something else.
94
95endchoice
96
97choice
98 prompt "Ser0 DMA out channel"
99 depends on ETRAX_SERIAL_PORT0
100 default ETRAX_SERIAL_PORT0_NO_DMA_OUT
101
102config ETRAX_SERIAL_PORT0_NO_DMA_OUT
103 bool "Ser0 uses no DMA for output"
104 help
105 Do not use DMA for ser0 output.
106
107config ETRAX_SERIAL_PORT0_DMA6_OUT
108 bool "Ser0 uses DMA6 for output"
109 depends on ETRAX_SERIAL_PORT0
110 help
111 Enables the DMA6 output channel for ser0 (ttyS0).
112 If you do not enable DMA, an interrupt for each character will be
113 used when transmitting data.
114 Normally you want to use DMA, unless you use the DMA channel for
115 something else.
116
117endchoice
118
119config ETRAX_SER0_DTR_BIT
120 string "Ser 0 DTR bit (empty = not used)"
121 depends on ETRAX_SERIAL_PORT0
122
123config ETRAX_SER0_RI_BIT
124 string "Ser 0 RI bit (empty = not used)"
125 depends on ETRAX_SERIAL_PORT0
126
127config ETRAX_SER0_DSR_BIT
128 string "Ser 0 DSR bit (empty = not used)"
129 depends on ETRAX_SERIAL_PORT0
130
131config ETRAX_SER0_CD_BIT
132 string "Ser 0 CD bit (empty = not used)"
133 depends on ETRAX_SERIAL_PORT0
134
135config ETRAX_SERIAL_PORT1
136 bool "Serial port 1 enabled"
137 depends on ETRAXFS_SERIAL
138 help
139 Enables the ETRAX FS serial driver for ser1 (ttyS1).
140
141choice
142 prompt "Ser1 DMA in channel "
143 depends on ETRAX_SERIAL_PORT1
144 default ETRAX_SERIAL_PORT1_NO_DMA_IN
145 help
146 What DMA channel to use for ser1.
147
148
149config ETRAX_SERIAL_PORT1_NO_DMA_IN
150 bool "Ser1 uses no DMA for input"
151 help
152 Do not use DMA for ser1 input.
153
154config ETRAX_SERIAL_PORT1_DMA5_IN
155 bool "Ser1 uses DMA5 for input"
156 depends on ETRAX_SERIAL_PORT1
157 help
158 Enables the DMA5 input channel for ser1 (ttyS1).
159 If you do not enable DMA, an interrupt for each character will be
160 used when receiving data.
161 Normally you want this on, unless you use the DMA channel for
162 something else.
163
164endchoice
165
166choice
167 prompt "Ser1 DMA out channel "
168 depends on ETRAX_SERIAL_PORT1
169 default ETRAX_SERIAL_PORT1_NO_DMA_OUT
170 help
171 What DMA channel to use for ser1.
172
173config ETRAX_SERIAL_PORT1_NO_DMA_OUT
174 bool "Ser1 uses no DMA for output"
175 help
176 Do not use DMA for ser1 output.
177
178config ETRAX_SERIAL_PORT1_DMA4_OUT
179 bool "Ser1 uses DMA4 for output"
180 depends on ETRAX_SERIAL_PORT1
181 help
182 Enables the DMA4 output channel for ser1 (ttyS1).
183 If you do not enable DMA, an interrupt for each character will be
184 used when transmitting data.
185 Normally you want this on, unless you use the DMA channel for
186 something else.
187
188endchoice
189
190config ETRAX_SER1_DTR_BIT
191 string "Ser 1 DTR bit (empty = not used)"
192 depends on ETRAX_SERIAL_PORT1
193
194config ETRAX_SER1_RI_BIT
195 string "Ser 1 RI bit (empty = not used)"
196 depends on ETRAX_SERIAL_PORT1
197
198config ETRAX_SER1_DSR_BIT
199 string "Ser 1 DSR bit (empty = not used)"
200 depends on ETRAX_SERIAL_PORT1
201
202config ETRAX_SER1_CD_BIT
203 string "Ser 1 CD bit (empty = not used)"
204 depends on ETRAX_SERIAL_PORT1
205
206config ETRAX_SERIAL_PORT2
207 bool "Serial port 2 enabled"
208 depends on ETRAXFS_SERIAL
209 help
210 Enables the ETRAX FS serial driver for ser2 (ttyS2).
211
212choice
213 prompt "Ser2 DMA in channel "
214 depends on ETRAX_SERIAL_PORT2
215 default ETRAX_SERIAL_PORT2_NO_DMA_IN
216 help
217 What DMA channel to use for ser2.
218
219
220config ETRAX_SERIAL_PORT2_NO_DMA_IN
221 bool "Ser2 uses no DMA for input"
222 help
223 Do not use DMA for ser2 input.
224
225config ETRAX_SERIAL_PORT2_DMA3_IN
226 bool "Ser2 uses DMA3 for input"
227 depends on ETRAX_SERIAL_PORT2
228 help
229 Enables the DMA3 input channel for ser2 (ttyS2).
230 If you do not enable DMA, an interrupt for each character will be
231 used when receiving data.
232 Normally you want to use DMA, unless you use the DMA channel for
233 something else.
234
235endchoice
236
237choice
238 prompt "Ser2 DMA out channel"
239 depends on ETRAX_SERIAL_PORT2
240 default ETRAX_SERIAL_PORT2_NO_DMA_OUT
241
242config ETRAX_SERIAL_PORT2_NO_DMA_OUT
243 bool "Ser2 uses no DMA for output"
244 help
245 Do not use DMA for ser2 output.
246
247config ETRAX_SERIAL_PORT2_DMA2_OUT
248 bool "Ser2 uses DMA2 for output"
249 depends on ETRAX_SERIAL_PORT2
250 help
251 Enables the DMA2 output channel for ser2 (ttyS2).
252 If you do not enable DMA, an interrupt for each character will be
253 used when transmitting data.
254 Normally you want to use DMA, unless you use the DMA channel for
255 something else.
256
257endchoice
258
259config ETRAX_SER2_DTR_BIT
260 string "Ser 2 DTR bit (empty = not used)"
261 depends on ETRAX_SERIAL_PORT2
262
263config ETRAX_SER2_RI_BIT
264 string "Ser 2 RI bit (empty = not used)"
265 depends on ETRAX_SERIAL_PORT2
266
267config ETRAX_SER2_DSR_BIT
268 string "Ser 2 DSR bit (empty = not used)"
269 depends on ETRAX_SERIAL_PORT2
270
271config ETRAX_SER2_CD_BIT
272 string "Ser 2 CD bit (empty = not used)"
273 depends on ETRAX_SERIAL_PORT2
274
275config ETRAX_SERIAL_PORT3
276 bool "Serial port 3 enabled"
277 depends on ETRAXFS_SERIAL
278 help
279 Enables the ETRAX FS serial driver for ser3 (ttyS3).
280
281choice
282 prompt "Ser3 DMA in channel "
283 depends on ETRAX_SERIAL_PORT3
284 default ETRAX_SERIAL_PORT3_NO_DMA_IN
285 help
286 What DMA channel to use for ser3.
287
288
289config ETRAX_SERIAL_PORT3_NO_DMA_IN
290 bool "Ser3 uses no DMA for input"
291 help
292 Do not use DMA for ser3 input.
293
294config ETRAX_SERIAL_PORT3_DMA9_IN
295 bool "Ser3 uses DMA9 for input"
296 depends on ETRAX_SERIAL_PORT3
297 help
298 Enables the DMA9 input channel for ser3 (ttyS3).
299 If you do not enable DMA, an interrupt for each character will be
300 used when receiving data.
301 Normally you want to use DMA, unless you use the DMA channel for
302 something else.
303
304endchoice
305
306choice
307 prompt "Ser3 DMA out channel"
308 depends on ETRAX_SERIAL_PORT3
309 default ETRAX_SERIAL_PORT3_NO_DMA_OUT
310
311config ETRAX_SERIAL_PORT3_NO_DMA_OUT
312 bool "Ser3 uses no DMA for output"
313 help
314 Do not use DMA for ser3 output.
315
316config ETRAX_SERIAL_PORT3_DMA8_OUT
317 bool "Ser3 uses DMA8 for output"
318 depends on ETRAX_SERIAL_PORT3
319 help
320 Enables the DMA8 output channel for ser3 (ttyS3).
321 If you do not enable DMA, an interrupt for each character will be
322 used when transmitting data.
323 Normally you want to use DMA, unless you use the DMA channel for
324 something else.
325
326endchoice
327
328config ETRAX_SER3_DTR_BIT
329 string "Ser 3 DTR bit (empty = not used)"
330 depends on ETRAX_SERIAL_PORT3
331
332config ETRAX_SER3_RI_BIT
333 string "Ser 3 RI bit (empty = not used)"
334 depends on ETRAX_SERIAL_PORT3
335
336config ETRAX_SER3_DSR_BIT
337 string "Ser 3 DSR bit (empty = not used)"
338 depends on ETRAX_SERIAL_PORT3
339
340config ETRAX_SER3_CD_BIT
341 string "Ser 3 CD bit (empty = not used)"
342 depends on ETRAX_SERIAL_PORT3
343
344config ETRAX_RS485
345 bool "RS-485 support"
346 depends on ETRAXFS_SERIAL
347 help
348 Enables support for RS-485 serial communication. For a primer on
349 RS-485, see <http://www.hw.cz/english/docs/rs485/rs485.html>.
350
351config ETRAX_RS485_DISABLE_RECEIVER
352 bool "Disable serial receiver"
353 depends on ETRAX_RS485
354 help
355 It is necessary to disable the serial receiver to avoid serial
356 loopback. Not all products are able to do this in software only.
357 Axis 2400/2401 must disable the receiver.
358
359config ETRAX_AXISFLASHMAP
360 bool "Axis flash-map support"
361 depends on ETRAX_ARCH_V32
362 select MTD
363 select MTD_CFI
364 select MTD_CFI_AMDSTD
365 select MTD_OBSOLETE_CHIPS
366 select MTD_AMDSTD
367 select MTD_CHAR
368 select MTD_BLOCK
369 select MTD_PARTITIONS
370 select MTD_CONCAT
371 select MTD_COMPLEX_MAPPINGS
372 help
373 This option enables MTD mapping of flash devices. Needed to use
374 flash memories. If unsure, say Y.
375
376config ETRAX_SYNCHRONOUS_SERIAL
377 bool "Synchronous serial-port support"
378 depends on ETRAX_ARCH_V32
379 help
380 Enables the ETRAX FS synchronous serial driver.
381
382config ETRAX_SYNCHRONOUS_SERIAL_PORT0
383 bool "Synchronous serial port 0 enabled"
384 depends on ETRAX_SYNCHRONOUS_SERIAL
385 help
386 Enables synchronous serial port 0.
387
388config ETRAX_SYNCHRONOUS_SERIAL0_DMA
389 bool "Enable DMA on synchronous serial port 0."
390 depends on ETRAX_SYNCHRONOUS_SERIAL_PORT0
391 help
392 A synchronous serial port can run in manual or DMA mode.
393 Selecting this option will make it run in DMA mode.
394
395config ETRAX_SYNCHRONOUS_SERIAL_PORT1
396 bool "Synchronous serial port 1 enabled"
397 depends on ETRAX_SYNCHRONOUS_SERIAL
398 help
399 Enables synchronous serial port 1.
400
401config ETRAX_SYNCHRONOUS_SERIAL1_DMA
402 bool "Enable DMA on synchronous serial port 1."
403 depends on ETRAX_SYNCHRONOUS_SERIAL_PORT1
404 help
405 A synchronous serial port can run in manual or DMA mode.
406 Selecting this option will make it run in DMA mode.
407
408config ETRAX_PTABLE_SECTOR
409 int "Byte-offset of partition table sector"
410 depends on ETRAX_AXISFLASHMAP
411 default "65536"
412 help
413 Byte-offset of the partition table in the first flash chip.
414 The default value is 64kB and should not be changed unless
415 you know exactly what you are doing. The only valid reason
416 for changing this is when the flash block size is bigger
417 than 64kB (e.g. when using two parallel 16 bit flashes).
418
419config ETRAX_NANDFLASH
420 bool "NAND flash support"
421 depends on ETRAX_ARCH_V32
422 select MTD_NAND
423 select MTD_NAND_IDS
424 help
425 This option enables MTD mapping of NAND flash devices. Needed to use
426 NAND flash memories. If unsure, say Y.
427
428config ETRAX_I2C
429 bool "I2C driver"
430 depends on ETRAX_ARCH_V32
431 help
432 This option enables the I2C driver used by e.g. the RTC driver.
433
434config ETRAX_I2C_DATA_PORT
435 string "I2C data pin"
436 depends on ETRAX_I2C
437 help
438 The pin to use for I2C data.
439
440config ETRAX_I2C_CLK_PORT
441 string "I2C clock pin"
442 depends on ETRAX_I2C
443 help
444 The pin to use for I2C clock.
445
446config ETRAX_RTC
447 bool "Real Time Clock support"
448 depends on ETRAX_ARCH_V32
449 help
450 Enables RTC support.
451
452choice
453 prompt "RTC chip"
454 depends on ETRAX_RTC
455 default ETRAX_PCF8563
456
457config ETRAX_PCF8563
458 bool "PCF8563"
459 help
460 Philips PCF8563 RTC
461
462endchoice
463
464config ETRAX_GPIO
465 bool "GPIO support"
466 depends on ETRAX_ARCH_V32
467 ---help---
468 Enables the ETRAX general port device (major 120, minors 0-4).
469 You can use this driver to access the general port bits. It supports
470 these ioctls:
471 #include <linux/etraxgpio.h>
472 fd = open("/dev/gpioa", O_RDWR); // or /dev/gpiob
473 ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), bits_to_set);
474 ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), bits_to_clear);
475 err = ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
476 Remember that you need to set up the port directions appropriately in
477 the General configuration.
478
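For reference, the following is a minimal user-space sketch of the ioctl sequence quoted in the help text above. It is an editorial illustration only and not part of the patch; the device name and the ETRAXGPIO_IOCTYPE/IO_SETBITS/IO_CLRBITS/IO_READ_INBITS constants are taken from the <linux/etraxgpio.h> usage shown above, and the bitmask values are arbitrary examples.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/etraxgpio.h>

int main(void)
{
	unsigned long val = 0;
	int fd = open("/dev/gpioa", O_RDWR);	/* or /dev/gpiob, etc. */

	if (fd < 0)
		return 1;

	/* Set and then clear some output bits (bit 2 here, purely as an example). */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 0x04);
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), 0x04);

	/* Read back the current input bits. */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
	printf("input bits: 0x%02lx\n", val);

	close(fd);
	return 0;
}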
479config ETRAX_PA_BUTTON_BITMASK
480 hex "PA-buttons bitmask"
481 depends on ETRAX_GPIO
482 default "0x02"
483 help
484 This is a bitmask (8 bits) describing which bits on PA
485 are used for buttons.
486 Most products have a so-called TEST button on PA1; if that is true,
487 use 0x02 here.
488 Use 00 if there are no buttons on PA.
489 If the bitmask is not 00, a button driver will be included in the gpio
490 driver. ETRAX general I/O support must be enabled.
491
492config ETRAX_PA_CHANGEABLE_DIR
493 hex "PA user changeable dir mask"
494 depends on ETRAX_GPIO
495 default "0x00"
496 help
497 This is a bitmask (8 bits) describing which bits in PA a
498 user can change direction on using ioctls.
499 Bit set = changeable.
500 You probably want 0x00 here, but it depends on your hardware.
501
502config ETRAX_PA_CHANGEABLE_BITS
503 hex "PA user changeable bits mask"
504 depends on ETRAX_GPIO
505 default "0x00"
506 help
507 This is a bitmask (8 bits) describing which bits in PA
508 a user can change the value of using ioctls.
509 Bit set = changeable.
510
511config ETRAX_PB_CHANGEABLE_DIR
512 hex "PB user changeable dir mask"
513 depends on ETRAX_GPIO
514 default "0x00000"
515 help
516 This is a bitmask (18 bits) describing which bits in PB
517 a user can change direction on using ioctls.
518 Bit set = changeable.
519 You probably want 0x00000 here, but it depends on your hardware.
520
521config ETRAX_PB_CHANGEABLE_BITS
522 hex "PB user changeable bits mask"
523 depends on ETRAX_GPIO
524 default "0x00000"
525 help
526 This is a bitmask (18 bits) describing which bits in PB
527 a user can change the value of using ioctls.
528 Bit set = changeable.
529
530config ETRAX_PC_CHANGEABLE_DIR
531 hex "PC user changeable dir mask"
532 depends on ETRAX_GPIO
533 default "0x00000"
534 help
535 This is a bitmask (18 bits) describing which bits in PC
536 a user can change direction on using ioctls.
537 Bit set = changeable.
538 You probably want 0x00000 here, but it depends on your hardware.
539
540config ETRAX_PC_CHANGEABLE_BITS
541 hex "PC user changeable bits mask"
542 depends on ETRAX_GPIO
543 default "0x00000"
544 help
545 This is a bitmask (18 bits) describing which bits in PC
546 a user can change the value of using ioctls.
547 Bit set = changeable.
548
549config ETRAX_PD_CHANGEABLE_DIR
550 hex "PD user changeable dir mask"
551 depends on ETRAX_GPIO
552 default "0x00000"
553 help
554 This is a bitmask (18 bits) describing which bits in PD
555 a user can change direction on using ioctls.
556 Bit set = changeable.
557 You probably want 0x00000 here, but it depends on your hardware.
558
559config ETRAX_PD_CHANGEABLE_BITS
560 hex "PD user changeable bits mask"
561 depends on ETRAX_GPIO
562 default "0x00000"
563 help
564 This is a bitmask (18 bits) describing which bits in PD
565 a user can change the value of using ioctls.
566 Bit set = changeable.
567
568config ETRAX_PE_CHANGEABLE_DIR
569 hex "PE user changeable dir mask"
570 depends on ETRAX_GPIO
571 default "0x00000"
572 help
573 This is a bitmask (18 bits) describing which bits in PE
574 a user can change direction on using ioctls.
575 Bit set = changeable.
576 You probably want 0x00000 here, but it depends on your hardware.
577
578config ETRAX_PE_CHANGEABLE_BITS
579 hex "PE user changeable bits mask"
580 depends on ETRAX_GPIO
581 default "0x00000"
582 help
583 This is a bitmask (18 bits) describing which bits in PE
584 a user can change the value of using ioctls.
585 Bit set = changeable.
586
587config ETRAX_IDE
588 bool "ATA/IDE support"
589 depends on ETRAX_ARCH_V32
590 select IDE
591 select BLK_DEV_IDE
592 select BLK_DEV_IDEDISK
593 select BLK_DEV_IDECD
594 select BLK_DEV_IDEDMA
595 help
596 Enables the ETRAX IDE driver.
597
598config ETRAX_CARDBUS
599 bool "Cardbus support"
600 depends on ETRAX_ARCH_V32
601 select PCCARD
602 select CARDBUS
603 select HOTPLUG
604 select PCCARD_NONSTATIC
605 help
606 Enables the ETRAX CardBus driver.
607
608config PCI
609 bool
610 depends on ETRAX_CARDBUS
611 default y
612
613config ETRAX_IOP_FW_LOAD
614 tristate "IO-processor hotplug firmware loading support"
615 depends on ETRAX_ARCH_V32
616 select FW_LOADER
617 help
618 Enables IO-processor hotplug firmware loading support.
619
620config ETRAX_STREAMCOPROC
621 tristate "Stream co-processor driver enabled"
622 depends on ETRAX_ARCH_V32
623 help
624 This option enables a driver for the stream co-processor
625 for cryptographic operations.
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
new file mode 100644
index 000000000000..a359cd20ae75
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for Etrax-specific drivers
3#
4
5obj-$(CONFIG_ETRAX_STREAMCOPROC) += cryptocop.o
6obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o
7obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o
8obj-$(CONFIG_ETRAX_GPIO) += gpio.o
9obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
10obj-$(CONFIG_ETRAX_PCF8563) += pcf8563.o
11obj-$(CONFIG_ETRAX_I2C) += i2c.o
12obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
13obj-$(CONFIG_PCI) += pci/
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
new file mode 100644
index 000000000000..78ed52b1cdac
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -0,0 +1,455 @@
1/*
2 * Physical mapping layer for MTD using the Axis partitiontable format
3 *
4 * Copyright (c) 2001, 2002, 2003 Axis Communications AB
5 *
6 * This file is under the GPL.
7 *
8 * The first partition is always sector 0, whether or not we find a partition
9 * table. At the start of the next sector, there can be a partition table that
10 * tells us what other partitions to define. If there isn't, we use a default
11 * partition split defined below.
12 *
13 * Copy of os/lx25/arch/cris/arch-v10/drivers/axisflashmap.c 1.5
14 * with minor changes.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/config.h>
22#include <linux/init.h>
23
24#include <linux/mtd/concat.h>
25#include <linux/mtd/map.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/mtdram.h>
28#include <linux/mtd/partitions.h>
29
30#include <asm/arch/hwregs/config_defs.h>
31#include <asm/axisflashmap.h>
32#include <asm/mmu.h>
33
34#define MEM_CSE0_SIZE (0x04000000)
35#define MEM_CSE1_SIZE (0x04000000)
36
37#define FLASH_UNCACHED_ADDR KSEG_E
38#define FLASH_CACHED_ADDR KSEG_F
39
40#if CONFIG_ETRAX_FLASH_BUSWIDTH==1
41#define flash_data __u8
42#elif CONFIG_ETRAX_FLASH_BUSWIDTH==2
43#define flash_data __u16
44#elif CONFIG_ETRAX_FLASH_BUSWIDTH==4
45#define flash_data __u32
46#endif
47
48/* From head.S */
49extern unsigned long romfs_start, romfs_length, romfs_in_flash;
50
51/* The master mtd for the entire flash. */
52struct mtd_info* axisflash_mtd = NULL;
53
54/* Map driver functions. */
55
56static map_word flash_read(struct map_info *map, unsigned long ofs)
57{
58 map_word tmp;
59 tmp.x[0] = *(flash_data *)(map->map_priv_1 + ofs);
60 return tmp;
61}
62
63static void flash_copy_from(struct map_info *map, void *to,
64 unsigned long from, ssize_t len)
65{
66 memcpy(to, (void *)(map->map_priv_1 + from), len);
67}
68
69static void flash_write(struct map_info *map, map_word d, unsigned long adr)
70{
71 *(flash_data *)(map->map_priv_1 + adr) = (flash_data)d.x[0];
72}
73
74/*
75 * The map for chip select e0.
76 *
77 * We run into tricky coherence situations if we mix cached with uncached
78 * accesses, so we only use the uncached version here.
79 *
80 * The size field is the total size where the flash chips may be mapped on the
81 * chip select. MTD probes should find all devices there and it does not matter
82 * if there are unmapped gaps or aliases (mirrors of flash devices). The MTD
83 * probes will ignore them.
84 *
85 * The start address in map_priv_1 is in virtual memory so we cannot use
86 * MEM_CSE0_START but must rely on FLASH_UNCACHED_ADDR being the start
87 * address of cse0.
88 */
89static struct map_info map_cse0 = {
90 .name = "cse0",
91 .size = MEM_CSE0_SIZE,
92 .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
93 .read = flash_read,
94 .copy_from = flash_copy_from,
95 .write = flash_write,
96 .map_priv_1 = FLASH_UNCACHED_ADDR
97};
98
99/*
100 * The map for chip select e1.
101 *
102 * If there was a gap between cse0 and cse1, map_priv_1 would get the wrong
103 * address, but there isn't.
104 */
105static struct map_info map_cse1 = {
106 .name = "cse1",
107 .size = MEM_CSE1_SIZE,
108 .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
109 .read = flash_read,
110 .copy_from = flash_copy_from,
111 .write = flash_write,
112 .map_priv_1 = FLASH_UNCACHED_ADDR + MEM_CSE0_SIZE
113};
114
115/* If no partition-table was found, we use this default-set. */
116#define MAX_PARTITIONS 7
117#define NUM_DEFAULT_PARTITIONS 3
118
119/*
120 * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the
121 * size of one flash block and "filesystem"-partition needs 5 blocks to be able
122 * to use JFFS.
123 */
124static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = {
125 {
126 .name = "boot firmware",
127 .size = CONFIG_ETRAX_PTABLE_SECTOR,
128 .offset = 0
129 },
130 {
131 .name = "kernel",
132 .size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR),
133 .offset = CONFIG_ETRAX_PTABLE_SECTOR
134 },
135 {
136 .name = "filesystem",
137 .size = 5 * CONFIG_ETRAX_PTABLE_SECTOR,
138 .offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR)
139 }
140};
141
142/* Initialize the ones normally used. */
143static struct mtd_partition axis_partitions[MAX_PARTITIONS] = {
144 {
145 .name = "part0",
146 .size = CONFIG_ETRAX_PTABLE_SECTOR,
147 .offset = 0
148 },
149 {
150 .name = "part1",
151 .size = 0,
152 .offset = 0
153 },
154 {
155 .name = "part2",
156 .size = 0,
157 .offset = 0
158 },
159 {
160 .name = "part3",
161 .size = 0,
162 .offset = 0
163 },
164 {
165 .name = "part4",
166 .size = 0,
167 .offset = 0
168 },
169 {
170 .name = "part5",
171 .size = 0,
172 .offset = 0
173 },
174 {
175 .name = "part6",
176 .size = 0,
177 .offset = 0
178 },
179};
180
181/*
182 * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash
183 * chips, in that order (because the amd_flash driver is faster).
184 */
185static struct mtd_info *probe_cs(struct map_info *map_cs)
186{
187 struct mtd_info *mtd_cs = NULL;
188
189 printk(KERN_INFO
190 "%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n",
191 map_cs->name, map_cs->size, map_cs->map_priv_1);
192
193#ifdef CONFIG_MTD_AMDSTD
194 mtd_cs = do_map_probe("amd_flash", map_cs);
195#endif
196#ifdef CONFIG_MTD_CFI
197 if (!mtd_cs) {
198 mtd_cs = do_map_probe("cfi_probe", map_cs);
199 }
200#endif
201
202 return mtd_cs;
203}
204
205/*
206 * Probe each chip select individually for flash chips. If there are chips on
207 * both cse0 and cse1, the mtd_info structs will be concatenated to one struct
208 * so that MTD partitions can cross chip boundaries.
209 *
210 * The only known restriction on how you can mount your chips is that each
211 * chip select must hold similar flash chips. But you need external hardware
212 * to do that anyway, and you can put totally different chips on cse0 and
213 * cse1, so it isn't really much of a restriction.
214 */
215extern struct mtd_info* __init crisv32_nand_flash_probe(void);
216static struct mtd_info *flash_probe(void)
217{
218 struct mtd_info *mtd_cse0;
219 struct mtd_info *mtd_cse1;
220 struct mtd_info *mtd_nand = NULL;
221 struct mtd_info *mtd_total;
222 struct mtd_info *mtds[3];
223 int count = 0;
224
225 if ((mtd_cse0 = probe_cs(&map_cse0)) != NULL)
226 mtds[count++] = mtd_cse0;
227 if ((mtd_cse1 = probe_cs(&map_cse1)) != NULL)
228 mtds[count++] = mtd_cse1;
229
230#ifdef CONFIG_ETRAX_NANDFLASH
231 if ((mtd_nand = crisv32_nand_flash_probe()) != NULL)
232 mtds[count++] = mtd_nand;
233#endif
234
235 if (!mtd_cse0 && !mtd_cse1 && !mtd_nand) {
236 /* No chip found. */
237 return NULL;
238 }
239
240 if (count > 1) {
241#ifdef CONFIG_MTD_CONCAT
242 /* Since the concatenation layer adds a small overhead we
243 * could try to figure out if the chips in cse0 and cse1 are
244 * identical and reprobe the whole cse0+cse1 window. But since
245 * flash chips are slow, the overhead is relatively small.
246 * So we use the MTD concatenation layer instead of further
247 * complicating the probing procedure.
248 */
249 mtd_total = mtd_concat_create(mtds,
250 count,
251 "cse0+cse1+nand");
252#else
253 printk(KERN_ERR "%s and %s: Cannot concatenate due to kernel "
254 "(mis)configuration!\n", map_cse0.name, map_cse1.name);
255 mtd_total = NULL;
256#endif
257 if (!mtd_total) {
258 printk(KERN_ERR "%s and %s: Concatenation failed!\n",
259 map_cse0.name, map_cse1.name);
260
261 /* The best we can do now is to only use what we found
262 * at cse0.
263 */
264 mtd_total = mtd_cse0;
265 map_destroy(mtd_cse1);
266 }
267 } else {
268 mtd_total = mtd_cse0 ? mtd_cse0 : mtd_cse1 ? mtd_cse1 : mtd_nand;
269
270 }
271
272 return mtd_total;
273}
274
275extern unsigned long crisv32_nand_boot;
276extern unsigned long crisv32_nand_cramfs_offset;
277
278/*
279 * Probe the flash chip(s) and, if it succeeds, read the partition-table
280 * and register the partitions with MTD.
281 */
282static int __init init_axis_flash(void)
283{
284 struct mtd_info *mymtd;
285 int err = 0;
286 int pidx = 0;
287 struct partitiontable_head *ptable_head = NULL;
288 struct partitiontable_entry *ptable;
289 int use_default_ptable = 1; /* Until proven otherwise. */
290 const char *pmsg = KERN_INFO " /dev/flash%d at 0x%08x, size 0x%08x\n";
291 static char page[512];
292 size_t len;
293
294#ifndef CONFIG_ETRAXFS_SIM
295 mymtd = flash_probe();
296 if (mymtd)
297 	mymtd->read(mymtd, CONFIG_ETRAX_PTABLE_SECTOR, 512, &len, page);
298 ptable_head = (struct partitiontable_head *)(page + PARTITION_TABLE_OFFSET);
299 if (!mymtd) {
300 /* There's no reason to use this module if no flash chip can
301 * be identified. Make sure that's understood.
302 */
303 printk(KERN_INFO "axisflashmap: Found no flash chip.\n");
304 } else {
305 printk(KERN_INFO "%s: 0x%08x bytes of flash memory.\n",
306 mymtd->name, mymtd->size);
307 axisflash_mtd = mymtd;
308 }
309
310 if (mymtd) {
311 mymtd->owner = THIS_MODULE;
312 }
313 pidx++; /* First partition is always set to the default. */
314
315 if (ptable_head && (ptable_head->magic == PARTITION_TABLE_MAGIC)
316 && (ptable_head->size <
317 (MAX_PARTITIONS * sizeof(struct partitiontable_entry) +
318 PARTITIONTABLE_END_MARKER_SIZE))
319 && (*(unsigned long*)((void*)ptable_head + sizeof(*ptable_head) +
320 ptable_head->size -
321 PARTITIONTABLE_END_MARKER_SIZE)
322 == PARTITIONTABLE_END_MARKER)) {
323 /* Looks like the start, a sane length and the end marker of a
324 * partition table; let's check the checksum etc.
325 */
326 int ptable_ok = 0;
327 struct partitiontable_entry *max_addr =
328 (struct partitiontable_entry *)
329 ((unsigned long)ptable_head + sizeof(*ptable_head) +
330 ptable_head->size);
331 unsigned long offset = CONFIG_ETRAX_PTABLE_SECTOR;
332 unsigned char *p;
333 unsigned long csum = 0;
334
335 ptable = (struct partitiontable_entry *)
336 ((unsigned long)ptable_head + sizeof(*ptable_head));
337
338 /* Let's be PARANOID and check the checksum. */
339 p = (unsigned char*) ptable;
340
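		/* The checksum is a plain 32-bit sum of the bytes of the entry
		 * area (four bytes are added per iteration below), compared
		 * against the checksum field stored in the partition table
		 * header. */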
341 while (p <= (unsigned char*)max_addr) {
342 csum += *p++;
343 csum += *p++;
344 csum += *p++;
345 csum += *p++;
346 }
347 ptable_ok = (csum == ptable_head->checksum);
348
349 /* Read the entries and use/show the info. */
350 printk(KERN_INFO " Found a%s partition table at 0x%p-0x%p.\n",
351 (ptable_ok ? " valid" : "n invalid"), ptable_head,
352 max_addr);
353
354 /* We have found a working bootblock. Now read the
355 * partition table. Scan the table. It ends when
356 * there is 0xffffffff, that is, empty flash.
357 */
358 while (ptable_ok
359 && ptable->offset != 0xffffffff
360 && ptable < max_addr
361 && pidx < MAX_PARTITIONS) {
362
363 axis_partitions[pidx].offset = offset + ptable->offset + (crisv32_nand_boot ? 16384 : 0);
364 axis_partitions[pidx].size = ptable->size;
365
366 printk(pmsg, pidx, axis_partitions[pidx].offset,
367 axis_partitions[pidx].size);
368 pidx++;
369 ptable++;
370 }
371 use_default_ptable = !ptable_ok;
372 }
373
374 if (romfs_in_flash) {
375 /* Add an overlapping device for the root partition (romfs). */
376
377 axis_partitions[pidx].name = "romfs";
378 if (crisv32_nand_boot) {
379 char* data = kmalloc(1024, GFP_KERNEL);
380 int len;
381 int offset = crisv32_nand_cramfs_offset & ~(1024-1);
382 char* tmp;
383
384 mymtd->read(mymtd, offset, 1024, &len, data);
385 tmp = &data[crisv32_nand_cramfs_offset % 1024];
386 axis_partitions[pidx].size = *(unsigned*)(tmp + 4);
387 axis_partitions[pidx].offset = crisv32_nand_cramfs_offset;
388 kfree(data);
389 } else {
390 axis_partitions[pidx].size = romfs_length;
391 axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR;
392 }
393
394 axis_partitions[pidx].mask_flags |= MTD_WRITEABLE;
395
396 printk(KERN_INFO
397 " Adding readonly flash partition for romfs image:\n");
398 printk(pmsg, pidx, axis_partitions[pidx].offset,
399 axis_partitions[pidx].size);
400 pidx++;
401 }
402
403 if (mymtd) {
404 if (use_default_ptable) {
405 printk(KERN_INFO " Using default partition table.\n");
406 err = add_mtd_partitions(mymtd, axis_default_partitions,
407 NUM_DEFAULT_PARTITIONS);
408 } else {
409 err = add_mtd_partitions(mymtd, axis_partitions, pidx);
410 }
411
412 if (err) {
413 panic("axisflashmap could not add MTD partitions!\n");
414 }
415 }
416/* CONFIG_ETRAXFS_SIM */
417#endif
418
419 if (!romfs_in_flash) {
420 /* Create a RAM device for the root partition (romfs). */
421
422#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
423 /* No use trying to boot this kernel from RAM. Panic! */
424 printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
425 "device due to kernel (mis)configuration!\n");
426 panic("This kernel cannot boot from RAM!\n");
427#else
428 struct mtd_info *mtd_ram;
429
430 mtd_ram = (struct mtd_info *)kmalloc(sizeof(struct mtd_info),
431 GFP_KERNEL);
432 if (!mtd_ram) {
433 panic("axisflashmap couldn't allocate memory for "
434 "mtd_info!\n");
435 }
436
437 printk(KERN_INFO " Adding RAM partition for romfs image:\n");
438 printk(pmsg, pidx, romfs_start, romfs_length);
439
440 err = mtdram_init_device(mtd_ram, (void*)romfs_start,
441 romfs_length, "romfs");
442 if (err) {
443 panic("axisflashmap could not initialize MTD RAM "
444 "device!\n");
445 }
446#endif
447 }
448
449 return err;
450}
451
452/* This adds the above to the kernels init-call chain. */
453module_init(init_axis_flash);
454
455EXPORT_SYMBOL(axisflash_mtd);
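For reference, the partition-table check in init_axis_flash() above amounts to a plain byte-wise sum over the table area that is compared against a checksum field in the table header. Below is a minimal, self-contained sketch of that idea (not part of the driver); the struct layout, field types and function name are illustrative assumptions that only mirror the fields the driver accesses (magic, size, checksum) — the real definitions live in asm/axisflashmap.h — and the driver's own loop differs slightly in that it adds four bytes per iteration and walks up to and including max_addr.

#include <stdint.h>

/* Hypothetical mirror of the header fields used by the driver (assumed
 * layout; see asm/axisflashmap.h for the real one). */
struct ptable_head_sketch {
	uint32_t magic;    /* PARTITION_TABLE_MAGIC */
	uint32_t size;     /* Length in bytes of the entries + end marker. */
	uint32_t checksum; /* Byte-wise sum of the area following the header. */
};

/* Sum every byte following the header and compare against the stored
 * checksum, the same idea as the paranoia check in init_axis_flash(). */
static int ptable_checksum_ok(const struct ptable_head_sketch *head)
{
	const unsigned char *p = (const unsigned char *)(head + 1);
	const unsigned char *end = p + head->size;
	uint32_t csum = 0;

	while (p < end)
		csum += *p++;

	return csum == head->checksum;
}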
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
new file mode 100644
index 000000000000..ca72076c630a
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -0,0 +1,3522 @@
1/* $Id: cryptocop.c,v 1.13 2005/04/21 17:27:55 henriken Exp $
2 *
3 * Stream co-processor driver for the ETRAX FS
4 *
5 * Copyright (C) 2003-2005 Axis Communications AB
6 */
7
8#include <linux/init.h>
9#include <linux/sched.h>
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/string.h>
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/spinlock.h>
16#include <linux/stddef.h>
17
18#include <asm/uaccess.h>
19#include <asm/io.h>
20#include <asm/atomic.h>
21
22#include <linux/list.h>
23#include <linux/interrupt.h>
24
25#include <asm/signal.h>
26#include <asm/irq.h>
27
28#include <asm/arch/dma.h>
29#include <asm/arch/hwregs/dma.h>
30#include <asm/arch/hwregs/reg_map.h>
31#include <asm/arch/hwregs/reg_rdwr.h>
32#include <asm/arch/hwregs/intr_vect_defs.h>
33
34#include <asm/arch/hwregs/strcop.h>
35#include <asm/arch/hwregs/strcop_defs.h>
36#include <asm/arch/cryptocop.h>
37
38
39
40#define DESCR_ALLOC_PAD (31)
41
42struct cryptocop_dma_desc {
43 char *free_buf; /* If non-null will be kfreed in free_cdesc() */
44 dma_descr_data *dma_descr;
45
46 unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];
47
48 unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
49 struct cryptocop_dma_desc *next;
50};
51
52
53struct cryptocop_int_operation{
54 void *alloc_ptr;
55 cryptocop_session_id sid;
56
57 dma_descr_context ctx_out;
58 dma_descr_context ctx_in;
59
60 /* DMA descriptors allocated by driver. */
61 struct cryptocop_dma_desc *cdesc_out;
62 struct cryptocop_dma_desc *cdesc_in;
63
64 /* Strcop config to use. */
65 cryptocop_3des_mode tdes_mode;
66 cryptocop_csum_type csum_mode;
67
68 /* DMA descrs provided by consumer. */
69 dma_descr_data *ddesc_out;
70 dma_descr_data *ddesc_in;
71};
72
73
74struct cryptocop_tfrm_ctx {
75 cryptocop_tfrm_id tid;
76 unsigned int blocklength;
77
78 unsigned int start_ix;
79
80 struct cryptocop_tfrm_cfg *tcfg;
81 struct cryptocop_transform_ctx *tctx;
82
83 unsigned char previous_src;
84 unsigned char current_src;
85
86 /* Values to use in metadata out. */
87 unsigned char hash_conf;
88 unsigned char hash_mode;
89 unsigned char ciph_conf;
90 unsigned char cbcmode;
91 unsigned char decrypt;
92
93 unsigned int requires_padding:1;
94 unsigned int strict_block_length:1;
95 unsigned int active:1;
96 unsigned int done:1;
97 size_t consumed;
98 size_t produced;
99
100 /* Pad (input) descriptors to put in the DMA out list when the transform
101 * output is put on the DMA in list. */
102 struct cryptocop_dma_desc *pad_descs;
103
104 struct cryptocop_tfrm_ctx *prev_src;
105 struct cryptocop_tfrm_ctx *curr_src;
106
107 /* Mapping to HW. */
108 unsigned char unit_no;
109};
110
111
112struct cryptocop_private{
113 cryptocop_session_id sid;
114 struct cryptocop_private *next;
115};
116
117/* Session list. */
118
119struct cryptocop_transform_ctx{
120 struct cryptocop_transform_init init;
121 unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
122 unsigned int dec_key_set:1;
123
124 struct cryptocop_transform_ctx *next;
125};
126
127
128struct cryptocop_session{
129 cryptocop_session_id sid;
130
131 struct cryptocop_transform_ctx *tfrm_ctx;
132
133 struct cryptocop_session *next;
134};
135
136/* Priority levels for jobs sent to the cryptocop. Checksum operations from
137 the kernel have the highest priority since TCP/IP stack processing must not
138 be a bottleneck. */
139typedef enum {
140 cryptocop_prio_kernel_csum = 0,
141 cryptocop_prio_kernel = 1,
142 cryptocop_prio_user = 2,
143 cryptocop_prio_no_prios = 3
144} cryptocop_queue_priority;
145
146struct cryptocop_prio_queue{
147 struct list_head jobs;
148 cryptocop_queue_priority prio;
149};
150
151struct cryptocop_prio_job{
152 struct list_head node;
153 cryptocop_queue_priority prio;
154
155 struct cryptocop_operation *oper;
156 struct cryptocop_int_operation *iop;
157};
158
159struct ioctl_job_cb_ctx {
160 unsigned int processed:1;
161};
162
163
164static struct cryptocop_session *cryptocop_sessions = NULL;
165spinlock_t cryptocop_sessions_lock;
166
167/* Next Session ID to assign. */
168static cryptocop_session_id next_sid = 1;
169
170/* Pad for checksum. */
171static const char csum_zero_pad[1] = {0x00};
172
173/* Trash buffer for mem2mem operations. */
174#define MEM2MEM_DISCARD_BUF_LENGTH (512)
175static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];
176
177/* Descriptor pool. */
178/* FIXME Tweak this value. */
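/* Statically allocated descriptors handed out by alloc_cdesc() when the
 * caller asks for GFP_ATOMIC (i.e. may not sleep); other allocations use
 * kmalloc() directly. */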
179#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE (100)
180static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
181static struct cryptocop_dma_desc *descr_pool_free_list;
182static int descr_pool_no_free;
183static spinlock_t descr_pool_lock;
184
185/* Lock to stop the cryptocop from starting processing of a new operation. The
186 holder of this lock MUST call cryptocop_start_job() after unlocking it. */
187spinlock_t cryptocop_process_lock;
188
189static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
190static spinlock_t cryptocop_job_queue_lock;
191static struct cryptocop_prio_job *cryptocop_running_job = NULL;
192static spinlock_t running_job_lock;
193
194/* The interrupt handler appends completed jobs to this list. The scheduled
195 * tasklet removes them upon sending the response to the crypto consumer. */
196static struct list_head cryptocop_completed_jobs;
197static spinlock_t cryptocop_completed_jobs_lock;
198
199DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
200
201
202/** Local functions. **/
203
204static int cryptocop_open(struct inode *, struct file *);
205
206static int cryptocop_release(struct inode *, struct file *);
207
208static int cryptocop_ioctl(struct inode *inode, struct file *file,
209 unsigned int cmd, unsigned long arg);
210
211static void cryptocop_start_job(void);
212
213static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
214static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);
215
216static int cryptocop_job_queue_init(void);
217static void cryptocop_job_queue_close(void);
218
219static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
220
221static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
222
223static int transform_ok(struct cryptocop_transform_init *tinit);
224
225static struct cryptocop_session *get_session(cryptocop_session_id sid);
226
227static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);
228
229static void delete_internal_operation(struct cryptocop_int_operation *iop);
230
231static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength);
232
233static int init_stream_coprocessor(void);
234
235static void __exit exit_stream_coprocessor(void);
236
237/*#define LDEBUG*/
238#ifdef LDEBUG
239#define DEBUG(s) s
240#define DEBUG_API(s) s
241static void print_cryptocop_operation(struct cryptocop_operation *cop);
242static void print_dma_descriptors(struct cryptocop_int_operation *iop);
243static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
244static void print_lock_status(void);
245static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
246#define assert(s) do{if (!(s)) panic(#s);} while(0)
247#else
248#define DEBUG(s)
249#define DEBUG_API(s)
250#define assert(s)
251#endif
252
253
254/* Transform constants. */
255#define DES_BLOCK_LENGTH (8)
256#define AES_BLOCK_LENGTH (16)
257#define MD5_BLOCK_LENGTH (64)
258#define SHA1_BLOCK_LENGTH (64)
259#define CSUM_BLOCK_LENGTH (2)
260#define MD5_STATE_LENGTH (16)
261#define SHA1_STATE_LENGTH (20)
262
263/* The device number. */
264#define CRYPTOCOP_MAJOR (254)
265#define CRYPTOCOP_MINOR (0)
266
267
268
269struct file_operations cryptocop_fops = {
270 .owner = THIS_MODULE,
271 .open = cryptocop_open,
272 .release = cryptocop_release,
273 .ioctl = cryptocop_ioctl
274};
275
276
277static void free_cdesc(struct cryptocop_dma_desc *cdesc)
278{
279 DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
280 if (cdesc->free_buf) kfree(cdesc->free_buf);
281
282 if (cdesc->from_pool) {
283 unsigned long int flags;
284 spin_lock_irqsave(&descr_pool_lock, flags);
285 cdesc->next = descr_pool_free_list;
286 descr_pool_free_list = cdesc;
287 ++descr_pool_no_free;
288 spin_unlock_irqrestore(&descr_pool_lock, flags);
289 } else {
290 kfree(cdesc);
291 }
292}
293
294
295static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
296{
297 int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
298 struct cryptocop_dma_desc *cdesc;
299
300 if (use_pool) {
301 unsigned long int flags;
302 spin_lock_irqsave(&descr_pool_lock, flags);
303 if (!descr_pool_free_list) {
304 spin_unlock_irqrestore(&descr_pool_lock, flags);
305 DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
306 return NULL;
307 }
308 cdesc = descr_pool_free_list;
309 descr_pool_free_list = descr_pool_free_list->next;
310 --descr_pool_no_free;
311 spin_unlock_irqrestore(&descr_pool_lock, flags);
312 cdesc->from_pool = 1;
313 } else {
314 cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
315 if (!cdesc) {
316 DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
317 return NULL;
318 }
319 cdesc->from_pool = 0;
320 }
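	/* Point dma_descr at the first 32-byte aligned address inside
	 * dma_descr_buf; the buffer is over-allocated by DESCR_ALLOC_PAD (31)
	 * bytes so such an address always exists within it. */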
321 cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);
322
323 cdesc->next = NULL;
324
325 cdesc->free_buf = NULL;
326 cdesc->dma_descr->out_eop = 0;
327 cdesc->dma_descr->in_eop = 0;
328 cdesc->dma_descr->intr = 0;
329 cdesc->dma_descr->eol = 0;
330 cdesc->dma_descr->wait = 0;
331 cdesc->dma_descr->buf = NULL;
332 cdesc->dma_descr->after = NULL;
333
334 DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
335 return cdesc;
336}
337
338
339static void setup_descr_chain(struct cryptocop_dma_desc *cd)
340{
341 DEBUG(printk("setup_descr_chain: entering\n"));
342 while (cd) {
343 if (cd->next) {
344 cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
345 } else {
346 cd->dma_descr->next = NULL;
347 }
348 cd = cd->next;
349 }
350 DEBUG(printk("setup_descr_chain: exit\n"));
351}
352
353
354/* Create a pad descriptor for the transform.
355 * Return -1 for error, 0 if pad created. */
356static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
357{
358 struct cryptocop_dma_desc *cdesc = NULL;
359 int error = 0;
360 struct strcop_meta_out mo = {
361 .ciphsel = src_none,
362 .hashsel = src_none,
363 .csumsel = src_none
364 };
365 char *pad;
366 size_t plen;
367
368 DEBUG(printk("create_pad_descriptor: start.\n"));
369 /* Setup pad descriptor. */
370
371 DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
372 cdesc = alloc_cdesc(alloc_flag);
373 if (!cdesc){
374 DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
375 goto error_cleanup;
376 }
377 switch (tc->unit_no) {
378 case src_md5:
379 error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
380 if (error){
381 DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
382 goto error_cleanup;
383 }
384 cdesc->free_buf = pad;
385 mo.hashsel = src_dma;
386 mo.hashconf = tc->hash_conf;
387 mo.hashmode = tc->hash_mode;
388 break;
389 case src_sha1:
390 error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
391 if (error){
392 DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
393 goto error_cleanup;
394 }
395 cdesc->free_buf = pad;
396 mo.hashsel = src_dma;
397 mo.hashconf = tc->hash_conf;
398 mo.hashmode = tc->hash_mode;
399 break;
400 case src_csum:
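		/* The checksum unit consumes 16-bit blocks: pad with a single
		 * zero byte if an odd number of bytes has been consumed,
		 * otherwise emit a zero-length pad descriptor. */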
401 if (tc->consumed % tc->blocklength){
402 pad = (char*)csum_zero_pad;
403 plen = 1;
404 } else {
405 pad = (char*)cdesc; /* Use any pointer. */
406 plen = 0;
407 }
408 mo.csumsel = src_dma;
409 break;
410 }
411 cdesc->dma_descr->wait = 1;
412 cdesc->dma_descr->out_eop = 1; /* Since this is a pad output is pushed. EOP is ok here since the padded unit is the only one active. */
413 cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
414 cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;
415
416 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
417 *pad_desc = cdesc;
418
419 return 0;
420
421 error_cleanup:
422 if (cdesc) free_cdesc(cdesc);
423 return -1;
424}
425
426
427static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
428{
429 struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag);
430 struct strcop_meta_out mo = {0};
431
432 DEBUG(printk("setup_key_dl_desc\n"));
433
434 if (!key_desc) {
435 DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
436 return -ENOMEM;
437 }
438
439 /* Download key. */
440 if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
441 /* Precook the AES decrypt key. */
442 if (!tc->tctx->dec_key_set){
443 get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
444 tc->tctx->dec_key_set = 1;
445 }
446 key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
447 key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
448 } else {
449 key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
450 key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
451 }
452 /* Setup metadata. */
453 mo.dlkey = 1;
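	/* With dlkey set, the decrypt and hashmode bits below appear to be
	 * reused as a two-bit encoding of the downloaded key length
	 * (64/128/192/256 bits) rather than carrying their usual meaning. */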
454 switch (tc->tctx->init.keylen) {
455 case 64:
456 mo.decrypt = 0;
457 mo.hashmode = 0;
458 break;
459 case 128:
460 mo.decrypt = 0;
461 mo.hashmode = 1;
462 break;
463 case 192:
464 mo.decrypt = 1;
465 mo.hashmode = 0;
466 break;
467 case 256:
468 mo.decrypt = 1;
469 mo.hashmode = 1;
470 break;
471 default:
472 break;
473 }
474 mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
475 key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
476
477 key_desc->dma_descr->out_eop = 1;
478 key_desc->dma_descr->wait = 1;
479 key_desc->dma_descr->intr = 0;
480
481 *kd = key_desc;
482 return 0;
483}
484
485static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
486{
487 struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag);
488 struct strcop_meta_out mo = {0};
489
490 DEBUG(printk("setup_cipher_iv_desc\n"));
491
492 if (!iv_desc) {
493 DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
494 return -ENOMEM;
495 }
496 /* Download IV. */
497 iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
498 iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;
499
500 /* Setup metadata. */
501 mo.hashsel = mo.csumsel = src_none;
502 mo.ciphsel = src_dma;
503 mo.ciphconf = tc->ciph_conf;
504 mo.cbcmode = tc->cbcmode;
505
506 iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
507
508 iv_desc->dma_descr->out_eop = 0;
509 iv_desc->dma_descr->wait = 1;
510 iv_desc->dma_descr->intr = 0;
511
512 *id = iv_desc;
513 return 0;
514}
515
516/* Map the output length of the transform to the operation output, starting at the inject index. */
517static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
518{
519 int err = 0;
520 struct cryptocop_dma_desc head = {0};
521 struct cryptocop_dma_desc *outdesc = &head;
522 size_t iov_offset = 0;
523 size_t out_ix = 0;
524 int outiov_ix = 0;
525 struct strcop_meta_in mi = {0};
526
527 size_t out_length = tc->produced;
528 int rem_length;
529 int dlength;
530
531 assert(out_length != 0);
532 if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
533 DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
534 return -EINVAL;
535 }
536 /* Traverse the out iovec until the result inject index is reached. */
537 while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
538 out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
539 outiov_ix++;
540 }
541 if (outiov_ix >= operation->tfrm_op.outcount){
542 DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
543 return -EINVAL;
544 }
545 iov_offset = tc->tcfg->inject_ix - out_ix;
546 mi.dmasel = tc->unit_no;
547
548 /* Setup the output descriptors. */
549 while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
550 outdesc->next = alloc_cdesc(alloc_flag);
551 if (!outdesc->next) {
552 DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
553 err = -ENOMEM;
554 goto error_cleanup;
555 }
556 outdesc = outdesc->next;
557 rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
558 dlength = (out_length < rem_length) ? out_length : rem_length;
559
560 DEBUG(printk("create_input_descriptors:\n"
561 "outiov_ix=%d, rem_length=%d, dlength=%d\n"
562 "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
563 "outcount=%d, outiov_ix=%d\n",
564 outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));
565
566 outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
567 outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
568 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
569
570 out_length -= dlength;
571 iov_offset += dlength;
572 if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
573 iov_offset = 0;
574 ++outiov_ix;
575 }
576 }
577 if (out_length > 0){
578 DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));
579 err = -EINVAL;
580 goto error_cleanup;
581 }
582 /* Set sync in last descriptor. */
583 mi.sync = 1;
584 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
585
586 *id = head.next;
587 return 0;
588
589 error_cleanup:
590 while (head.next) {
591 outdesc = head.next->next;
592 free_cdesc(head.next);
593 head.next = outdesc;
594 }
595 return err;
596}
597
598
599static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
600{
601 while (desc_len != 0) {
602 struct cryptocop_dma_desc *cdesc;
603 int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
604 int dlength = (desc_len < rem_length) ? desc_len : rem_length;
605
606 cdesc = alloc_cdesc(alloc_flag);
607 if (!cdesc) {
608 DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
609 return -ENOMEM;
610 }
611 (*current_out_cdesc)->next = cdesc;
612 (*current_out_cdesc) = cdesc;
613
614 cdesc->free_buf = NULL;
615
616 cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
617 cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;
618
619 desc_len -= dlength;
620 *iniov_offset += dlength;
621 assert(desc_len >= 0);
622 if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
623 *iniov_offset = 0;
624 ++(*iniov_ix);
625 if (*iniov_ix > operation->tfrm_op.incount) {
626 DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
627 return -EINVAL;
628 }
629 }
630 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
631 } /* while (desc_len != 0) */
632 /* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
633 (*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */
634
635 return 0;
636}
637
638
639static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
640{
641 DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
642 if (tc->tcfg) {
643 int failed = 0;
644 struct cryptocop_dma_desc *idescs = NULL;
645 DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
646 if (tc->pad_descs) {
647 DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
648 while (tc->pad_descs) {
649 DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
650 (*current_out_cdesc)->next = tc->pad_descs;
651 tc->pad_descs = tc->pad_descs->next;
652 (*current_out_cdesc) = (*current_out_cdesc)->next;
653 }
654 }
655
656 /* Setup and append output descriptors to DMA in list. */
657 if (tc->unit_no == src_dma){
658 /* mem2mem. Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
659 struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
660 unsigned int start_ix = tc->start_ix;
661 while (start_ix){
662 unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;
663 (*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
664 if (!(*current_in_cdesc)->next){
665 DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
666 return -ENOMEM;
667 }
668 (*current_in_cdesc) = (*current_in_cdesc)->next;
669 (*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
670 (*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
671 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
672 start_ix -= desclen;
673 }
674 mi.sync = 1;
675 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
676 }
677
678 failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
679 if (failed){
680 DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
681 return failed;
682 }
683 DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
684 while (idescs) {
685 DEBUG(printk("append descriptor 0x%p\n", idescs));
686 (*current_in_cdesc)->next = idescs;
687 idescs = idescs->next;
688 (*current_in_cdesc) = (*current_in_cdesc)->next;
689 }
690 }
691 return 0;
692}
693
694
695
696static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
697{
698 struct cryptocop_session *sess;
699 struct cryptocop_transform_ctx *tctx;
700
701 struct cryptocop_tfrm_ctx digest_ctx = {
702 .previous_src = src_none,
703 .current_src = src_none,
704 .start_ix = 0,
705 .requires_padding = 1,
706 .strict_block_length = 0,
707 .hash_conf = 0,
708 .hash_mode = 0,
709 .ciph_conf = 0,
710 .cbcmode = 0,
711 .decrypt = 0,
712 .consumed = 0,
713 .produced = 0,
714 .pad_descs = NULL,
715 .active = 0,
716 .done = 0,
717 .prev_src = NULL,
718 .curr_src = NULL,
719 .tcfg = NULL};
720 struct cryptocop_tfrm_ctx cipher_ctx = {
721 .previous_src = src_none,
722 .current_src = src_none,
723 .start_ix = 0,
724 .requires_padding = 0,
725 .strict_block_length = 1,
726 .hash_conf = 0,
727 .hash_mode = 0,
728 .ciph_conf = 0,
729 .cbcmode = 0,
730 .decrypt = 0,
731 .consumed = 0,
732 .produced = 0,
733 .pad_descs = NULL,
734 .active = 0,
735 .done = 0,
736 .prev_src = NULL,
737 .curr_src = NULL,
738 .tcfg = NULL};
739 struct cryptocop_tfrm_ctx csum_ctx = {
740 .previous_src = src_none,
741 .current_src = src_none,
742 .start_ix = 0,
743 .blocklength = 2,
744 .requires_padding = 1,
745 .strict_block_length = 0,
746 .hash_conf = 0,
747 .hash_mode = 0,
748 .ciph_conf = 0,
749 .cbcmode = 0,
750 .decrypt = 0,
751 .consumed = 0,
752 .produced = 0,
753 .pad_descs = NULL,
754 .active = 0,
755 .done = 0,
756 .tcfg = NULL,
757 .prev_src = NULL,
758 .curr_src = NULL,
759 .unit_no = src_csum};
760 struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;
761
762 unsigned int indata_ix = 0;
763
764 /* iovec accounting. */
765 int iniov_ix = 0;
766 int iniov_offset = 0;
767
768 /* Operation descriptor cfg traversal pointer. */
769 struct cryptocop_desc *odsc;
770
771 int failed = 0;
772 /* List heads for allocated descriptors. */
773 struct cryptocop_dma_desc out_cdesc_head = {0};
774 struct cryptocop_dma_desc in_cdesc_head = {0};
775
776 struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
777 struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;
778
779 struct cryptocop_tfrm_ctx *output_tc = NULL;
780 void *iop_alloc_ptr;
781
782 assert(operation != NULL);
783 assert(int_op != NULL);
784
785 DEBUG(printk("cryptocop_setup_dma_list: start\n"));
786 DEBUG(print_cryptocop_operation(operation));
787
788 sess = get_session(operation->sid);
789 if (!sess) {
790 DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
791 failed = -EINVAL;
792 goto error_cleanup;
793 }
794 iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
795 if (!iop_alloc_ptr) {
796 DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n"));
797 failed = -ENOMEM;
798 goto error_cleanup;
799 }
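	/* Align the embedded ctx_out DMA context on a 32-byte boundary: round
	 * (iop_alloc_ptr + offsetof(ctx_out)) up to the next multiple of 32 and
	 * subtract the member offset again to get the struct pointer. The
	 * DESCR_ALLOC_PAD over-allocation above guarantees the aligned position
	 * still lies inside the buffer. */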
800 (*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
801 DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
802 (*int_op)->alloc_ptr = iop_alloc_ptr;
803 DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));
804
805 (*int_op)->sid = operation->sid;
806 (*int_op)->cdesc_out = NULL;
807 (*int_op)->cdesc_in = NULL;
808 (*int_op)->tdes_mode = cryptocop_3des_ede;
809 (*int_op)->csum_mode = cryptocop_csum_le;
810 (*int_op)->ddesc_out = NULL;
811 (*int_op)->ddesc_in = NULL;
812
813 /* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
814 if (!tcfg) {
815 DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
816 failed = -EINVAL;
817 goto error_cleanup;
818 }
819 while (tcfg) {
820 tctx = get_transform_ctx(sess, tcfg->tid);
821 if (!tctx) {
822 DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
823 failed = -EINVAL;
824 goto error_cleanup;
825 }
826 if (tcfg->inject_ix > operation->tfrm_op.outlen){
827 DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
828 failed = -EINVAL;
829 goto error_cleanup;
830 }
831 switch (tctx->init.alg){
832 case cryptocop_alg_mem2mem:
833 if (cipher_ctx.tcfg != NULL){
834 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
835 failed = -EINVAL;
836 goto error_cleanup;
837 }
838 /* mem2mem is handled as a NULL cipher. */
839 cipher_ctx.cbcmode = 0;
840 cipher_ctx.decrypt = 0;
841 cipher_ctx.blocklength = 1;
842 cipher_ctx.ciph_conf = 0;
843 cipher_ctx.unit_no = src_dma;
844 cipher_ctx.tcfg = tcfg;
845 cipher_ctx.tctx = tctx;
846 break;
847 case cryptocop_alg_des:
848 case cryptocop_alg_3des:
849 case cryptocop_alg_aes:
850 /* cipher */
851 if (cipher_ctx.tcfg != NULL){
852 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
853 failed = -EINVAL;
854 goto error_cleanup;
855 }
856 cipher_ctx.tcfg = tcfg;
857 cipher_ctx.tctx = tctx;
858 if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){
859 cipher_ctx.decrypt = 1;
860 }
861 switch (tctx->init.cipher_mode) {
862 case cryptocop_cipher_mode_ecb:
863 cipher_ctx.cbcmode = 0;
864 break;
865 case cryptocop_cipher_mode_cbc:
866 cipher_ctx.cbcmode = 1;
867 break;
868 default:
869 DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode));
870 failed = -EINVAL;
871 goto error_cleanup;
872 }
873 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode));
874 switch (tctx->init.alg){
875 case cryptocop_alg_des:
876 cipher_ctx.ciph_conf = 0;
877 cipher_ctx.unit_no = src_des;
878 cipher_ctx.blocklength = DES_BLOCK_LENGTH;
879 break;
880 case cryptocop_alg_3des:
881 cipher_ctx.ciph_conf = 1;
882 cipher_ctx.unit_no = src_des;
883 cipher_ctx.blocklength = DES_BLOCK_LENGTH;
884 break;
885 case cryptocop_alg_aes:
886 cipher_ctx.ciph_conf = 2;
887 cipher_ctx.unit_no = src_aes;
888 cipher_ctx.blocklength = AES_BLOCK_LENGTH;
889 break;
890 default:
891 panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg);
892 }
893 (*int_op)->tdes_mode = tctx->init.tdes_mode;
894 break;
895 case cryptocop_alg_md5:
896 case cryptocop_alg_sha1:
897 /* digest */
898 if (digest_ctx.tcfg != NULL){
899 DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
900 failed = -EINVAL;
901 goto error_cleanup;
902 }
903 digest_ctx.tcfg = tcfg;
904 digest_ctx.tctx = tctx;
905 digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */
906 switch (tctx->init.alg){
907 case cryptocop_alg_md5:
908 digest_ctx.blocklength = MD5_BLOCK_LENGTH;
909 digest_ctx.unit_no = src_md5;
910 digest_ctx.hash_conf = 1; /* 1 => MD-5 */
911 break;
912 case cryptocop_alg_sha1:
913 digest_ctx.blocklength = SHA1_BLOCK_LENGTH;
914 digest_ctx.unit_no = src_sha1;
915 digest_ctx.hash_conf = 0; /* 0 => SHA-1 */
916 break;
917 default:
918 panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
919 }
920 break;
921 case cryptocop_alg_csum:
922 /* digest */
923 if (csum_ctx.tcfg != NULL){
924 DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
925 failed = -EINVAL;
926 goto error_cleanup;
927 }
928 (*int_op)->csum_mode = tctx->init.csum_mode;
929 csum_ctx.tcfg = tcfg;
930 csum_ctx.tctx = tctx;
931 break;
932 default:
933 /* no algorithm. */
934 DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid));
935 failed = -EINVAL;
936 goto error_cleanup;
937 }
938 tcfg = tcfg->next;
939 }
940 /* Download key if a cipher is used. */
941 if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){
942 struct cryptocop_dma_desc *key_desc = NULL;
943
944 failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag);
945 if (failed) {
946 DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
947 goto error_cleanup;
948 }
949 current_out_cdesc->next = key_desc;
950 current_out_cdesc = key_desc;
951 indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf);
952
953 /* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
954 if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) {
955 struct cryptocop_dma_desc *iv_desc = NULL;
956
957 DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));
958
959 failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag);
960 if (failed) {
961 DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
962 goto error_cleanup;
963 }
964 current_out_cdesc->next = iv_desc;
965 current_out_cdesc = iv_desc;
966 indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf);
967 }
968 }
969
970 /* Process descriptors. */
971 odsc = operation->tfrm_op.desc;
972 while (odsc) {
973 struct cryptocop_desc_cfg *dcfg = odsc->cfg;
974 struct strcop_meta_out meta_out = {0};
975 size_t desc_len = odsc->length;
976 int active_count, eop_needed_count;
977
978 output_tc = NULL;
979
980 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));
981
982 while (dcfg) {
983 struct cryptocop_tfrm_ctx *tc = NULL;
984
985 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
986 /* Get the local context for the transform and mark it as the output unit if it produces output. */
987 if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){
988 tc = &digest_ctx;
989 } else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){
990 tc = &cipher_ctx;
991 } else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){
992 tc = &csum_ctx;
993 }
994 if (!tc) {
995 DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid));
996 failed = -EINVAL;
997 goto error_cleanup;
998 }
999 if (tc->done) {
1000 DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid));
1001 failed = -EINVAL;
1002 goto error_cleanup;
1003 }
1004 if (!tc->active) {
1005 tc->start_ix = indata_ix;
1006 tc->active = 1;
1007 }
1008
1009 tc->previous_src = tc->current_src;
1010 tc->prev_src = tc->curr_src;
1011 /* Map source unit id to DMA source config. */
1012 switch (dcfg->src){
1013 case cryptocop_source_dma:
1014 tc->current_src = src_dma;
1015 break;
1016 case cryptocop_source_des:
1017 tc->current_src = src_des;
1018 break;
1019 case cryptocop_source_3des:
1020 tc->current_src = src_des;
1021 break;
1022 case cryptocop_source_aes:
1023 tc->current_src = src_aes;
1024 break;
1025 case cryptocop_source_md5:
1026 case cryptocop_source_sha1:
1027 case cryptocop_source_csum:
1028 case cryptocop_source_none:
1029 default:
1030 /* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units.
1031 */
1032 DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src));
1033 failed = -EINVAL;
1034 goto error_cleanup;
1035 }
1036 if (tc->current_src != src_dma) {
1037 /* Find the unit we are sourcing from. */
1038 if (digest_ctx.unit_no == tc->current_src){
1039 tc->curr_src = &digest_ctx;
1040 } else if (cipher_ctx.unit_no == tc->current_src){
1041 tc->curr_src = &cipher_ctx;
1042 } else if (csum_ctx.unit_no == tc->current_src){
1043 tc->curr_src = &csum_ctx;
1044 }
1045 if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){
1046 DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no));
1047 failed = -EINVAL;
1048 goto error_cleanup;
1049 }
1050 } else {
1051 tc->curr_src = NULL;
1052 }
1053
1054 /* Detect source switch. */
1055 DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_srv=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src));
1056 if (tc->active && (tc->current_src != tc->previous_src)) {
1057 /* Only allow source switch when both the old source unit and the new one have
1058 * no pending data to process (i.e. the consumed length must be a multiple of the
1059 * transform blocklength). */
1060 /* Note: if the src == NULL we are actually sourcing from DMA out. */
1061 if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) ||
1062 ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength)))
1063 {
1064 DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN));
1065 failed = -EINVAL;
1066 goto error_cleanup;
1067 }
1068 }
1069 /* Detect unit deactivation. */
1070 if (dcfg->last) {
1071 /* Length check of this is handled below. */
1072 tc->done = 1;
1073 }
1074 dcfg = dcfg->next;
1075 } /* while (dcfg) */
1076 DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));
1077
1078 if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){
1079 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no));
1080 failed = -EINVAL;
1081 goto error_cleanup;
1082 }
1083 if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){
1084 DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no));
1085 failed = -EINVAL;
1086 goto error_cleanup;
1087 }
1088 if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){
1089 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", csum_ctx.curr_src->unit_no));
1090 failed = -EINVAL;
1091 goto error_cleanup;
1092 }
1093
1094 /* Update consumed and produced lengths.
1095
1096 The consumed length accounting here is actually cheating. If a unit sources from DMA (or any
1097 other unit that processes data in blocks of one octet) it is correct, but if it sources from a
1098 block processing unit, i.e. a cipher, it will be temporarily incorrect at times. However,
1099 since the HW only allows changing source to or from a block processing unit at times when that
1100 unit has processed an exact multiple of its block length, the end result will be correct.
1101 Beware that if the source change restriction changes, this code will need to be (much) reworked.
1102 */
1103 DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len));
1104
1105 if (csum_ctx.active) {
1106 csum_ctx.consumed += desc_len;
1107 if (csum_ctx.done) {
1108 csum_ctx.produced = 2;
1109 }
1110 DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength));
1111 }
1112 if (digest_ctx.active) {
1113 digest_ctx.consumed += desc_len;
1114 if (digest_ctx.done) {
1115 if (digest_ctx.unit_no == src_md5) {
1116 digest_ctx.produced = MD5_STATE_LENGTH;
1117 } else {
1118 digest_ctx.produced = SHA1_STATE_LENGTH;
1119 }
1120 }
1121 DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength));
1122 }
1123 if (cipher_ctx.active) {
1124 /* Ciphers are allowed only to source from DMA out. That is filtered above. */
1125 assert(cipher_ctx.current_src == src_dma);
1126 cipher_ctx.consumed += desc_len;
1127 cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength);
1128 if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){
1129 cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */
1130 }
1131 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength));
1132 }
1133
1134 /* Setup the DMA out descriptors. */
1135 /* Configure the metadata. */
1136 active_count = 0;
1137 eop_needed_count = 0;
1138 if (cipher_ctx.active) {
1139 ++active_count;
1140 if (cipher_ctx.unit_no == src_dma){
1141 /* mem2mem */
1142 meta_out.ciphsel = src_none;
1143 } else {
1144 meta_out.ciphsel = cipher_ctx.current_src;
1145 }
1146 meta_out.ciphconf = cipher_ctx.ciph_conf;
1147 meta_out.cbcmode = cipher_ctx.cbcmode;
1148 meta_out.decrypt = cipher_ctx.decrypt;
1149 DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt));
1150 if (cipher_ctx.done) ++eop_needed_count;
1151 } else {
1152 meta_out.ciphsel = src_none;
1153 }
1154
1155 if (digest_ctx.active) {
1156 ++active_count;
1157 meta_out.hashsel = digest_ctx.current_src;
1158 meta_out.hashconf = digest_ctx.hash_conf;
1159 meta_out.hashmode = 0; /* Explicit mode is not used here. */
1160 DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode));
1161 if (digest_ctx.done) {
1162 assert(digest_ctx.pad_descs == NULL);
1163 failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag);
1164 if (failed) {
1165 DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
1166 goto error_cleanup;
1167 }
1168 }
1169 } else {
1170 meta_out.hashsel = src_none;
1171 }
1172
1173 if (csum_ctx.active) {
1174 ++active_count;
1175 meta_out.csumsel = csum_ctx.current_src;
1176 if (csum_ctx.done) {
1177 assert(csum_ctx.pad_descs == NULL);
1178 failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag);
1179 if (failed) {
1180 DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
1181 goto error_cleanup;
1182 }
1183 }
1184 } else {
1185 meta_out.csumsel = src_none;
1186 }
1187 DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count));
1188 /* Setup DMA out descriptors for the indata. */
1189 failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, &current_out_cdesc, &meta_out, alloc_flag);
1190 if (failed) {
1191 DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed));
1192 goto error_cleanup;
1193 }
1194 /* Set up the out EOP. If there are active units that are not done here they cannot get an EOP,
1195 * so we must set up a zero length descriptor to DMA to signal EOP only to the done units.
1196 * If there is a pad descriptor, the padded unit will get its EOP from it.
1197 */
1198 assert(active_count >= eop_needed_count);
1199 assert((eop_needed_count == 0) || (eop_needed_count == 1));
1200 if (eop_needed_count) {
1201 /* This means that the bulk operation (cipher/m2m) is terminated. */
1202 if (active_count > 1) {
1203 /* Use zero length EOP descriptor. */
1204 struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
1205 struct strcop_meta_out ed_mo = {0};
1206 if (!ed) {
1207 DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
1208 failed = -ENOMEM;
1209 goto error_cleanup;
1210 }
1211
1212 assert(cipher_ctx.active && cipher_ctx.done);
1213
1214 if (cipher_ctx.unit_no == src_dma){
1215 /* mem2mem */
1216 ed_mo.ciphsel = src_none;
1217 } else {
1218 ed_mo.ciphsel = cipher_ctx.current_src;
1219 }
1220 ed_mo.ciphconf = cipher_ctx.ciph_conf;
1221 ed_mo.cbcmode = cipher_ctx.cbcmode;
1222 ed_mo.decrypt = cipher_ctx.decrypt;
1223
1224 ed->free_buf = NULL;
1225 ed->dma_descr->wait = 1;
1226 ed->dma_descr->out_eop = 1;
1227
1228 ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */
1229 ed->dma_descr->after = ed->dma_descr->buf;
1230 ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo);
1231 current_out_cdesc->next = ed;
1232 current_out_cdesc = ed;
1233 } else {
1234 /* Set EOP in the current out descriptor since the only active module is
1235 * the one needing the EOP. */
1236
1237 current_out_cdesc->dma_descr->out_eop = 1;
1238 }
1239 }
1240
1241 if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0;
1242 if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0;
1243 if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0;
1244 indata_ix += odsc->length;
1245 odsc = odsc->next;
1246 } /* while (odsc) */ /* Process descriptors. */
1247 DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
1248 if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){
1249 DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
1250 failed = -EINVAL;
1251 goto error_cleanup;
1252 }
1253 if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){
1254 DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
1255 failed = -EINVAL;
1256 goto error_cleanup;
1257 }
1258 if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){
1259 DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
1260 failed = -EINVAL;
1261 goto error_cleanup;
1262 }
1263
1264 failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &cipher_ctx, alloc_flag);
1265 if (failed){
1266 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1267 goto error_cleanup;
1268 }
1269 failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &digest_ctx, alloc_flag);
1270 if (failed){
1271 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors digest_ctx %d\n", failed));
1272 goto error_cleanup;
1273 }
1274 failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &csum_ctx, alloc_flag);
1275 if (failed){
1276 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors csum_ctx %d\n", failed));
1277 goto error_cleanup;
1278 }
1279
1280 DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op));
1281 (*int_op)->cdesc_out = out_cdesc_head.next;
1282 (*int_op)->cdesc_in = in_cdesc_head.next;
1283 DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in));
1284
1285 setup_descr_chain(out_cdesc_head.next);
1286 setup_descr_chain(in_cdesc_head.next);
1287
1288 /* Last but not least: mark the last DMA in descriptor for INTR and EOL, and the
1289 * last DMA out descriptor for EOL.
1290 */
1291 current_in_cdesc->dma_descr->intr = 1;
1292 current_in_cdesc->dma_descr->eol = 1;
1293 current_out_cdesc->dma_descr->eol = 1;
1294
1295 /* Setup DMA contexts. */
1296 (*int_op)->ctx_out.next = NULL;
1297 (*int_op)->ctx_out.eol = 1;
1298 (*int_op)->ctx_out.intr = 0;
1299 (*int_op)->ctx_out.store_mode = 0;
1300 (*int_op)->ctx_out.en = 0;
1301 (*int_op)->ctx_out.dis = 0;
1302 (*int_op)->ctx_out.md0 = 0;
1303 (*int_op)->ctx_out.md1 = 0;
1304 (*int_op)->ctx_out.md2 = 0;
1305 (*int_op)->ctx_out.md3 = 0;
1306 (*int_op)->ctx_out.md4 = 0;
1307 (*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr);
1308 (*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */
1309
1310 (*int_op)->ctx_in.next = NULL;
1311 (*int_op)->ctx_in.eol = 1;
1312 (*int_op)->ctx_in.intr = 0;
1313 (*int_op)->ctx_in.store_mode = 0;
1314 (*int_op)->ctx_in.en = 0;
1315 (*int_op)->ctx_in.dis = 0;
1316 (*int_op)->ctx_in.md0 = 0;
1317 (*int_op)->ctx_in.md1 = 0;
1318 (*int_op)->ctx_in.md2 = 0;
1319 (*int_op)->ctx_in.md3 = 0;
1320 (*int_op)->ctx_in.md4 = 0;
1321
1322 (*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr);
1323 (*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */
1324
1325 DEBUG(printk("cryptocop_setup_dma_list: done\n"));
1326 return 0;
1327
1328error_cleanup:
1329 {
1330 /* Free all allocated resources. */
1331 struct cryptocop_dma_desc *tmp_cdesc;
1332 while (digest_ctx.pad_descs){
1333 tmp_cdesc = digest_ctx.pad_descs->next;
1334 free_cdesc(digest_ctx.pad_descs);
1335 digest_ctx.pad_descs = tmp_cdesc;
1336 }
1337 while (csum_ctx.pad_descs){
1338 tmp_cdesc = csum_ctx.pad_descs->next;
1339 free_cdesc(csum_ctx.pad_descs);
1340 csum_ctx.pad_descs = tmp_cdesc;
1341 }
1342 assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */
1343
1344 if (*int_op != NULL) delete_internal_operation(*int_op);
1345 }
1346 DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed));
1347 return failed;
1348}
1349
1350
1351static void delete_internal_operation(struct cryptocop_int_operation *iop)
1352{
1353 void *ptr = iop->alloc_ptr;
1354 struct cryptocop_dma_desc *cd = iop->cdesc_out;
1355 struct cryptocop_dma_desc *next;
1356
1357 DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr));
1358
1359 while (cd) {
1360 next = cd->next;
1361 free_cdesc(cd);
1362 cd = next;
1363 }
1364 cd = iop->cdesc_in;
1365 while (cd) {
1366 next = cd->next;
1367 free_cdesc(cd);
1368 cd = next;
1369 }
1370 kfree(ptr);
1371}
1372
1373#define MD5_MIN_PAD_LENGTH (9)
1374#define MD5_PAD_LENGTH_FIELD_LENGTH (8)
1375
1376static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1377{
1378 size_t padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH);
1379 unsigned char *p;
1380 int i;
1381 unsigned long long int bit_length = hashed_length << 3;
1382
1383 if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
1384
1385 p = kmalloc(padlen, alloc_flag);
1386 	if (!p) return -ENOMEM;
1387
1388 *p = 0x80;
1389 memset(p+1, 0, padlen - 1);
1390
1391 DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1392
1393 i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH;
1394 while (bit_length != 0){
1395 p[i++] = bit_length % 0x100;
1396 bit_length >>= 8;
1397 }
1398
1399 *pad = (char*)p;
1400 *pad_length = padlen;
1401
1402 return 0;
1403}
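/* Worked example (illustrative only, not used by the driver): hashing 5 bytes gives
 * padlen = 64 - 5 = 59, so the pad is 0x80 followed by 50 zero bytes and an 8 byte
 * little-endian bit count (5 * 8 = 40 = 0x28).  The padded message is then exactly one
 * MD5_BLOCK_LENGTH (64) bytes: data || 0x80 || 00...00 || 28 00 00 00 00 00 00 00.
 */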
1404
1405#define SHA1_MIN_PAD_LENGTH (9)
1406#define SHA1_PAD_LENGTH_FIELD_LENGTH (8)
1407
1408static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1409{
1410 size_t padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH);
1411 unsigned char *p;
1412 int i;
1413 unsigned long long int bit_length = hashed_length << 3;
1414
1415 if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
1416
1417 p = kmalloc(padlen, alloc_flag);
1418 	if (!p) return -ENOMEM;
1419
1420 *p = 0x80;
1421 memset(p+1, 0, padlen - 1);
1422
1423 DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1424
1425 i = padlen - 1;
1426 while (bit_length != 0){
1427 p[i--] = bit_length % 0x100;
1428 bit_length >>= 8;
1429 }
1430
1431 *pad = (char*)p;
1432 *pad_length = padlen;
1433
1434 return 0;
1435}
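/* Worked example (illustrative only): as in the MD5 case above, hashing 5 bytes gives a
 * 59 byte pad, but here the 64-bit bit count (40 = 0x28) is written big-endian from the
 * end of the pad, so the pad ends in ... 00 00 00 00 00 00 00 28 instead of starting the
 * length field with 0x28.
 */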
1436
1437
1438static int transform_ok(struct cryptocop_transform_init *tinit)
1439{
1440 switch (tinit->alg){
1441 case cryptocop_alg_csum:
1442 switch (tinit->csum_mode){
1443 case cryptocop_csum_le:
1444 case cryptocop_csum_be:
1445 break;
1446 default:
1447 DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
1448 return -EINVAL;
1449 }
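		/* fall through - the keylen check below also applies to csum */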
1450 case cryptocop_alg_mem2mem:
1451 case cryptocop_alg_md5:
1452 case cryptocop_alg_sha1:
1453 if (tinit->keylen != 0) {
1454 DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen));
1455 return -EINVAL; /* This check is a bit strict. */
1456 }
1457 break;
1458 case cryptocop_alg_des:
1459 if (tinit->keylen != 64) {
1460 DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen));
1461 return -EINVAL;
1462 }
1463 break;
1464 case cryptocop_alg_3des:
1465 if (tinit->keylen != 192) {
1466 DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen));
1467 return -EINVAL;
1468 }
1469 break;
1470 case cryptocop_alg_aes:
1471 if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) {
1472 DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen));
1473 return -EINVAL;
1474 }
1475 break;
1476 case cryptocop_no_alg:
1477 default:
1478 DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg));
1479 return -EINVAL;
1480 }
1481
1482 switch (tinit->alg){
1483 case cryptocop_alg_des:
1484 case cryptocop_alg_3des:
1485 case cryptocop_alg_aes:
1486 if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL;
1487 default:
1488 break;
1489 }
1490 return 0;
1491}
1492
1493
1494int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag)
1495{
1496 struct cryptocop_session *sess;
1497 struct cryptocop_transform_init *tfrm_in = tinit;
1498 struct cryptocop_transform_init *tmp_in;
1499 int no_tfrms = 0;
1500 int i;
1501 unsigned long int flags;
1502
1503 init_stream_coprocessor(); /* For safety if we are called early */
1504
1505 while (tfrm_in){
1506 int err;
1507 ++no_tfrms;
1508 if ((err = transform_ok(tfrm_in))) {
1509 DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
1510 return err;
1511 }
1512 tfrm_in = tfrm_in->next;
1513 }
1514 if (0 == no_tfrms) {
1515 DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
1516 return -EINVAL;
1517 }
1518
1519 sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag);
1520 if (!sess){
1521 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
1522 return -ENOMEM;
1523 }
1524
1525 sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag);
1526 if (!sess->tfrm_ctx) {
1527 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
1528 kfree(sess);
1529 return -ENOMEM;
1530 }
1531
1532 tfrm_in = tinit;
1533 for (i = 0; i < no_tfrms; i++){
1534 tmp_in = tfrm_in->next;
1535 while (tmp_in){
1536 if (tmp_in->tid == tfrm_in->tid) {
1537 DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
1538 kfree(sess->tfrm_ctx);
1539 kfree(sess);
1540 return -EINVAL;
1541 }
1542 tmp_in = tmp_in->next;
1543 }
1544 memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init));
1545 sess->tfrm_ctx[i].dec_key_set = 0;
1546 sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1;
1547
1548 tfrm_in = tfrm_in->next;
1549 }
1550 sess->tfrm_ctx[i-1].next = NULL;
1551
1552 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1553 sess->sid = next_sid;
1554 next_sid++;
1555 	/* TODO: If we are really paranoid we should do a duplicate check to handle sid wraparound.
1556 	 * OTOH 2^64 is a really large number of sessions. */
1557 if (next_sid == 0) next_sid = 1;
1558
1559 /* Prepend to session list. */
1560 sess->next = cryptocop_sessions;
1561 cryptocop_sessions = sess;
1562 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1563 *sid = sess->sid;
1564 return 0;
1565}
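/* Illustrative (kernel-internal) use of cryptocop_new_session, assuming the caller owns
 * the transform_init structs for the duration of the call: chain one struct
 * cryptocop_transform_init per transform through .next, give each a unique .tid, and pass
 * the head of the chain.  E.g. a DES + MD5 session would set ti_des.alg = cryptocop_alg_des,
 * ti_des.keylen = 64, ti_des.cipher_mode and ti_des.key, set ti_md5.alg = cryptocop_alg_md5
 * with ti_md5.keylen = 0, link ti_md5.next = &ti_des and call
 * cryptocop_new_session(&sid, &ti_md5, GFP_KERNEL).
 * (cryptocop_ioctl_create_session() further down builds such a chain from user input.)
 */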
1566
1567
1568int cryptocop_free_session(cryptocop_session_id sid)
1569{
1570 struct cryptocop_transform_ctx *tc;
1571 struct cryptocop_session *sess = NULL;
1572 struct cryptocop_session *psess = NULL;
1573 unsigned long int flags;
1574 int i;
1575 LIST_HEAD(remove_list);
1576 struct list_head *node, *tmp;
1577 struct cryptocop_prio_job *pj;
1578
1579 DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid));
1580
1581 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1582 sess = cryptocop_sessions;
1583 while (sess && sess->sid != sid){
1584 psess = sess;
1585 sess = sess->next;
1586 }
1587 if (sess){
1588 if (psess){
1589 psess->next = sess->next;
1590 } else {
1591 cryptocop_sessions = sess->next;
1592 }
1593 }
1594 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1595
1596 if (!sess) return -EINVAL;
1597
1598 /* Remove queued jobs. */
1599 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
1600
1601 for (i = 0; i < cryptocop_prio_no_prios; i++){
1602 if (!list_empty(&(cryptocop_job_queues[i].jobs))){
1603 list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
1604 pj = list_entry(node, struct cryptocop_prio_job, node);
1605 if (pj->oper->sid == sid) {
1606 list_move_tail(node, &remove_list);
1607 }
1608 }
1609 }
1610 }
1611 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
1612
1613 list_for_each_safe(node, tmp, &remove_list) {
1614 list_del(node);
1615 pj = list_entry(node, struct cryptocop_prio_job, node);
1616 pj->oper->operation_status = -EAGAIN; /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */
1617 DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop));
1618 pj->oper->cb(pj->oper, pj->oper->cb_data);
1619 delete_internal_operation(pj->iop);
1620 kfree(pj);
1621 }
1622
1623 tc = sess->tfrm_ctx;
1624 /* Erase keying data. */
1625 while (tc){
1626 DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid));
1627 memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
1628 memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
1629 tc = tc->next;
1630 }
1631 kfree(sess->tfrm_ctx);
1632 kfree(sess);
1633
1634 return 0;
1635}
1636
1637static struct cryptocop_session *get_session(cryptocop_session_id sid)
1638{
1639 struct cryptocop_session *sess;
1640 unsigned long int flags;
1641
1642 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1643 sess = cryptocop_sessions;
1644 while (sess && (sess->sid != sid)){
1645 sess = sess->next;
1646 }
1647 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1648
1649 return sess;
1650}
1651
1652static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid)
1653{
1654 struct cryptocop_transform_ctx *tc = sess->tfrm_ctx;
1655
1656 DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid));
1657 assert(sess != NULL);
1658 while (tc && tc->init.tid != tid){
1659 DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next));
1660 tc = tc->next;
1661 }
1662 DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc));
1663 return tc;
1664}
1665
1666
1667
1668/* The AES s-transform matrix (s-box). */
1669static const u8 aes_sbox[256] = {
1670 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
1671 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
1672 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
1673 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
1674 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
1675 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
1676 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
1677 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
1678 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
1679 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
1680 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
1681 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
1682 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
1683 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
1684 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
1685 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22
1686};
1687
1688 /* AES has a 32 bit round constant word for each round in the
1689  * key schedule. round_constant[i] is really Rcon[i+1] in FIPS-197.
1690 */
1691static u32 round_constant[11] = {
1692 0x01000000, 0x02000000, 0x04000000, 0x08000000,
1693 0x10000000, 0x20000000, 0x40000000, 0x80000000,
1694 0x1B000000, 0x36000000, 0x6C000000
1695};
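/* Note: each Rcon value is the previous one doubled in GF(2^8), reducing by the AES
 * polynomial 0x11B when the doubling overflows a byte: 0x80 * 2 = 0x100, 0x100 xor
 * 0x11B = 0x1B, then 0x1B * 2 = 0x36 and 0x36 * 2 = 0x6C, matching the last three
 * entries above.
 */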
1696
1697 /* Apply the s-box to each of the four octets in w. */
1698static u32 aes_ks_subword(const u32 w)
1699{
1700 u8 bytes[4];
1701
1702 *(u32*)(&bytes[0]) = w;
1703 bytes[0] = aes_sbox[bytes[0]];
1704 bytes[1] = aes_sbox[bytes[1]];
1705 bytes[2] = aes_sbox[bytes[2]];
1706 bytes[3] = aes_sbox[bytes[3]];
1707 return *(u32*)(&bytes[0]);
1708}
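/* Example: aes_ks_subword(0x00000000) yields 0x63636363, since aes_sbox[0] == 99 == 0x63. */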
1709
1710/* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
1711 * (Note that AES words are 32 bit long)
1712 *
1713 * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
1714 * word temp
1715 * i = 0
1716 * while (i < Nk) {
1717 * w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
1718 * i = i + 1
1719 * }
1720 * i = Nk
1721 *
1722 * while (i < (Nb * (Nr + 1))) {
1723 * temp = w[i - 1]
1724 * if ((i mod Nk) == 0) {
1725 * temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
1726 * }
1727 * else if ((Nk > 6) && ((i mod Nk) == 4)) {
1728 * temp = SubWord(temp)
1729 * }
1730 * w[i] = w[i - Nk] xor temp
 * i = i + 1
1731  * }
1732  * RotWord(t) does an 8 bit cyclic shift left on a 32 bit word.
1733 * SubWord(t) applies the AES s-box individually to each octet
1734 * in a 32 bit word.
1735 *
1736 * For AES Nk can have the values 4, 6, and 8 (corresponding to
1737 * values for Nr of 10, 12, and 14). Nb is always 4.
1738 *
1739 * To construct w[i], w[i - 1] and w[i - Nk] must be
1740 * available. Consequently we must keep a state of the last Nk words
1741 * to be able to create the last round keys.
1742 */
1743static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength)
1744{
1745 u32 temp;
1746 u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
1747 u8 w_last_ix;
1748 int i;
1749 u8 nr, nk;
1750
1751 switch (keylength){
1752 case 128:
1753 nk = 4;
1754 nr = 10;
1755 break;
1756 case 192:
1757 nk = 6;
1758 nr = 12;
1759 break;
1760 case 256:
1761 nk = 8;
1762 nr = 14;
1763 break;
1764 default:
1765 panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
1766 };
1767
1768 /* Need to do host byte order correction here since key is byte oriented and the
1769 * kx algorithm is word (u32) oriented. */
1770 for (i = 0; i < nk; i+=1) {
1771 w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]);
1772 }
1773
1774 i = (int)nk;
1775 w_last_ix = i - 1;
1776 while (i < (4 * (nr + 2))) {
1777 temp = w_ring[w_last_ix];
1778 if (!(i % nk)) {
1779 /* RotWord(temp) */
1780 temp = (temp << 8) | (temp >> 24);
1781 temp = aes_ks_subword(temp);
1782 temp ^= round_constant[i/nk - 1];
1783 } else if ((nk > 6) && ((i % nk) == 4)) {
1784 temp = aes_ks_subword(temp);
1785 }
1786 w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */
1787 temp ^= w_ring[w_last_ix];
1788 w_ring[w_last_ix] = temp;
1789
1790 /* We need the round keys for round Nr+1 and Nr+2 (round key
1791 * Nr+2 is the round key beyond the last one used when
1792 * encrypting). Rounds are numbered starting from 0, Nr=10
1793 * implies 11 rounds are used in encryption/decryption.
1794 */
1795 if (i >= (4 * nr)) {
1796 /* Need to do host byte order correction here, the key
1797 * is byte oriented. */
1798 *(u32*)dec_key = cpu_to_be32(temp);
1799 dec_key += 4;
1800 }
1801 ++i;
1802 }
1803}
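/* Example of what the loop above produces: for a 128 bit key (nk = 4, nr = 10) it expands
 * key schedule words w[4]..w[47] and copies only w[40]..w[47], i.e. the 32 bytes the
 * hardware wants as its decryption start key (the "round Nr+1 and Nr+2" keys mentioned in
 * the comment inside the loop).
 */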
1804
1805
1806/**** Job/operation management. ****/
1807
1808int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation)
1809{
1810 return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation);
1811}
1812
1813int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation)
1814{
1815 return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation);
1816}
1817
1818int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation)
1819{
1820 return cryptocop_job_queue_insert(cryptocop_prio_user, operation);
1821}
1822
1823static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation)
1824{
1825 int ret;
1826 struct cryptocop_prio_job *pj = NULL;
1827 unsigned long int flags;
1828
1829 DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation));
1830
1831 if (!operation || !operation->cb){
1832 DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation));
1833 return -EINVAL;
1834 }
1835
1836 if ((ret = cryptocop_job_setup(&pj, operation)) != 0){
1837 DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
1838 return ret;
1839 }
1840 assert(pj != NULL);
1841
1842 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
1843 list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs);
1844 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
1845
1846 /* Make sure a job is running */
1847 cryptocop_start_job();
1848 return 0;
1849}
1850
1851static void cryptocop_do_tasklet(unsigned long unused);
1852DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0);
1853
1854static void cryptocop_do_tasklet(unsigned long unused)
1855{
1856 struct list_head *node;
1857 struct cryptocop_prio_job *pj = NULL;
1858 unsigned long flags;
1859
1860 DEBUG(printk("cryptocop_do_tasklet: entering\n"));
1861
1862 do {
1863 spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
1864 if (!list_empty(&cryptocop_completed_jobs)){
1865 node = cryptocop_completed_jobs.next;
1866 list_del(node);
1867 pj = list_entry(node, struct cryptocop_prio_job, node);
1868 } else {
1869 pj = NULL;
1870 }
1871 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
1872 if (pj) {
1873 assert(pj->oper != NULL);
1874
1875 /* Notify consumer of operation completeness. */
1876 DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
1877
1878 pj->oper->operation_status = 0; /* Job is completed. */
1879 pj->oper->cb(pj->oper, pj->oper->cb_data);
1880 delete_internal_operation(pj->iop);
1881 kfree(pj);
1882 }
1883 } while (pj != NULL);
1884
1885 DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
1886}
1887
1888static irqreturn_t
1889dma_done_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1890{
1891 struct cryptocop_prio_job *done_job;
1892 reg_dma_rw_ack_intr ack_intr = {
1893 .data = 1,
1894 };
1895
1896 REG_WR (dma, regi_dma9, rw_ack_intr, ack_intr);
1897
1898 DEBUG(printk("cryptocop DMA done\n"));
1899
1900 spin_lock(&running_job_lock);
1901 if (cryptocop_running_job == NULL){
1902 printk("stream co-processor got interrupt when not busy\n");
1903 spin_unlock(&running_job_lock);
1904 return IRQ_HANDLED;
1905 }
1906 done_job = cryptocop_running_job;
1907 cryptocop_running_job = NULL;
1908 spin_unlock(&running_job_lock);
1909
1910 /* Start processing a job. */
1911 if (!spin_trylock(&cryptocop_process_lock)){
1912 DEBUG(printk("cryptocop irq handler, not starting a job\n"));
1913 } else {
1914 cryptocop_start_job();
1915 spin_unlock(&cryptocop_process_lock);
1916 }
1917
1918 done_job->oper->operation_status = 0; /* Job is completed. */
1919 if (done_job->oper->fast_callback){
1920 /* This operation wants callback from interrupt. */
1921 done_job->oper->cb(done_job->oper, done_job->oper->cb_data);
1922 delete_internal_operation(done_job->iop);
1923 kfree(done_job);
1924 } else {
1925 spin_lock(&cryptocop_completed_jobs_lock);
1926 list_add_tail(&(done_job->node), &cryptocop_completed_jobs);
1927 spin_unlock(&cryptocop_completed_jobs_lock);
1928 tasklet_schedule(&cryptocop_tasklet);
1929 }
1930
1931 DEBUG(printk("cryptocop leave irq handler\n"));
1932 return IRQ_HANDLED;
1933}
1934
1935
1936/* Setup interrupts and DMA channels. */
1937static int init_cryptocop(void)
1938{
1939 unsigned long flags;
1940 reg_intr_vect_rw_mask intr_mask;
1941 reg_dma_rw_cfg dma_cfg = {.en = 1};
1942 reg_dma_rw_intr_mask intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */
1943 reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 };
1944 reg_strcop_rw_cfg strcop_cfg = {
1945 .ipend = regk_strcop_little,
1946 .td1 = regk_strcop_e,
1947 .td2 = regk_strcop_d,
1948 .td3 = regk_strcop_e,
1949 .ignore_sync = 0,
1950 .en = 1
1951 };
1952
1953 if (request_irq(DMA9_INTR_VECT, dma_done_interrupt, 0, "stream co-processor DMA", NULL)) panic("request_irq stream co-processor irq dma9");
1954
1955 (void)crisv32_request_dma(8, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);
1956 (void)crisv32_request_dma(9, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);
1957
1958 local_irq_save(flags);
1959
1960 /* Reset and enable the cryptocop. */
1961 strcop_cfg.en = 0;
1962 REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
1963 strcop_cfg.en = 1;
1964 REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
1965
1966 /* Enable DMA9 interrupt */
1967 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1968 intr_mask.dma9 = 1;
1969 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1970
1971 /* Enable DMAs. */
1972 REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
1973 REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */
1974
1975 /* Set up wordsize = 4 for DMAs. */
1976 DMA_WR_CMD (regi_dma8, regk_dma_set_w_size4);
1977 DMA_WR_CMD (regi_dma9, regk_dma_set_w_size4);
1978
1979 /* Enable interrupts. */
1980 REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);
1981
1982 /* Clear intr ack. */
1983 REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);
1984
1985 local_irq_restore(flags);
1986
1987 return 0;
1988}
1989
1990/* Free used cryptocop hw resources (interrupt and DMA channels). */
1991static void release_cryptocop(void)
1992{
1993 unsigned long flags;
1994 reg_intr_vect_rw_mask intr_mask;
1995 reg_dma_rw_cfg dma_cfg = {.en = 0};
1996 reg_dma_rw_intr_mask intr_mask_in = {0};
1997 reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 };
1998
1999 local_irq_save(flags);
2000
2001 /* Clear intr ack. */
2002 REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);
2003
2004 /* Disable DMA9 interrupt */
2005 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
2006 intr_mask.dma9 = 0;
2007 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
2008
2009 /* Disable DMAs. */
2010 REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
2011 REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */
2012
2013 /* Disable interrupts. */
2014 REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);
2015
2016 local_irq_restore(flags);
2017
2018 free_irq(DMA9_INTR_VECT, NULL);
2019
2020 (void)crisv32_free_dma(8);
2021 (void)crisv32_free_dma(9);
2022}
2023
2024
2025/* Init job queue. */
2026static int cryptocop_job_queue_init(void)
2027{
2028 int i;
2029
2030 INIT_LIST_HEAD(&cryptocop_completed_jobs);
2031
2032 for (i = 0; i < cryptocop_prio_no_prios; i++){
2033 cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i;
2034 INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs);
2035 }
2036 return 0;
2037}
2038
2039
2040static void cryptocop_job_queue_close(void)
2041{
2042 struct list_head *node, *tmp;
2043 struct cryptocop_prio_job *pj = NULL;
2044 unsigned long int process_flags, flags;
2045 int i;
2046
2047 /* FIXME: This is as yet untested code. */
2048
2049 /* Stop strcop from getting an operation to process while we are closing the
2050 module. */
2051 spin_lock_irqsave(&cryptocop_process_lock, process_flags);
2052
2053 /* Empty the job queue. */
2054 	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
2055 for (i = 0; i < cryptocop_prio_no_prios; i++){
2056 if (!list_empty(&(cryptocop_job_queues[i].jobs))){
2057 list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
2058 pj = list_entry(node, struct cryptocop_prio_job, node);
2059 list_del(node);
2060
2061 /* Call callback to notify consumer of job removal. */
2062 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2063 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2064 pj->oper->cb(pj->oper, pj->oper->cb_data);
2065
2066 delete_internal_operation(pj->iop);
2067 kfree(pj);
2068 }
2069 }
2070 }
2071 	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
 	spin_unlock_irqrestore(&cryptocop_process_lock, process_flags);
2072
2073 /* Remove the running job, if any. */
2074 spin_lock_irqsave(&running_job_lock, flags);
2075 if (cryptocop_running_job){
2076 reg_strcop_rw_cfg rw_cfg;
2077 reg_dma_rw_cfg dma_out_cfg, dma_in_cfg;
2078
2079 /* Stop DMA. */
2080 dma_out_cfg = REG_RD(dma, regi_dma8, rw_cfg);
2081 dma_out_cfg.en = regk_dma_no;
2082 REG_WR(dma, regi_dma8, rw_cfg, dma_out_cfg);
2083
2084 dma_in_cfg = REG_RD(dma, regi_dma9, rw_cfg);
2085 dma_in_cfg.en = regk_dma_no;
2086 REG_WR(dma, regi_dma9, rw_cfg, dma_in_cfg);
2087
2088 		/* Disable the cryptocop. */
2089 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
2090 rw_cfg.en = 0;
2091 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2092
2093 pj = cryptocop_running_job;
2094 cryptocop_running_job = NULL;
2095
2096 /* Call callback to notify consumer of job removal. */
2097 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2098 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2099 pj->oper->cb(pj->oper, pj->oper->cb_data);
2100
2101 delete_internal_operation(pj->iop);
2102 kfree(pj);
2103 }
2104 spin_unlock_irqrestore(&running_job_lock, flags);
2105
2106 /* Remove completed jobs, if any. */
2107 spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
2108
2109 list_for_each_safe(node, tmp, &cryptocop_completed_jobs) {
2110 pj = list_entry(node, struct cryptocop_prio_job, node);
2111 list_del(node);
2112 /* Call callback to notify consumer of job removal. */
2113 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2114 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2115 pj->oper->cb(pj->oper, pj->oper->cb_data);
2116
2117 delete_internal_operation(pj->iop);
2118 kfree(pj);
2119 }
2120 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
2121}
2122
2123
2124static void cryptocop_start_job(void)
2125{
2126 int i;
2127 struct cryptocop_prio_job *pj;
2128 unsigned long int flags;
2129 unsigned long int running_job_flags;
2130 reg_strcop_rw_cfg rw_cfg = {.en = 1, .ignore_sync = 0};
2131
2132 DEBUG(printk("cryptocop_start_job: entering\n"));
2133
2134 spin_lock_irqsave(&running_job_lock, running_job_flags);
2135 if (cryptocop_running_job != NULL){
2136 /* Already running. */
2137 DEBUG(printk("cryptocop_start_job: already running, exit\n"));
2138 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2139 return;
2140 }
2141 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
2142
2143 /* Check the queues in priority order. */
2144 for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++);
2145 if (i == cryptocop_prio_no_prios) {
2146 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2147 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2148 DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
2149 return; /* No jobs to run */
2150 }
2151 DEBUG(printk("starting job for prio %d\n", i));
2152
2153 /* TODO: Do not starve lower priority jobs. Let in a lower
2154 * prio job for every N-th processed higher prio job or some
2155 * other scheduling policy. This could reasonably be
2156 * tweakable since the optimal balance would depend on the
2157 * type of load on the system. */
2158
2159 /* Pull the DMA lists from the job and start the DMA client. */
2160 pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node);
2161 list_del(&pj->node);
2162 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2163 cryptocop_running_job = pj;
2164
2165 /* Set config register (3DES and CSUM modes). */
2166 switch (pj->iop->tdes_mode){
2167 case cryptocop_3des_eee:
2168 rw_cfg.td1 = regk_strcop_e;
2169 rw_cfg.td2 = regk_strcop_e;
2170 rw_cfg.td3 = regk_strcop_e;
2171 break;
2172 case cryptocop_3des_eed:
2173 rw_cfg.td1 = regk_strcop_e;
2174 rw_cfg.td2 = regk_strcop_e;
2175 rw_cfg.td3 = regk_strcop_d;
2176 break;
2177 case cryptocop_3des_ede:
2178 rw_cfg.td1 = regk_strcop_e;
2179 rw_cfg.td2 = regk_strcop_d;
2180 rw_cfg.td3 = regk_strcop_e;
2181 break;
2182 case cryptocop_3des_edd:
2183 rw_cfg.td1 = regk_strcop_e;
2184 rw_cfg.td2 = regk_strcop_d;
2185 rw_cfg.td3 = regk_strcop_d;
2186 break;
2187 case cryptocop_3des_dee:
2188 rw_cfg.td1 = regk_strcop_d;
2189 rw_cfg.td2 = regk_strcop_e;
2190 rw_cfg.td3 = regk_strcop_e;
2191 break;
2192 case cryptocop_3des_ded:
2193 rw_cfg.td1 = regk_strcop_d;
2194 rw_cfg.td2 = regk_strcop_e;
2195 rw_cfg.td3 = regk_strcop_d;
2196 break;
2197 case cryptocop_3des_dde:
2198 rw_cfg.td1 = regk_strcop_d;
2199 rw_cfg.td2 = regk_strcop_d;
2200 rw_cfg.td3 = regk_strcop_e;
2201 break;
2202 case cryptocop_3des_ddd:
2203 rw_cfg.td1 = regk_strcop_d;
2204 rw_cfg.td2 = regk_strcop_d;
2205 rw_cfg.td3 = regk_strcop_d;
2206 break;
2207 default:
2208 		DEBUG(printk("cryptocop_start_job: bad 3DES mode\n"));
2209 }
2210 switch (pj->iop->csum_mode){
2211 case cryptocop_csum_le:
2212 rw_cfg.ipend = regk_strcop_little;
2213 break;
2214 case cryptocop_csum_be:
2215 rw_cfg.ipend = regk_strcop_big;
2216 break;
2217 default:
2218 		DEBUG(printk("cryptocop_start_job: bad checksum mode\n"));
2219 }
2220 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2221
2222 DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
2223 "ctx_in: 0x%p, phys: 0x%p\n"
2224 "ctx_out: 0x%p, phys: 0x%p\n",
2225 pj,
2226 &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in),
2227 &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out)));
2228
2229 /* Start input DMA. */
2230 DMA_START_CONTEXT(regi_dma9, virt_to_phys(&pj->iop->ctx_in));
2231
2232 /* Start output DMA. */
2233 DMA_START_CONTEXT(regi_dma8, virt_to_phys(&pj->iop->ctx_out));
2234
2235 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2236 DEBUG(printk("cryptocop_start_job: exiting\n"));
2237}
2238
2239
2240static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation)
2241{
2242 int err;
2243 int alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL;
2244 void *iop_alloc_ptr = NULL;
2245
2246 *pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag);
2247 if (!*pj) return -ENOMEM;
2248
2249 DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation));
2250
2251 (*pj)->oper = operation;
2252 DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj)->oper->cb, (*pj)->oper->cb_data));
2253
2254 if (operation->use_dmalists) {
2255 DEBUG(print_user_dma_lists(&operation->list_op));
2256 if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){
2257 DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
2258 kfree(*pj);
2259 return -EINVAL;
2260 }
2261 iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
2262 if (!iop_alloc_ptr) {
2263 DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
2264 kfree(*pj);
2265 return -ENOMEM;
2266 }
2267 (*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
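		/* The expression above places the iop so that ctx_out (and hence ctx_in, which
		 * follows it) starts on a 32 byte boundary (the & ~0x0000001F mask), using the
		 * DESCR_ALLOC_PAD slack in the allocation; presumably the DMA context
		 * descriptors require this alignment. */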
2268 DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation)));
2269 (*pj)->iop->alloc_ptr = iop_alloc_ptr;
2270 (*pj)->iop->sid = operation->sid;
2271 (*pj)->iop->cdesc_out = NULL;
2272 (*pj)->iop->cdesc_in = NULL;
2273 (*pj)->iop->tdes_mode = operation->list_op.tdes_mode;
2274 (*pj)->iop->csum_mode = operation->list_op.csum_mode;
2275 (*pj)->iop->ddesc_out = operation->list_op.outlist;
2276 (*pj)->iop->ddesc_in = operation->list_op.inlist;
2277
2278 /* Setup DMA contexts. */
2279 (*pj)->iop->ctx_out.next = NULL;
2280 (*pj)->iop->ctx_out.eol = 1;
2281 (*pj)->iop->ctx_out.saved_data = operation->list_op.outlist;
2282 (*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf;
2283
2284 (*pj)->iop->ctx_in.next = NULL;
2285 (*pj)->iop->ctx_in.eol = 1;
2286 (*pj)->iop->ctx_in.saved_data = operation->list_op.inlist;
2287 (*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf;
2288 } else {
2289 if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) {
2290 DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err));
2291 kfree(*pj);
2292 return err;
2293 }
2294 }
2295 DEBUG(print_dma_descriptors((*pj)->iop));
2296
2297 DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));
2298
2299 return 0;
2300}
2301
2302
2303static int cryptocop_open(struct inode *inode, struct file *filp)
2304{
2305 int p = MINOR(inode->i_rdev);
2306
2307 if (p != CRYPTOCOP_MINOR) return -EINVAL;
2308
2309 filp->private_data = NULL;
2310 return 0;
2311}
2312
2313
2314static int cryptocop_release(struct inode *inode, struct file *filp)
2315{
2316 struct cryptocop_private *dev = filp->private_data;
2317 struct cryptocop_private *dev_next;
2318
2319 while (dev){
2320 dev_next = dev->next;
2321 if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) {
2322 (void)cryptocop_free_session(dev->sid);
2323 }
2324 kfree(dev);
2325 dev = dev_next;
2326 }
2327
2328 return 0;
2329}
2330
2331
2332static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp,
2333 unsigned int cmd, unsigned long arg)
2334{
2335 struct cryptocop_private *dev = filp->private_data;
2336 struct cryptocop_private *prev_dev = NULL;
2337 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2338 struct strcop_session_op sop;
2339 int err;
2340
2341 DEBUG(printk("cryptocop_ioctl_close_session\n"));
2342
2343 if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op)))
2344 return -EFAULT;
2345 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2346 if (err) return -EFAULT;
2347
2348 while (dev && (dev->sid != sop.ses_id)) {
2349 prev_dev = dev;
2350 dev = dev->next;
2351 }
2352 if (dev){
2353 if (prev_dev){
2354 prev_dev->next = dev->next;
2355 } else {
2356 filp->private_data = dev->next;
2357 }
2358 err = cryptocop_free_session(dev->sid);
2359 if (err) return -EFAULT;
2360 } else {
2361 DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id));
2362 return -EINVAL;
2363 }
2364 return 0;
2365}
2366
2367
2368static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data)
2369{
2370 struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data;
2371
2372 DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data));
2373
2374 jc->processed = 1;
2375 wake_up(&cryptocop_ioc_process_wq);
2376}
2377
2378
2379#define CRYPTOCOP_IOCTL_CIPHER_TID (1)
2380#define CRYPTOCOP_IOCTL_DIGEST_TID (2)
2381#define CRYPTOCOP_IOCTL_CSUM_TID (3)
2382
2383static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op)
2384{
2385 size_t ch_ix = 0;
2386
2387 if (crp_op->do_cipher) ch_ix = crp_op->cipher_start;
2388 if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start;
2389 if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start;
2390
2391 DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix));
2392 return ch_ix;
2393}
2394
2395
2396static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix)
2397{
2398 size_t ch_ix = INT_MAX;
2399 size_t tmp_ix = 0;
2400
2401 if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){
2402 if (crp_op->cipher_start > ix) {
2403 ch_ix = crp_op->cipher_start;
2404 } else {
2405 ch_ix = crp_op->cipher_start + crp_op->cipher_len;
2406 }
2407 }
2408 if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){
2409 if (crp_op->digest_start > ix) {
2410 tmp_ix = crp_op->digest_start;
2411 } else {
2412 tmp_ix = crp_op->digest_start + crp_op->digest_len;
2413 }
2414 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2415 }
2416 if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){
2417 if (crp_op->csum_start > ix) {
2418 tmp_ix = crp_op->csum_start;
2419 } else {
2420 tmp_ix = crp_op->csum_start + crp_op->csum_len;
2421 }
2422 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2423 }
2424 if (ch_ix == INT_MAX) ch_ix = ix;
2425 DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix));
2426 return ch_ix;
2427}
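/* Illustrative walk of the two helpers above: with a cipher over bytes [0, 64), a digest
 * over [16, 48) and no csum, first_cfg_change_ix() returns 0 and successive
 * next_cfg_change_ix() calls return 16, 48 and 64, after which the index no longer
 * advances; the caller thus builds three descriptors covering [0,16), [16,48) and [48,64).
 */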
2428
2429
2430 /* Map map_length bytes from the pages starting at *pageix and *pageoffset to iovecs starting at *iovix.
2431  * Returns -1 (true) on success and 0 on failure. */
2432static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length )
2433{
2434 int tmplen;
2435
2436 assert(iov != NULL);
2437 assert(iovix != NULL);
2438 assert(pages != NULL);
2439 assert(pageix != NULL);
2440 assert(pageoffset != NULL);
2441
2442 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2443
2444 while (map_length > 0){
2445 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2446 if (*iovix >= iovlen){
2447 			DEBUG_API(printk("map_pages_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen));
2448 return 0;
2449 }
2450 if (*pageix >= nopages){
2451 			DEBUG_API(printk("map_pages_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages));
2452 return 0;
2453 }
2454 iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset;
2455 tmplen = PAGE_SIZE - *pageoffset;
2456 if (tmplen < map_length){
2457 (*pageoffset) = 0;
2458 (*pageix)++;
2459 } else {
2460 tmplen = map_length;
2461 (*pageoffset) += map_length;
2462 }
2463 DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix));
2464 iov[*iovix].iov_len = tmplen;
2465 map_length -= tmplen;
2466 (*iovix)++;
2467 }
2468 	DEBUG(printk("map_pages_to_iovec, exit, *iovix=%d\n", *iovix));
2469 return -1;
2470}
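/* Illustrative example (assuming PAGE_SIZE == 4096): mapping map_length = 5000 starting at
 * *pageoffset = 3000 produces two iovecs, one of 4096 - 3000 = 1096 bytes at the end of the
 * first page and one of 3904 bytes at the start of the next page, leaving *pageoffset = 3904.
 */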
2471
2472
2473
2474static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2475{
2476 int i;
2477 struct cryptocop_private *dev = filp->private_data;
2478 struct strcop_crypto_op *crp_oper = (struct strcop_crypto_op *)arg;
2479 struct strcop_crypto_op oper = {0};
2480 int err = 0;
2481 struct cryptocop_operation *cop = NULL;
2482
2483 struct ioctl_job_cb_ctx *jc = NULL;
2484
2485 struct page **inpages = NULL;
2486 struct page **outpages = NULL;
2487 int noinpages = 0;
2488 int nooutpages = 0;
2489
2490 	struct cryptocop_desc descs[5]; /* Max 5 descriptors are needed; there are three transforms that
2491 	 * can be connected/disconnected at different places in the indata. */
2492 struct cryptocop_desc_cfg dcfgs[5*3];
2493 int desc_ix = 0;
2494 int dcfg_ix = 0;
2495 struct cryptocop_tfrm_cfg ciph_tcfg = {0};
2496 struct cryptocop_tfrm_cfg digest_tcfg = {0};
2497 struct cryptocop_tfrm_cfg csum_tcfg = {0};
2498
2499 unsigned char *digest_result = NULL;
2500 int digest_length = 0;
2501 int cblocklen = 0;
2502 unsigned char csum_result[CSUM_BLOCK_LENGTH];
2503 struct cryptocop_session *sess;
2504
2505 int iovlen = 0;
2506 int iovix = 0;
2507 int pageix = 0;
2508 int pageoffset = 0;
2509
2510 size_t prev_ix = 0;
2511 size_t next_ix;
2512
2513 int cipher_active, digest_active, csum_active;
2514 int end_digest, end_csum;
2515 int digest_done = 0;
2516 int cipher_done = 0;
2517 int csum_done = 0;
2518
2519 DEBUG(printk("cryptocop_ioctl_process\n"));
2520
2521 if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){
2522 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2523 return -EFAULT;
2524 }
2525 if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) {
2526 DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2527 return -EFAULT;
2528 }
2529 DEBUG(print_strcop_crypto_op(&oper));
2530
2531 while (dev && dev->sid != oper.ses_id) dev = dev->next;
2532 if (!dev){
2533 DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id));
2534 return -EINVAL;
2535 }
2536
2537 /* Check buffers. */
2538 if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){
2539 DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2540 return -EINVAL;
2541 }
2542
2543 if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){
2544 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2545 return -EFAULT;
2546 }
2547 if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){
2548 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2549 return -EFAULT;
2550 }
2551
2552 cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL);
2553 if (!cop) {
2554 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2555 return -ENOMEM;
2556 }
2557 jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL);
2558 if (!jc) {
2559 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2560 err = -ENOMEM;
2561 goto error_cleanup;
2562 }
2563 jc->processed = 0;
2564
2565 cop->cb_data = jc;
2566 cop->cb = ioctl_process_job_callback;
2567 cop->operation_status = 0;
2568 cop->use_dmalists = 0;
2569 cop->in_interrupt = 0;
2570 cop->fast_callback = 0;
2571 cop->tfrm_op.tfrm_cfg = NULL;
2572 cop->tfrm_op.desc = NULL;
2573 cop->tfrm_op.indata = NULL;
2574 cop->tfrm_op.incount = 0;
2575 cop->tfrm_op.inlen = 0;
2576 cop->tfrm_op.outdata = NULL;
2577 cop->tfrm_op.outcount = 0;
2578 cop->tfrm_op.outlen = 0;
2579
2580 sess = get_session(oper.ses_id);
2581 if (!sess){
2582 DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2583 kfree(cop);
2584 kfree(jc);
2585 return -EINVAL;
2586 }
2587
2588 if (oper.do_cipher) {
2589 unsigned int cipher_outlen = 0;
2590 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID);
2591 if (!tc) {
2592 DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2593 err = -EINVAL;
2594 goto error_cleanup;
2595 }
2596 ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2597 ciph_tcfg.inject_ix = 0;
2598 ciph_tcfg.flags = 0;
2599 if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){
2600 DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2601 kfree(cop);
2602 kfree(jc);
2603 return -EINVAL;
2604 }
2605 cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH;
2606 if (oper.cipher_len % cblocklen) {
2607 kfree(cop);
2608 kfree(jc);
2609 DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2610 return -EINVAL;
2611 }
2612 cipher_outlen = oper.cipher_len;
2613 if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){
2614 if (oper.cipher_explicit) {
2615 ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV;
2616 memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen);
2617 } else {
2618 cipher_outlen = oper.cipher_len - cblocklen;
2619 }
2620 } else {
2621 if (oper.cipher_explicit){
2622 kfree(cop);
2623 kfree(jc);
2624 DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2625 return -EINVAL;
2626 }
2627 }
2628 if (oper.cipher_outlen != cipher_outlen) {
2629 kfree(cop);
2630 kfree(jc);
2631 DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen));
2632 return -EINVAL;
2633 }
2634
2635 if (oper.decrypt){
2636 ciph_tcfg.flags |= CRYPTOCOP_DECRYPT;
2637 } else {
2638 ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT;
2639 }
2640 ciph_tcfg.next = cop->tfrm_op.tfrm_cfg;
2641 cop->tfrm_op.tfrm_cfg = &ciph_tcfg;
2642 }
2643 if (oper.do_digest){
2644 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID);
2645 if (!tc) {
2646 DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2647 err = -EINVAL;
2648 goto error_cleanup;
2649 }
2650 digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20;
2651 digest_result = kmalloc(digest_length, GFP_KERNEL);
2652 if (!digest_result) {
2653 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2654 err = -EINVAL;
2655 goto error_cleanup;
2656 }
2657 DEBUG(memset(digest_result, 0xff, digest_length));
2658
2659 digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2660 digest_tcfg.inject_ix = 0;
2661 ciph_tcfg.inject_ix += digest_length;
2662 if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){
2663 DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2664 err = -EINVAL;
2665 goto error_cleanup;
2666 }
2667
2668 digest_tcfg.next = cop->tfrm_op.tfrm_cfg;
2669 cop->tfrm_op.tfrm_cfg = &digest_tcfg;
2670 }
2671 if (oper.do_csum){
2672 csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID;
2673 csum_tcfg.inject_ix = digest_length;
2674 ciph_tcfg.inject_ix += 2;
2675
2676 if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){
2677 DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2678 			err = -EINVAL;
2679 			goto error_cleanup;
2681 }
2682
2683 csum_tcfg.next = cop->tfrm_op.tfrm_cfg;
2684 cop->tfrm_op.tfrm_cfg = &csum_tcfg;
2685 }
2686
2687 prev_ix = first_cfg_change_ix(&oper);
2688 if (prev_ix > oper.inlen) {
2689 DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2690 nooutpages = noinpages = 0;
2691 err = -EINVAL;
2692 goto error_cleanup;
2693 }
2694 DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen));
2695
2696 /* Map user pages for in and out data of the operation. */
2697 noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT;
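	/* The formula above is the usual "round up to page granularity" page count, e.g. with
	 * 4096 byte pages, indata + prev_ix at offset 100 into a page and inlen - prev_ix = 5000
	 * it gives (100 + 4999 + 4095) >> PAGE_SHIFT = 2 pages. */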
2698 DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages));
2699 inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL);
2700 if (!inpages){
2701 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2702 nooutpages = noinpages = 0;
2703 err = -ENOMEM;
2704 goto error_cleanup;
2705 }
2706 if (oper.do_cipher){
2707 nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT;
2708 DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages));
2709 outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL);
2710 if (!outpages){
2711 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2712 nooutpages = noinpages = 0;
2713 err = -ENOMEM;
2714 goto error_cleanup;
2715 }
2716 }
2717
2718 /* Acquire the mm page semaphore. */
2719 down_read(&current->mm->mmap_sem);
2720
2721 err = get_user_pages(current,
2722 current->mm,
2723 (unsigned long int)(oper.indata + prev_ix),
2724 noinpages,
2725 0, /* read access only for in data */
2726 0, /* no force */
2727 inpages,
2728 NULL);
2729
2730 if (err < 0) {
2731 up_read(&current->mm->mmap_sem);
2732 nooutpages = noinpages = 0;
2733 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2734 goto error_cleanup;
2735 }
2736 noinpages = err;
2737 if (oper.do_cipher){
2738 err = get_user_pages(current,
2739 current->mm,
2740 (unsigned long int)oper.cipher_outdata,
2741 nooutpages,
2742 1, /* write access for out data */
2743 0, /* no force */
2744 outpages,
2745 NULL);
2746 up_read(&current->mm->mmap_sem);
2747 if (err < 0) {
2748 nooutpages = 0;
2749 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2750 goto error_cleanup;
2751 }
2752 nooutpages = err;
2753 } else {
2754 up_read(&current->mm->mmap_sem);
2755 }
2756
2757 /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2758 * csum output and splits when units are (dis-)connected. */
2759 cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL);
2760 cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL);
2761 if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) {
2762 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2763 err = -ENOMEM;
2764 goto error_cleanup;
2765 }
2766
2767 cop->tfrm_op.inlen = oper.inlen - prev_ix;
2768 cop->tfrm_op.outlen = 0;
2769 if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen;
2770 if (oper.do_digest) cop->tfrm_op.outlen += digest_length;
2771 if (oper.do_csum) cop->tfrm_op.outlen += 2;
2772
2773 /* Setup the in iovecs. */
2774 cop->tfrm_op.incount = noinpages;
2775 if (noinpages > 1){
2776 size_t tmplen = cop->tfrm_op.inlen;
2777
2778 cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2779 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2780 tmplen -= cop->tfrm_op.indata[0].iov_len;
2781 for (i = 1; i<noinpages; i++){
2782 cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE;
2783 cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]);
2784 tmplen -= PAGE_SIZE;
2785 }
2786 } else {
2787 cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix;
2788 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2789 }
2790
2791 iovlen = nooutpages + 6;
2792 pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0;
2793
2794 next_ix = next_cfg_change_ix(&oper, prev_ix);
2795 if (prev_ix == next_ix){
2796 DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2797 err = -EINVAL; /* This should be impossible barring bugs. */
2798 goto error_cleanup;
2799 }
2800 while (prev_ix != next_ix){
2801 end_digest = end_csum = cipher_active = digest_active = csum_active = 0;
2802 descs[desc_ix].cfg = NULL;
2803 descs[desc_ix].length = next_ix - prev_ix;
2804
2805 if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) {
2806 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2807 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2808 cipher_active = 1;
2809
2810 if (next_ix == (oper.cipher_start + oper.cipher_len)){
2811 cipher_done = 1;
2812 dcfgs[dcfg_ix].last = 1;
2813 } else {
2814 dcfgs[dcfg_ix].last = 0;
2815 }
2816 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2817 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2818 ++dcfg_ix;
2819 }
2820 if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) {
2821 digest_active = 1;
2822 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2823 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2824 if (next_ix == (oper.digest_start + oper.digest_len)){
2825 assert(!digest_done);
2826 digest_done = 1;
2827 dcfgs[dcfg_ix].last = 1;
2828 } else {
2829 dcfgs[dcfg_ix].last = 0;
2830 }
2831 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2832 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2833 ++dcfg_ix;
2834 }
2835 if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){
2836 csum_active = 1;
2837 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID;
2838 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2839 if (next_ix == (oper.csum_start + oper.csum_len)){
2840 csum_done = 1;
2841 dcfgs[dcfg_ix].last = 1;
2842 } else {
2843 dcfgs[dcfg_ix].last = 0;
2844 }
2845 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2846 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2847 ++dcfg_ix;
2848 }
2849 if (!descs[desc_ix].cfg){
2850 DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix));
2851 err = -EINVAL;
2852 goto error_cleanup;
2853 }
2854 descs[desc_ix].next = &(descs[desc_ix]) + 1;
2855 ++desc_ix;
2856 prev_ix = next_ix;
2857 next_ix = next_cfg_change_ix(&oper, prev_ix);
2858 }
2859 if (desc_ix > 0){
2860 descs[desc_ix-1].next = NULL;
2861 } else {
2862 descs[0].next = NULL;
2863 }
2864 if (oper.do_digest) {
2865 DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix));
2866 /* Add outdata iovec, length == <length of type of digest> */
2867 cop->tfrm_op.outdata[iovix].iov_base = digest_result;
2868 cop->tfrm_op.outdata[iovix].iov_len = digest_length;
2869 ++iovix;
2870 }
2871 if (oper.do_csum) {
2872 /* Add outdata iovec, length == 2, the length of csum. */
2873 DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix));
2874 /* Add outdata iovec, length == <length of type of digest> */
2875 cop->tfrm_op.outdata[iovix].iov_base = csum_result;
2876 cop->tfrm_op.outdata[iovix].iov_len = 2;
2877 ++iovix;
2878 }
2879 if (oper.do_cipher) {
2880 if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){
2881 DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2882 err = -ENOSYS; /* This should be impossible barring bugs. */
2883 goto error_cleanup;
2884 }
2885 }
2886 DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix));
2887 cop->tfrm_op.outcount = iovix;
2888 assert(iovix <= (nooutpages + 6));
2889
2890 cop->sid = oper.ses_id;
2891 cop->tfrm_op.desc = &descs[0];
2892
2893 DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data));
2894
2895 if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) {
2896 DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err));
2897 err = -EINVAL;
2898 goto error_cleanup;
2899 }
2900
2901 DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2902
2903 wait_event(cryptocop_ioc_process_wq, (jc->processed != 0));
2904 DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2905 if (!jc->processed){
2906 printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n");
2907 err = -EIO;
2908 goto error_cleanup;
2909 }
2910
2911 /* Job process done. Cipher output should already be correct in job so no post processing of outdata. */
2912 DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status));
2913 if (cop->operation_status == 0){
2914 if (oper.do_digest){
2915 DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length));
2916 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length);
2917 if (0 != err){
2918 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err));
2919 err = -EFAULT;
2920 goto error_cleanup;
2921 }
2922 }
2923 if (oper.do_csum){
2924 DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2925 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2);
2926 if (0 != err){
2927 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err));
2928 err = -EFAULT;
2929 goto error_cleanup;
2930 }
2931 }
2932 err = 0;
2933 } else {
2934 DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status));
2935 err = cop->operation_status;
2936 }
2937
2938 error_cleanup:
2939 /* Release page caches. */
2940 for (i = 0; i < noinpages; i++){
2941 put_page(inpages[i]);
2942 }
2943 for (i = 0; i < nooutpages; i++){
2944 int spdl_err;
2945 /* Mark output pages dirty. */
2946 spdl_err = set_page_dirty_lock(outpages[i]);
2947 DEBUG(if (spdl_err)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2948 }
2949 for (i = 0; i < nooutpages; i++){
2950 put_page(outpages[i]);
2951 }
2952
2953 if (digest_result) kfree(digest_result);
2954 if (inpages) kfree(inpages);
2955 if (outpages) kfree(outpages);
2956 if (cop){
2957 if (cop->tfrm_op.indata) kfree(cop->tfrm_op.indata);
2958 if (cop->tfrm_op.outdata) kfree(cop->tfrm_op.outdata);
2959 kfree(cop);
2960 }
2961 if (jc) kfree(jc);
2962
2963 DEBUG(print_lock_status());
2964
2965 return err;
2966}
2967
2968
2969static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2970{
2971 cryptocop_session_id sid;
2972 int err;
2973 struct cryptocop_private *dev;
2974 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2975 struct strcop_session_op sop;
2976 struct cryptocop_transform_init *tis = NULL;
2977 struct cryptocop_transform_init ti_cipher = {0};
2978 struct cryptocop_transform_init ti_digest = {0};
2979 struct cryptocop_transform_init ti_csum = {0};
2980
2981 if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op)))
2982 return -EFAULT;
2983 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2984 if (err) return -EFAULT;
2985 if (sop.cipher != cryptocop_cipher_none) {
2986 if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT;
2987 }
2988 DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2989
2990 DEBUG(printk("\tcipher:%d\n"
2991 "\tcipher_mode:%d\n"
2992 "\tdigest:%d\n"
2993 "\tcsum:%d\n",
2994 (int)sop.cipher,
2995 (int)sop.cmode,
2996 (int)sop.digest,
2997 (int)sop.csum));
2998
2999 if (sop.cipher != cryptocop_cipher_none){
3000 /* Init the cipher. */
3001 switch (sop.cipher){
3002 case cryptocop_cipher_des:
3003 ti_cipher.alg = cryptocop_alg_des;
3004 break;
3005 case cryptocop_cipher_3des:
3006 ti_cipher.alg = cryptocop_alg_3des;
3007 break;
3008 case cryptocop_cipher_aes:
3009 ti_cipher.alg = cryptocop_alg_aes;
3010 break;
3011 default:
3012 DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher));
3013 return -EINVAL;
3014 };
3015 DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg));
3016 if (copy_from_user(ti_cipher.key, sop.key, sop.keylen/8)) return -EFAULT;
3017 ti_cipher.keylen = sop.keylen;
3018 switch (sop.cmode){
3019 case cryptocop_cipher_mode_cbc:
3020 case cryptocop_cipher_mode_ecb:
3021 ti_cipher.cipher_mode = sop.cmode;
3022 break;
3023 default:
3024 DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode));
3025 return -EINVAL;
3026 }
3027 DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode));
3028 switch (sop.des3_mode){
3029 case cryptocop_3des_eee:
3030 case cryptocop_3des_eed:
3031 case cryptocop_3des_ede:
3032 case cryptocop_3des_edd:
3033 case cryptocop_3des_dee:
3034 case cryptocop_3des_ded:
3035 case cryptocop_3des_dde:
3036 case cryptocop_3des_ddd:
3037 ti_cipher.tdes_mode = sop.des3_mode;
3038 break;
3039 default:
3040 DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode));
3041 return -EINVAL;
3042 }
3043 ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
3044 ti_cipher.next = tis;
3045 tis = &ti_cipher;
3046 } /* if (sop.cipher != cryptocop_cipher_none) */
3047 if (sop.digest != cryptocop_digest_none){
3048 DEBUG(printk("setting digest transform\n"));
3049 switch (sop.digest){
3050 case cryptocop_digest_md5:
3051 ti_digest.alg = cryptocop_alg_md5;
3052 break;
3053 case cryptocop_digest_sha1:
3054 ti_digest.alg = cryptocop_alg_sha1;
3055 break;
3056 default:
3057 DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest));
3058 return -EINVAL;
3059 }
3060 ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
3061 ti_digest.next = tis;
3062 tis = &ti_digest;
3063 } /* if (sop.digest != cryptocop_digest_none) */
3064 if (sop.csum != cryptocop_csum_none){
3065 DEBUG(printk("setting csum transform\n"));
3066 switch (sop.csum){
3067 case cryptocop_csum_le:
3068 case cryptocop_csum_be:
3069 ti_csum.csum_mode = sop.csum;
3070 break;
3071 default:
3072 DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum));
3073 return -EINVAL;
3074 }
3075 ti_csum.alg = cryptocop_alg_csum;
3076 ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID;
3077 ti_csum.next = tis;
3078 tis = &ti_csum;
3079 } /* (sop.csum != cryptocop_csum_none) */
3080 dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL);
3081 if (!dev){
3082 DEBUG_API(printk("create session, alloc dev\n"));
3083 return -ENOMEM;
3084 }
3085
3086 err = cryptocop_new_session(&sid, tis, GFP_KERNEL);
3087 DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);});
3088
3089 if (err) {
3090 kfree(dev);
3091 return err;
3092 }
3093 sess_op->ses_id = sid;
3094 dev->sid = sid;
3095 dev->next = filp->private_data;
3096 filp->private_data = dev;
3097
3098 return 0;
3099}
3100
3101static int cryptocop_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
3102{
3103 int err = 0;
3104 if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) {
3105 DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3106 return -ENOTTY;
3107 }
3108 if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){
3109 return -ENOTTY;
3110 }
3111 /* Access check of the argument. Some commands, e.g. create session and process op,
3112 needs additional checks. Those are handled in the command handling functions. */
3113 if (_IOC_DIR(cmd) & _IOC_READ)
3114 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
3115 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3116 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
3117 if (err) return -EFAULT;
3118
3119 switch (cmd) {
3120 case CRYPTOCOP_IO_CREATE_SESSION:
3121 return cryptocop_ioctl_create_session(inode, filp, cmd, arg);
3122 case CRYPTOCOP_IO_CLOSE_SESSION:
3123 return cryptocop_ioctl_close_session(inode, filp, cmd, arg);
3124 case CRYPTOCOP_IO_PROCESS_OP:
3125 return cryptocop_ioctl_process(inode, filp, cmd, arg);
3126 default:
3127 DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3128 return -ENOTTY;
3129 }
3130 return 0;
3131}
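/*
 * Illustration only (not part of the driver): a minimal userspace sketch of
 * creating a session through the ioctl interface handled above. The device
 * node name "/dev/cryptocop" is an assumption; the struct fields, enum values
 * and the CRYPTOCOP_IO_CREATE_SESSION command follow the handlers in this
 * file, keylen is given in bits (the handler copies keylen/8 bytes of key
 * data), and the driver writes the new session id back into sop.ses_id.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	int create_aes_cbc_session(unsigned char *key, unsigned int keybits)
 *	{
 *		struct strcop_session_op sop;
 *		int fd = open("/dev/cryptocop", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sop, 0, sizeof(sop));
 *		sop.cipher = cryptocop_cipher_aes;
 *		sop.cmode = cryptocop_cipher_mode_cbc;
 *		sop.digest = cryptocop_digest_none;
 *		sop.csum = cryptocop_csum_none;
 *		sop.key = key;
 *		sop.keylen = keybits;
 *		if (ioctl(fd, CRYPTOCOP_IO_CREATE_SESSION, &sop) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */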
3132
3133
3134#ifdef LDEBUG
3135static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3136{
3137 struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out;
3138 struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in;
3139 int i;
3140
3141 printk("print_dma_descriptors start\n");
3142
3143 printk("iop:\n");
3144 printk("\tsid: %lld\n", iop->sid);
3145
3146 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3147 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
3148 printk("\tddesc_out: 0x%p\n", iop->ddesc_out);
3149 printk("\tddesc_in: 0x%p\n", iop->ddesc_in);
3150
3151 printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out));
3152 printk("\tnext: 0x%p\n"
3153 "\tsaved_data: 0x%p\n"
3154 "\tsaved_data_buf: 0x%p\n",
3155 iop->ctx_out.next,
3156 iop->ctx_out.saved_data,
3157 iop->ctx_out.saved_data_buf);
3158
3159 printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in));
3160 printk("\tnext: 0x%p\n"
3161 "\tsaved_data: 0x%p\n"
3162 "\tsaved_data_buf: 0x%p\n",
3163 iop->ctx_in.next,
3164 iop->ctx_in.saved_data,
3165 iop->ctx_in.saved_data_buf);
3166
3167 i = 0;
3168 while (cdesc_out) {
3169 dma_descr_data *td;
3170 printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr);
3171 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr));
3172 td = cdesc_out->dma_descr;
3173 printk("\n\tbuf: 0x%p\n"
3174 "\tafter: 0x%p\n"
3175 "\tmd: 0x%04x\n"
3176 "\tnext: 0x%p\n",
3177 td->buf,
3178 td->after,
3179 td->md,
3180 td->next);
3181 printk("flags:\n"
3182 "\twait:\t%d\n"
3183 "\teol:\t%d\n"
3184 "\touteop:\t%d\n"
3185 "\tineop:\t%d\n"
3186 "\tintr:\t%d\n",
3187 td->wait,
3188 td->eol,
3189 td->out_eop,
3190 td->in_eop,
3191 td->intr);
3192 cdesc_out = cdesc_out->next;
3193 i++;
3194 }
3195 i = 0;
3196 while (cdesc_in) {
3197 dma_descr_data *td;
3198 printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr);
3199 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr));
3200 td = cdesc_in->dma_descr;
3201 printk("\n\tbuf: 0x%p\n"
3202 "\tafter: 0x%p\n"
3203 "\tmd: 0x%04x\n"
3204 "\tnext: 0x%p\n",
3205 td->buf,
3206 td->after,
3207 td->md,
3208 td->next);
3209 printk("flags:\n"
3210 "\twait:\t%d\n"
3211 "\teol:\t%d\n"
3212 "\touteop:\t%d\n"
3213 "\tineop:\t%d\n"
3214 "\tintr:\t%d\n",
3215 td->wait,
3216 td->eol,
3217 td->out_eop,
3218 td->in_eop,
3219 td->intr);
3220 cdesc_in = cdesc_in->next;
3221 i++;
3222 }
3223
3224 printk("print_dma_descriptors end\n");
3225}
3226
3227
3228static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
3229{
3230 printk("print_strcop_crypto_op, 0x%p\n", cop);
3231
3232 /* Indata. */
3233 printk("indata=0x%p\n"
3234 "inlen=%d\n"
3235 "do_cipher=%d\n"
3236 "decrypt=%d\n"
3237 "cipher_explicit=%d\n"
3238 "cipher_start=%d\n"
3239 "cipher_len=%d\n"
3240 "outdata=0x%p\n"
3241 "outlen=%d\n",
3242 cop->indata,
3243 cop->inlen,
3244 cop->do_cipher,
3245 cop->decrypt,
3246 cop->cipher_explicit,
3247 cop->cipher_start,
3248 cop->cipher_len,
3249 cop->cipher_outdata,
3250 cop->cipher_outlen);
3251
3252 printk("do_digest=%d\n"
3253 "digest_start=%d\n"
3254 "digest_len=%d\n",
3255 cop->do_digest,
3256 cop->digest_start,
3257 cop->digest_len);
3258
3259 printk("do_csum=%d\n"
3260 "csum_start=%d\n"
3261 "csum_len=%d\n",
3262 cop->do_csum,
3263 cop->csum_start,
3264 cop->csum_len);
3265}
3266
3267static void print_cryptocop_operation(struct cryptocop_operation *cop)
3268{
3269 struct cryptocop_desc *d;
3270 struct cryptocop_tfrm_cfg *tc;
3271 struct cryptocop_desc_cfg *dc;
3272 int i;
3273
3274 printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
3275 printk("sid: %lld\n", cop->sid);
3276 printk("operation_status=%d\n"
3277 "use_dmalists=%d\n"
3278 "in_interrupt=%d\n"
3279 "fast_callback=%d\n",
3280 cop->operation_status,
3281 cop->use_dmalists,
3282 cop->in_interrupt,
3283 cop->fast_callback);
3284
3285 if (cop->use_dmalists){
3286 print_user_dma_lists(&cop->list_op);
3287 } else {
3288 printk("cop->tfrm_op\n"
3289 "tfrm_cfg=0x%p\n"
3290 "desc=0x%p\n"
3291 "indata=0x%p\n"
3292 "incount=%d\n"
3293 "inlen=%d\n"
3294 "outdata=0x%p\n"
3295 "outcount=%d\n"
3296 "outlen=%d\n\n",
3297 cop->tfrm_op.tfrm_cfg,
3298 cop->tfrm_op.desc,
3299 cop->tfrm_op.indata,
3300 cop->tfrm_op.incount,
3301 cop->tfrm_op.inlen,
3302 cop->tfrm_op.outdata,
3303 cop->tfrm_op.outcount,
3304 cop->tfrm_op.outlen);
3305
3306 tc = cop->tfrm_op.tfrm_cfg;
3307 while (tc){
3308 printk("tfrm_cfg, 0x%p\n"
3309 "tid=%d\n"
3310 "flags=%d\n"
3311 "inject_ix=%d\n"
3312 "next=0x%p\n",
3313 tc,
3314 tc->tid,
3315 tc->flags,
3316 tc->inject_ix,
3317 tc->next);
3318 tc = tc->next;
3319 }
3320 d = cop->tfrm_op.desc;
3321 while (d){
3322 printk("\n======================desc, 0x%p\n"
3323 "length=%d\n"
3324 "cfg=0x%p\n"
3325 "next=0x%p\n",
3326 d,
3327 d->length,
3328 d->cfg,
3329 d->next);
3330 dc = d->cfg;
3331 while (dc){
3332 printk("=========desc_cfg, 0x%p\n"
3333 "tid=%d\n"
3334 "src=%d\n"
3335 "last=%d\n"
3336 "next=0x%p\n",
3337 dc,
3338 dc->tid,
3339 dc->src,
3340 dc->last,
3341 dc->next);
3342 dc = dc->next;
3343 }
3344 d = d->next;
3345 }
3346 printk("\n====iniov\n");
3347 for (i = 0; i < cop->tfrm_op.incount; i++){
3348 printk("indata[%d]\n"
3349 "base=0x%p\n"
3350 "len=%d\n",
3351 i,
3352 cop->tfrm_op.indata[i].iov_base,
3353 cop->tfrm_op.indata[i].iov_len);
3354 }
3355 printk("\n====outiov\n");
3356 for (i = 0; i < cop->tfrm_op.outcount; i++){
3357 printk("outdata[%d]\n"
3358 "base=0x%p\n"
3359 "len=%d\n",
3360 i,
3361 cop->tfrm_op.outdata[i].iov_base,
3362 cop->tfrm_op.outdata[i].iov_len);
3363 }
3364 }
3365 printk("------------end print_cryptocop_operation\n");
3366}
3367
3368
3369static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
3370{
3371 dma_descr_data *dd;
3372 int i;
3373
3374 printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);
3375
3376 printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
3377 printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));
3378
3379 printk("##############outlist\n");
3380 dd = phys_to_virt((unsigned long int)dma_op->outlist);
3381 i = 0;
3382 while (dd != NULL) {
3383 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3384 printk("\n\tbuf: 0x%p\n"
3385 "\tafter: 0x%p\n"
3386 "\tmd: 0x%04x\n"
3387 "\tnext: 0x%p\n",
3388 dd->buf,
3389 dd->after,
3390 dd->md,
3391 dd->next);
3392 printk("flags:\n"
3393 "\twait:\t%d\n"
3394 "\teol:\t%d\n"
3395 "\touteop:\t%d\n"
3396 "\tineop:\t%d\n"
3397 "\tintr:\t%d\n",
3398 dd->wait,
3399 dd->eol,
3400 dd->out_eop,
3401 dd->in_eop,
3402 dd->intr);
3403 if (dd->eol)
3404 dd = NULL;
3405 else
3406 dd = phys_to_virt((unsigned long int)dd->next);
3407 ++i;
3408 }
3409
3410 printk("##############inlist\n");
3411 dd = phys_to_virt((unsigned long int)dma_op->inlist);
3412 i = 0;
3413 while (dd != NULL) {
3414 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3415 printk("\n\tbuf: 0x%p\n"
3416 "\tafter: 0x%p\n"
3417 "\tmd: 0x%04x\n"
3418 "\tnext: 0x%p\n",
3419 dd->buf,
3420 dd->after,
3421 dd->md,
3422 dd->next);
3423 printk("flags:\n"
3424 "\twait:\t%d\n"
3425 "\teol:\t%d\n"
3426 "\touteop:\t%d\n"
3427 "\tineop:\t%d\n"
3428 "\tintr:\t%d\n",
3429 dd->wait,
3430 dd->eol,
3431 dd->out_eop,
3432 dd->in_eop,
3433 dd->intr);
3434 if (dd->eol)
3435 dd = NULL;
3436 else
3437 dd = phys_to_virt((unsigned long int)dd->next);
3438 ++i;
3439 }
3440}
3441
3442
3443static void print_lock_status(void)
3444{
3445 printk("**********************print_lock_status\n");
3446 printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock));
3447 printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock));
3448 printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock));
3449 printk("cryptocop_sessions_lock %d\n", spin_is_locked(&cryptocop_sessions_lock));
3450 printk("running_job_lock %d\n", spin_is_locked(&running_job_lock));
3451 printk("cryptocop_process_lock %d\n", spin_is_locked(&cryptocop_process_lock));
3452}
3453#endif /* LDEBUG */
3454
3455
3456static const char cryptocop_name[] = "ETRAX FS stream co-processor";
3457
3458static int init_stream_coprocessor(void)
3459{
3460 int err;
3461 int i;
3462 static int initialized = 0;
3463
3464 if (initialized)
3465 return 0;
3466
3467 initialized = 1;
3468
3469 printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");
3470
3471 err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
3472 if (err < 0) {
3473 printk(KERN_ERR "stream co-processor: could not get major number.\n");
3474 return err;
3475 }
3476
3477 err = init_cryptocop();
3478 if (err) {
3479 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3480 return err;
3481 }
3482 err = cryptocop_job_queue_init();
3483 if (err) {
3484 release_cryptocop();
3485 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3486 return err;
3487 }
3488 /* Init the descriptor pool. */
3489 for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
3490 descr_pool[i].from_pool = 1;
3491 descr_pool[i].next = &descr_pool[i + 1];
3492 }
3493 descr_pool[i].from_pool = 1;
3494 descr_pool[i].next = NULL;
3495 descr_pool_free_list = &descr_pool[0];
3496 descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;
3497
3498 spin_lock_init(&cryptocop_completed_jobs_lock);
3499 spin_lock_init(&cryptocop_job_queue_lock);
3500 spin_lock_init(&descr_pool_lock);
3501 spin_lock_init(&cryptocop_sessions_lock);
3502 spin_lock_init(&running_job_lock);
3503 spin_lock_init(&cryptocop_process_lock);
3504
3505 cryptocop_sessions = NULL;
3506 next_sid = 1;
3507
3508 cryptocop_running_job = NULL;
3509
3510 printk("stream co-processor: init done.\n");
3511 return 0;
3512}
3513
3514static void __exit exit_stream_coprocessor(void)
3515{
3516 release_cryptocop();
3517 cryptocop_job_queue_close();
3518}
3519
3520module_init(init_stream_coprocessor);
3521module_exit(exit_stream_coprocessor);
3522
diff --git a/arch/cris/arch-v32/drivers/gpio.c b/arch/cris/arch-v32/drivers/gpio.c
new file mode 100644
index 000000000000..a551237dcb5e
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/gpio.c
@@ -0,0 +1,766 @@
1/* $Id: gpio.c,v 1.16 2005/06/19 17:06:49 starvik Exp $
2 *
3 * ETRAX CRISv32 general port I/O device
4 *
5 * Copyright (c) 1999, 2000, 2001, 2002, 2003 Axis Communications AB
6 *
7 * Authors: Bjorn Wesen (initial version)
8 * Ola Knutsson (LED handling)
9 * Johan Adolfsson (read/set directions, write, port G,
10 * port to ETRAX FS).
11 *
12 * $Log: gpio.c,v $
13 * Revision 1.16 2005/06/19 17:06:49 starvik
14 * Merge of Linux 2.6.12.
15 *
16 * Revision 1.15 2005/05/25 08:22:20 starvik
17 * Changed GPIO port order to fit packages/devices/axis-2.4.
18 *
19 * Revision 1.14 2005/04/24 18:35:08 starvik
20 * Updated with final register headers.
21 *
22 * Revision 1.13 2005/03/15 15:43:00 starvik
23 * dev_id needs to be supplied for shared IRQs.
24 *
25 * Revision 1.12 2005/03/10 17:12:00 starvik
26 * Protect alarm list with spinlock.
27 *
28 * Revision 1.11 2005/01/05 06:08:59 starvik
29 * No need to do local_irq_disable after local_irq_save.
30 *
31 * Revision 1.10 2004/11/19 08:38:31 starvik
32 * Removed old crap.
33 *
34 * Revision 1.9 2004/05/14 07:58:02 starvik
35 * Merge of changes from 2.4
36 *
37 * Revision 1.8 2003/09/11 07:29:50 starvik
38 * Merge of Linux 2.6.0-test5
39 *
40 * Revision 1.7 2003/07/10 13:25:46 starvik
41 * Compiles for 2.5.74
42 * Lindented ethernet.c
43 *
44 * Revision 1.6 2003/07/04 08:27:46 starvik
45 * Merge of Linux 2.5.74
46 *
47 * Revision 1.5 2003/06/10 08:26:37 johana
48 * Etrax -> ETRAX CRISv32
49 *
50 * Revision 1.4 2003/06/05 14:22:48 johana
51 * Initialise some_alarms.
52 *
53 * Revision 1.3 2003/06/05 10:15:46 johana
54 * New INTR_VECT macros.
55 * Enable interrupts in global config.
56 *
57 * Revision 1.2 2003/06/03 15:52:50 johana
58 * Initial CRIS v32 version.
59 *
60 * Revision 1.1 2003/06/03 08:53:15 johana
61 * Copy of os/lx25/arch/cris/arch-v10/drivers/gpio.c version 1.7.
62 *
63 */
64
65#include <linux/config.h>
66
67#include <linux/module.h>
68#include <linux/sched.h>
69#include <linux/slab.h>
70#include <linux/ioport.h>
71#include <linux/errno.h>
72#include <linux/kernel.h>
73#include <linux/fs.h>
74#include <linux/string.h>
75#include <linux/poll.h>
76#include <linux/init.h>
77#include <linux/interrupt.h>
78#include <linux/spinlock.h>
79
80#include <asm/etraxgpio.h>
81#include <asm/arch/hwregs/reg_map.h>
82#include <asm/arch/hwregs/reg_rdwr.h>
83#include <asm/arch/hwregs/gio_defs.h>
84#include <asm/arch/hwregs/intr_vect_defs.h>
85#include <asm/io.h>
86#include <asm/system.h>
87#include <asm/irq.h>
88
89/* The following gio ports are available on ETRAX FS:
90 * pa 8 bits, supports interrupts: off, hi, low, set, posedge, negedge, anyedge
91 * pb 18 bits
92 * pc 18 bits
93 * pd 18 bits
94 * pe 18 bits
95 * each port has a rw_px_dout, r_px_din and rw_px_oe register.
96 */
97
98#define GPIO_MAJOR 120 /* experimental MAJOR number */
99
100#define D(x)
101
102#if 0
103static int dp_cnt;
104#define DP(x) do { dp_cnt++; if (dp_cnt % 1000 == 0) x; }while(0)
105#else
106#define DP(x)
107#endif
108
109static char gpio_name[] = "etrax gpio";
110
111#if 0
112static wait_queue_head_t *gpio_wq;
113#endif
114
115static int gpio_ioctl(struct inode *inode, struct file *file,
116 unsigned int cmd, unsigned long arg);
117static ssize_t gpio_write(struct file * file, const char * buf, size_t count,
118 loff_t *off);
119static int gpio_open(struct inode *inode, struct file *filp);
120static int gpio_release(struct inode *inode, struct file *filp);
121static unsigned int gpio_poll(struct file *filp, struct poll_table_struct *wait);
122
123/* private data per open() of this driver */
124
125struct gpio_private {
126 struct gpio_private *next;
127 /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */
128 unsigned char clk_mask;
129 unsigned char data_mask;
130 unsigned char write_msb;
131 unsigned char pad1;
132 /* These fields are generic */
133 unsigned long highalarm, lowalarm;
134 wait_queue_head_t alarm_wq;
135 int minor;
136};
137
138/* linked list of alarms to check for */
139
140static struct gpio_private *alarmlist = 0;
141
142static int gpio_some_alarms = 0; /* Set if someone uses alarm */
143static unsigned long gpio_pa_high_alarms = 0;
144static unsigned long gpio_pa_low_alarms = 0;
145
146static DEFINE_SPINLOCK(alarm_lock);
147
148#define NUM_PORTS (GPIO_MINOR_LAST+1)
149#define GIO_REG_RD_ADDR(reg) (volatile unsigned long*) (regi_gio + REG_RD_ADDR_gio_##reg )
150#define GIO_REG_WR_ADDR(reg) (volatile unsigned long*) (regi_gio + REG_RD_ADDR_gio_##reg )
151unsigned long led_dummy;
152
153static volatile unsigned long *data_out[NUM_PORTS] = {
154 GIO_REG_WR_ADDR(rw_pa_dout),
155 GIO_REG_WR_ADDR(rw_pb_dout),
156 &led_dummy,
157 GIO_REG_WR_ADDR(rw_pc_dout),
158 GIO_REG_WR_ADDR(rw_pd_dout),
159 GIO_REG_WR_ADDR(rw_pe_dout),
160};
161
162static volatile unsigned long *data_in[NUM_PORTS] = {
163 GIO_REG_RD_ADDR(r_pa_din),
164 GIO_REG_RD_ADDR(r_pb_din),
165 &led_dummy,
166 GIO_REG_RD_ADDR(r_pc_din),
167 GIO_REG_RD_ADDR(r_pd_din),
168 GIO_REG_RD_ADDR(r_pe_din),
169};
170
171static unsigned long changeable_dir[NUM_PORTS] = {
172 CONFIG_ETRAX_PA_CHANGEABLE_DIR,
173 CONFIG_ETRAX_PB_CHANGEABLE_DIR,
174 0,
175 CONFIG_ETRAX_PC_CHANGEABLE_DIR,
176 CONFIG_ETRAX_PD_CHANGEABLE_DIR,
177 CONFIG_ETRAX_PE_CHANGEABLE_DIR,
178};
179
180static unsigned long changeable_bits[NUM_PORTS] = {
181 CONFIG_ETRAX_PA_CHANGEABLE_BITS,
182 CONFIG_ETRAX_PB_CHANGEABLE_BITS,
183 0,
184 CONFIG_ETRAX_PC_CHANGEABLE_BITS,
185 CONFIG_ETRAX_PD_CHANGEABLE_BITS,
186 CONFIG_ETRAX_PE_CHANGEABLE_BITS,
187};
188
189static volatile unsigned long *dir_oe[NUM_PORTS] = {
190 GIO_REG_WR_ADDR(rw_pa_oe),
191 GIO_REG_WR_ADDR(rw_pb_oe),
192 &led_dummy,
193 GIO_REG_WR_ADDR(rw_pc_oe),
194 GIO_REG_WR_ADDR(rw_pd_oe),
195 GIO_REG_WR_ADDR(rw_pe_oe),
196};
197
198
199
200static unsigned int
201gpio_poll(struct file *file,
202 poll_table *wait)
203{
204 unsigned int mask = 0;
205 struct gpio_private *priv = (struct gpio_private *)file->private_data;
206 unsigned long data;
207 poll_wait(file, &priv->alarm_wq, wait);
208 if (priv->minor == GPIO_MINOR_A) {
209 reg_gio_rw_intr_cfg intr_cfg;
210 unsigned long tmp;
211 unsigned long flags;
212
213 local_irq_save(flags);
214 data = REG_TYPE_CONV(unsigned long, reg_gio_r_pa_din, REG_RD(gio, regi_gio, r_pa_din));
215 /* PA has interrupt support;
216 * let's activate 'high' for those pins that are low and have highalarm set
217 */
218 intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
219
220 tmp = ~data & priv->highalarm & 0xFF;
221 if (tmp & (1 << 0)) {
222 intr_cfg.pa0 = regk_gio_hi;
223 }
224 if (tmp & (1 << 1)) {
225 intr_cfg.pa1 = regk_gio_hi;
226 }
227 if (tmp & (1 << 2)) {
228 intr_cfg.pa2 = regk_gio_hi;
229 }
230 if (tmp & (1 << 3)) {
231 intr_cfg.pa3 = regk_gio_hi;
232 }
233 if (tmp & (1 << 4)) {
234 intr_cfg.pa4 = regk_gio_hi;
235 }
236 if (tmp & (1 << 5)) {
237 intr_cfg.pa5 = regk_gio_hi;
238 }
239 if (tmp & (1 << 6)) {
240 intr_cfg.pa6 = regk_gio_hi;
241 }
242 if (tmp & (1 << 7)) {
243 intr_cfg.pa7 = regk_gio_hi;
244 }
245 /*
246 * let's activate 'low' for those pins that are high and have lowalarm set
247 */
248 tmp = data & priv->lowalarm & 0xFF;
249 if (tmp & (1 << 0)) {
250 intr_cfg.pa0 = regk_gio_lo;
251 }
252 if (tmp & (1 << 1)) {
253 intr_cfg.pa1 = regk_gio_lo;
254 }
255 if (tmp & (1 << 2)) {
256 intr_cfg.pa2 = regk_gio_lo;
257 }
258 if (tmp & (1 << 3)) {
259 intr_cfg.pa3 = regk_gio_lo;
260 }
261 if (tmp & (1 << 4)) {
262 intr_cfg.pa4 = regk_gio_lo;
263 }
264 if (tmp & (1 << 5)) {
265 intr_cfg.pa5 = regk_gio_lo;
266 }
267 if (tmp & (1 << 6)) {
268 intr_cfg.pa6 = regk_gio_lo;
269 }
270 if (tmp & (1 << 7)) {
271 intr_cfg.pa7 = regk_gio_lo;
272 }
273
274 REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
275 local_irq_restore(flags);
276 } else if (priv->minor <= GPIO_MINOR_E)
277 data = *data_in[priv->minor];
278 else
279 return 0;
280
281 if ((data & priv->highalarm) ||
282 (~data & priv->lowalarm)) {
283 mask = POLLIN|POLLRDNORM;
284 }
285
286 DP(printk("gpio_poll ready: mask 0x%08X\n", mask));
287 return mask;
288}
289
290int etrax_gpio_wake_up_check(void)
291{
292 struct gpio_private *priv = alarmlist;
293 unsigned long data = 0;
294 int ret = 0;
295 while (priv) {
296 data = *data_in[priv->minor];
297 if ((data & priv->highalarm) ||
298 (~data & priv->lowalarm)) {
299 DP(printk("etrax_gpio_wake_up_check %i\n",priv->minor));
300 wake_up_interruptible(&priv->alarm_wq);
301 ret = 1;
302 }
303 priv = priv->next;
304 }
305 return ret;
306}
307
308static irqreturn_t
309gpio_poll_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
310{
311 if (gpio_some_alarms) {
312 return IRQ_RETVAL(etrax_gpio_wake_up_check());
313 }
314 return IRQ_NONE;
315}
316
317static irqreturn_t
318gpio_pa_interrupt(int irq, void *dev_id, struct pt_regs *regs)
319{
320 reg_gio_rw_intr_mask intr_mask;
321 reg_gio_r_masked_intr masked_intr;
322 reg_gio_rw_ack_intr ack_intr;
323 unsigned long tmp;
324 unsigned long tmp2;
325
326 /* Find what PA interrupts are active */
327 masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
328 tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
329
330 /* Find those that we have enabled */
331 spin_lock(&alarm_lock);
332 tmp &= (gpio_pa_high_alarms | gpio_pa_low_alarms);
333 spin_unlock(&alarm_lock);
334
335 /* Ack them */
336 ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
337 REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
338
339 /* Disable those interrupts.. */
340 intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
341 tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
342 tmp2 &= ~tmp;
343 intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
344 REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
345
346 if (gpio_some_alarms) {
347 return IRQ_RETVAL(etrax_gpio_wake_up_check());
348 }
349 return IRQ_NONE;
350}
351
352
353static ssize_t gpio_write(struct file * file, const char * buf, size_t count,
354 loff_t *off)
355{
356 struct gpio_private *priv = (struct gpio_private *)file->private_data;
357 unsigned char data, clk_mask, data_mask, write_msb;
358 unsigned long flags;
359 unsigned long shadow;
360 volatile unsigned long *port;
361 ssize_t retval = count;
362 /* Only bits 0-7 may be used for write operations, but allow all
363 devices except the LEDs... */
364 if (priv->minor == GPIO_MINOR_LEDS) {
365 return -EFAULT;
366 }
367
368 if (!access_ok(VERIFY_READ, buf, count)) {
369 return -EFAULT;
370 }
371 clk_mask = priv->clk_mask;
372 data_mask = priv->data_mask;
373 /* It must have been configured using the IO_CFG_WRITE_MODE */
374 /* Perhaps a better error code? */
375 if (clk_mask == 0 || data_mask == 0) {
376 return -EPERM;
377 }
378 write_msb = priv->write_msb;
379 D(printk("gpio_write: %lu to data 0x%02X clk 0x%02X msb: %i\n",count, data_mask, clk_mask, write_msb));
380 port = data_out[priv->minor];
381
382 while (count--) {
383 int i;
384 data = *buf++;
385 if (priv->write_msb) {
386 for (i = 7; i >= 0;i--) {
387 local_irq_save(flags);
388 shadow = *port;
389 *port = shadow &= ~clk_mask;
390 if (data & 1<<i)
391 *port = shadow |= data_mask;
392 else
393 *port = shadow &= ~data_mask;
394 /* For FPGA: min 5.0ns (DCC) before CCLK high */
395 *port = shadow |= clk_mask;
396 local_irq_restore(flags);
397 }
398 } else {
399 for (i = 0; i <= 7;i++) {
400 local_irq_save(flags);
401 shadow = *port;
402 *port = shadow &= ~clk_mask;
403 if (data & 1<<i)
404 *port = shadow |= data_mask;
405 else
406 *port = shadow &= ~data_mask;
407 /* For FPGA: min 5.0ns (DCC) before CCLK high */
408 *port = shadow |= clk_mask;
409 local_irq_restore(flags);
410 }
411 }
412 }
413 return retval;
414}
415
416
417
418static int
419gpio_open(struct inode *inode, struct file *filp)
420{
421 struct gpio_private *priv;
422 int p = MINOR(inode->i_rdev);
423
424 if (p > GPIO_MINOR_LAST)
425 return -EINVAL;
426
427 priv = (struct gpio_private *)kmalloc(sizeof(struct gpio_private),
428 GFP_KERNEL);
429
430 if (!priv)
431 return -ENOMEM;
432
433 priv->minor = p;
434
435 /* initialize the io/alarm struct and link it into our alarmlist */
436
437 priv->next = alarmlist;
438 alarmlist = priv;
439 priv->clk_mask = 0;
440 priv->data_mask = 0;
441 priv->highalarm = 0;
442 priv->lowalarm = 0;
443 init_waitqueue_head(&priv->alarm_wq);
444
445 filp->private_data = (void *)priv;
446
447 return 0;
448}
449
450static int
451gpio_release(struct inode *inode, struct file *filp)
452{
453 struct gpio_private *p = alarmlist;
454 struct gpio_private *todel = (struct gpio_private *)filp->private_data;
455 /* local copies while updating them: */
456 unsigned long a_high, a_low;
457 unsigned long some_alarms;
458
459 /* unlink from alarmlist and free the private structure */
460
461 if (p == todel) {
462 alarmlist = todel->next;
463 } else {
464 while (p->next != todel)
465 p = p->next;
466 p->next = todel->next;
467 }
468
469 kfree(todel);
470 /* Check if there are still any alarms set */
471 p = alarmlist;
472 some_alarms = 0;
473 a_high = 0;
474 a_low = 0;
475 while (p) {
476 if (p->minor == GPIO_MINOR_A) {
477 a_high |= p->highalarm;
478 a_low |= p->lowalarm;
479 }
480
481 if (p->highalarm | p->lowalarm) {
482 some_alarms = 1;
483 }
484 p = p->next;
485 }
486
487 spin_lock(&alarm_lock);
488 gpio_some_alarms = some_alarms;
489 gpio_pa_high_alarms = a_high;
490 gpio_pa_low_alarms = a_low;
491 spin_unlock(&alarm_lock);
492
493 return 0;
494}
495
496/* Main device API. ioctl's to read/set/clear bits, as well as to
497 * set alarms to wait for using a subsequent select().
498 */
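/*
 * Illustration only (not part of the driver): a minimal userspace sketch of
 * the ioctl interface described above. The device node name "/dev/gpiob" is
 * an assumption, and the commands are assumed to be built with _IO() from the
 * numbers in asm/etraxgpio.h, which matches how gpio_ioctl() below decodes
 * them with _IOC_TYPE()/_IOC_NR(). The sketch sets bit 2, arms a high alarm
 * on bit 3, and a caller would then select()/poll() on the descriptor to wait
 * for the alarm.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/etraxgpio.h>
 *
 *	int gpio_example(void)
 *	{
 *		int fd = open("/dev/gpiob", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 0x04);
 *		ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_HIGHALARM), 0x08);
 *		return close(fd);
 *	}
 */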
499
500unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg)
501{
502 /* Set direction 0=unchanged 1=input,
503 * return mask with 1=input
504 */
505 unsigned long flags;
506 unsigned long dir_shadow;
507
508 local_irq_save(flags);
509 dir_shadow = *dir_oe[priv->minor];
510 dir_shadow &= ~(arg & changeable_dir[priv->minor]);
511 *dir_oe[priv->minor] = dir_shadow;
512 local_irq_restore(flags);
513
514 if (priv->minor == GPIO_MINOR_A)
515 dir_shadow ^= 0xFF; /* Only 8 bits */
516 else
517 dir_shadow ^= 0x3FFFF; /* Only 18 bits */
518 return dir_shadow;
519
520} /* setget_input */
521
522unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg)
523{
524 unsigned long flags;
525 unsigned long dir_shadow;
526
527 local_irq_save(flags);
528 dir_shadow = *dir_oe[priv->minor];
529 dir_shadow |= (arg & changeable_dir[priv->minor]);
530 *dir_oe[priv->minor] = dir_shadow;
531 local_irq_restore(flags);
532 return dir_shadow;
533} /* setget_output */
534
535static int
536gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
537
538static int
539gpio_ioctl(struct inode *inode, struct file *file,
540 unsigned int cmd, unsigned long arg)
541{
542 unsigned long flags;
543 unsigned long val;
544 unsigned long shadow;
545 struct gpio_private *priv = (struct gpio_private *)file->private_data;
546 if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE) {
547 return -EINVAL;
548 }
549
550 switch (_IOC_NR(cmd)) {
551 case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
552 // read the port
553 return *data_in[priv->minor];
554 break;
555 case IO_SETBITS:
556 local_irq_save(flags);
557 if (arg & 0x04)
558 printk("GPIO SET 2\n");
559 // set changeable bits with a 1 in arg
560 shadow = *data_out[priv->minor];
561 shadow |= (arg & changeable_bits[priv->minor]);
562 *data_out[priv->minor] = shadow;
563 local_irq_restore(flags);
564 break;
565 case IO_CLRBITS:
566 local_irq_save(flags);
567 if (arg & 0x04)
568 printk("GPIO CLR 2\n");
569 // clear changeable bits with a 1 in arg
570 shadow = *data_out[priv->minor];
571 shadow &= ~(arg & changeable_bits[priv->minor]);
572 *data_out[priv->minor] = shadow;
573 local_irq_restore(flags);
574 break;
575 case IO_HIGHALARM:
576 // set alarm when bits with 1 in arg go high
577 priv->highalarm |= arg;
578 spin_lock(&alarm_lock);
579 gpio_some_alarms = 1;
580 if (priv->minor == GPIO_MINOR_A) {
581 gpio_pa_high_alarms |= arg;
582 }
583 spin_unlock(&alarm_lock);
584 break;
585 case IO_LOWALARM:
586 // set alarm when bits with 1 in arg go low
587 priv->lowalarm |= arg;
588 spin_lock(&alarm_lock);
589 gpio_some_alarms = 1;
590 if (priv->minor == GPIO_MINOR_A) {
591 gpio_pa_low_alarms |= arg;
592 }
593 spin_unlock(&alarm_lock);
594 break;
595 case IO_CLRALARM:
596 // clear alarm for bits with 1 in arg
597 priv->highalarm &= ~arg;
598 priv->lowalarm &= ~arg;
599 spin_lock(&alarm_lock);
600 if (priv->minor == GPIO_MINOR_A) {
601 if (gpio_pa_high_alarms & arg ||
602 gpio_pa_low_alarms & arg) {
603 /* Must update the gpio_pa_*alarms masks */
604 }
605 }
606 spin_unlock(&alarm_lock);
607 break;
608 case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
609 /* Read direction 0=input 1=output */
610 return *dir_oe[priv->minor];
611 case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
612 /* Set direction 0=unchanged 1=input,
613 * return mask with 1=input
614 */
615 return setget_input(priv, arg);
616 break;
617 case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
618 /* Set direction 0=unchanged 1=output,
619 * return mask with 1=output
620 */
621 return setget_output(priv, arg);
622
623 case IO_CFG_WRITE_MODE:
624 {
625 unsigned long dir_shadow;
626 dir_shadow = *dir_oe[priv->minor];
627
628 priv->clk_mask = arg & 0xFF;
629 priv->data_mask = (arg >> 8) & 0xFF;
630 priv->write_msb = (arg >> 16) & 0x01;
631 /* Check if we're allowed to change the bits and
632 * the direction is correct
633 */
634 if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
635 (priv->data_mask & changeable_bits[priv->minor]) &&
636 (priv->clk_mask & dir_shadow) &&
637 (priv->data_mask & dir_shadow)))
638 {
639 priv->clk_mask = 0;
640 priv->data_mask = 0;
641 return -EPERM;
642 }
643 break;
644 }
645 case IO_READ_INBITS:
646 /* *arg is result of reading the input pins */
647 val = *data_in[priv->minor];
648 if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
649 return -EFAULT;
650 return 0;
651 break;
652 case IO_READ_OUTBITS:
653 /* *arg is result of reading the output shadow */
654 val = *data_out[priv->minor];
655 if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
656 return -EFAULT;
657 break;
658 case IO_SETGET_INPUT:
659 /* bits set in *arg is set to input,
660 * *arg updated with current input pins.
661 */
662 if (copy_from_user(&val, (unsigned long*)arg, sizeof(val)))
663 return -EFAULT;
664 val = setget_input(priv, val);
665 if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
666 return -EFAULT;
667 break;
668 case IO_SETGET_OUTPUT:
669 /* bits set in *arg is set to output,
670 * *arg updated with current output pins.
671 */
672 if (copy_from_user(&val, (unsigned long*)arg, sizeof(val)))
673 return -EFAULT;
674 val = setget_output(priv, val);
675 if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
676 return -EFAULT;
677 break;
678 default:
679 if (priv->minor == GPIO_MINOR_LEDS)
680 return gpio_leds_ioctl(cmd, arg);
681 else
682 return -EINVAL;
683 } /* switch */
684
685 return 0;
686}
687
688static int
689gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
690{
691 unsigned char green;
692 unsigned char red;
693
694 switch (_IOC_NR(cmd)) {
695 case IO_LEDACTIVE_SET:
696 green = ((unsigned char) arg) & 1;
697 red = (((unsigned char) arg) >> 1) & 1;
698 LED_ACTIVE_SET_G(green);
699 LED_ACTIVE_SET_R(red);
700 break;
701
702 default:
703 return -EINVAL;
704 } /* switch */
705
706 return 0;
707}
708
709struct file_operations gpio_fops = {
710 .owner = THIS_MODULE,
711 .poll = gpio_poll,
712 .ioctl = gpio_ioctl,
713 .write = gpio_write,
714 .open = gpio_open,
715 .release = gpio_release,
716};
717
718
719/* main driver initialization routine, called from mem.c */
720
721static __init int
722gpio_init(void)
723{
724 int res;
725 reg_intr_vect_rw_mask intr_mask;
726
727 /* do the formalities */
728
729 res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
730 if (res < 0) {
731 printk(KERN_ERR "gpio: couldn't get a major number.\n");
732 return res;
733 }
734
735 /* Clear all leds */
736 LED_NETWORK_SET(0);
737 LED_ACTIVE_SET(0);
738 LED_DISK_READ(0);
739 LED_DISK_WRITE(0);
740
741 printk("ETRAX FS GPIO driver v2.5, (c) 2003-2005 Axis Communications AB\n");
742 /* We call etrax_gpio_wake_up_check() from timer interrupt and
743 * from cpu_idle() in kernel/process.c
744 * The check in cpu_idle() reduces latency from ~15 ms to ~6 ms
745 * in some tests.
746 */
747 if (request_irq(TIMER_INTR_VECT, gpio_poll_timer_interrupt,
748 SA_SHIRQ | SA_INTERRUPT,"gpio poll", &alarmlist)) {
749 printk("err: timer0 irq for gpio\n");
750 }
751 if (request_irq(GEN_IO_INTR_VECT, gpio_pa_interrupt,
752 SA_SHIRQ | SA_INTERRUPT,"gpio PA", &alarmlist)) {
753 printk("err: PA irq for gpio\n");
754 }
755 /* enable the gio and timer irq in global config */
756 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
757 intr_mask.timer = 1;
758 intr_mask.gen_io = 1;
759 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
760
761 return res;
762}
763
764/* this makes sure that gpio_init is called during kernel boot */
765
766module_init(gpio_init);
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
new file mode 100644
index 000000000000..440c20a94963
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/i2c.c
@@ -0,0 +1,611 @@
1/*!***************************************************************************
2*!
3*! FILE NAME : i2c.c
4*!
5*! DESCRIPTION: implements an interface for IIC/I2C, both directly from other
6*! kernel modules (i2c_writereg/readreg) and from userspace using
7*! ioctl()'s
8*!
9*! Nov 30 1998 Torbjorn Eliasson Initial version.
10*! Bjorn Wesen Elinux kernel version.
11*! Jan 14 2000 Johan Adolfsson Fixed PB shadow register stuff -
12*! don't use PB_I2C if DS1302 uses same bits,
13*! use PB.
14*| June 23 2003 Pieter Grimmerink Added 'i2c_sendnack'. i2c_readreg now
15*| generates nack on last received byte,
16*| instead of ack.
17*| i2c_getack changed data level while clock
18*| was high, causing DS75 to see a stop condition
19*!
20*! ---------------------------------------------------------------------------
21*!
22*! (C) Copyright 1999-2002 Axis Communications AB, LUND, SWEDEN
23*!
24*!***************************************************************************/
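/*
 * A minimal sketch (illustration only) of the in-kernel interface mentioned
 * above: another driver includes "i2c.h" and calls i2c_writereg()/i2c_readreg()
 * directly. The slave address and register numbers are made up for the
 * example; i2c_writereg() returns 0 on success and a negative value when an
 * ack is missing.
 *
 *	#include "i2c.h"
 *
 *	static int example_setup(void)
 *	{
 *		if (i2c_writereg(0xA2, 0x0E, 0x00) < 0)
 *			return -EIO;
 *		return i2c_readreg(0xA2, 0x0E);
 *	}
 */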
25/* $Id: i2c.c,v 1.2 2005/05/09 15:29:49 starvik Exp $ */
26/****************** INCLUDE FILES SECTION ***********************************/
27
28#include <linux/module.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/fs.h>
34#include <linux/string.h>
35#include <linux/init.h>
36#include <linux/config.h>
37
38#include <asm/etraxi2c.h>
39
40#include <asm/system.h>
41#include <asm/io.h>
42#include <asm/delay.h>
43
44#include "i2c.h"
45
46/****************** I2C DEFINITION SECTION *************************/
47
48#define D(x)
49
50#define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */
51static const char i2c_name[] = "i2c";
52
53#define CLOCK_LOW_TIME 8
54#define CLOCK_HIGH_TIME 8
55#define START_CONDITION_HOLD_TIME 8
56#define STOP_CONDITION_HOLD_TIME 8
57#define ENABLE_OUTPUT 0x01
58#define ENABLE_INPUT 0x00
59#define I2C_CLOCK_HIGH 1
60#define I2C_CLOCK_LOW 0
61#define I2C_DATA_HIGH 1
62#define I2C_DATA_LOW 0
63
64#define i2c_enable()
65#define i2c_disable()
66
67/* enable or disable output-enable, to select output or input on the i2c bus */
68
69#define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out)
70#define i2c_dir_in() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in)
71
72/* control the i2c clock and data signals */
73
74#define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x)
75#define i2c_data(x) crisv32_io_set(&cris_i2c_data, x)
76
77/* read a bit from the i2c interface */
78
79#define i2c_getbit() crisv32_io_rd(&cris_i2c_data)
80
81#define i2c_delay(usecs) udelay(usecs)
82
83/****************** VARIABLE SECTION ************************************/
84
85static struct crisv32_iopin cris_i2c_clk;
86static struct crisv32_iopin cris_i2c_data;
87
88/****************** FUNCTION DEFINITION SECTION *************************/
89
90
91/* generate i2c start condition */
92
93void
94i2c_start(void)
95{
96 /*
97 * SCL=1 SDA=1
98 */
99 i2c_dir_out();
100 i2c_delay(CLOCK_HIGH_TIME/6);
101 i2c_data(I2C_DATA_HIGH);
102 i2c_clk(I2C_CLOCK_HIGH);
103 i2c_delay(CLOCK_HIGH_TIME);
104 /*
105 * SCL=1 SDA=0
106 */
107 i2c_data(I2C_DATA_LOW);
108 i2c_delay(START_CONDITION_HOLD_TIME);
109 /*
110 * SCL=0 SDA=0
111 */
112 i2c_clk(I2C_CLOCK_LOW);
113 i2c_delay(CLOCK_LOW_TIME);
114}
115
116/* generate i2c stop condition */
117
118void
119i2c_stop(void)
120{
121 i2c_dir_out();
122
123 /*
124 * SCL=0 SDA=0
125 */
126 i2c_clk(I2C_CLOCK_LOW);
127 i2c_data(I2C_DATA_LOW);
128 i2c_delay(CLOCK_LOW_TIME*2);
129 /*
130 * SCL=1 SDA=0
131 */
132 i2c_clk(I2C_CLOCK_HIGH);
133 i2c_delay(CLOCK_HIGH_TIME*2);
134 /*
135 * SCL=1 SDA=1
136 */
137 i2c_data(I2C_DATA_HIGH);
138 i2c_delay(STOP_CONDITION_HOLD_TIME);
139
140 i2c_dir_in();
141}
142
143/* write a byte to the i2c interface */
144
145void
146i2c_outbyte(unsigned char x)
147{
148 int i;
149
150 i2c_dir_out();
151
152 for (i = 0; i < 8; i++) {
153 if (x & 0x80) {
154 i2c_data(I2C_DATA_HIGH);
155 } else {
156 i2c_data(I2C_DATA_LOW);
157 }
158
159 i2c_delay(CLOCK_LOW_TIME/2);
160 i2c_clk(I2C_CLOCK_HIGH);
161 i2c_delay(CLOCK_HIGH_TIME);
162 i2c_clk(I2C_CLOCK_LOW);
163 i2c_delay(CLOCK_LOW_TIME/2);
164 x <<= 1;
165 }
166 i2c_data(I2C_DATA_LOW);
167 i2c_delay(CLOCK_LOW_TIME/2);
168
169 /*
170 * enable input
171 */
172 i2c_dir_in();
173}
174
175/* read a byte from the i2c interface */
176
177unsigned char
178i2c_inbyte(void)
179{
180 unsigned char aBitByte = 0;
181 int i;
182
183 /* Switch off I2C to get bit */
184 i2c_disable();
185 i2c_dir_in();
186 i2c_delay(CLOCK_HIGH_TIME/2);
187
188 /* Get bit */
189 aBitByte |= i2c_getbit();
190
191 /* Enable I2C */
192 i2c_enable();
193 i2c_delay(CLOCK_LOW_TIME/2);
194
195 for (i = 1; i < 8; i++) {
196 aBitByte <<= 1;
197 /* Clock pulse */
198 i2c_clk(I2C_CLOCK_HIGH);
199 i2c_delay(CLOCK_HIGH_TIME);
200 i2c_clk(I2C_CLOCK_LOW);
201 i2c_delay(CLOCK_LOW_TIME);
202
203 /* Switch off I2C to get bit */
204 i2c_disable();
205 i2c_dir_in();
206 i2c_delay(CLOCK_HIGH_TIME/2);
207
208 /* Get bit */
209 aBitByte |= i2c_getbit();
210
211 /* Enable I2C */
212 i2c_enable();
213 i2c_delay(CLOCK_LOW_TIME/2);
214 }
215 i2c_clk(I2C_CLOCK_HIGH);
216 i2c_delay(CLOCK_HIGH_TIME);
217
218 /*
219 * we leave the clock low; inbyte is usually followed
220 * by sendack/nack, which assume the clock to be low
221 */
222 i2c_clk(I2C_CLOCK_LOW);
223 return aBitByte;
224}
225
226/*#---------------------------------------------------------------------------
227*#
228*# FUNCTION NAME: i2c_getack
229*#
230*# DESCRIPTION : checks if ack was received from i2c
231*#
232*#--------------------------------------------------------------------------*/
233
234int
235i2c_getack(void)
236{
237 int ack = 1;
238 /*
239 * enable output
240 */
241 i2c_dir_out();
242 /*
243 * Release data bus by setting
244 * data high
245 */
246 i2c_data(I2C_DATA_HIGH);
247 /*
248 * enable input
249 */
250 i2c_dir_in();
251 i2c_delay(CLOCK_HIGH_TIME/4);
252 /*
253 * generate ACK clock pulse
254 */
255 i2c_clk(I2C_CLOCK_HIGH);
256 /*
257 * Use PORT PB instead of I2C
258 * for input. (I2C not working)
259 */
260 i2c_clk(1);
261 i2c_data(1);
262 /*
263 * switch off I2C
264 */
265 i2c_data(1);
266 i2c_disable();
267 i2c_dir_in();
268 /*
269 * now wait for ack
270 */
271 i2c_delay(CLOCK_HIGH_TIME/2);
272 /*
273 * check for ack
274 */
275 if(i2c_getbit())
276 ack = 0;
277 i2c_delay(CLOCK_HIGH_TIME/2);
278 if(!ack){
279 if(!i2c_getbit()) /* receiver pulled SDA low */
280 ack = 1;
281 i2c_delay(CLOCK_HIGH_TIME/2);
282 }
283
284 /*
285 * our clock is high now, make sure data is low
286 * before we enable our output. If we keep data high
287 * and enable output, we would generate a stop condition.
288 */
289 i2c_data(I2C_DATA_LOW);
290
291 /*
292 * end clock pulse
293 */
294 i2c_enable();
295 i2c_dir_out();
296 i2c_clk(I2C_CLOCK_LOW);
297 i2c_delay(CLOCK_HIGH_TIME/4);
298 /*
299 * enable output
300 */
301 i2c_dir_out();
302 /*
303 * remove ACK clock pulse
304 */
305 i2c_data(I2C_DATA_HIGH);
306 i2c_delay(CLOCK_LOW_TIME/2);
307 return ack;
308}
309
310/*#---------------------------------------------------------------------------
311*#
312*# FUNCTION NAME: I2C::sendAck
313*#
314*# DESCRIPTION : Send ACK on received data
315*#
316*#--------------------------------------------------------------------------*/
317void
318i2c_sendack(void)
319{
320 /*
321 * enable output
322 */
323 i2c_delay(CLOCK_LOW_TIME);
324 i2c_dir_out();
325 /*
326 * pull data low (ack)
327 */
328 i2c_data(I2C_DATA_LOW);
329 /*
330 * generate clock pulse
331 */
332 i2c_delay(CLOCK_HIGH_TIME/6);
333 i2c_clk(I2C_CLOCK_HIGH);
334 i2c_delay(CLOCK_HIGH_TIME);
335 i2c_clk(I2C_CLOCK_LOW);
336 i2c_delay(CLOCK_LOW_TIME/6);
337 /*
338 * reset data out
339 */
340 i2c_data(I2C_DATA_HIGH);
341 i2c_delay(CLOCK_LOW_TIME);
342
343 i2c_dir_in();
344}
345
346/*#---------------------------------------------------------------------------
347*#
348*# FUNCTION NAME: i2c_sendnack
349*#
350*# DESCRIPTION : Sends NACK on received data
351*#
352*#--------------------------------------------------------------------------*/
353void
354i2c_sendnack(void)
355{
356 /*
357 * enable output
358 */
359 i2c_delay(CLOCK_LOW_TIME);
360 i2c_dir_out();
361 /*
362 * set data high
363 */
364 i2c_data(I2C_DATA_HIGH);
365 /*
366 * generate clock pulse
367 */
368 i2c_delay(CLOCK_HIGH_TIME/6);
369 i2c_clk(I2C_CLOCK_HIGH);
370 i2c_delay(CLOCK_HIGH_TIME);
371 i2c_clk(I2C_CLOCK_LOW);
372 i2c_delay(CLOCK_LOW_TIME);
373
374 i2c_dir_in();
375}
376
377/*#---------------------------------------------------------------------------
378*#
379*# FUNCTION NAME: i2c_writereg
380*#
381*# DESCRIPTION : Writes a value to an I2C device
382*#
383*#--------------------------------------------------------------------------*/
384int
385i2c_writereg(unsigned char theSlave, unsigned char theReg,
386 unsigned char theValue)
387{
388 int error, cntr = 3;
389 unsigned long flags;
390
391 do {
392 error = 0;
393 /*
394 * we don't like to be interrupted
395 */
396 local_irq_save(flags);
397
398 i2c_start();
399 /*
400 * send slave address
401 */
402 i2c_outbyte((theSlave & 0xfe));
403 /*
404 * wait for ack
405 */
406 if(!i2c_getack())
407 error = 1;
408 /*
409 * now select register
410 */
411 i2c_dir_out();
412 i2c_outbyte(theReg);
413 /*
414 * now it's time to wait for ack
415 */
416 if(!i2c_getack())
417 error |= 2;
418 /*
419 * send register data
420 */
421 i2c_outbyte(theValue);
422 /*
423 * now it's time to wait for ack
424 */
425 if(!i2c_getack())
426 error |= 4;
427 /*
428 * end byte stream
429 */
430 i2c_stop();
431 /*
432 * enable interrupt again
433 */
434 local_irq_restore(flags);
435
436 } while(error && cntr--);
437
438 i2c_delay(CLOCK_LOW_TIME);
439
440 return -error;
441}
442
443/*#---------------------------------------------------------------------------
444*#
445*# FUNCTION NAME: i2c_readreg
446*#
447*# DESCRIPTION : Reads a value from an I2C device register.
448*#
449*#--------------------------------------------------------------------------*/
450unsigned char
451i2c_readreg(unsigned char theSlave, unsigned char theReg)
452{
453 unsigned char b = 0;
454 int error, cntr = 3;
455 unsigned long flags;
456
457 do {
458 error = 0;
459 /*
460 * we don't like to be interrupted
461 */
462 local_irq_save(flags);
463 /*
464 * generate start condition
465 */
466 i2c_start();
467
468 /*
469 * send slave address
470 */
471 i2c_outbyte((theSlave & 0xfe));
472 /*
473 * wait for ack
474 */
475 if(!i2c_getack())
476 error = 1;
477 /*
478 * now select register
479 */
480 i2c_dir_out();
481 i2c_outbyte(theReg);
482 /*
483 * now it's time to wait for ack
484 */
485 if(!i2c_getack())
486 error = 1;
487 /*
488 * repeat start condition
489 */
490 i2c_delay(CLOCK_LOW_TIME);
491 i2c_start();
492 /*
493 * send slave address
494 */
495 i2c_outbyte(theSlave | 0x01);
496 /*
497 * wait for ack
498 */
499 if(!i2c_getack())
500 error = 1;
501 /*
502 * fetch register
503 */
504 b = i2c_inbyte();
505 /*
506 * last received byte needs to be nacked
507 * instead of acked
508 */
509 i2c_sendnack();
510 /*
511 * end sequence
512 */
513 i2c_stop();
514 /*
515 * enable interrupt again
516 */
517 local_irq_restore(flags);
518
519 } while(error && cntr--);
520
521 return b;
522}
523
524static int
525i2c_open(struct inode *inode, struct file *filp)
526{
527 return 0;
528}
529
530static int
531i2c_release(struct inode *inode, struct file *filp)
532{
533 return 0;
534}
535
536/* Main device API. ioctl's to write or read to/from i2c registers.
537 */
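/*
 * Illustration only (not part of the driver): a userspace sketch of the ioctl
 * interface above. The device node name "/dev/i2c" is an assumption, and
 * I2C_WRITEARG()/I2C_READARG() are assumed to be the argument-packing
 * counterparts (in asm/etraxi2c.h) of the I2C_ARGSLAVE/I2C_ARGREG/I2C_ARGVALUE
 * accessors used below; the slave and register numbers are made up.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/etraxi2c.h>
 *
 *	int i2c_example(void)
 *	{
 *		int fd = open("/dev/i2c", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		ioctl(fd, _IO(ETRAXI2C_IOCTYPE, I2C_WRITEREG),
 *		      I2C_WRITEARG(0x40, 0x01, 0xff));
 *		return ioctl(fd, _IO(ETRAXI2C_IOCTYPE, I2C_READREG),
 *			     I2C_READARG(0x40, 0x01));
 *	}
 */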
538
539static int
540i2c_ioctl(struct inode *inode, struct file *file,
541 unsigned int cmd, unsigned long arg)
542{
543 if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
544 return -EINVAL;
545 }
546
547 switch (_IOC_NR(cmd)) {
548 case I2C_WRITEREG:
549 /* write to an i2c slave */
550 D(printk("i2cw %d %d %d\n",
551 I2C_ARGSLAVE(arg),
552 I2C_ARGREG(arg),
553 I2C_ARGVALUE(arg)));
554
555 return i2c_writereg(I2C_ARGSLAVE(arg),
556 I2C_ARGREG(arg),
557 I2C_ARGVALUE(arg));
558 case I2C_READREG:
559 {
560 unsigned char val;
561 /* read from an i2c slave */
562 D(printk("i2cr %d %d ",
563 I2C_ARGSLAVE(arg),
564 I2C_ARGREG(arg)));
565 val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
566 D(printk("= %d\n", val));
567 return val;
568 }
569 default:
570 return -EINVAL;
571
572 }
573
574 return 0;
575}
576
577static struct file_operations i2c_fops = {
578 .owner = THIS_MODULE,
579 .ioctl = i2c_ioctl,
580 .open = i2c_open,
581 .release = i2c_release,
582};
583
584int __init
585i2c_init(void)
586{
587 int res;
588
589 /* Setup and enable the Port B I2C interface */
590
591 crisv32_io_get_name(&cris_i2c_data, CONFIG_ETRAX_I2C_DATA_PORT);
592 crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_I2C_CLK_PORT);
593
594 /* register char device */
595
596 res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
597 if(res < 0) {
598 printk(KERN_ERR "i2c: couldn't get a major number.\n");
599 return res;
600 }
601
602 printk(KERN_INFO "I2C driver v2.2, (c) 1999-2001 Axis Communications AB\n");
603
604 return 0;
605}
606
607/* this makes sure that i2c_init is called during boot */
608
609module_init(i2c_init);
610
611/****************** END OF FILE i2c.c ********************************/
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
new file mode 100644
index 000000000000..bfe1a13f9f35
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/i2c.h
@@ -0,0 +1,15 @@
1
2#include <linux/init.h>
3
4/* High level I2C actions */
5int __init i2c_init(void);
6int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
7unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg);
8
9/* Low level I2C */
10void i2c_start(void);
11void i2c_stop(void);
12void i2c_outbyte(unsigned char x);
13unsigned char i2c_inbyte(void);
14int i2c_getack(void);
15void i2c_sendack(void);
diff --git a/arch/cris/arch-v32/drivers/iop_fw_load.c b/arch/cris/arch-v32/drivers/iop_fw_load.c
new file mode 100644
index 000000000000..11f9895ded50
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/iop_fw_load.c
@@ -0,0 +1,219 @@
1/* $Id: iop_fw_load.c,v 1.4 2005/04/07 09:27:46 larsv Exp $
2 *
3 * Firmware loader for ETRAX FS IO-Processor
4 *
5 * Copyright (C) 2004 Axis Communications AB
6 */
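/*
 * Illustration only (not part of the driver): a sketch of how another kernel
 * driver might use the exported interface below to load and start
 * IO-processor firmware. The firmware file names are assumptions; the
 * function names and the convention of starting MPU execution from address 0
 * come from this file.
 *
 *	static int example_start_iop(void)
 *	{
 *		int err;
 *
 *		err = iop_fw_load_spu("iop_spu0.bin", 0);
 *		if (err)
 *			return err;
 *		err = iop_fw_load_mpu("iop_mpu.bin");
 *		if (err)
 *			return err;
 *		return iop_start_mpu(0);
 *	}
 */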
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/device.h>
12#include <linux/firmware.h>
13
14#include <asm/arch/hwregs/reg_map.h>
15#include <asm/arch/hwregs/iop/iop_reg_space.h>
16#include <asm/arch/hwregs/iop/iop_mpu_macros.h>
17#include <asm/arch/hwregs/iop/iop_mpu_defs.h>
18#include <asm/arch/hwregs/iop/iop_spu_defs.h>
19#include <asm/arch/hwregs/iop/iop_sw_cpu_defs.h>
20
21#define IOP_TIMEOUT 100
22
23static struct device iop_spu_device[2] = {
24 { .bus_id = "iop-spu0", },
25 { .bus_id = "iop-spu1", },
26};
27
28static struct device iop_mpu_device = {
29 .bus_id = "iop-mpu",
30};
31
32static int wait_mpu_idle(void)
33{
34 reg_iop_mpu_r_stat mpu_stat;
35 unsigned int timeout = IOP_TIMEOUT;
36
37 do {
38 mpu_stat = REG_RD(iop_mpu, regi_iop_mpu, r_stat);
39 } while (mpu_stat.instr_reg_busy == regk_iop_mpu_yes && --timeout > 0);
40 if (timeout == 0) {
41 printk(KERN_ERR "Timeout waiting for MPU to be idle\n");
42 return -EBUSY;
43 }
44 return 0;
45}
46
47int iop_fw_load_spu(const unsigned char *fw_name, unsigned int spu_inst)
48{
49 reg_iop_sw_cpu_rw_mc_ctrl mc_ctrl = {
50 .wr_spu0_mem = regk_iop_sw_cpu_no,
51 .wr_spu1_mem = regk_iop_sw_cpu_no,
52 .size = 4,
53 .cmd = regk_iop_sw_cpu_reg_copy,
54 .keep_owner = regk_iop_sw_cpu_yes
55 };
56 reg_iop_spu_rw_ctrl spu_ctrl = {
57 .en = regk_iop_spu_no,
58 .fsm = regk_iop_spu_no,
59 };
60 reg_iop_sw_cpu_r_mc_stat mc_stat;
61 const struct firmware *fw_entry;
62 u32 *data;
63 unsigned int timeout;
64 int retval, i;
65
66 if (spu_inst > 1)
67 return -ENODEV;
68
69 /* get firmware */
70 retval = request_firmware(&fw_entry,
71 fw_name,
72 &iop_spu_device[spu_inst]);
73 if (retval != 0)
74 {
75 printk(KERN_ERR
76 "iop_load_spu: Failed to load firmware \"%s\"\n",
77 fw_name);
78 return retval;
79 }
80 data = (u32 *) fw_entry->data;
81
82 /* acquire ownership of memory controller */
83 switch (spu_inst) {
84 case 0:
85 mc_ctrl.wr_spu0_mem = regk_iop_sw_cpu_yes;
86 REG_WR(iop_spu, regi_iop_spu0, rw_ctrl, spu_ctrl);
87 break;
88 case 1:
89 mc_ctrl.wr_spu1_mem = regk_iop_sw_cpu_yes;
90 REG_WR(iop_spu, regi_iop_spu1, rw_ctrl, spu_ctrl);
91 break;
92 }
93 timeout = IOP_TIMEOUT;
94 do {
95 REG_WR(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_ctrl, mc_ctrl);
96 mc_stat = REG_RD(iop_sw_cpu, regi_iop_sw_cpu, r_mc_stat);
97 } while (mc_stat.owned_by_cpu == regk_iop_sw_cpu_no && --timeout > 0);
98 if (timeout == 0) {
99 printk(KERN_ERR "Timeout waiting to acquire MC\n");
100 retval = -EBUSY;
101 goto out;
102 }
103
104 /* write to SPU memory */
105 for (i = 0; i < (fw_entry->size/4); i++) {
106 switch (spu_inst) {
107 case 0:
108 REG_WR_INT(iop_spu, regi_iop_spu0, rw_seq_pc, (i*4));
109 break;
110 case 1:
111 REG_WR_INT(iop_spu, regi_iop_spu1, rw_seq_pc, (i*4));
112 break;
113 }
114 REG_WR_INT(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_data, *data);
115 data++;
116 }
117
118 /* release ownership of memory controller */
119 (void) REG_RD(iop_sw_cpu, regi_iop_sw_cpu, rs_mc_data);
120
121 out:
122 release_firmware(fw_entry);
123 return retval;
124}
125
126int iop_fw_load_mpu(unsigned char *fw_name)
127{
128 const unsigned int start_addr = 0;
129 reg_iop_mpu_rw_ctrl mpu_ctrl;
130 const struct firmware *fw_entry;
131 u32 *data;
132 int retval, i;
133
134 /* get firmware */
135 retval = request_firmware(&fw_entry, fw_name, &iop_mpu_device);
136 if (retval != 0)
137 {
138 printk(KERN_ERR
139 "iop_load_spu: Failed to load firmware \"%s\"\n",
140 fw_name);
141 return retval;
142 }
143 data = (u32 *) fw_entry->data;
144
145 /* disable MPU */
146 mpu_ctrl.en = regk_iop_mpu_no;
147 REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl);
148 /* put start address in R0 */
149 REG_WR_VECT(iop_mpu, regi_iop_mpu, rw_r, 0, start_addr);
150 /* write to memory by executing 'SWX i, 4, R0' for each word */
151 if ((retval = wait_mpu_idle()) != 0)
152 goto out;
153 REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_SWX_IIR_INSTR(0, 4, 0));
154 for (i = 0; i < (fw_entry->size / 4); i++) {
155 REG_WR_INT(iop_mpu, regi_iop_mpu, rw_immediate, *data);
156 if ((retval = wait_mpu_idle()) != 0)
157 goto out;
158 data++;
159 }
160
161 out:
162 release_firmware(fw_entry);
163 return retval;
164}
165
166int iop_start_mpu(unsigned int start_addr)
167{
168 reg_iop_mpu_rw_ctrl mpu_ctrl = { .en = regk_iop_mpu_yes };
169 int retval;
170
171 /* disable MPU */
172 if ((retval = wait_mpu_idle()) != 0)
173 goto out;
174 REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_HALT());
175 if ((retval = wait_mpu_idle()) != 0)
176 goto out;
177 /* set PC and wait for it to bite */
178 if ((retval = wait_mpu_idle()) != 0)
179 goto out;
180 REG_WR_INT(iop_mpu, regi_iop_mpu, rw_instr, MPU_BA_I(start_addr));
181 if ((retval = wait_mpu_idle()) != 0)
182 goto out;
183 /* make sure the MPU starts executing with interrupts disabled */
184 REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_DI());
185 if ((retval = wait_mpu_idle()) != 0)
186 goto out;
187 /* enable MPU */
188 REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl);
189 out:
190 return retval;
191}
192
193static int __init iop_fw_load_init(void)
194{
195 device_initialize(&iop_spu_device[0]);
196 kobject_set_name(&iop_spu_device[0].kobj, "iop-spu0");
197 kobject_add(&iop_spu_device[0].kobj);
198 device_initialize(&iop_spu_device[1]);
199 kobject_set_name(&iop_spu_device[1].kobj, "iop-spu1");
200 kobject_add(&iop_spu_device[1].kobj);
201 device_initialize(&iop_mpu_device);
202 kobject_set_name(&iop_mpu_device.kobj, "iop-mpu");
203 kobject_add(&iop_mpu_device.kobj);
204 return 0;
205}
206
207static void __exit iop_fw_load_exit(void)
208{
209}
210
211module_init(iop_fw_load_init);
212module_exit(iop_fw_load_exit);
213
214MODULE_DESCRIPTION("ETRAX FS IO-Processor Firmware Loader");
215MODULE_LICENSE("GPL");
216
217EXPORT_SYMBOL(iop_fw_load_spu);
218EXPORT_SYMBOL(iop_fw_load_mpu);
219EXPORT_SYMBOL(iop_start_mpu);
diff --git a/arch/cris/arch-v32/drivers/nandflash.c b/arch/cris/arch-v32/drivers/nandflash.c
new file mode 100644
index 000000000000..fc2a619b035d
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/nandflash.c
@@ -0,0 +1,157 @@
1/*
2 * arch/cris/arch-v32/drivers/nandflash.c
3 *
4 * Copyright (c) 2004
5 *
6 * Derived from drivers/mtd/nand/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 *
9 * $Id: nandflash.c,v 1.3 2005/06/01 10:57:12 starvik Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 */
16
17#include <linux/version.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <asm/arch/memmap.h>
25#include <asm/arch/hwregs/reg_map.h>
26#include <asm/arch/hwregs/reg_rdwr.h>
27#include <asm/arch/hwregs/gio_defs.h>
28#include <asm/arch/hwregs/bif_core_defs.h>
29#include <asm/io.h>
30
31#define CE_BIT 4
32#define CLE_BIT 5
33#define ALE_BIT 6
34#define BY_BIT 7
35
36static struct mtd_info *crisv32_mtd = NULL;
37/*
38 * hardware specific access to control-lines
39*/
40static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd)
41{
42 unsigned long flags;
43 reg_gio_rw_pa_dout dout = REG_RD(gio, regi_gio, rw_pa_dout);
44
45 local_irq_save(flags);
46 switch(cmd){
47 case NAND_CTL_SETCLE:
48 dout.data |= (1<<CLE_BIT);
49 break;
50 case NAND_CTL_CLRCLE:
51 dout.data &= ~(1<<CLE_BIT);
52 break;
53 case NAND_CTL_SETALE:
54 dout.data |= (1<<ALE_BIT);
55 break;
56 case NAND_CTL_CLRALE:
57 dout.data &= ~(1<<ALE_BIT);
58 break;
59 case NAND_CTL_SETNCE:
60 dout.data |= (1<<CE_BIT);
61 break;
62 case NAND_CTL_CLRNCE:
63 dout.data &= ~(1<<CE_BIT);
64 break;
65 }
66 REG_WR(gio, regi_gio, rw_pa_dout, dout);
67 local_irq_restore(flags);
68}
69
70/*
71 * read device ready pin
72 */
73int crisv32_device_ready(struct mtd_info *mtd)
74{
75 reg_gio_r_pa_din din = REG_RD(gio, regi_gio, r_pa_din);
76 return ((din.data & (1 << BY_BIT)) >> BY_BIT);
77}
78
79/*
80 * Main initialization routine
81 */
82struct mtd_info* __init crisv32_nand_flash_probe (void)
83{
84 void __iomem *read_cs;
85 void __iomem *write_cs;
86
87 reg_bif_core_rw_grp3_cfg bif_cfg = REG_RD(bif_core, regi_bif_core, rw_grp3_cfg);
88 reg_gio_rw_pa_oe pa_oe = REG_RD(gio, regi_gio, rw_pa_oe);
89 struct nand_chip *this;
90 int err = 0;
91
92 /* Allocate memory for MTD device structure and private data */
93 crisv32_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
94 GFP_KERNEL);
95 if (!crisv32_mtd) {
96 printk ("Unable to allocate CRISv32 NAND MTD device structure.\n");
97 err = -ENOMEM;
98 return NULL;
99 }
100
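	/*
	 * The NAND chip is reached through two bus chip selects: csp0 is
	 * gated for reads and csp1 for writes (see the rw_grp3_cfg setup
	 * below), which is why separate read and write mappings are made.
	 */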
101 read_cs = ioremap(MEM_CSP0_START | MEM_NON_CACHEABLE, 8192);
102 write_cs = ioremap(MEM_CSP1_START | MEM_NON_CACHEABLE, 8192);
103
104 if (!read_cs || !write_cs) {
105 printk("CRISv32 NAND ioremap failed\n");
106 err = -EIO;
107 goto out_mtd;
108 }
109
110 /* Get pointer to private data */
111 this = (struct nand_chip *) (&crisv32_mtd[1]);
112
113 pa_oe.oe |= 1 << CE_BIT;
114 pa_oe.oe |= 1 << ALE_BIT;
115 pa_oe.oe |= 1 << CLE_BIT;
116 pa_oe.oe &= ~ (1 << BY_BIT);
117 REG_WR(gio, regi_gio, rw_pa_oe, pa_oe);
118
119 bif_cfg.gated_csp0 = regk_bif_core_rd;
120 bif_cfg.gated_csp1 = regk_bif_core_wr;
121 REG_WR(bif_core, regi_bif_core, rw_grp3_cfg, bif_cfg);
122
123 /* Initialize structures */
124 memset((char *) crisv32_mtd, 0, sizeof(struct mtd_info));
125 memset((char *) this, 0, sizeof(struct nand_chip));
126
127 /* Link the private data with the MTD structure */
128 crisv32_mtd->priv = this;
129
130 /* Set address of NAND IO lines */
131 this->IO_ADDR_R = read_cs;
132 this->IO_ADDR_W = write_cs;
133 this->hwcontrol = crisv32_hwcontrol;
134 this->dev_ready = crisv32_device_ready;
135 /* 20 us command delay time */
136 this->chip_delay = 20;
137 this->eccmode = NAND_ECC_SOFT;
138
139 /* Enable the following for a flash based bad block table */
140 this->options = NAND_USE_FLASH_BBT;
141
142	/* Scan to find existence of the device */
143 if (nand_scan (crisv32_mtd, 1)) {
144 err = -ENXIO;
145 goto out_ior;
146 }
147
148 return crisv32_mtd;
149
150out_ior:
151 iounmap((void *)read_cs);
152 iounmap((void *)write_cs);
153out_mtd:
154 kfree (crisv32_mtd);
155 return NULL;
156}
157
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
new file mode 100644
index 000000000000..f894580b648b
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -0,0 +1,341 @@
1/*
2 * PCF8563 RTC
3 *
4 * From Philips' datasheet:
5 *
6 * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
7 * consumption. A programmable clock output, interrupt output and voltage
8 * low detector are also provided. All addresses and data are transferred
9 * serially via a two-line bidirectional I2C-bus. Maximum bus speed is
10 * 400 kbits/s. The built-in word address register is incremented
11 * automatically after each written or read byte.
12 *
13 * Copyright (c) 2002-2003, Axis Communications AB
14 * All rights reserved.
15 *
16 * Author: Tobias Anderberg <tobiasa@axis.com>.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/version.h>
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/sched.h>
26#include <linux/init.h>
27#include <linux/fs.h>
28#include <linux/ioctl.h>
29#include <linux/delay.h>
30#include <linux/bcd.h>
31
32#include <asm/uaccess.h>
33#include <asm/system.h>
34#include <asm/io.h>
35#include <asm/rtc.h>
36
37#include "i2c.h"
38
39#define PCF8563_MAJOR 121 /* Local major number. */
40#define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */
41#define PCF8563_NAME "PCF8563"
42#define DRIVER_VERSION "$Revision: 1.1 $"
43
44/* Two simple wrapper macros, saves a few keystrokes. */
45#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
46#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
47
48static const unsigned char days_in_month[] =
49 { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
50
51int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
52int pcf8563_open(struct inode *, struct file *);
53int pcf8563_release(struct inode *, struct file *);
54
55static struct file_operations pcf8563_fops = {
56 owner: THIS_MODULE,
57 ioctl: pcf8563_ioctl,
58 open: pcf8563_open,
59 release: pcf8563_release,
60};
61
62unsigned char
63pcf8563_readreg(int reg)
64{
65 unsigned char res = rtc_read(reg);
66
67 /* The PCF8563 does not return 0 for unimplemented bits */
68 switch (reg) {
69 case RTC_SECONDS:
70 case RTC_MINUTES:
71 res &= 0x7F;
72 break;
73 case RTC_HOURS:
74 case RTC_DAY_OF_MONTH:
75 res &= 0x3F;
76 break;
77 case RTC_WEEKDAY:
78 res &= 0x07;
79 break;
80 case RTC_MONTH:
81 res &= 0x1F;
82 break;
83 case RTC_CONTROL1:
84 res &= 0xA8;
85 break;
86 case RTC_CONTROL2:
87 res &= 0x1F;
88 break;
89 case RTC_CLOCKOUT_FREQ:
90 case RTC_TIMER_CONTROL:
91 res &= 0x83;
92 break;
93 }
94 return res;
95}
96
97void
98pcf8563_writereg(int reg, unsigned char val)
99{
100#ifdef CONFIG_ETRAX_RTC_READONLY
101 if (reg == RTC_CONTROL1 || (reg >= RTC_SECONDS && reg <= RTC_YEAR))
102 return;
103#endif
104
105 rtc_write(reg, val);
106}
107
108void
109get_rtc_time(struct rtc_time *tm)
110{
111 tm->tm_sec = rtc_read(RTC_SECONDS);
112 tm->tm_min = rtc_read(RTC_MINUTES);
113 tm->tm_hour = rtc_read(RTC_HOURS);
114 tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
115 tm->tm_wday = rtc_read(RTC_WEEKDAY);
116 tm->tm_mon = rtc_read(RTC_MONTH);
117 tm->tm_year = rtc_read(RTC_YEAR);
118
119 if (tm->tm_sec & 0x80)
120 printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
121 "information is no longer guaranteed!\n", PCF8563_NAME);
122
123 tm->tm_year = BCD_TO_BIN(tm->tm_year) + ((tm->tm_mon & 0x80) ? 100 : 0);
124 tm->tm_sec &= 0x7F;
125 tm->tm_min &= 0x7F;
126 tm->tm_hour &= 0x3F;
127 tm->tm_mday &= 0x3F;
128 tm->tm_wday &= 0x07; /* Not coded in BCD. */
129 tm->tm_mon &= 0x1F;
130
131 BCD_TO_BIN(tm->tm_sec);
132 BCD_TO_BIN(tm->tm_min);
133 BCD_TO_BIN(tm->tm_hour);
134 BCD_TO_BIN(tm->tm_mday);
135 BCD_TO_BIN(tm->tm_mon);
136 tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
137}
138
139int __init
140pcf8563_init(void)
141{
142 /* Initiate the i2c protocol. */
143 i2c_init();
144
145 /*
146 * First of all we need to reset the chip. This is done by
147 * clearing control1, control2 and clk freq and resetting
148 * all alarms.
149 */
150 if (rtc_write(RTC_CONTROL1, 0x00) < 0)
151 goto err;
152
153 if (rtc_write(RTC_CONTROL2, 0x00) < 0)
154 goto err;
155
156 if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
157 goto err;
158
159 if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
160 goto err;
161
162 /* Reset the alarms. */
163 if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
164 goto err;
165
166 if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
167 goto err;
168
169 if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
170 goto err;
171
172 if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
173 goto err;
174
175 if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
176		printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
177 PCF8563_NAME, PCF8563_MAJOR);
178 return -1;
179 }
180
181 printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
182
183 /* Check for low voltage, and warn about it.. */
184 if (rtc_read(RTC_SECONDS) & 0x80)
185 printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
186 "information is no longer guaranteed!\n", PCF8563_NAME);
187
188 return 0;
189
190err:
191 printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
192 return -1;
193}
194
195void __exit
196pcf8563_exit(void)
197{
198 if (unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME) < 0) {
199 printk(KERN_INFO "%s: Unable to unregister device.\n", PCF8563_NAME);
200 }
201}
202
203/*
204 * ioctl calls for this driver. Why return -ENOTTY upon error? Because
205 * POSIX says so!
206 */
207int
208pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
209{
210 /* Some sanity checks. */
211 if (_IOC_TYPE(cmd) != RTC_MAGIC)
212 return -ENOTTY;
213
214 if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
215 return -ENOTTY;
216
217 switch (cmd) {
218 case RTC_RD_TIME:
219 {
220 struct rtc_time tm;
221
222 memset(&tm, 0, sizeof (struct rtc_time));
223 get_rtc_time(&tm);
224
225 if (copy_to_user((struct rtc_time *) arg, &tm, sizeof tm)) {
226 return -EFAULT;
227 }
228
229 return 0;
230 }
231
232 case RTC_SET_TIME:
233 {
234#ifdef CONFIG_ETRAX_RTC_READONLY
235 return -EPERM;
236#else
237 int leap;
238 int year;
239 int century;
240 struct rtc_time tm;
241
242 if (!capable(CAP_SYS_TIME))
243 return -EPERM;
244
245 if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
246 return -EFAULT;
247
248 /* Convert from struct tm to struct rtc_time. */
249 tm.tm_year += 1900;
250 tm.tm_mon += 1;
251
252 /*
253 * Check if tm.tm_year is a leap year. A year is a leap
254 * year if it is divisible by 4 but not 100, except
255 * that years divisible by 400 _are_ leap years.
256 */
257 year = tm.tm_year;
258 leap = (tm.tm_mon == 2) && ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
259
260 /* Perform some sanity checks. */
261 if ((tm.tm_year < 1970) ||
262 (tm.tm_mon > 12) ||
263 (tm.tm_mday == 0) ||
264 (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
265 (tm.tm_wday >= 7) ||
266 (tm.tm_hour >= 24) ||
267 (tm.tm_min >= 60) ||
268 (tm.tm_sec >= 60))
269 return -EINVAL;
270
271 century = (tm.tm_year >= 2000) ? 0x80 : 0;
272 tm.tm_year = tm.tm_year % 100;
273
274 BIN_TO_BCD(tm.tm_year);
275 BIN_TO_BCD(tm.tm_mday);
276 BIN_TO_BCD(tm.tm_hour);
277 BIN_TO_BCD(tm.tm_min);
278 BIN_TO_BCD(tm.tm_sec);
279 tm.tm_mon |= century;
280
281 rtc_write(RTC_YEAR, tm.tm_year);
282 rtc_write(RTC_MONTH, tm.tm_mon);
283 rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
284 rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
285 rtc_write(RTC_HOURS, tm.tm_hour);
286 rtc_write(RTC_MINUTES, tm.tm_min);
287 rtc_write(RTC_SECONDS, tm.tm_sec);
288
289 return 0;
290#endif /* !CONFIG_ETRAX_RTC_READONLY */
291 }
292
293 case RTC_VLOW_RD:
294 {
295 int vl_bit = 0;
296
297 if (rtc_read(RTC_SECONDS) & 0x80) {
298 vl_bit = 1;
299 printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
300 "date/time information is no longer guaranteed!\n",
301 PCF8563_NAME);
302 }
303 if (copy_to_user((int *) arg, &vl_bit, sizeof(int)))
304 return -EFAULT;
305
306 return 0;
307 }
308
309 case RTC_VLOW_SET:
310 {
311 /* Clear the VL bit in the seconds register */
312 int ret = rtc_read(RTC_SECONDS);
313
314 rtc_write(RTC_SECONDS, (ret & 0x7F));
315
316 return 0;
317 }
318
319 default:
320 return -ENOTTY;
321 }
322
323 return 0;
324}
325
326int
327pcf8563_open(struct inode *inode, struct file *filp)
328{
329 MOD_INC_USE_COUNT;
330 return 0;
331}
332
333int
334pcf8563_release(struct inode *inode, struct file *filp)
335{
336 MOD_DEC_USE_COUNT;
337 return 0;
338}
339
340module_init(pcf8563_init);
341module_exit(pcf8563_exit);
diff --git a/arch/cris/arch-v32/drivers/pci/Makefile b/arch/cris/arch-v32/drivers/pci/Makefile
new file mode 100644
index 000000000000..bff7482f2444
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/pci/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for Etrax cardbus driver
3#
4
5obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
new file mode 100644
index 000000000000..24bc149889b6
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -0,0 +1,131 @@
1#include <linux/pci.h>
2#include <linux/kernel.h>
3#include <asm/arch/hwregs/intr_vect.h>
4
5void __devinit pcibios_fixup_bus(struct pci_bus *b)
6{
7}
8
9char * __devinit pcibios_setup(char *str)
10{
11 return NULL;
12}
13
14void pcibios_set_master(struct pci_dev *dev)
15{
16 u8 lat;
17 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
18 printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
19 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
20}
21
22int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
23 enum pci_mmap_state mmap_state, int write_combine)
24{
25 unsigned long prot;
26
27 /* Leave vm_pgoff as-is, the PCI space address is the physical
28 * address on this platform.
29 */
30 vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
31
32 prot = pgprot_val(vma->vm_page_prot);
33 vma->vm_page_prot = __pgprot(prot);
34
35 /* Write-combine setting is ignored, it is changed via the mtrr
36 * interfaces on this platform.
37 */
38 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
39 vma->vm_end - vma->vm_start,
40 vma->vm_page_prot))
41 return -EAGAIN;
42
43 return 0;
44}
45
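/*
 * Round I/O resource starts whose low bits fall in the 0x100-0x3ff range
 * up to the next 1 kB boundary, presumably to avoid aliasing legacy ISA
 * I/O decoding, as the i386 PCI code this appears to be derived from does.
 */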
46void
47pcibios_align_resource(void *data, struct resource *res,
48 unsigned long size, unsigned long align)
49{
50 if (res->flags & IORESOURCE_IO) {
51 unsigned long start = res->start;
52
53 if (start & 0x300) {
54 start = (start + 0x3ff) & ~0x3ff;
55 res->start = start;
56 }
57 }
58}
59
60int pcibios_enable_resources(struct pci_dev *dev, int mask)
61{
62 u16 cmd, old_cmd;
63 int idx;
64 struct resource *r;
65
66 pci_read_config_word(dev, PCI_COMMAND, &cmd);
67 old_cmd = cmd;
68 for(idx=0; idx<6; idx++) {
69 /* Only set up the requested stuff */
70 if (!(mask & (1<<idx)))
71 continue;
72
73 r = &dev->resource[idx];
74 if (!r->start && r->end) {
75 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
76 return -EINVAL;
77 }
78 if (r->flags & IORESOURCE_IO)
79 cmd |= PCI_COMMAND_IO;
80 if (r->flags & IORESOURCE_MEM)
81 cmd |= PCI_COMMAND_MEMORY;
82 }
83 if (dev->resource[PCI_ROM_RESOURCE].start)
84 cmd |= PCI_COMMAND_MEMORY;
85 if (cmd != old_cmd) {
86 printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
87 pci_write_config_word(dev, PCI_COMMAND, cmd);
88 }
89 return 0;
90}
91
92int pcibios_enable_irq(struct pci_dev *dev)
93{
94 dev->irq = EXT_INTR_VECT;
95 return 0;
96}
97
98int pcibios_enable_device(struct pci_dev *dev, int mask)
99{
100 int err;
101
102 if ((err = pcibios_enable_resources(dev, mask)) < 0)
103 return err;
104
105 return pcibios_enable_irq(dev);
106}
107
108int pcibios_assign_resources(void)
109{
110 struct pci_dev *dev = NULL;
111 int idx;
112 struct resource *r;
113
114 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
115 int class = dev->class >> 8;
116
117 /* Don't touch classless devices and host bridges */
118 if (!class || class == PCI_CLASS_BRIDGE_HOST)
119 continue;
120
121 for(idx=0; idx<6; idx++) {
122 r = &dev->resource[idx];
123
124 if (!r->start && r->end)
125 pci_assign_resource(dev, idx);
126 }
127 }
128 return 0;
129}
130
131EXPORT_SYMBOL(pcibios_assign_resources);
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
new file mode 100644
index 000000000000..10329306d23c
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -0,0 +1,149 @@
1/*
2 * Dynamic DMA mapping support.
3 *
4 * On cris there is no hardware dynamic DMA address translation,
5 * so consistent alloc/free are merely page allocation/freeing.
6 * The rest of the dynamic DMA mapping interface is implemented
7 * in asm/pci.h.
8 *
9 * Borrowed from i386.
10 */
11
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <linux/string.h>
15#include <linux/pci.h>
16#include <asm/io.h>
17
18struct dma_coherent_mem {
19 void *virt_base;
20 u32 device_base;
21 int size;
22 int flags;
23 unsigned long *bitmap;
24};
25
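/*
 * Two allocation paths: if the device has declared its own coherent
 * memory region (dev->dma_mem, managed by the bitmap below), allocate
 * from that pool; otherwise fall back to plain page allocation, which
 * is sufficient since CRIS has no IOMMU and kernel virtual addresses
 * map directly onto DMA addresses via virt_to_phys().
 */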
26void *dma_alloc_coherent(struct device *dev, size_t size,
27 dma_addr_t *dma_handle, unsigned int __nocast gfp)
28{
29 void *ret;
30 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
31 int order = get_order(size);
32 /* ignore region specifiers */
33 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
34
35 if (mem) {
36 int page = bitmap_find_free_region(mem->bitmap, mem->size,
37 order);
38 if (page >= 0) {
39 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
40 ret = mem->virt_base + (page << PAGE_SHIFT);
41 memset(ret, 0, size);
42 return ret;
43 }
44 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
45 return NULL;
46 }
47
48 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
49 gfp |= GFP_DMA;
50
51 ret = (void *)__get_free_pages(gfp, order);
52
53 if (ret != NULL) {
54 memset(ret, 0, size);
55 *dma_handle = virt_to_phys(ret);
56 }
57 return ret;
58}
59
60void dma_free_coherent(struct device *dev, size_t size,
61 void *vaddr, dma_addr_t dma_handle)
62{
63 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
64 int order = get_order(size);
65
66 if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
67 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
68
69 bitmap_release_region(mem->bitmap, page, order);
70 } else
71 free_pages((unsigned long)vaddr, order);
72}
73
74int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
75 dma_addr_t device_addr, size_t size, int flags)
76{
77 void __iomem *mem_base;
78 int pages = size >> PAGE_SHIFT;
79	int bitmap_size = ((pages + 31) / 32) * sizeof(u32); /* bytes, one bit per page */
80
81 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
82 goto out;
83 if (!size)
84 goto out;
85 if (dev->dma_mem)
86 goto out;
87
88 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
89
90 mem_base = ioremap(bus_addr, size);
91 if (!mem_base)
92 goto out;
93
94 dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
95 if (!dev->dma_mem)
96 goto out;
97 memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
98 dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
99 if (!dev->dma_mem->bitmap)
100 goto free1_out;
101 memset(dev->dma_mem->bitmap, 0, bitmap_size);
102
103 dev->dma_mem->virt_base = mem_base;
104 dev->dma_mem->device_base = device_addr;
105 dev->dma_mem->size = pages;
106 dev->dma_mem->flags = flags;
107
108 if (flags & DMA_MEMORY_MAP)
109 return DMA_MEMORY_MAP;
110
111 return DMA_MEMORY_IO;
112
113 free1_out:
114 kfree(dev->dma_mem->bitmap);
115 out:
116 return 0;
117}
118EXPORT_SYMBOL(dma_declare_coherent_memory);
119
120void dma_release_declared_memory(struct device *dev)
121{
122 struct dma_coherent_mem *mem = dev->dma_mem;
123
124 if(!mem)
125 return;
126 dev->dma_mem = NULL;
127 iounmap(mem->virt_base);
128 kfree(mem->bitmap);
129 kfree(mem);
130}
131EXPORT_SYMBOL(dma_release_declared_memory);
132
133void *dma_mark_declared_memory_occupied(struct device *dev,
134 dma_addr_t device_addr, size_t size)
135{
136 struct dma_coherent_mem *mem = dev->dma_mem;
137 int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
138 int pos, err;
139
140 if (!mem)
141 return ERR_PTR(-EINVAL);
142
143 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
144 err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
145 if (err != 0)
146 return ERR_PTR(err);
147 return mem->virt_base + (pos << PAGE_SHIFT);
148}
149EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
new file mode 100644
index 000000000000..c85a6df8558f
--- /dev/null
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -0,0 +1,1283 @@
1/*
2 * Simple synchronous serial port driver for ETRAX FS.
3 *
4 * Copyright (c) 2005 Axis Communications AB
5 *
6 * Author: Mikael Starvik
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/config.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <linux/major.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/interrupt.h>
19#include <linux/poll.h>
20#include <linux/init.h>
21#include <linux/timer.h>
22#include <linux/spinlock.h>
23
24#include <asm/io.h>
25#include <asm/arch/dma.h>
26#include <asm/arch/pinmux.h>
27#include <asm/arch/hwregs/reg_rdwr.h>
28#include <asm/arch/hwregs/sser_defs.h>
29#include <asm/arch/hwregs/dma_defs.h>
30#include <asm/arch/hwregs/dma.h>
31#include <asm/arch/hwregs/intr_vect_defs.h>
32#include <asm/arch/hwregs/intr_vect.h>
33#include <asm/arch/hwregs/reg_map.h>
34#include <asm/sync_serial.h>
35
36/* The receiver is a bit tricky because of the continuous stream of data.*/
37/* */
38/* Three DMA descriptors are linked together. Each DMA descriptor is */
39/* responsible for port->bufchunk of a common buffer. */
40/* */
41/* +---------------------------------------------+ */
42/* | +----------+ +----------+ +----------+ | */
43/* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
44/* +----------+ +----------+ +----------+ */
45/* | | | */
46/* v v v */
47/* +-------------------------------------+ */
48/* | BUFFER | */
49/* +-------------------------------------+ */
50/* |<- data_avail ->| */
51/* readp writep */
52/* */
53/* If the application keeps up the pace readp will be right after writep.*/
54/* If the application can't keep the pace we have to throw away data. */
55/* The idea is that readp should be ready with the data pointed out by */
56/* Descr[i] when the DMA has filled in Descr[i+1]. */
57/* Otherwise we will discard                                            */
58/* the rest of the data pointed out by Descr[i+1] and set readp to the  */
59/* start of Descr[i+2].                                                 */
60
61#define SYNC_SERIAL_MAJOR 125
62
63/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64/* words can be handled */
65#define IN_BUFFER_SIZE 12288
66#define IN_DESCR_SIZE 256
67#define NUM_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
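/* With the defaults above this gives 12288/256 = 48 input descriptors, each covering one inbufchunk of the receive buffer. */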
68#define OUT_BUFFER_SIZE 4096
69
70#define DEFAULT_FRAME_RATE 0
71#define DEFAULT_WORD_RATE 7
72
73/* NOTE: Enabling some debug will likely cause overrun or underrun,
74 * especially if manual mode is used.
75 */
76#define DEBUG(x)
77#define DEBUGREAD(x)
78#define DEBUGWRITE(x)
79#define DEBUGPOLL(x)
80#define DEBUGRXINT(x)
81#define DEBUGTXINT(x)
82
83typedef struct sync_port
84{
85 reg_scope_instances regi_sser;
86 reg_scope_instances regi_dmain;
87 reg_scope_instances regi_dmaout;
88
89 char started; /* 1 if port has been started */
90 char port_nbr; /* Port 0 or 1 */
91 char busy; /* 1 if port is busy */
92
93 char enabled; /* 1 if port is enabled */
94 char use_dma; /* 1 if port uses dma */
95 char tr_running;
96
97 char init_irqs;
98 int output;
99 int input;
100
101 volatile unsigned int out_count; /* Remaining bytes for current transfer */
102 unsigned char* outp; /* Current position in out_buffer */
103 volatile unsigned char* volatile readp; /* Next byte to be read by application */
104 volatile unsigned char* volatile writep; /* Next byte to be written by etrax */
105 unsigned int in_buffer_size;
106 unsigned int inbufchunk;
107 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
108 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
109 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
110 struct dma_descr_data* next_rx_desc;
111 struct dma_descr_data* prev_rx_desc;
112 int full;
113
114 dma_descr_data in_descr[NUM_IN_DESCR] __attribute__ ((__aligned__(16)));
115 dma_descr_context in_context __attribute__ ((__aligned__(32)));
116 dma_descr_data out_descr __attribute__ ((__aligned__(16)));
117 dma_descr_context out_context __attribute__ ((__aligned__(32)));
118 wait_queue_head_t out_wait_q;
119 wait_queue_head_t in_wait_q;
120
121 spinlock_t lock;
122} sync_port;
123
124static int etrax_sync_serial_init(void);
125static void initialize_port(int portnbr);
126static inline int sync_data_avail(struct sync_port *port);
127
128static int sync_serial_open(struct inode *, struct file*);
129static int sync_serial_release(struct inode*, struct file*);
130static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
131
132static int sync_serial_ioctl(struct inode*, struct file*,
133 unsigned int cmd, unsigned long arg);
134static ssize_t sync_serial_write(struct file * file, const char * buf,
135 size_t count, loff_t *ppos);
136static ssize_t sync_serial_read(struct file *file, char *buf,
137 size_t count, loff_t *ppos);
138
139#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
140 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
141 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
142 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
143#define SYNC_SER_DMA
144#endif
145
146static void send_word(sync_port* port);
147static void start_dma(struct sync_port *port, const char* data, int count);
148static void start_dma_in(sync_port* port);
149#ifdef SYNC_SER_DMA
150static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs);
151static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs);
152#endif
153
154#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
155 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
156 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
157 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
158#define SYNC_SER_MANUAL
159#endif
160#ifdef SYNC_SER_MANUAL
161static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs);
162#endif
163
164/* The ports */
165static struct sync_port ports[]=
166{
167 {
168 .regi_sser = regi_sser0,
169 .regi_dmaout = regi_dma4,
170 .regi_dmain = regi_dma5,
171#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
172 .use_dma = 1,
173#else
174 .use_dma = 0,
175#endif
176 },
177 {
178 .regi_sser = regi_sser1,
179 .regi_dmaout = regi_dma6,
180 .regi_dmain = regi_dma7,
181#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
182 .use_dma = 1,
183#else
184 .use_dma = 0,
185#endif
186 }
187};
188
189#define NUMBER_OF_PORTS (sizeof(ports)/sizeof(sync_port))
190
191static struct file_operations sync_serial_fops = {
192 .owner = THIS_MODULE,
193 .write = sync_serial_write,
194 .read = sync_serial_read,
195 .poll = sync_serial_poll,
196 .ioctl = sync_serial_ioctl,
197 .open = sync_serial_open,
198 .release = sync_serial_release
199};
200
201static int __init etrax_sync_serial_init(void)
202{
203 ports[0].enabled = 0;
204 ports[1].enabled = 0;
205
206	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", &sync_serial_fops) < 0)
207 {
208 printk("unable to get major for synchronous serial port\n");
209 return -EBUSY;
210 }
211
212 /* Initialize Ports */
213#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
214 if (crisv32_pinmux_alloc_fixed(pinmux_sser0))
215 {
216		printk("Unable to allocate pins for synchronous serial port 0\n");
217 return -EIO;
218 }
219 ports[0].enabled = 1;
220 initialize_port(0);
221#endif
222
223#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
224 if (crisv32_pinmux_alloc_fixed(pinmux_sser1))
225 {
226		printk("Unable to allocate pins for synchronous serial port 1\n");
227 return -EIO;
228 }
229 ports[1].enabled = 1;
230 initialize_port(1);
231#endif
232
233 printk("ETRAX FS synchronous serial port driver\n");
234 return 0;
235}
236
237static void __init initialize_port(int portnbr)
238{
239 struct sync_port* port = &ports[portnbr];
240 reg_sser_rw_cfg cfg = {0};
241 reg_sser_rw_frm_cfg frm_cfg = {0};
242 reg_sser_rw_tr_cfg tr_cfg = {0};
243 reg_sser_rw_rec_cfg rec_cfg = {0};
244
245 DEBUG(printk("Init sync serial port %d\n", portnbr));
246
247 port->port_nbr = portnbr;
248 port->init_irqs = 1;
249
250 port->outp = port->out_buffer;
251 port->output = 1;
252 port->input = 0;
253
254 port->readp = port->flip;
255 port->writep = port->flip;
256 port->in_buffer_size = IN_BUFFER_SIZE;
257 port->inbufchunk = IN_DESCR_SIZE;
258 port->next_rx_desc = &port->in_descr[0];
259 port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR-1];
260 port->prev_rx_desc->eol = 1;
261
262 init_waitqueue_head(&port->out_wait_q);
263 init_waitqueue_head(&port->in_wait_q);
264
265 spin_lock_init(&port->lock);
266
267 cfg.out_clk_src = regk_sser_intern_clk;
268 cfg.out_clk_pol = regk_sser_pos;
269 cfg.clk_od_mode = regk_sser_no;
270 cfg.clk_dir = regk_sser_out;
271 cfg.gate_clk = regk_sser_no;
272 cfg.base_freq = regk_sser_f29_493;
273 cfg.clk_div = 256;
274 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
275
276 frm_cfg.wordrate = DEFAULT_WORD_RATE;
277 frm_cfg.type = regk_sser_edge;
278 frm_cfg.frame_pin_dir = regk_sser_out;
279 frm_cfg.frame_pin_use = regk_sser_frm;
280 frm_cfg.status_pin_dir = regk_sser_in;
281 frm_cfg.status_pin_use = regk_sser_hold;
282 frm_cfg.out_on = regk_sser_tr;
283 frm_cfg.tr_delay = 1;
284 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
285
286 tr_cfg.urun_stop = regk_sser_no;
287 tr_cfg.sample_size = 7;
288 tr_cfg.sh_dir = regk_sser_msbfirst;
289 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
290 tr_cfg.rate_ctrl = regk_sser_bulk;
291 tr_cfg.data_pin_use = regk_sser_dout;
292 tr_cfg.bulk_wspace = 1;
293 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
294
295 rec_cfg.sample_size = 7;
296 rec_cfg.sh_dir = regk_sser_msbfirst;
297 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
298 rec_cfg.fifo_thr = regk_sser_inf;
299 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
300}
301
302static inline int sync_data_avail(struct sync_port *port)
303{
304 int avail;
305 unsigned char *start;
306 unsigned char *end;
307
308 start = (unsigned char*)port->readp; /* cast away volatile */
309 end = (unsigned char*)port->writep; /* cast away volatile */
310 /* 0123456789 0123456789
311 * ----- - -----
312 * ^rp ^wp ^wp ^rp
313 */
314
315 if (end >= start)
316 avail = end - start;
317 else
318 avail = port->in_buffer_size - (start - end);
319 return avail;
320}
321
322static inline int sync_data_avail_to_end(struct sync_port *port)
323{
324 int avail;
325 unsigned char *start;
326 unsigned char *end;
327
328 start = (unsigned char*)port->readp; /* cast away volatile */
329 end = (unsigned char*)port->writep; /* cast away volatile */
330 /* 0123456789 0123456789
331 * ----- -----
332 * ^rp ^wp ^wp ^rp
333 */
334
335 if (end >= start)
336 avail = end - start;
337 else
338 avail = port->flip + port->in_buffer_size - start;
339 return avail;
340}
341
342static int sync_serial_open(struct inode *inode, struct file *file)
343{
344 int dev = MINOR(inode->i_rdev);
345 sync_port* port;
346 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
347 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
348
349 DEBUG(printk("Open sync serial port %d\n", dev));
350
351 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
352 {
353 DEBUG(printk("Invalid minor %d\n", dev));
354 return -ENODEV;
355 }
356 port = &ports[dev];
357 /* Allow open this device twice (assuming one reader and one writer) */
358 if (port->busy == 2)
359 {
360 DEBUG(printk("Device is busy.. \n"));
361 return -EBUSY;
362 }
363 if (port->init_irqs) {
364 if (port->use_dma) {
365 if (port == &ports[0]){
366#ifdef SYNC_SER_DMA
367 if(request_irq(DMA4_INTR_VECT,
368 tr_interrupt,
369 0,
370 "synchronous serial 0 dma tr",
371 &ports[0])) {
372 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
373 return -EBUSY;
374 } else if(request_irq(DMA5_INTR_VECT,
375 rx_interrupt,
376 0,
377				       "synchronous serial 0 dma rx",
378 &ports[0])) {
379				free_irq(DMA4_INTR_VECT, &ports[0]);
380 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
381 return -EBUSY;
382 } else if (crisv32_request_dma(SYNC_SER0_TX_DMA_NBR,
383 "synchronous serial 0 dma tr",
384 DMA_VERBOSE_ON_ERROR,
385 0,
386 dma_sser0)) {
387				free_irq(DMA4_INTR_VECT, &ports[0]);
388				free_irq(DMA5_INTR_VECT, &ports[0]);
389 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
390 return -EBUSY;
391 } else if (crisv32_request_dma(SYNC_SER0_RX_DMA_NBR,
392 "synchronous serial 0 dma rec",
393 DMA_VERBOSE_ON_ERROR,
394 0,
395 dma_sser0)) {
396 crisv32_free_dma(SYNC_SER0_TX_DMA_NBR);
397				crisv32_free_dma(SYNC_SER0_TX_DMA_NBR);
398				free_irq(DMA4_INTR_VECT, &ports[0]);
399				free_irq(DMA5_INTR_VECT, &ports[0]);
400 return -EBUSY;
401 }
402#endif
403 }
404 else if (port == &ports[1]){
405#ifdef SYNC_SER_DMA
406 if (request_irq(DMA6_INTR_VECT,
407 tr_interrupt,
408 0,
409 "synchronous serial 1 dma tr",
410 &ports[1])) {
411 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
412 return -EBUSY;
413 } else if (request_irq(DMA7_INTR_VECT,
414 rx_interrupt,
415 0,
416 "synchronous serial 1 dma rx",
417 &ports[1])) {
418 free_irq(DMA6_INTR_VECT, &ports[1]);
419				printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
420 return -EBUSY;
421 } else if (crisv32_request_dma(SYNC_SER1_TX_DMA_NBR,
422 "synchronous serial 1 dma tr",
423 DMA_VERBOSE_ON_ERROR,
424 0,
425 dma_sser1)) {
426				free_irq(DMA6_INTR_VECT, &ports[1]);
427				free_irq(DMA7_INTR_VECT, &ports[1]);
428				printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
429 return -EBUSY;
430 } else if (crisv32_request_dma(SYNC_SER1_RX_DMA_NBR,
431						  "synchronous serial 1 dma rec",
432 DMA_VERBOSE_ON_ERROR,
433 0,
434 dma_sser1)) {
435 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
436 free_irq(DMA6_INTR_VECT, &ports[1]);
437 free_irq(DMA7_INTR_VECT, &ports[1]);
438				printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
439 return -EBUSY;
440 }
441#endif
442 }
443
444 /* Enable DMAs */
445 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
446 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
447 /* Enable DMA IRQs */
448 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
449 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
450 /* Set up wordsize = 2 for DMAs. */
451 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
452 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
453
454 start_dma_in(port);
455 port->init_irqs = 0;
456 } else { /* !port->use_dma */
457#ifdef SYNC_SER_MANUAL
458 if (port == &ports[0]) {
459 if (request_irq(SSER0_INTR_VECT,
460 manual_interrupt,
461 0,
462 "synchronous serial manual irq",
463 &ports[0])) {
464 printk("Can't allocate sync serial manual irq");
465 return -EBUSY;
466 }
467 } else if (port == &ports[1]) {
468 if (request_irq(SSER1_INTR_VECT,
469 manual_interrupt,
470 0,
471 "synchronous serial manual irq",
472 &ports[1])) {
473 printk(KERN_CRIT "Can't allocate sync serial manual irq");
474 return -EBUSY;
475 }
476 }
477 port->init_irqs = 0;
478#else
479 panic("sync_serial: Manual mode not supported.\n");
480#endif /* SYNC_SER_MANUAL */
481 }
482 } /* port->init_irqs */
483
484 port->busy++;
485 return 0;
486}
487
488static int sync_serial_release(struct inode *inode, struct file *file)
489{
490 int dev = MINOR(inode->i_rdev);
491 sync_port* port;
492
493 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
494 {
495 DEBUG(printk("Invalid minor %d\n", dev));
496 return -ENODEV;
497 }
498 port = &ports[dev];
499 if (port->busy)
500 port->busy--;
501 if (!port->busy)
502 /* XXX */ ;
503 return 0;
504}
505
506static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
507{
508 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
509 unsigned int mask = 0;
510 sync_port* port;
511 DEBUGPOLL( static unsigned int prev_mask = 0; );
512
513 port = &ports[dev];
514 poll_wait(file, &port->out_wait_q, wait);
515 poll_wait(file, &port->in_wait_q, wait);
516 /* Some room to write */
517 if (port->out_count < OUT_BUFFER_SIZE)
518 mask |= POLLOUT | POLLWRNORM;
519 /* At least an inbufchunk of data */
520 if (sync_data_avail(port) >= port->inbufchunk)
521 mask |= POLLIN | POLLRDNORM;
522
523 DEBUGPOLL(if (mask != prev_mask)
524 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
525 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
526 prev_mask = mask;
527 );
528 return mask;
529}
530
531static int sync_serial_ioctl(struct inode *inode, struct file *file,
532 unsigned int cmd, unsigned long arg)
533{
534 int return_val = 0;
535 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
536 sync_port* port;
537 reg_sser_rw_tr_cfg tr_cfg;
538 reg_sser_rw_rec_cfg rec_cfg;
539 reg_sser_rw_frm_cfg frm_cfg;
540 reg_sser_rw_cfg gen_cfg;
541 reg_sser_rw_intr_mask intr_mask;
542
543 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
544 {
545 DEBUG(printk("Invalid minor %d\n", dev));
546 return -1;
547 }
548 port = &ports[dev];
549 spin_lock_irq(&port->lock);
550
551 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
552 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
553 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
554 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
555 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
556
557 switch(cmd)
558 {
559 case SSP_SPEED:
560 if (GET_SPEED(arg) == CODEC)
561 {
562 gen_cfg.base_freq = regk_sser_f32;
563 /* FREQ = 0 => 4 MHz => clk_div = 7*/
564 gen_cfg.clk_div = 6 + (1 << GET_FREQ(arg));
565 }
566 else
567 {
568 gen_cfg.base_freq = regk_sser_f29_493;
569 switch (GET_SPEED(arg))
570 {
571 case SSP150:
572 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
573 break;
574 case SSP300:
575 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
576 break;
577 case SSP600:
578 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
579 break;
580 case SSP1200:
581 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
582 break;
583 case SSP2400:
584 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
585 break;
586 case SSP4800:
587 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
588 break;
589 case SSP9600:
590 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
591 break;
592 case SSP19200:
593 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
594 break;
595 case SSP28800:
596 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
597 break;
598 case SSP57600:
599 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
600 break;
601 case SSP115200:
602 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
603 break;
604 case SSP230400:
605 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
606 break;
607 case SSP460800:
608 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
609 break;
610 case SSP921600:
611 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
612 break;
613 case SSP3125000:
614 gen_cfg.base_freq = regk_sser_f100;
615 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
616 break;
617
618 }
619 }
620 frm_cfg.wordrate = GET_WORD_RATE(arg);
621
622 break;
623 case SSP_MODE:
624 switch(arg)
625 {
626 case MASTER_OUTPUT:
627 port->output = 1;
628 port->input = 0;
629 gen_cfg.clk_dir = regk_sser_out;
630 break;
631 case SLAVE_OUTPUT:
632 port->output = 1;
633 port->input = 0;
634 gen_cfg.clk_dir = regk_sser_in;
635 break;
636 case MASTER_INPUT:
637 port->output = 0;
638 port->input = 1;
639 gen_cfg.clk_dir = regk_sser_out;
640 break;
641 case SLAVE_INPUT:
642 port->output = 0;
643 port->input = 1;
644 gen_cfg.clk_dir = regk_sser_in;
645 break;
646 case MASTER_BIDIR:
647 port->output = 1;
648 port->input = 1;
649 gen_cfg.clk_dir = regk_sser_out;
650 break;
651 case SLAVE_BIDIR:
652 port->output = 1;
653 port->input = 1;
654 gen_cfg.clk_dir = regk_sser_in;
655 break;
656 default:
657 spin_unlock_irq(&port->lock);
658 return -EINVAL;
659
660 }
661 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
662 intr_mask.rdav = regk_sser_yes;
663 break;
664 case SSP_FRAME_SYNC:
665 if (arg & NORMAL_SYNC)
666 frm_cfg.tr_delay = 1;
667 else if (arg & EARLY_SYNC)
668 frm_cfg.tr_delay = 0;
669
670 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
671 frm_cfg.early_wend = regk_sser_yes;
672 if (arg & BIT_SYNC)
673 frm_cfg.type = regk_sser_edge;
674 else if (arg & WORD_SYNC)
675 frm_cfg.type = regk_sser_level;
676 else if (arg & EXTENDED_SYNC)
677 frm_cfg.early_wend = regk_sser_no;
678
679 if (arg & SYNC_ON)
680 frm_cfg.frame_pin_use = regk_sser_frm;
681 else if (arg & SYNC_OFF)
682 frm_cfg.frame_pin_use = regk_sser_gio0;
683
684 if (arg & WORD_SIZE_8)
685 rec_cfg.sample_size = tr_cfg.sample_size = 7;
686 else if (arg & WORD_SIZE_12)
687 rec_cfg.sample_size = tr_cfg.sample_size = 11;
688 else if (arg & WORD_SIZE_16)
689 rec_cfg.sample_size = tr_cfg.sample_size = 15;
690 else if (arg & WORD_SIZE_24)
691 rec_cfg.sample_size = tr_cfg.sample_size = 23;
692 else if (arg & WORD_SIZE_32)
693 rec_cfg.sample_size = tr_cfg.sample_size = 31;
694
695 if (arg & BIT_ORDER_MSB)
696 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
697 else if (arg & BIT_ORDER_LSB)
698 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
699
700 if (arg & FLOW_CONTROL_ENABLE)
701 rec_cfg.fifo_thr = regk_sser_thr16;
702 else if (arg & FLOW_CONTROL_DISABLE)
703 rec_cfg.fifo_thr = regk_sser_inf;
704
705 if (arg & CLOCK_NOT_GATED)
706 gen_cfg.gate_clk = regk_sser_no;
707 else if (arg & CLOCK_GATED)
708 gen_cfg.gate_clk = regk_sser_yes;
709
710 break;
711 case SSP_IPOLARITY:
712 /* NOTE!! negedge is considered NORMAL */
713 if (arg & CLOCK_NORMAL)
714 rec_cfg.clk_pol = regk_sser_neg;
715 else if (arg & CLOCK_INVERT)
716 rec_cfg.clk_pol = regk_sser_pos;
717
718 if (arg & FRAME_NORMAL)
719 frm_cfg.level = regk_sser_pos_hi;
720 else if (arg & FRAME_INVERT)
721 frm_cfg.level = regk_sser_neg_lo;
722
723 if (arg & STATUS_NORMAL)
724 gen_cfg.hold_pol = regk_sser_pos;
725 else if (arg & STATUS_INVERT)
726 gen_cfg.hold_pol = regk_sser_neg;
727 break;
728 case SSP_OPOLARITY:
729 if (arg & CLOCK_NORMAL)
730 gen_cfg.out_clk_pol = regk_sser_neg;
731 else if (arg & CLOCK_INVERT)
732 gen_cfg.out_clk_pol = regk_sser_pos;
733
734 if (arg & FRAME_NORMAL)
735 frm_cfg.level = regk_sser_pos_hi;
736 else if (arg & FRAME_INVERT)
737 frm_cfg.level = regk_sser_neg_lo;
738
739 if (arg & STATUS_NORMAL)
740 gen_cfg.hold_pol = regk_sser_pos;
741 else if (arg & STATUS_INVERT)
742 gen_cfg.hold_pol = regk_sser_neg;
743 break;
744 case SSP_SPI:
745 rec_cfg.fifo_thr = regk_sser_inf;
746 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
747 rec_cfg.sample_size = tr_cfg.sample_size = 7;
748 frm_cfg.frame_pin_use = regk_sser_frm;
749 frm_cfg.type = regk_sser_level;
750 frm_cfg.tr_delay = 1;
751 frm_cfg.level = regk_sser_neg_lo;
752 if (arg & SPI_SLAVE)
753 {
754 rec_cfg.clk_pol = regk_sser_neg;
755 gen_cfg.clk_dir = regk_sser_in;
756 port->input = 1;
757 port->output = 0;
758 }
759 else
760 {
761 gen_cfg.out_clk_pol = regk_sser_pos;
762 port->input = 0;
763 port->output = 1;
764 gen_cfg.clk_dir = regk_sser_out;
765 }
766 break;
767 case SSP_INBUFCHUNK:
768 break;
769 default:
770 return_val = -1;
771 }
772
773
774 if (port->started)
775 {
776 tr_cfg.tr_en = port->output;
777 rec_cfg.rec_en = port->input;
778 }
779
780 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
781 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
782 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
783 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
784 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
785
786 spin_unlock_irq(&port->lock);
787 return return_val;
788}
789
790static ssize_t sync_serial_write(struct file * file, const char * buf,
791 size_t count, loff_t *ppos)
792{
793 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
794 DECLARE_WAITQUEUE(wait, current);
795 sync_port *port;
796 unsigned long c, c1;
797 unsigned long free_outp;
798 unsigned long outp;
799 unsigned long out_buffer;
800 unsigned long flags;
801
802 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
803 {
804 DEBUG(printk("Invalid minor %d\n", dev));
805 return -ENODEV;
806 }
807 port = &ports[dev];
808
809 DEBUGWRITE(printk("W d%d c %lu (%d/%d)\n", port->port_nbr, count, port->out_count, OUT_BUFFER_SIZE));
810 /* Space to end of buffer */
811 /*
812 * out_buffer <c1>012345<- c ->OUT_BUFFER_SIZE
813 * outp^ +out_count
814 ^free_outp
815 * out_buffer 45<- c ->0123OUT_BUFFER_SIZE
816 * +out_count outp^
817 * free_outp
818 *
819 */
820
821 /* Read variables that may be updated by interrupts */
822 spin_lock_irqsave(&port->lock, flags);
823 count = count > OUT_BUFFER_SIZE - port->out_count ? OUT_BUFFER_SIZE - port->out_count : count;
824 outp = (unsigned long)port->outp;
825 free_outp = outp + port->out_count;
826 spin_unlock_irqrestore(&port->lock, flags);
827 out_buffer = (unsigned long)port->out_buffer;
828
829 /* Find out where and how much to write */
830 if (free_outp >= out_buffer + OUT_BUFFER_SIZE)
831 free_outp -= OUT_BUFFER_SIZE;
832 if (free_outp >= outp)
833 c = out_buffer + OUT_BUFFER_SIZE - free_outp;
834 else
835 c = outp - free_outp;
836 if (c > count)
837 c = count;
838
839// DEBUGWRITE(printk("w op %08lX fop %08lX c %lu\n", outp, free_outp, c));
840 if (copy_from_user((void*)free_outp, buf, c))
841 return -EFAULT;
842
843 if (c != count) {
844 buf += c;
845 c1 = count - c;
846 DEBUGWRITE(printk("w2 fi %lu c %lu c1 %lu\n", free_outp-out_buffer, c, c1));
847 if (copy_from_user((void*)out_buffer, buf, c1))
848 return -EFAULT;
849 }
850 spin_lock_irqsave(&port->lock, flags);
851 port->out_count += count;
852 spin_unlock_irqrestore(&port->lock, flags);
853
854 /* Make sure transmitter/receiver is running */
855 if (!port->started)
856 {
857 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
858 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
859 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
860 cfg.en = regk_sser_yes;
861 tr_cfg.tr_en = port->output;
862 rec_cfg.rec_en = port->input;
863 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
864 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
865 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
866 port->started = 1;
867 }
868
869 if (file->f_flags & O_NONBLOCK) {
870 spin_lock_irqsave(&port->lock, flags);
871 if (!port->tr_running) {
872 if (!port->use_dma) {
873 reg_sser_rw_intr_mask intr_mask;
874 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
875 /* Start sender by writing data */
876 send_word(port);
877 /* and enable transmitter ready IRQ */
878 intr_mask.trdy = 1;
879 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
880 } else {
881 start_dma(port, (unsigned char* volatile )port->outp, c);
882 }
883 }
884 spin_unlock_irqrestore(&port->lock, flags);
885 DEBUGWRITE(printk("w d%d c %lu NB\n",
886 port->port_nbr, count));
887 return count;
888 }
889
890 /* Sleep until all sent */
891
892 add_wait_queue(&port->out_wait_q, &wait);
893 set_current_state(TASK_INTERRUPTIBLE);
894 spin_lock_irqsave(&port->lock, flags);
895 if (!port->tr_running) {
896 if (!port->use_dma) {
897 reg_sser_rw_intr_mask intr_mask;
898 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
899 /* Start sender by writing data */
900 send_word(port);
901 /* and enable transmitter ready IRQ */
902 intr_mask.trdy = 1;
903 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
904 } else {
905 start_dma(port, port->outp, c);
906 }
907 }
908 spin_unlock_irqrestore(&port->lock, flags);
909 schedule();
910 set_current_state(TASK_RUNNING);
911 remove_wait_queue(&port->out_wait_q, &wait);
912 if (signal_pending(current))
913 {
914 return -EINTR;
915 }
916 DEBUGWRITE(printk("w d%d c %lu\n", port->port_nbr, count));
917 return count;
918}
919
920static ssize_t sync_serial_read(struct file * file, char * buf,
921 size_t count, loff_t *ppos)
922{
923 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
924 int avail;
925 sync_port *port;
926 unsigned char* start;
927 unsigned char* end;
928 unsigned long flags;
929
930 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
931 {
932 DEBUG(printk("Invalid minor %d\n", dev));
933 return -ENODEV;
934 }
935 port = &ports[dev];
936
937 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
938
939 if (!port->started)
940 {
941 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
942 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
943 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
944 cfg.en = regk_sser_yes;
945 tr_cfg.tr_en = regk_sser_yes;
946 rec_cfg.rec_en = regk_sser_yes;
947 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
948 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
949 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
950 port->started = 1;
951 }
952
953
954 /* Calculate number of available bytes */
955 /* Save pointers to avoid that they are modified by interrupt */
956 spin_lock_irqsave(&port->lock, flags);
957 start = (unsigned char*)port->readp; /* cast away volatile */
958 end = (unsigned char*)port->writep; /* cast away volatile */
959 spin_unlock_irqrestore(&port->lock, flags);
960 while ((start == end) && !port->full) /* No data */
961 {
962 if (file->f_flags & O_NONBLOCK)
963 {
964 return -EAGAIN;
965 }
966
967 interruptible_sleep_on(&port->in_wait_q);
968 if (signal_pending(current))
969 {
970 return -EINTR;
971 }
972 spin_lock_irqsave(&port->lock, flags);
973 start = (unsigned char*)port->readp; /* cast away volatile */
974 end = (unsigned char*)port->writep; /* cast away volatile */
975 spin_unlock_irqrestore(&port->lock, flags);
976 }
977
978 /* Lazy read, never return wrapped data. */
979 if (port->full)
980 avail = port->in_buffer_size;
981 else if (end > start)
982 avail = end - start;
983 else
984 avail = port->flip + port->in_buffer_size - start;
985
986 count = count > avail ? avail : count;
987 if (copy_to_user(buf, start, count))
988 return -EFAULT;
989 /* Disable interrupts while updating readp */
990 spin_lock_irqsave(&port->lock, flags);
991 port->readp += count;
992 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
993 port->readp = port->flip;
994 port->full = 0;
995 spin_unlock_irqrestore(&port->lock, flags);
996 DEBUGREAD(printk("r %d\n", count));
997 return count;
998}
999
1000static void send_word(sync_port* port)
1001{
1002 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1003 reg_sser_rw_tr_data tr_data = {0};
1004
1005 switch(tr_cfg.sample_size)
1006 {
1007 case 8:
1008 port->out_count--;
1009 tr_data.data = *port->outp++;
1010 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1011 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1012 port->outp = port->out_buffer;
1013 break;
1014 case 12:
1015 {
1016 int data = (*port->outp++) << 8;
1017 data |= *port->outp++;
1018 port->out_count-=2;
1019 tr_data.data = data;
1020 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1021 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1022 port->outp = port->out_buffer;
1023 }
1024 break;
1025 case 16:
1026 port->out_count-=2;
1027 tr_data.data = *(unsigned short *)port->outp;
1028 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1029 port->outp+=2;
1030 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1031 port->outp = port->out_buffer;
1032 break;
1033 case 24:
1034 port->out_count-=3;
1035 tr_data.data = *(unsigned short *)port->outp;
1036 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1037 port->outp+=2;
1038 tr_data.data = *port->outp++;
1039 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1040 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1041 port->outp = port->out_buffer;
1042 break;
1043 case 32:
1044 port->out_count-=4;
1045 tr_data.data = *(unsigned short *)port->outp;
1046 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1047 port->outp+=2;
1048 tr_data.data = *(unsigned short *)port->outp;
1049 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1050 port->outp+=2;
1051 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1052 port->outp = port->out_buffer;
1053 break;
1054 }
1055}
1056
1057
1058static void start_dma(struct sync_port* port, const char* data, int count)
1059{
1060 port->tr_running = 1;
1061 port->out_descr.buf = (char*)virt_to_phys((char*)data);
1062 port->out_descr.after = port->out_descr.buf + count;
1063 port->out_descr.eol = port->out_descr.intr = 1;
1064
1065 port->out_context.saved_data = (dma_descr_data*)virt_to_phys(&port->out_descr);
1066 port->out_context.saved_data_buf = port->out_descr.buf;
1067
1068 DMA_START_CONTEXT(port->regi_dmaout, virt_to_phys((char*)&port->out_context));
1069 DEBUGTXINT(printk("dma %08lX c %d\n", (unsigned long)data, count));
1070}
1071
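/*
 * Set up the circular receive descriptor ring described at the top of
 * this file: NUM_IN_DESCR descriptors, each covering inbufchunk bytes of
 * in_buffer, linked into a ring with the last one marked end-of-list,
 * after which the context is handed to the input DMA channel.
 */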
1072static void start_dma_in(sync_port* port)
1073{
1074 int i;
1075 char* buf;
1076 port->writep = port->flip;
1077
1078 if (port->writep > port->flip + port->in_buffer_size)
1079 {
1080 panic("Offset too large in sync serial driver\n");
1081 return;
1082 }
1083 buf = (char*)virt_to_phys(port->in_buffer);
1084 for (i = 0; i < NUM_IN_DESCR; i++) {
1085 port->in_descr[i].buf = buf;
1086 port->in_descr[i].after = buf + port->inbufchunk;
1087 port->in_descr[i].intr = 1;
1088 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1089 port->in_descr[i].buf = buf;
1090 buf += port->inbufchunk;
1091 }
1092 /* Link the last descriptor to the first */
1093 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1094 port->in_descr[i-1].eol = regk_sser_yes;
1095 port->next_rx_desc = &port->in_descr[0];
1096 port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR - 1];
1097 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1098 port->in_context.saved_data_buf = port->in_descr[0].buf;
1099 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1100}
1101
1102#ifdef SYNC_SER_DMA
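/*
 * Output DMA interrupt: for each port with a pending interrupt,
 * acknowledge it, account for the bytes just sent (advancing outp with
 * wrap-around), restart the DMA if more data is buffered, and wake up
 * any writer sleeping in sync_serial_write().
 */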
1103static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1104{
1105 reg_dma_r_masked_intr masked;
1106 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1107 int i;
1108 struct dma_descr_data *descr;
1109 unsigned int sentl;
1110 int found = 0;
1111
1112 for (i = 0; i < NUMBER_OF_PORTS; i++)
1113 {
1114 sync_port *port = &ports[i];
1115 if (!port->enabled || !port->use_dma )
1116 continue;
1117
1118 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1119
1120 if (masked.data) /* IRQ active for the port? */
1121 {
1122 found = 1;
1123 /* Clear IRQ */
1124 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1125 descr = &port->out_descr;
1126 sentl = descr->after - descr->buf;
1127 port->out_count -= sentl;
1128 port->outp += sentl;
1129 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1130 port->outp = port->out_buffer;
1131 if (port->out_count) {
1132 int c;
1133 c = port->out_buffer + OUT_BUFFER_SIZE - port->outp;
1134 if (c > port->out_count)
1135 c = port->out_count;
1136 DEBUGTXINT(printk("tx_int DMAWRITE %i %i\n", sentl, c));
1137 start_dma(port, port->outp, c);
1138 } else {
1139 DEBUGTXINT(printk("tx_int DMA stop %i\n", sentl));
1140 port->tr_running = 0;
1141 }
1142 wake_up_interruptible(&port->out_wait_q); /* wake up the waiting process */
1143 }
1144 }
1145 return IRQ_RETVAL(found);
1146} /* tr_interrupt */
1147
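/*
 * Input DMA interrupt: drain every descriptor the DMA has completed
 * since the last interrupt, copying each inbufchunk from in_buffer into
 * the flip buffer (handling wrap-around and flagging overrun through
 * port->full), move the end-of-list marker forward so the ring keeps
 * running, and wake up any reader.
 */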
1148static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1149{
1150 reg_dma_r_masked_intr masked;
1151 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1152
1153 int i;
1154 int found = 0;
1155
1156 for (i = 0; i < NUMBER_OF_PORTS; i++)
1157 {
1158 sync_port *port = &ports[i];
1159
1160 if (!port->enabled || !port->use_dma )
1161 continue;
1162
1163 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1164
1165 if (masked.data) /* Descriptor interrupt */
1166 {
1167 found = 1;
1168 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1169 virt_to_phys(port->next_rx_desc)) {
1170
1171 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1172 int first_size = port->flip + port->in_buffer_size - port->writep;
1173 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1174 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1175 port->writep = port->flip + port->inbufchunk - first_size;
1176 } else {
1177 memcpy((char*)port->writep,
1178 phys_to_virt((unsigned)port->next_rx_desc->buf),
1179 port->inbufchunk);
1180 port->writep += port->inbufchunk;
1181 if (port->writep >= port->flip + port->in_buffer_size)
1182 port->writep = port->flip;
1183 }
1184 if (port->writep == port->readp)
1185 {
1186 port->full = 1;
1187 }
1188
1189 port->next_rx_desc->eol = 0;
1190 port->prev_rx_desc->eol = 1;
1191 port->prev_rx_desc = phys_to_virt((unsigned)port->next_rx_desc);
1192 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1193 wake_up_interruptible(&port->in_wait_q); /* wake up the waiting process */
1194 DMA_CONTINUE(port->regi_dmain);
1195 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1196
1197 }
1198 }
1199 }
1200 return IRQ_RETVAL(found);
1201} /* rx_interrupt */
1202#endif /* SYNC_SER_DMA */
1203
1204#ifdef SYNC_SER_MANUAL
1205static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1206{
1207 int i;
1208 int found = 0;
1209 reg_sser_r_masked_intr masked;
1210
1211 for (i = 0; i < NUMBER_OF_PORTS; i++)
1212 {
1213 sync_port* port = &ports[i];
1214
1215 if (!port->enabled || port->use_dma)
1216 {
1217 continue;
1218 }
1219
1220 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1221 if (masked.rdav) /* Data received? */
1222 {
1223 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1224 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
1225 found = 1;
1226 /* Read data */
1227 switch(rec_cfg.sample_size)
1228 {
1229 case 8:
1230 *port->writep++ = data.data & 0xff;
1231 break;
1232 case 12:
1233 *port->writep = (data.data & 0x0ff0) >> 4;
1234 *(port->writep + 1) = data.data & 0x0f;
1235 port->writep+=2;
1236 break;
1237 case 16:
1238 *(unsigned short*)port->writep = data.data;
1239 port->writep+=2;
1240 break;
1241 case 24:
1242 *(unsigned int*)port->writep = data.data;
1243 port->writep+=3;
1244 break;
1245 case 32:
1246 *(unsigned int*)port->writep = data.data;
1247 port->writep+=4;
1248 break;
1249 }
1250
1251 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1252 port->writep = port->flip;
1253 if (port->writep == port->readp) {
1254 /* receive buffer overrun, discard oldest data
1255 */
1256 port->readp++;
1257 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1258 port->readp = port->flip;
1259 }
1260 if (sync_data_avail(port) >= port->inbufchunk)
1261 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1262 }
1263
1264 if (masked.trdy) /* Transmitter ready? */
1265 {
1266 found = 1;
1267 if (port->out_count > 0) /* More data to send */
1268 send_word(port);
1269 else /* transmission finished */
1270 {
1271 reg_sser_rw_intr_mask intr_mask;
1272 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1273 intr_mask.trdy = 0;
1274 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1275 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1276 }
1277 }
1278 }
1279 return IRQ_RETVAL(found);
1280}
1281#endif
1282
1283module_init(etrax_sync_serial_init);
diff --git a/arch/cris/arch-v32/kernel/Makefile b/arch/cris/arch-v32/kernel/Makefile
new file mode 100644
index 000000000000..5d5b613cde8c
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/Makefile
@@ -0,0 +1,21 @@
1# $Id: Makefile,v 1.11 2004/12/17 10:16:13 starvik Exp $
2#
3# Makefile for the linux kernel.
4#
5
6extra-y := head.o
7
8
9obj-y := entry.o traps.o irq.o debugport.o dma.o pinmux.o \
10	process.o ptrace.o setup.o signal.o time.o \
11 arbiter.o io.o
12
13obj-$(CONFIG_ETRAXFS_SIM) += vcs_hook.o
14
15obj-$(CONFIG_SMP) += smp.o
16obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o
17obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o
18obj-$(CONFIG_MODULES) += crisksyms.o
19
20clean:
21
diff --git a/arch/cris/arch-v32/kernel/arbiter.c b/arch/cris/arch-v32/kernel/arbiter.c
new file mode 100644
index 000000000000..3870d2fd5160
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/arbiter.c
@@ -0,0 +1,297 @@
1/*
2 * Memory arbiter functions. Allocates bandwidth through the
3 * arbiter and sets up arbiter breakpoints.
4 *
5 * The algorithm first assigns slots to the clients that have specified
6 * bandwidth (e.g. ethernet), and the remaining slots are then divided
7 * among all the active clients.
8 *
9 * Copyright (c) 2004, 2005 Axis Communications AB.
10 */
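/*
 * Worked example (figures illustrative): a client requesting a bandwidth of
 * 25000000 against the assumed SDRAM_BANDWIDTH of 100000000 is given
 *	NBR_OF_SLOTS / (100000000 / 25000000) = 64 / 4 = 16
 * of the 64 arbitration slots, placed every fourth slot; slots still free
 * afterwards are handed out round-robin among the remaining active clients.
 */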
11
12#include <linux/config.h>
13#include <asm/arch/hwregs/reg_map.h>
14#include <asm/arch/hwregs/reg_rdwr.h>
15#include <asm/arch/hwregs/marb_defs.h>
16#include <asm/arch/arbiter.h>
17#include <asm/arch/hwregs/intr_vect.h>
18#include <linux/interrupt.h>
19#include <linux/signal.h>
20#include <linux/errno.h>
21#include <linux/spinlock.h>
22#include <asm/io.h>
23
24struct crisv32_watch_entry
25{
26 unsigned long instance;
27 watch_callback* cb;
28 unsigned long start;
29 unsigned long end;
30 int used;
31};
32
33#define NUMBER_OF_BP 4
34#define NBR_OF_CLIENTS 14
35#define NBR_OF_SLOTS 64
36#define SDRAM_BANDWIDTH 100000000 /* Rough estimate of available SDRAM bandwidth */
37#define INTMEM_BANDWIDTH 400000000
38#define NBR_OF_REGIONS 2
39
40static struct crisv32_watch_entry watches[NUMBER_OF_BP] =
41{
42 {regi_marb_bp0},
43 {regi_marb_bp1},
44 {regi_marb_bp2},
45 {regi_marb_bp3}
46};
47
48static int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
49static int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
50static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};
51
52DEFINE_SPINLOCK(arbiter_lock);
53
54static irqreturn_t
55crisv32_arbiter_irq(int irq, void* dev_id, struct pt_regs* regs);
56
57static void crisv32_arbiter_config(int region)
58{
59 int slot;
60 int client;
61 int interval = 0;
62 int val[NBR_OF_SLOTS];
63
64 for (slot = 0; slot < NBR_OF_SLOTS; slot++)
65 val[slot] = NBR_OF_CLIENTS + 1;
66
67 for (client = 0; client < NBR_OF_CLIENTS; client++)
68 {
69 int pos;
70 if (!requested_slots[region][client])
71 continue;
72 interval = NBR_OF_SLOTS / requested_slots[region][client];
73 pos = 0;
74 while (pos < NBR_OF_SLOTS)
75 {
76 if (val[pos] != NBR_OF_CLIENTS + 1)
77 pos++;
78 else
79 {
80 val[pos] = client;
81 pos += interval;
82 }
83 }
84 }
85
86 client = 0;
87 for (slot = 0; slot < NBR_OF_SLOTS; slot++)
88 {
89 if (val[slot] == NBR_OF_CLIENTS + 1)
90 {
91 int first = client;
92 while(!active_clients[region][client]) {
93 client = (client + 1) % NBR_OF_CLIENTS;
94 if (client == first)
95 break;
96 }
97 val[slot] = client;
98 client = (client + 1) % NBR_OF_CLIENTS;
99 }
100 if (region == EXT_REGION)
101 REG_WR_INT_VECT(marb, regi_marb, rw_ext_slots, slot, val[slot]);
102 else if (region == INT_REGION)
103 REG_WR_INT_VECT(marb, regi_marb, rw_int_slots, slot, val[slot]);
104 }
105}
106
107extern char _stext, _etext;
108
109static void crisv32_arbiter_init(void)
110{
111 static int initialized = 0;
112
113 if (initialized)
114 return;
115
116 initialized = 1;
117
118 /* CPU caches are active. */
119 active_clients[EXT_REGION][10] = active_clients[EXT_REGION][11] = 1;
120 crisv32_arbiter_config(EXT_REGION);
121 crisv32_arbiter_config(INT_REGION);
122
123 if (request_irq(MEMARB_INTR_VECT, crisv32_arbiter_irq, SA_INTERRUPT,
124 "arbiter", NULL))
125 printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");
126
127#ifndef CONFIG_ETRAX_KGDB
128 /* Global watch for writes to kernel text segment. */
129 crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
130 arbiter_all_clients, arbiter_all_write, NULL);
131#endif
132}
133
134
135
136int crisv32_arbiter_allocate_bandwith(int client, int region,
137 unsigned long bandwidth)
138{
139 int i;
140 int total_assigned = 0;
141 int total_clients = 0;
142 int req;
143
144 crisv32_arbiter_init();
145
146 for (i = 0; i < NBR_OF_CLIENTS; i++)
147 {
148 total_assigned += requested_slots[region][i];
149 total_clients += active_clients[region][i];
150 }
151 req = NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);
152
153 if (total_assigned + total_clients + req + 1 > NBR_OF_SLOTS)
154 return -ENOMEM;
155
156 active_clients[region][client] = 1;
157 requested_slots[region][client] = req;
158 crisv32_arbiter_config(region);
159
160 return 0;
161}
162
163int crisv32_arbiter_watch(unsigned long start, unsigned long size,
164 unsigned long clients, unsigned long accesses,
165 watch_callback* cb)
166{
167 int i;
168
169 crisv32_arbiter_init();
170
171 if (start > 0x80000000) {
172		printk("Arbiter: %lX doesn't look like a physical address\n", start);
173 return -EFAULT;
174 }
175
176 spin_lock(&arbiter_lock);
177
178 for (i = 0; i < NUMBER_OF_BP; i++) {
179 if (!watches[i].used) {
180 reg_marb_rw_intr_mask intr_mask = REG_RD(marb, regi_marb, rw_intr_mask);
181
182 watches[i].used = 1;
183 watches[i].start = start;
184 watches[i].end = start + size;
185 watches[i].cb = cb;
186
187 REG_WR_INT(marb_bp, watches[i].instance, rw_first_addr, watches[i].start);
188 REG_WR_INT(marb_bp, watches[i].instance, rw_last_addr, watches[i].end);
189 REG_WR_INT(marb_bp, watches[i].instance, rw_op, accesses);
190 REG_WR_INT(marb_bp, watches[i].instance, rw_clients, clients);
191
192 if (i == 0)
193 intr_mask.bp0 = regk_marb_yes;
194 else if (i == 1)
195 intr_mask.bp1 = regk_marb_yes;
196 else if (i == 2)
197 intr_mask.bp2 = regk_marb_yes;
198 else if (i == 3)
199 intr_mask.bp3 = regk_marb_yes;
200
201 REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);
202 spin_unlock(&arbiter_lock);
203
204 return i;
205 }
206 }
207 spin_unlock(&arbiter_lock);
208 return -ENOMEM;
209}
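/*
 * Hypothetical usage sketch (buffer, size and callback names are made up for
 * illustration): a driver wanting to trap writes to one of its buffers by
 * any bus client could do
 *
 *	static void my_watch_cb(void)
 *	{
 *		printk("watched buffer was written\n");
 *	}
 *	...
 *	int id = crisv32_arbiter_watch(virt_to_phys(my_buf), MY_BUF_SIZE,
 *				       arbiter_all_clients, arbiter_all_write,
 *				       my_watch_cb);
 *	...
 *	if (id >= 0)
 *		crisv32_arbiter_unwatch(id);
 *
 * This mirrors the global kernel-text watch set up in crisv32_arbiter_init().
 */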
210
211int crisv32_arbiter_unwatch(int id)
212{
213 reg_marb_rw_intr_mask intr_mask = REG_RD(marb, regi_marb, rw_intr_mask);
214
215 crisv32_arbiter_init();
216
217 spin_lock(&arbiter_lock);
218
219 if ((id < 0) || (id >= NUMBER_OF_BP) || (!watches[id].used)) {
220 spin_unlock(&arbiter_lock);
221 return -EINVAL;
222 }
223
224 memset(&watches[id], 0, sizeof(struct crisv32_watch_entry));
225
226 if (id == 0)
227 intr_mask.bp0 = regk_marb_no;
228 else if (id == 1)
229		intr_mask.bp1 = regk_marb_no;
230 else if (id == 2)
231 intr_mask.bp2 = regk_marb_no;
232 else if (id == 3)
233 intr_mask.bp3 = regk_marb_no;
234
235 REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);
236
237 spin_unlock(&arbiter_lock);
238 return 0;
239}
240
241extern void show_registers(struct pt_regs *regs);
242
243static irqreturn_t
244crisv32_arbiter_irq(int irq, void* dev_id, struct pt_regs* regs)
245{
246 reg_marb_r_masked_intr masked_intr = REG_RD(marb, regi_marb, r_masked_intr);
247 reg_marb_bp_r_brk_clients r_clients;
248 reg_marb_bp_r_brk_addr r_addr;
249 reg_marb_bp_r_brk_op r_op;
250 reg_marb_bp_r_brk_first_client r_first;
251 reg_marb_bp_r_brk_size r_size;
252 reg_marb_bp_rw_ack ack = {0};
253 reg_marb_rw_ack_intr ack_intr = {.bp0=1,.bp1=1,.bp2=1,.bp3=1};
254 struct crisv32_watch_entry* watch;
255
256 if (masked_intr.bp0) {
257 watch = &watches[0];
258 ack_intr.bp0 = regk_marb_yes;
259 } else if (masked_intr.bp1) {
260 watch = &watches[1];
261 ack_intr.bp1 = regk_marb_yes;
262 } else if (masked_intr.bp2) {
263 watch = &watches[2];
264 ack_intr.bp2 = regk_marb_yes;
265 } else if (masked_intr.bp3) {
266 watch = &watches[3];
267 ack_intr.bp3 = regk_marb_yes;
268 } else {
269 return IRQ_NONE;
270 }
271
272 /* Retrieve all useful information and print it. */
273 r_clients = REG_RD(marb_bp, watch->instance, r_brk_clients);
274 r_addr = REG_RD(marb_bp, watch->instance, r_brk_addr);
275 r_op = REG_RD(marb_bp, watch->instance, r_brk_op);
276 r_first = REG_RD(marb_bp, watch->instance, r_brk_first_client);
277 r_size = REG_RD(marb_bp, watch->instance, r_brk_size);
278
279 printk("Arbiter IRQ\n");
280 printk("Clients %X addr %X op %X first %X size %X\n",
281 REG_TYPE_CONV(int, reg_marb_bp_r_brk_clients, r_clients),
282 REG_TYPE_CONV(int, reg_marb_bp_r_brk_addr, r_addr),
283 REG_TYPE_CONV(int, reg_marb_bp_r_brk_op, r_op),
284 REG_TYPE_CONV(int, reg_marb_bp_r_brk_first_client, r_first),
285 REG_TYPE_CONV(int, reg_marb_bp_r_brk_size, r_size));
286
287 REG_WR(marb_bp, watch->instance, rw_ack, ack);
288 REG_WR(marb, regi_marb, rw_ack_intr, ack_intr);
289
290	printk("IRQ occurred at %lX\n", regs->erp);
291
292 if (watch->cb)
293 watch->cb();
294
295
296 return IRQ_HANDLED;
297}
diff --git a/arch/cris/arch-v32/kernel/asm-offsets.c b/arch/cris/arch-v32/kernel/asm-offsets.c
new file mode 100644
index 000000000000..15b3d93a0496
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/asm-offsets.c
@@ -0,0 +1,49 @@
1#include <linux/sched.h>
2#include <asm/thread_info.h>
3
4/*
5 * Generate definitions needed by assembly language modules.
6 * This code generates raw asm output which is post-processed to extract
7 * and format the required data.
8 */
9
10#define DEFINE(sym, val) \
11 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
12
13#define BLANK() asm volatile("\n->" : : )
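/*
 * For example (offset value illustrative only): ENTRY(r10) below expands to
 *	DEFINE(PT_r10, offsetof(struct pt_regs, r10));
 * which makes the compiler emit an assembly line carrying the constant value
 * of that offset; the Kbuild post-processing step turns it into something
 * like "#define PT_r10 40" in the generated asm/arch/offset.h used by entry.S.
 */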
14
15int main(void)
16{
17#define ENTRY(entry) DEFINE(PT_ ## entry, offsetof(struct pt_regs, entry))
18 ENTRY(orig_r10);
19 ENTRY(r13);
20 ENTRY(r12);
21 ENTRY(r11);
22 ENTRY(r10);
23 ENTRY(r9);
24 ENTRY(acr);
25 ENTRY(srs);
26 ENTRY(mof);
27 ENTRY(ccs);
28 ENTRY(srp);
29 BLANK();
30#undef ENTRY
31#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
32 ENTRY(task);
33 ENTRY(flags);
34 ENTRY(preempt_count);
35 BLANK();
36#undef ENTRY
37#define ENTRY(entry) DEFINE(THREAD_ ## entry, offsetof(struct thread_struct, entry))
38 ENTRY(ksp);
39 ENTRY(usp);
40 ENTRY(ccs);
41 BLANK();
42#undef ENTRY
43#define ENTRY(entry) DEFINE(TASK_ ## entry, offsetof(struct task_struct, entry))
44 ENTRY(pid);
45 BLANK();
46 DEFINE(LCLONE_VM, CLONE_VM);
47 DEFINE(LCLONE_UNTRACED, CLONE_UNTRACED);
48 return 0;
49}
diff --git a/arch/cris/arch-v32/kernel/crisksyms.c b/arch/cris/arch-v32/kernel/crisksyms.c
new file mode 100644
index 000000000000..2c3bb9a0afe2
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/crisksyms.c
@@ -0,0 +1,24 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/irq.h>
4#include <asm/arch/dma.h>
5#include <asm/arch/intmem.h>
6#include <asm/arch/pinmux.h>
7
8/* Functions for allocating DMA channels */
9EXPORT_SYMBOL(crisv32_request_dma);
10EXPORT_SYMBOL(crisv32_free_dma);
11
12/* Functions for handling internal RAM */
13EXPORT_SYMBOL(crisv32_intmem_alloc);
14EXPORT_SYMBOL(crisv32_intmem_free);
15EXPORT_SYMBOL(crisv32_intmem_phys_to_virt);
16EXPORT_SYMBOL(crisv32_intmem_virt_to_phys);
17
18/* Functions for handling pinmux */
19EXPORT_SYMBOL(crisv32_pinmux_alloc);
20EXPORT_SYMBOL(crisv32_pinmux_dealloc);
21
22/* Functions masking/unmasking interrupts */
23EXPORT_SYMBOL(mask_irq);
24EXPORT_SYMBOL(unmask_irq);
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
new file mode 100644
index 000000000000..ffc1ebf2dfee
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (C) 2003, Axis Communications AB.
3 */
4
5#include <linux/config.h>
6#include <linux/console.h>
7#include <linux/init.h>
8#include <linux/major.h>
9#include <linux/delay.h>
10#include <linux/tty.h>
11#include <asm/system.h>
12#include <asm/io.h>
13#include <asm/arch/hwregs/ser_defs.h>
14#include <asm/arch/hwregs/dma_defs.h>
15#include <asm/arch/pinmux.h>
16
17#include <asm/irq.h>
18#include <asm/arch/hwregs/intr_vect_defs.h>
19
20struct dbg_port
21{
22 unsigned char nbr;
23 unsigned long instance;
24 unsigned int started;
25 unsigned long baudrate;
26 unsigned char parity;
27 unsigned int bits;
28};
29
30struct dbg_port ports[] =
31{
32 {
33 0,
34 regi_ser0,
35 0,
36 115200,
37 'N',
38 8
39 },
40 {
41 1,
42 regi_ser1,
43 0,
44 115200,
45 'N',
46 8
47 },
48 {
49 2,
50 regi_ser2,
51 0,
52 115200,
53 'N',
54 8
55 },
56 {
57 3,
58 regi_ser3,
59 0,
60 115200,
61 'N',
62 8
63 }
64};
65static struct dbg_port *port =
66#if defined(CONFIG_ETRAX_DEBUG_PORT0)
67&ports[0];
68#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
69&ports[1];
70#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
71&ports[2];
72#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
73&ports[3];
74#else
75NULL;
76#endif
77
78#ifdef CONFIG_ETRAX_KGDB
79static struct dbg_port *kgdb_port =
80#if defined(CONFIG_ETRAX_KGDB_PORT0)
81&ports[0];
82#elif defined(CONFIG_ETRAX_KGDB_PORT1)
83&ports[1];
84#elif defined(CONFIG_ETRAX_KGDB_PORT2)
85&ports[2];
86#elif defined(CONFIG_ETRAX_KGDB_PORT3)
87&ports[3];
88#else
89NULL;
90#endif
91#endif
92
93#ifdef CONFIG_ETRAXFS_SIM
94extern void print_str( const char *str );
95static char buffer[1024];
96static char msg[] = "Debug: ";
97static int buffer_pos = sizeof(msg) - 1;
98#endif
99
100extern struct tty_driver *serial_driver;
101
102static void
103start_port(struct dbg_port* p)
104{
105 if (!p)
106 return;
107
108 if (p->started)
109 return;
110 p->started = 1;
111
112 if (p->nbr == 1)
113 crisv32_pinmux_alloc_fixed(pinmux_ser1);
114 else if (p->nbr == 2)
115 crisv32_pinmux_alloc_fixed(pinmux_ser2);
116 else if (p->nbr == 3)
117 crisv32_pinmux_alloc_fixed(pinmux_ser3);
118
119 /* Set up serial port registers */
120 reg_ser_rw_tr_ctrl tr_ctrl = {0};
121 reg_ser_rw_tr_dma_en tr_dma_en = {0};
122
123 reg_ser_rw_rec_ctrl rec_ctrl = {0};
124 reg_ser_rw_tr_baud_div tr_baud_div = {0};
125 reg_ser_rw_rec_baud_div rec_baud_div = {0};
126
127 tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
128 tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
129 tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
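	/* e.g. 115200 baud with the 29.493 MHz base clock: 29493000 / 115200 / 8 = 32 */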
130 tr_ctrl.en = rec_ctrl.en = 1;
131
132 if (p->parity == 'O')
133 {
134 tr_ctrl.par_en = regk_ser_yes;
135 tr_ctrl.par = regk_ser_odd;
136 rec_ctrl.par_en = regk_ser_yes;
137 rec_ctrl.par = regk_ser_odd;
138 }
139 else if (p->parity == 'E')
140 {
141 tr_ctrl.par_en = regk_ser_yes;
142 tr_ctrl.par = regk_ser_even;
143 rec_ctrl.par_en = regk_ser_yes;
144		rec_ctrl.par = regk_ser_even;
145 }
146
147 if (p->bits == 7)
148 {
149 tr_ctrl.data_bits = regk_ser_bits7;
150 rec_ctrl.data_bits = regk_ser_bits7;
151 }
152
153 REG_WR (ser, p->instance, rw_tr_baud_div, tr_baud_div);
154 REG_WR (ser, p->instance, rw_rec_baud_div, rec_baud_div);
155 REG_WR (ser, p->instance, rw_tr_dma_en, tr_dma_en);
156 REG_WR (ser, p->instance, rw_tr_ctrl, tr_ctrl);
157 REG_WR (ser, p->instance, rw_rec_ctrl, rec_ctrl);
158}
159
160/* No debug */
161#ifdef CONFIG_ETRAX_DEBUG_PORT_NULL
162
163static void
164console_write(struct console *co, const char *buf, unsigned int len)
165{
166 return;
167}
168
169/* Target debug */
170#elif !defined(CONFIG_ETRAXFS_SIM)
171
172static void
173console_write_direct(struct console *co, const char *buf, unsigned int len)
174{
175 int i;
176 reg_ser_r_stat_din stat;
177 reg_ser_rw_tr_dma_en tr_dma_en, old;
178
179 /* Switch to manual mode */
180 tr_dma_en = old = REG_RD (ser, port->instance, rw_tr_dma_en);
181 if (tr_dma_en.en == regk_ser_yes) {
182 tr_dma_en.en = regk_ser_no;
183 REG_WR(ser, port->instance, rw_tr_dma_en, tr_dma_en);
184 }
185
186 /* Send data */
187 for (i = 0; i < len; i++) {
188 /* LF -> CRLF */
189 if (buf[i] == '\n') {
190 do {
191 stat = REG_RD (ser, port->instance, r_stat_din);
192 } while (!stat.tr_rdy);
193 REG_WR_INT (ser, port->instance, rw_dout, '\r');
194 }
195 /* Wait until transmitter is ready and send.*/
196 do {
197 stat = REG_RD (ser, port->instance, r_stat_din);
198 } while (!stat.tr_rdy);
199 REG_WR_INT (ser, port->instance, rw_dout, buf[i]);
200 }
201
202 /* Restore mode */
203 if (tr_dma_en.en != old.en)
204 REG_WR(ser, port->instance, rw_tr_dma_en, old);
205}
206
207static void
208console_write(struct console *co, const char *buf, unsigned int len)
209{
210 if (!port)
211 return;
212 console_write_direct(co, buf, len);
213}
214
215
216
217#else
218
219/* VCS debug */
220
221static void
222console_write(struct console *co, const char *buf, unsigned int len)
223{
224 char* pos;
225 pos = memchr(buf, '\n', len);
226 if (pos) {
227 int l = ++pos - buf;
228 memcpy(buffer + buffer_pos, buf, l);
229 memcpy(buffer, msg, sizeof(msg) - 1);
230 buffer[buffer_pos + l] = '\0';
231 print_str(buffer);
232 buffer_pos = sizeof(msg) - 1;
233 if (pos - buf != len) {
234 memcpy(buffer + buffer_pos, pos, len - l);
235 buffer_pos += len - l;
236 }
237 } else {
238 memcpy(buffer + buffer_pos, buf, len);
239 buffer_pos += len;
240 }
241}
242
243#endif
244
245int raw_printk(const char *fmt, ...)
246{
247 static char buf[1024];
248 int printed_len;
249 va_list args;
250 va_start(args, fmt);
251 printed_len = vsnprintf(buf, sizeof(buf), fmt, args);
252 va_end(args);
253 console_write(NULL, buf, strlen(buf));
254 return printed_len;
255}
256
257void
258stupid_debug(char* buf)
259{
260 console_write(NULL, buf, strlen(buf));
261}
262
263#ifdef CONFIG_ETRAX_KGDB
264/* Use polling to get a single character from the kernel debug port */
265int
266getDebugChar(void)
267{
268 reg_ser_rs_status_data stat;
269 reg_ser_rw_ack_intr ack_intr = { 0 };
270
271 do {
272 stat = REG_RD(ser, kgdb_instance, rs_status_data);
273 } while (!stat.data_avail);
274
275 /* Ack the data_avail interrupt. */
276 ack_intr.data_avail = 1;
277 REG_WR(ser, kgdb_instance, rw_ack_intr, ack_intr);
278
279 return stat.data;
280}
281
282/* Use polling to put a single character to the kernel debug port */
283void
284putDebugChar(int val)
285{
286 reg_ser_r_status_data stat;
287 do {
288 stat = REG_RD (ser, kgdb_instance, r_status_data);
289 } while (!stat.tr_ready);
290 REG_WR (ser, kgdb_instance, rw_data_out, REG_TYPE_CONV(reg_ser_rw_data_out, int, val));
291}
292#endif /* CONFIG_ETRAX_KGDB */
293
294static int __init
295console_setup(struct console *co, char *options)
296{
297 char* s;
298
299 if (options) {
300 port = &ports[co->index];
301 port->baudrate = 115200;
302 port->parity = 'N';
303 port->bits = 8;
304 port->baudrate = simple_strtoul(options, NULL, 10);
305 s = options;
306 while(*s >= '0' && *s <= '9')
307 s++;
308 if (*s) port->parity = *s++;
309 if (*s) port->bits = *s++ - '0';
310 port->started = 0;
311 start_port(port);
312 }
313 return 0;
314}
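/*
 * Example: assuming the usual console= boot syntax, "console=ttyS0,115200N8"
 * hands this function the option string "115200N8", giving baudrate 115200,
 * parity 'N' and 8 data bits on debug port 0.
 */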
315
316/* This is a dummy serial device that throws away anything written to it.
317 * This is used when no debug output is wanted.
318 */
319static struct tty_driver dummy_driver;
320
321static int dummy_open(struct tty_struct *tty, struct file * filp)
322{
323 return 0;
324}
325
326static void dummy_close(struct tty_struct *tty, struct file * filp)
327{
328}
329
330static int dummy_write(struct tty_struct * tty,
331 const unsigned char *buf, int count)
332{
333 return count;
334}
335
336static int
337dummy_write_room(struct tty_struct *tty)
338{
339 return 8192;
340}
341
342void __init
343init_dummy_console(void)
344{
345 memset(&dummy_driver, 0, sizeof(struct tty_driver));
346 dummy_driver.driver_name = "serial";
347 dummy_driver.name = "ttyS";
348 dummy_driver.major = TTY_MAJOR;
349 dummy_driver.minor_start = 68;
350	dummy_driver.num = 1;       /* a single dummy port is enough */
351 dummy_driver.type = TTY_DRIVER_TYPE_SERIAL;
352 dummy_driver.subtype = SERIAL_TYPE_NORMAL;
353 dummy_driver.init_termios = tty_std_termios;
354 dummy_driver.init_termios.c_cflag =
355 B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
356 dummy_driver.flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
357
358 dummy_driver.open = dummy_open;
359 dummy_driver.close = dummy_close;
360 dummy_driver.write = dummy_write;
361 dummy_driver.write_room = dummy_write_room;
362 if (tty_register_driver(&dummy_driver))
363 panic("Couldn't register dummy serial driver\n");
364}
365
366static struct tty_driver*
367crisv32_console_device(struct console* co, int *index)
368{
369 if (port)
370 *index = port->nbr;
371 return port ? serial_driver : &dummy_driver;
372}
373
374static struct console sercons = {
375 name : "ttyS",
376 write: console_write,
377 read : NULL,
378 device : crisv32_console_device,
379 unblank : NULL,
380 setup : console_setup,
381 flags : CON_PRINTBUFFER,
382 index : -1,
383 cflag : 0,
384 next : NULL
385};
386static struct console sercons0 = {
387 name : "ttyS",
388 write: console_write,
389 read : NULL,
390 device : crisv32_console_device,
391 unblank : NULL,
392 setup : console_setup,
393 flags : CON_PRINTBUFFER,
394 index : 0,
395 cflag : 0,
396 next : NULL
397};
398
399static struct console sercons1 = {
400 name : "ttyS",
401 write: console_write,
402 read : NULL,
403 device : crisv32_console_device,
404 unblank : NULL,
405 setup : console_setup,
406 flags : CON_PRINTBUFFER,
407 index : 1,
408 cflag : 0,
409 next : NULL
410};
411static struct console sercons2 = {
412 name : "ttyS",
413 write: console_write,
414 read : NULL,
415 device : crisv32_console_device,
416 unblank : NULL,
417 setup : console_setup,
418 flags : CON_PRINTBUFFER,
419 index : 2,
420 cflag : 0,
421 next : NULL
422};
423static struct console sercons3 = {
424 name : "ttyS",
425 write: console_write,
426 read : NULL,
427 device : crisv32_console_device,
428 unblank : NULL,
429 setup : console_setup,
430 flags : CON_PRINTBUFFER,
431 index : 3,
432 cflag : 0,
433 next : NULL
434};
435
436/* Register console for printk's, etc. */
437int __init
438init_etrax_debug(void)
439{
440 static int first = 1;
441
442 if (!first) {
443 unregister_console(&sercons);
444 register_console(&sercons0);
445 register_console(&sercons1);
446 register_console(&sercons2);
447 register_console(&sercons3);
448 init_dummy_console();
449 return 0;
450 }
451 first = 0;
452 register_console(&sercons);
453 start_port(port);
454
455#ifdef CONFIG_ETRAX_KGDB
456 start_port(kgdb_port);
457#endif /* CONFIG_ETRAX_KGDB */
458 return 0;
459}
460
461__initcall(init_etrax_debug);
diff --git a/arch/cris/arch-v32/kernel/dma.c b/arch/cris/arch-v32/kernel/dma.c
new file mode 100644
index 000000000000..b92e85799b44
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/dma.c
@@ -0,0 +1,224 @@
1/* Wrapper for DMA channel allocator that starts clocks etc */
2
3#include <linux/kernel.h>
4#include <linux/spinlock.h>
5#include <asm/dma.h>
6#include <asm/arch/hwregs/reg_map.h>
7#include <asm/arch/hwregs/reg_rdwr.h>
8#include <asm/arch/hwregs/marb_defs.h>
9#include <asm/arch/hwregs/config_defs.h>
10#include <asm/arch/hwregs/strmux_defs.h>
11#include <linux/errno.h>
12#include <asm/system.h>
13#include <asm/arch/arbiter.h>
14
15static char used_dma_channels[MAX_DMA_CHANNELS];
16static const char * used_dma_channels_users[MAX_DMA_CHANNELS];
17
18static DEFINE_SPINLOCK(dma_lock);
19
20int crisv32_request_dma(unsigned int dmanr, const char * device_id,
21 unsigned options, unsigned int bandwidth,
22 enum dma_owner owner)
23{
24 unsigned long flags;
25 reg_config_rw_clk_ctrl clk_ctrl;
26 reg_strmux_rw_cfg strmux_cfg;
27
28 if (crisv32_arbiter_allocate_bandwith(dmanr,
29 options & DMA_INT_MEM ? INT_REGION : EXT_REGION,
30 bandwidth))
31 return -ENOMEM;
32
33 spin_lock_irqsave(&dma_lock, flags);
34
35 if (used_dma_channels[dmanr]) {
36 spin_unlock_irqrestore(&dma_lock, flags);
37 if (options & DMA_VERBOSE_ON_ERROR) {
38 printk("Failed to request DMA %i for %s, already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]);
39 }
40 if (options & DMA_PANIC_ON_ERROR)
41 panic("request_dma error!");
42 return -EBUSY;
43 }
44 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
45 strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
46
47 switch(dmanr)
48 {
49 case 0:
50 case 1:
51 clk_ctrl.dma01_eth0 = 1;
52 break;
53 case 2:
54 case 3:
55 clk_ctrl.dma23 = 1;
56 break;
57 case 4:
58 case 5:
59 clk_ctrl.dma45 = 1;
60 break;
61 case 6:
62 case 7:
63 clk_ctrl.dma67 = 1;
64 break;
65 case 8:
66 case 9:
67 clk_ctrl.dma89_strcop = 1;
68 break;
69#if MAX_DMA_CHANNELS-1 != 9
70#error Check dma.c
71#endif
72 default:
73 spin_unlock_irqrestore(&dma_lock, flags);
74 if (options & DMA_VERBOSE_ON_ERROR) {
75			printk("Failed to request DMA %i for %s, only 0-%i valid\n", dmanr, device_id, MAX_DMA_CHANNELS-1);
76 }
77
78 if (options & DMA_PANIC_ON_ERROR)
79 panic("request_dma error!");
80 return -EINVAL;
81 }
82
83 switch(owner)
84 {
85 case dma_eth0:
86 if (dmanr == 0)
87 strmux_cfg.dma0 = regk_strmux_eth0;
88 else if (dmanr == 1)
89 strmux_cfg.dma1 = regk_strmux_eth0;
90 else
91 panic("Invalid DMA channel for eth0\n");
92 break;
93 case dma_eth1:
94 if (dmanr == 6)
95 strmux_cfg.dma6 = regk_strmux_eth1;
96 else if (dmanr == 7)
97 strmux_cfg.dma7 = regk_strmux_eth1;
98 else
99 panic("Invalid DMA channel for eth1\n");
100 break;
101 case dma_iop0:
102 if (dmanr == 2)
103 strmux_cfg.dma2 = regk_strmux_iop0;
104 else if (dmanr == 3)
105 strmux_cfg.dma3 = regk_strmux_iop0;
106 else
107 panic("Invalid DMA channel for iop0\n");
108 break;
109 case dma_iop1:
110 if (dmanr == 4)
111 strmux_cfg.dma4 = regk_strmux_iop1;
112 else if (dmanr == 5)
113 strmux_cfg.dma5 = regk_strmux_iop1;
114 else
115 panic("Invalid DMA channel for iop1\n");
116 break;
117 case dma_ser0:
118 if (dmanr == 6)
119 strmux_cfg.dma6 = regk_strmux_ser0;
120 else if (dmanr == 7)
121 strmux_cfg.dma7 = regk_strmux_ser0;
122 else
123 panic("Invalid DMA channel for ser0\n");
124 break;
125 case dma_ser1:
126 if (dmanr == 4)
127 strmux_cfg.dma4 = regk_strmux_ser1;
128 else if (dmanr == 5)
129 strmux_cfg.dma5 = regk_strmux_ser1;
130 else
131 panic("Invalid DMA channel for ser1\n");
132 break;
133 case dma_ser2:
134 if (dmanr == 2)
135 strmux_cfg.dma2 = regk_strmux_ser2;
136 else if (dmanr == 3)
137 strmux_cfg.dma3 = regk_strmux_ser2;
138 else
139 panic("Invalid DMA channel for ser2\n");
140 break;
141 case dma_ser3:
142 if (dmanr == 8)
143 strmux_cfg.dma8 = regk_strmux_ser3;
144 else if (dmanr == 9)
145 strmux_cfg.dma9 = regk_strmux_ser3;
146 else
147 panic("Invalid DMA channel for ser3\n");
148 break;
149 case dma_sser0:
150 if (dmanr == 4)
151 strmux_cfg.dma4 = regk_strmux_sser0;
152 else if (dmanr == 5)
153 strmux_cfg.dma5 = regk_strmux_sser0;
154 else
155 panic("Invalid DMA channel for sser0\n");
156 break;
157 case dma_sser1:
158 if (dmanr == 6)
159 strmux_cfg.dma6 = regk_strmux_sser1;
160 else if (dmanr == 7)
161 strmux_cfg.dma7 = regk_strmux_sser1;
162 else
163 panic("Invalid DMA channel for sser1\n");
164 break;
165 case dma_ata:
166 if (dmanr == 2)
167 strmux_cfg.dma2 = regk_strmux_ata;
168 else if (dmanr == 3)
169 strmux_cfg.dma3 = regk_strmux_ata;
170 else
171 panic("Invalid DMA channel for ata\n");
172 break;
173 case dma_strp:
174 if (dmanr == 8)
175 strmux_cfg.dma8 = regk_strmux_strcop;
176 else if (dmanr == 9)
177 strmux_cfg.dma9 = regk_strmux_strcop;
178 else
179 panic("Invalid DMA channel for strp\n");
180 break;
181 case dma_ext0:
182 if (dmanr == 6)
183 strmux_cfg.dma6 = regk_strmux_ext0;
184 else
185 panic("Invalid DMA channel for ext0\n");
186 break;
187 case dma_ext1:
188 if (dmanr == 7)
189 strmux_cfg.dma7 = regk_strmux_ext1;
190 else
191 panic("Invalid DMA channel for ext1\n");
192 break;
193 case dma_ext2:
194 if (dmanr == 2)
195 strmux_cfg.dma2 = regk_strmux_ext2;
196 else if (dmanr == 8)
197 strmux_cfg.dma8 = regk_strmux_ext2;
198 else
199 panic("Invalid DMA channel for ext2\n");
200 break;
201 case dma_ext3:
202 if (dmanr == 3)
203 strmux_cfg.dma3 = regk_strmux_ext3;
204 else if (dmanr == 9)
205			strmux_cfg.dma9 = regk_strmux_ext3;
206		else
207			panic("Invalid DMA channel for ext3\n");
208 break;
209 }
210
211 used_dma_channels[dmanr] = 1;
212 used_dma_channels_users[dmanr] = device_id;
213 REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
214 REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
215 spin_unlock_irqrestore(&dma_lock,flags);
216 return 0;
217}
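/*
 * Hypothetical usage sketch (channel number, name and bandwidth figure are
 * illustrative): a synchronous serial driver claiming DMA channel 4 for
 * sser0 might do
 *
 *	if (crisv32_request_dma(4, "sync-ser0",
 *				DMA_VERBOSE_ON_ERROR | DMA_PANIC_ON_ERROR,
 *				12500000, dma_sser0))
 *		return -EBUSY;
 *
 * which reserves arbiter bandwidth, enables the clock for the DMA pair and
 * routes the stream multiplexer before the channel is marked as used.
 */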
218
219void crisv32_free_dma(unsigned int dmanr)
220{
221 spin_lock(&dma_lock);
222 used_dma_channels[dmanr] = 0;
223 spin_unlock(&dma_lock);
224}
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
new file mode 100644
index 000000000000..a8ed55e5b403
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -0,0 +1,820 @@
1/*
2 * Copyright (C) 2000-2003 Axis Communications AB
3 *
4 * Authors: Bjorn Wesen (bjornw@axis.com)
5 * Tobias Anderberg (tobiasa@axis.com), CRISv32 port.
6 *
7 * Code for the system-call and fault low-level handling routines.
8 *
9 * NOTE: This code handles signal-recognition, which happens every time
10 * after a timer-interrupt and after each system call.
11 *
12 * Stack layout in 'ret_from_system_call':
13 * ptrace needs to have all regs on the stack.
14 * if the order here is changed, it needs to be
15 * updated in fork.c:copy_process, signal.c:do_signal,
16 * ptrace.c and ptrace.h
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/linkage.h>
22#include <linux/sys.h>
23#include <asm/unistd.h>
24#include <asm/errno.h>
25#include <asm/thread_info.h>
26#include <asm/arch/offset.h>
27
28#include <asm/arch/hwregs/asm/reg_map_asm.h>
29#include <asm/arch/hwregs/asm/intr_vect_defs_asm.h>
30
31 ;; Exported functions.
32 .globl system_call
33 .globl ret_from_intr
34 .globl ret_from_fork
35 .globl resume
36 .globl multiple_interrupt
37 .globl nmi_interrupt
38 .globl spurious_interrupt
39 .globl do_sigtrap
40 .globl gdb_handle_exception
41 .globl sys_call_table
42
43 ; Check if preemptive kernel scheduling should be done.
44#ifdef CONFIG_PREEMPT
45_resume_kernel:
46 di
47 ; Load current task struct.
48 movs.w -8192, $r0 ; THREAD_SIZE = 8192
49 and.d $sp, $r0
50
51 addoq +TI_preempt_count, $r0, $acr
52 move.d [$acr], $r10 ; Preemption disabled?
53 bne _Rexit
54 nop
55
56_need_resched:
57 addoq +TI_flags, $r0, $acr
58 move.d [$acr], $r10
59 btstq TIF_NEED_RESCHED, $r10 ; Check if need_resched is set.
60 bpl _Rexit
61 nop
62
63 ; Do preemptive kernel scheduling.
64 jsr preempt_schedule_irq
65 nop
66
67 ; Load new task struct.
68 movs.w -8192, $r0 ; THREAD_SIZE = 8192.
69 and.d $sp, $r0
70
71 ; One more time with new task.
72 ba _need_resched
73 nop
74#else
75#define _resume_kernel _Rexit
76#endif
77
78 ; Called at exit from fork. schedule_tail must be called to drop
79 ; spinlock if CONFIG_PREEMPT.
80ret_from_fork:
81 jsr schedule_tail
82 nop
83 ba ret_from_sys_call
84 nop
85
86ret_from_intr:
87 ;; Check for resched if preemptive kernel, or if we're going back to
88 ;; user-mode. This test matches the user_regs(regs) macro. Don't simply
89 ;; test CCS since that doesn't necessarily reflect what mode we'll
90 ;; return into.
91 addoq +PT_ccs, $sp, $acr
92 move.d [$acr], $r0
93 btstq 16, $r0 ; User-mode flag.
94 bpl _resume_kernel
95
96 ; Note that di below is in delay slot.
97
98_resume_userspace:
99 di ; So need_resched and sigpending don't change.
100
101 movs.w -8192, $r0 ; THREAD_SIZE == 8192
102 and.d $sp, $r0
103
104 addoq +TI_flags, $r0, $acr ; current->work
105 move.d [$acr], $r10
106 and.d _TIF_WORK_MASK, $r10 ; Work to be done on return?
107 bne _work_pending
108 nop
109 ba _Rexit
110 nop
111
112 ;; The system_call is called by a BREAK instruction, which looks pretty
113 ;; much like any other exception.
114 ;;
115 ;; System calls can't be made from interrupts but we still stack ERP
116 ;; to have a complete stack frame.
117 ;;
118 ;; In r9 we have the wanted syscall number. Arguments come in r10,r11,r12,
119 ;; r13,mof,srp
120 ;;
121 ;; This function looks on the _surface_ like spaghetti programming, but it's
122 ;; really designed so that the fast-path does not force cache-loading of
123 ;; non-used instructions. Only the non-common cases cause the outlined code
124 ;; to run..
125
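	;; A user-space sketch of the convention above (illustrative only; the
	;; syscall number is taken from the table below, and the instruction
	;; used to enter the kernel is assumed to be the conventional
	;; CRIS "break 13"):
	;;
	;;	moveq	4, $r9		; syscall number: 4 == sys_write
	;;	moveq	1, $r10		; arg 1: file descriptor (stdout)
	;;	move.d	buf, $r11	; arg 2: buffer address
	;;	move.d	len, $r12	; arg 3: byte count
	;;	break	13		; enter system_call; result returned in $r10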
126system_call:
127 ;; Stack-frame similar to the irq heads, which is reversed in
128 ;; ret_from_sys_call.
129 subq 12, $sp ; Skip EXS, EDA.
130 move $erp, [$sp]
131 subq 4, $sp
132 move $srp, [$sp]
133 subq 4, $sp
134 move $ccs, [$sp]
135 subq 4, $sp
136 ei ; Allow IRQs while handling system call
137 move $spc, [$sp]
138 subq 4, $sp
139 move $mof, [$sp]
140 subq 4, $sp
141 move $srs, [$sp]
142 subq 4, $sp
143 move.d $acr, [$sp]
144 subq 14*4, $sp ; Make room for R0-R13.
145 movem $r13, [$sp] ; Push R0-R13
146 subq 4, $sp
147 move.d $r10, [$sp] ; Push orig_r10.
148
149; Set S-bit when kernel debugging to keep hardware breakpoints active.
150#ifdef CONFIG_ETRAX_KGDB
151 move $ccs, $r0
152 or.d (1<<9), $r0
153 move $r0, $ccs
154#endif
155
156 movs.w -ENOSYS, $r0
157 addoq +PT_r10, $sp, $acr
158 move.d $r0, [$acr]
159
160 ;; Check if this process is syscall-traced.
161 movs.w -8192, $r0 ; THREAD_SIZE == 8192
162 and.d $sp, $r0
163
164 addoq +TI_flags, $r0, $acr
165 move.d [$acr], $r0
166 btstq TIF_SYSCALL_TRACE, $r0
167 bmi _syscall_trace_entry
168 nop
169
170_syscall_traced:
171 ;; Check for sanity in the requested syscall number.
172 cmpu.w NR_syscalls, $r9
173 bhs ret_from_sys_call
174 lslq 2, $r9 ; Multiply by 4, in the delay slot.
175
176 ;; The location on the stack for the register structure is passed as a
177 ;; seventh argument. Some system calls need this.
178 move.d $sp, $r0
179 subq 4, $sp
180 move.d $r0, [$sp]
181
182 ;; The registers carrying parameters (R10-R13) are intact. The optional
183	;; fifth and sixth parameters are in MOF and SRP respectively. Put them
184 ;; back on the stack.
185 subq 4, $sp
186 move $srp, [$sp]
187 subq 4, $sp
188 move $mof, [$sp]
189
190	;; Actually do the system call.
191 addo.d +sys_call_table, $r9, $acr
192 move.d [$acr], $acr
193 jsr $acr
194 nop
195
196 addq 3*4, $sp ; Pop the mof, srp and regs parameters.
197 addoq +PT_r10, $sp, $acr
198 move.d $r10, [$acr] ; Save the return value.
199
200 moveq 1, $r9 ; "Parameter" to ret_from_sys_call to
201 ; show it was a sys call.
202
203 ;; Fall through into ret_from_sys_call to return.
204
205ret_from_sys_call:
206 ;; R9 is a parameter:
207 ;; >= 1 from syscall
208 ;; 0 from irq
209
210 ;; Get the current task-struct pointer.
211 movs.w -8192, $r0 ; THREAD_SIZE == 8192
212 and.d $sp, $r0
213
214 di ; Make sure need_resched and sigpending don't change.
215
216 addoq +TI_flags, $r0, $acr
217 move.d [$acr], $r1
218 and.d _TIF_ALLWORK_MASK, $r1
219 bne _syscall_exit_work
220 nop
221
222_Rexit:
223 ;; This epilogue MUST match the prologues in multiple_interrupt, irq.h
224 ;; and ptregs.h.
225 addq 4, $sp ; Skip orig_r10.
226 movem [$sp+], $r13 ; Registers R0-R13.
227 move.d [$sp+], $acr
228 move [$sp], $srs
229 addq 4, $sp
230 move [$sp+], $mof
231 move [$sp+], $spc
232 move [$sp+], $ccs
233 move [$sp+], $srp
234 move [$sp+], $erp
235 addq 8, $sp ; Skip EXS, EDA.
236 jump $erp
237 rfe ; Restore condition code stack in delay-slot.
238
239	;; We get here after doing a syscall if extra work might need to be done:
240 ;; perform syscall exit tracing if needed.
241
242_syscall_exit_work:
243 ;; R0 contains current at this point and irq's are disabled.
244
245 addoq +TI_flags, $r0, $acr
246 move.d [$acr], $r1
247 btstq TIF_SYSCALL_TRACE, $r1
248 bpl _work_pending
249 nop
250 ei
251 move.d $r9, $r1 ; Preserve R9.
252 jsr do_syscall_trace
253 nop
254 move.d $r1, $r9
255 ba _resume_userspace
256 nop
257
258_work_pending:
259 addoq +TI_flags, $r0, $acr
260 move.d [$acr], $r10
261 btstq TIF_NEED_RESCHED, $r10 ; Need resched?
262 bpl _work_notifysig ; No, must be signal/notify.
263 nop
264
265_work_resched:
266 move.d $r9, $r1 ; Preserve R9.
267 jsr schedule
268 nop
269 move.d $r1, $r9
270 di
271
272 addoq +TI_flags, $r0, $acr
273 move.d [$acr], $r1
274	and.d	_TIF_WORK_MASK, $r1	; Ignore syscall trace counter.
275 beq _Rexit
276 nop
277 btstq TIF_NEED_RESCHED, $r1
278 bmi _work_resched ; current->work.need_resched.
279 nop
280
281_work_notifysig:
282 ;; Deal with pending signals and notify-resume requests.
283
284 addoq +TI_flags, $r0, $acr
285 move.d [$acr], $r13 ; The thread_info_flags parameter.
286 move.d $r9, $r10 ; do_notify_resume syscall/irq param.
287 moveq 0, $r11 ; oldset param - 0 in this case.
288 move.d $sp, $r12 ; The regs param.
289 jsr do_notify_resume
290 nop
291
292 ba _Rexit
293 nop
294
295 ;; We get here as a sidetrack when we've entered a syscall with the
296 ;; trace-bit set. We need to call do_syscall_trace and then continue
297 ;; with the call.
298
299_syscall_trace_entry:
300 ;; PT_r10 in the frame contains -ENOSYS as required, at this point.
301
302 jsr do_syscall_trace
303 nop
304
305 ;; Now re-enter the syscall code to do the syscall itself. We need to
306 ;; restore R9 here to contain the wanted syscall, and the other
307 ;; parameter-bearing registers.
308 addoq +PT_r9, $sp, $acr
309 move.d [$acr], $r9
310 addoq +PT_orig_r10, $sp, $acr
311 move.d [$acr], $r10 ; PT_r10 is already -ENOSYS.
312 addoq +PT_r11, $sp, $acr
313 move.d [$acr], $r11
314 addoq +PT_r12, $sp, $acr
315 move.d [$acr], $r12
316 addoq +PT_r13, $sp, $acr
317 move.d [$acr], $r13
318 addoq +PT_mof, $sp, $acr
319 move [$acr], $mof
320 addoq +PT_srp, $sp, $acr
321 move [$acr], $srp
322
323 ba _syscall_traced
324 nop
325
326 ;; Resume performs the actual task-switching, by switching stack
327 ;; pointers. Input arguments are:
328 ;;
329 ;; R10 = prev
330 ;; R11 = next
331 ;; R12 = thread offset in task struct.
332 ;;
333 ;; Returns old current in R10.
334
335resume:
336 subq 4, $sp
337 move $srp, [$sp] ; Keep old/new PC on the stack.
338 add.d $r12, $r10 ; R10 = current tasks tss.
339 addoq +THREAD_ccs, $r10, $acr
340 move $ccs, [$acr] ; Save IRQ enable state.
341 di
342
343 addoq +THREAD_usp, $r10, $acr
344 move $usp, [$acr] ; Save user-mode stackpointer.
345
346 ;; See copy_thread for the reason why register R9 is saved.
347 subq 10*4, $sp
348 movem $r9, [$sp] ; Save non-scratch registers and R9.
349
350 addoq +THREAD_ksp, $r10, $acr
351 move.d $sp, [$acr] ; Save kernel SP for old task.
352
353 move.d $sp, $r10 ; Return last running task in R10.
354 and.d -8192, $r10 ; Get thread_info from stackpointer.
355 addoq +TI_task, $r10, $acr
356 move.d [$acr], $r10 ; Get task.
357	add.d	$r12, $r11			; Find the new task's tss.
358 addoq +THREAD_ksp, $r11, $acr
359 move.d [$acr], $sp ; Switch to new stackframe.
360 movem [$sp+], $r9 ; Restore non-scratch registers and R9.
361
362 addoq +THREAD_usp, $r11, $acr
363 move [$acr], $usp ; Restore user-mode stackpointer.
364
365 addoq +THREAD_ccs, $r11, $acr
366 move [$acr], $ccs ; Restore IRQ enable status.
367 move.d [$sp+], $acr
368 jump $acr ; Restore PC.
369 nop
370
371nmi_interrupt:
372
373;; If we receive a watchdog interrupt while it is not expected, then set
374;; up a canonical frame and dump register contents before dying.
375
376 ;; This prologue MUST match the one in irq.h and the struct in ptregs.h!
377 subq 12, $sp ; Skip EXS, EDA.
378 move $nrp, [$sp]
379 subq 4, $sp
380 move $srp, [$sp]
381 subq 4, $sp
382 move $ccs, [$sp]
383 subq 4, $sp
384 move $spc, [$sp]
385 subq 4, $sp
386 move $mof, [$sp]
387 subq 4, $sp
388 move $srs, [$sp]
389 subq 4, $sp
390 move.d $acr, [$sp]
391 subq 14*4, $sp ; Make room for R0-R13.
392 movem $r13, [$sp] ; Push R0-R13.
393 subq 4, $sp
394 move.d $r10, [$sp] ; Push orig_r10.
395 move.d REG_ADDR(intr_vect, regi_irq, r_nmi), $r0
396 move.d [$r0], $r0
397 btstq REG_BIT(intr_vect, r_nmi, watchdog), $r0
398 bpl 1f
399 nop
400 jsr handle_watchdog_bite ; In time.c.
401 move.d $sp, $r10 ; Pointer to registers
4021: btstq REG_BIT(intr_vect, r_nmi, ext), $r0
403 bpl 1f
404 nop
405 jsr handle_nmi
406 move.d $sp, $r10 ; Pointer to registers
4071: addq 4, $sp ; Skip orig_r10
408 movem [$sp+], $r13
409 move.d [$sp+], $acr
410 move [$sp], $srs
411 addq 4, $sp
412 move [$sp+], $mof
413 move [$sp+], $spc
414 move [$sp+], $ccs
415 move [$sp+], $srp
416 move [$sp+], $nrp
417 addq 8, $sp ; Skip EXS, EDA.
418 jump $nrp
419 rfn
420
421 .comm cause_of_death, 4 ;; Don't declare this anywhere.
422
423spurious_interrupt:
424 di
425 jump hard_reset_now
426 nop
427
428 ;; This handles the case when multiple interrupts arrive at the same
429	;; time. Jump to the first set interrupt bit in priority order. The
430	;; hardware will deliver the remaining unserved interrupts after the
431	;; handler finishes.
432multiple_interrupt:
433 ;; This prologue MUST match the one in irq.h and the struct in ptregs.h!
434 subq 12, $sp ; Skip EXS, EDA.
435 move $erp, [$sp]
436 subq 4, $sp
437 move $srp, [$sp]
438 subq 4, $sp
439 move $ccs, [$sp]
440 subq 4, $sp
441 move $spc, [$sp]
442 subq 4, $sp
443 move $mof, [$sp]
444 subq 4, $sp
445 move $srs, [$sp]
446 subq 4, $sp
447 move.d $acr, [$sp]
448 subq 14*4, $sp ; Make room for R0-R13.
449 movem $r13, [$sp] ; Push R0-R13.
450 subq 4, $sp
451 move.d $r10, [$sp] ; Push orig_r10.
452
453; Set S-bit when kernel debugging to keep hardware breakpoints active.
454#ifdef CONFIG_ETRAX_KGDB
455 move $ccs, $r0
456 or.d (1<<9), $r0
457 move $r0, $ccs
458#endif
459
460 jsr crisv32_do_multiple
461 move.d $sp, $r10
462 jump ret_from_intr
463 nop
464
465do_sigtrap:
466 ;; Sigtraps the process that executed the BREAK instruction. Creates a
467 ;; frame that Rexit expects.
468 subq 4, $sp
469 move $eda, [$sp]
470 subq 4, $sp
471 move $exs, [$sp]
472 subq 4, $sp
473 move $erp, [$sp]
474 subq 4, $sp
475 move $srp, [$sp]
476 subq 4, $sp
477 move $ccs, [$sp]
478 subq 4, $sp
479 move $spc, [$sp]
480 subq 4, $sp
481 move $mof, [$sp]
482 subq 4, $sp
483 move $srs, [$sp]
484 subq 4, $sp
485 move.d $acr, [$sp]
486 di ; Need to disable irq's at this point.
487 subq 14*4, $sp ; Make room for r0-r13.
488 movem $r13, [$sp] ; Push the r0-r13 registers.
489 subq 4, $sp
490 move.d $r10, [$sp] ; Push orig_r10.
491
492 movs.w -8192, $r9 ; THREAD_SIZE == 8192
493 and.d $sp, $r9
494
495 ;; thread_info as first parameter
496 move.d $r9, $r10
497 moveq 5, $r11 ; SIGTRAP as second argument.
498 jsr ugdb_trap_user
499 nop
500 jump ret_from_intr ; Use the return routine for interrupts.
501 nop
502
503gdb_handle_exception:
504 subq 4, $sp
505 move.d $r0, [$sp]
506#ifdef CONFIG_ETRAX_KGDB
507 move $ccs, $r0 ; U-flag not affected by previous insns.
508 btstq 16, $r0 ; Test the U-flag.
509 bmi _ugdb_handle_exception ; Go to user mode debugging.
510 nop ; Empty delay-slot (cannot pop R0 here).
511 ba kgdb_handle_exception ; Go to kernel debugging.
512 move.d [$sp+], $r0 ; Restore R0 in delay slot.
513#endif
514
515_ugdb_handle_exception:
516 ba do_sigtrap ; SIGTRAP the offending process.
517 move.d [$sp+], $r0 ; Restore R0 in delay slot.
518
519 .data
520
521 .section .rodata,"a"
522sys_call_table:
523 .long sys_restart_syscall ; 0 - old "setup()" system call, used
524 ; for restarting.
525 .long sys_exit
526 .long sys_fork
527 .long sys_read
528 .long sys_write
529 .long sys_open /* 5 */
530 .long sys_close
531 .long sys_waitpid
532 .long sys_creat
533 .long sys_link
534 .long sys_unlink /* 10 */
535 .long sys_execve
536 .long sys_chdir
537 .long sys_time
538 .long sys_mknod
539 .long sys_chmod /* 15 */
540 .long sys_lchown16
541 .long sys_ni_syscall /* old break syscall holder */
542 .long sys_stat
543 .long sys_lseek
544 .long sys_getpid /* 20 */
545 .long sys_mount
546 .long sys_oldumount
547 .long sys_setuid16
548 .long sys_getuid16
549 .long sys_stime /* 25 */
550 .long sys_ptrace
551 .long sys_alarm
552 .long sys_fstat
553 .long sys_pause
554 .long sys_utime /* 30 */
555 .long sys_ni_syscall /* old stty syscall holder */
556 .long sys_ni_syscall /* old gtty syscall holder */
557 .long sys_access
558 .long sys_nice
559 .long sys_ni_syscall /* 35 old ftime syscall holder */
560 .long sys_sync
561 .long sys_kill
562 .long sys_rename
563 .long sys_mkdir
564 .long sys_rmdir /* 40 */
565 .long sys_dup
566 .long sys_pipe
567 .long sys_times
568 .long sys_ni_syscall /* old prof syscall holder */
569 .long sys_brk /* 45 */
570 .long sys_setgid16
571 .long sys_getgid16
572 .long sys_signal
573 .long sys_geteuid16
574 .long sys_getegid16 /* 50 */
575 .long sys_acct
576	.long sys_umount	/* recycled never used phys() */
577 .long sys_ni_syscall /* old lock syscall holder */
578 .long sys_ioctl
579 .long sys_fcntl /* 55 */
580 .long sys_ni_syscall /* old mpx syscall holder */
581 .long sys_setpgid
582 .long sys_ni_syscall /* old ulimit syscall holder */
583 .long sys_ni_syscall /* old sys_olduname holder */
584 .long sys_umask /* 60 */
585 .long sys_chroot
586 .long sys_ustat
587 .long sys_dup2
588 .long sys_getppid
589 .long sys_getpgrp /* 65 */
590 .long sys_setsid
591 .long sys_sigaction
592 .long sys_sgetmask
593 .long sys_ssetmask
594 .long sys_setreuid16 /* 70 */
595 .long sys_setregid16
596 .long sys_sigsuspend
597 .long sys_sigpending
598 .long sys_sethostname
599 .long sys_setrlimit /* 75 */
600 .long sys_old_getrlimit
601 .long sys_getrusage
602 .long sys_gettimeofday
603 .long sys_settimeofday
604 .long sys_getgroups16 /* 80 */
605 .long sys_setgroups16
606 .long sys_select /* was old_select in Linux/E100 */
607 .long sys_symlink
608 .long sys_lstat
609 .long sys_readlink /* 85 */
610 .long sys_uselib
611 .long sys_swapon
612 .long sys_reboot
613 .long old_readdir
614 .long old_mmap /* 90 */
615 .long sys_munmap
616 .long sys_truncate
617 .long sys_ftruncate
618 .long sys_fchmod
619 .long sys_fchown16 /* 95 */
620 .long sys_getpriority
621 .long sys_setpriority
622 .long sys_ni_syscall /* old profil syscall holder */
623 .long sys_statfs
624 .long sys_fstatfs /* 100 */
625 .long sys_ni_syscall /* sys_ioperm in i386 */
626 .long sys_socketcall
627 .long sys_syslog
628 .long sys_setitimer
629 .long sys_getitimer /* 105 */
630 .long sys_newstat
631 .long sys_newlstat
632 .long sys_newfstat
633 .long sys_ni_syscall /* old sys_uname holder */
634 .long sys_ni_syscall /* sys_iopl in i386 */
635 .long sys_vhangup
636 .long sys_ni_syscall /* old "idle" system call */
637 .long sys_ni_syscall /* vm86old in i386 */
638 .long sys_wait4
639 .long sys_swapoff /* 115 */
640 .long sys_sysinfo
641 .long sys_ipc
642 .long sys_fsync
643 .long sys_sigreturn
644 .long sys_clone /* 120 */
645 .long sys_setdomainname
646 .long sys_newuname
647 .long sys_ni_syscall /* sys_modify_ldt */
648 .long sys_adjtimex
649 .long sys_mprotect /* 125 */
650 .long sys_sigprocmask
651 .long sys_ni_syscall /* old "create_module" */
652 .long sys_init_module
653 .long sys_delete_module
654 .long sys_ni_syscall /* 130: old "get_kernel_syms" */
655 .long sys_quotactl
656 .long sys_getpgid
657 .long sys_fchdir
658 .long sys_bdflush
659 .long sys_sysfs /* 135 */
660 .long sys_personality
661 .long sys_ni_syscall /* for afs_syscall */
662 .long sys_setfsuid16
663 .long sys_setfsgid16
664 .long sys_llseek /* 140 */
665 .long sys_getdents
666 .long sys_select
667 .long sys_flock
668 .long sys_msync
669 .long sys_readv /* 145 */
670 .long sys_writev
671 .long sys_getsid
672 .long sys_fdatasync
673 .long sys_sysctl
674 .long sys_mlock /* 150 */
675 .long sys_munlock
676 .long sys_mlockall
677 .long sys_munlockall
678 .long sys_sched_setparam
679 .long sys_sched_getparam /* 155 */
680 .long sys_sched_setscheduler
681 .long sys_sched_getscheduler
682 .long sys_sched_yield
683 .long sys_sched_get_priority_max
684 .long sys_sched_get_priority_min /* 160 */
685 .long sys_sched_rr_get_interval
686 .long sys_nanosleep
687 .long sys_mremap
688 .long sys_setresuid16
689 .long sys_getresuid16 /* 165 */
690 .long sys_ni_syscall /* sys_vm86 */
691 .long sys_ni_syscall /* Old sys_query_module */
692 .long sys_poll
693 .long sys_nfsservctl
694 .long sys_setresgid16 /* 170 */
695 .long sys_getresgid16
696 .long sys_prctl
697 .long sys_rt_sigreturn
698 .long sys_rt_sigaction
699 .long sys_rt_sigprocmask /* 175 */
700 .long sys_rt_sigpending
701 .long sys_rt_sigtimedwait
702 .long sys_rt_sigqueueinfo
703 .long sys_rt_sigsuspend
704 .long sys_pread64 /* 180 */
705 .long sys_pwrite64
706 .long sys_chown16
707 .long sys_getcwd
708 .long sys_capget
709 .long sys_capset /* 185 */
710 .long sys_sigaltstack
711 .long sys_sendfile
712 .long sys_ni_syscall /* streams1 */
713 .long sys_ni_syscall /* streams2 */
714 .long sys_vfork /* 190 */
715 .long sys_getrlimit
716 .long sys_mmap2
717 .long sys_truncate64
718 .long sys_ftruncate64
719 .long sys_stat64 /* 195 */
720 .long sys_lstat64
721 .long sys_fstat64
722 .long sys_lchown
723 .long sys_getuid
724 .long sys_getgid /* 200 */
725 .long sys_geteuid
726 .long sys_getegid
727 .long sys_setreuid
728 .long sys_setregid
729 .long sys_getgroups /* 205 */
730 .long sys_setgroups
731 .long sys_fchown
732 .long sys_setresuid
733 .long sys_getresuid
734 .long sys_setresgid /* 210 */
735 .long sys_getresgid
736 .long sys_chown
737 .long sys_setuid
738 .long sys_setgid
739 .long sys_setfsuid /* 215 */
740 .long sys_setfsgid
741 .long sys_pivot_root
742 .long sys_mincore
743 .long sys_madvise
744 .long sys_getdents64 /* 220 */
745 .long sys_fcntl64
746 .long sys_ni_syscall /* reserved for TUX */
747 .long sys_ni_syscall
748 .long sys_gettid
749 .long sys_readahead /* 225 */
750 .long sys_setxattr
751 .long sys_lsetxattr
752 .long sys_fsetxattr
753 .long sys_getxattr
754 .long sys_lgetxattr /* 230 */
755 .long sys_fgetxattr
756 .long sys_listxattr
757 .long sys_llistxattr
758 .long sys_flistxattr
759 .long sys_removexattr /* 235 */
760 .long sys_lremovexattr
761 .long sys_fremovexattr
762 .long sys_tkill
763 .long sys_sendfile64
764 .long sys_futex /* 240 */
765 .long sys_sched_setaffinity
766 .long sys_sched_getaffinity
767 .long sys_ni_syscall /* sys_set_thread_area */
768 .long sys_ni_syscall /* sys_get_thread_area */
769 .long sys_io_setup /* 245 */
770 .long sys_io_destroy
771 .long sys_io_getevents
772 .long sys_io_submit
773 .long sys_io_cancel
774 .long sys_fadvise64 /* 250 */
775 .long sys_ni_syscall
776 .long sys_exit_group
777 .long sys_lookup_dcookie
778 .long sys_epoll_create
779 .long sys_epoll_ctl /* 255 */
780 .long sys_epoll_wait
781 .long sys_remap_file_pages
782 .long sys_set_tid_address
783 .long sys_timer_create
784 .long sys_timer_settime /* 260 */
785 .long sys_timer_gettime
786 .long sys_timer_getoverrun
787 .long sys_timer_delete
788 .long sys_clock_settime
789 .long sys_clock_gettime /* 265 */
790 .long sys_clock_getres
791 .long sys_clock_nanosleep
792 .long sys_statfs64
793 .long sys_fstatfs64
794 .long sys_tgkill /* 270 */
795 .long sys_utimes
796 .long sys_fadvise64_64
797 .long sys_ni_syscall /* sys_vserver */
798 .long sys_ni_syscall /* sys_mbind */
799 .long sys_ni_syscall /* 275 sys_get_mempolicy */
800 .long sys_ni_syscall /* sys_set_mempolicy */
801 .long sys_mq_open
802 .long sys_mq_unlink
803 .long sys_mq_timedsend
804 .long sys_mq_timedreceive /* 280 */
805 .long sys_mq_notify
806 .long sys_mq_getsetattr
807 .long sys_ni_syscall /* reserved for kexec */
808 .long sys_waitid
809
810 /*
811 * NOTE!! This doesn't have to be exact - we just have
812 * to make sure we have _enough_ of the "sys_ni_syscall"
813 * entries. Don't panic if you notice that this hasn't
814 * been shrunk every time we add a new system call.
815 */
816
817 .rept NR_syscalls - (.-sys_call_table) / 4
818 .long sys_ni_syscall
819 .endr
820
diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c
new file mode 100644
index 000000000000..ea2b4a97c8c7
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/fasttimer.c
@@ -0,0 +1,996 @@
1/* $Id: fasttimer.c,v 1.11 2005/01/04 11:15:46 starvik Exp $
2 * linux/arch/cris/kernel/fasttimer.c
3 *
4 * Fast timers for ETRAX FS
5 * This may be useful in other OS than Linux so use 2 space indentation...
6 *
7 * $Log: fasttimer.c,v $
8 * Revision 1.11 2005/01/04 11:15:46 starvik
9 * Don't share timer IRQ.
10 *
11 * Revision 1.10 2004/12/07 09:19:38 starvik
12 * Corrected includes.
13 * Use correct interrupt macros.
14 *
15 * Revision 1.9 2004/05/14 10:18:58 starvik
16 * Export fast_timer_list
17 *
18 * Revision 1.8 2004/05/14 07:58:03 starvik
19 * Merge of changes from 2.4
20 *
21 * Revision 1.7 2003/07/10 12:06:14 starvik
22 * Return IRQ_NONE if irq wasn't handled
23 *
24 * Revision 1.6 2003/07/04 08:27:49 starvik
25 * Merge of Linux 2.5.74
26 *
27 * Revision 1.5 2003/06/05 10:16:22 johana
28 * New INTR_VECT macros.
29 *
30 * Revision 1.4 2003/06/03 08:49:45 johana
31 * Fixed typo.
32 *
33 * Revision 1.3 2003/06/02 12:51:27 johana
34 * Now compiles.
35 * Commented some include files that probably can be removed.
36 *
37 * Revision 1.2 2003/06/02 12:09:41 johana
38 * Ported to ETRAX FS using the trig interrupt instead of timer1.
39 *
40 * Revision 1.3 2002/12/12 08:26:32 starvik
41 * Don't use C-comments inside CVS comments
42 *
43 * Revision 1.2 2002/12/11 15:42:02 starvik
44 * Extracted v10 (ETRAX 100LX) specific stuff from arch/cris/kernel/
45 *
46 * Revision 1.1 2002/11/18 07:58:06 starvik
47 * Fast timers (from Linux 2.4)
48 *
49 * Revision 1.5 2002/10/15 06:21:39 starvik
50 * Added call to init_waitqueue_head
51 *
52 * Revision 1.4 2002/05/28 17:47:59 johana
53 * Added del_fast_timer()
54 *
55 * Revision 1.3 2002/05/28 16:16:07 johana
56 * Handle empty fast_timer_list
57 *
58 * Revision 1.2 2002/05/27 15:38:42 johana
59 * Made it compile without warnings on Linux 2.4.
60 * (includes, wait_queue, PROC_FS and snprintf)
61 *
62 * Revision 1.1 2002/05/27 15:32:25 johana
63 * arch/etrax100/kernel/fasttimer.c v1.8 from the elinux tree.
64 *
65 * Revision 1.8 2001/11/27 13:50:40 pkj
66 * Disable interrupts while stopping the timer and while modifying the
67 * list of active timers in timer1_handler() as it may be interrupted
68 * by other interrupts (e.g., the serial interrupt) which may add fast
69 * timers.
70 *
71 * Revision 1.7 2001/11/22 11:50:32 pkj
72 * * Only store information about the last 16 timers.
73 * * proc_fasttimer_read() now uses an allocated buffer, since it
74 * requires more space than just a page even for only writing the
75 * last 16 timers. The buffer is only allocated on request, so
76 * unless /proc/fasttimer is read, it is never allocated.
77 * * Renamed fast_timer_started to fast_timers_started to match
78 * fast_timers_added and fast_timers_expired.
79 * * Some clean-up.
80 *
81 * Revision 1.6 2000/12/13 14:02:08 johana
82 * Removed volatile for fast_timer_list
83 *
84 * Revision 1.5 2000/12/13 13:55:35 johana
85 * Added DEBUG_LOG, added som cli() and cleanup
86 *
87 * Revision 1.4 2000/12/05 13:48:50 johana
88 * Added range check when writing proc file, modified timer int handling
89 *
90 * Revision 1.3 2000/11/23 10:10:20 johana
91 * More debug/logging possibilities.
92 * Moved GET_JIFFIES_USEC() to timex.h and time.c
93 *
94 * Revision 1.2 2000/11/01 13:41:04 johana
95 * Clean up and bugfixes.
96 * Created new do_gettimeofday_fast() that gets a timeval struct
97 * with time based on jiffies and *R_TIMER0_DATA, uses a table
98 * for fast conversion of timer value to microseconds.
99 * (Much faster the standard do_gettimeofday() and we don't really
100 * wan't to use the true time - we wan't the "uptime" so timers don't screw up
101 * when we change the time.
102 * TODO: Add efficient support for continuous timers as well.
103 *
104 * Revision 1.1 2000/10/26 15:49:16 johana
105 * Added fasttimer, highresolution timers.
106 *
107 * Copyright (C) 2000, 2001, 2002, 2003 Axis Communications AB, Lund, Sweden
108 */
109
110#include <linux/errno.h>
111#include <linux/sched.h>
112#include <linux/kernel.h>
113#include <linux/param.h>
114#include <linux/string.h>
115#include <linux/vmalloc.h>
116#include <linux/interrupt.h>
117#include <linux/time.h>
118#include <linux/delay.h>
119
120#include <asm/irq.h>
121#include <asm/system.h>
122
123#include <linux/config.h>
124#include <linux/version.h>
125
126#include <asm/arch/hwregs/reg_map.h>
127#include <asm/arch/hwregs/reg_rdwr.h>
128#include <asm/arch/hwregs/timer_defs.h>
129#include <asm/fasttimer.h>
130#include <linux/proc_fs.h>
131
132/*
133 * timer0 runs at 100 MHz and generates the jiffies timer ticks
134 * at 100 or 1000 HZ.
135 * fasttimer provides an API for timers that expire "between" the jiffies,
136 * with microsecond resolution (the underlying counter has 10 ns resolution).
137 * fasttimer uses the reg_timer_rw_trig register to get an interrupt when
138 * r_time reaches a certain value.
139 */
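A quick arithmetic note on the comment above: the counter ticks every 10 ns, so one
microsecond corresponds to 100 counter steps. A minimal sketch of the conversion
(mirroring the expression used in start_timer_trig() below; the variable names here
are illustrative, not part of this file):

  /* 100 MHz counter: 10 ns per tick, i.e. 1000/10 = 100 ticks per microsecond. */
  unsigned long ticks = delay_us * (1000 / 10);
  unsigned long trig_point = r_time + ticks;  /* absolute counter value to trig at */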
140
141
142#define DEBUG_LOG_INCLUDED
143#define FAST_TIMER_LOG
144//#define FAST_TIMER_TEST
145
146#define FAST_TIMER_SANITY_CHECKS
147
148#ifdef FAST_TIMER_SANITY_CHECKS
149#define SANITYCHECK(x) x
150static int sanity_failed = 0;
151#else
152#define SANITYCHECK(x)
153#endif
154
155#define D1(x)
156#define D2(x)
157#define DP(x)
158
159#define __INLINE__ inline
160
161static int fast_timer_running = 0;
162static int fast_timers_added = 0;
163static int fast_timers_started = 0;
164static int fast_timers_expired = 0;
165static int fast_timers_deleted = 0;
166static int fast_timer_is_init = 0;
167static int fast_timer_ints = 0;
168
169struct fast_timer *fast_timer_list = NULL;
170
171#ifdef DEBUG_LOG_INCLUDED
172#define DEBUG_LOG_MAX 128
173static const char * debug_log_string[DEBUG_LOG_MAX];
174static unsigned long debug_log_value[DEBUG_LOG_MAX];
175static int debug_log_cnt = 0;
176static int debug_log_cnt_wrapped = 0;
177
178#define DEBUG_LOG(string, value) \
179{ \
180 unsigned long log_flags; \
181 local_irq_save(log_flags); \
182 debug_log_string[debug_log_cnt] = (string); \
183 debug_log_value[debug_log_cnt] = (unsigned long)(value); \
184 if (++debug_log_cnt >= DEBUG_LOG_MAX) \
185 { \
186 debug_log_cnt = debug_log_cnt % DEBUG_LOG_MAX; \
187 debug_log_cnt_wrapped = 1; \
188 } \
189 local_irq_restore(log_flags); \
190}
191#else
192#define DEBUG_LOG(string, value)
193#endif
194
195
196#define NUM_TIMER_STATS 16
197#ifdef FAST_TIMER_LOG
198struct fast_timer timer_added_log[NUM_TIMER_STATS];
199struct fast_timer timer_started_log[NUM_TIMER_STATS];
200struct fast_timer timer_expired_log[NUM_TIMER_STATS];
201#endif
202
203int timer_div_settings[NUM_TIMER_STATS];
204int timer_delay_settings[NUM_TIMER_STATS];
205
206
207static void
208timer_trig_handler(void);
209
210
211
212/* Not true gettimeofday, only checks the jiffies (uptime) + useconds */
213void __INLINE__ do_gettimeofday_fast(struct timeval *tv)
214{
215 unsigned long sec = jiffies;
216 unsigned long usec = GET_JIFFIES_USEC();
217
218 usec += (sec % HZ) * (1000000 / HZ);
219 sec = sec / HZ;
220
221 if (usec > 1000000)
222 {
223 usec -= 1000000;
224 sec++;
225 }
226 tv->tv_sec = sec;
227 tv->tv_usec = usec;
228}
229
230int __INLINE__ timeval_cmp(struct timeval *t0, struct timeval *t1)
231{
232 if (t0->tv_sec < t1->tv_sec)
233 {
234 return -1;
235 }
236 else if (t0->tv_sec > t1->tv_sec)
237 {
238 return 1;
239 }
240 if (t0->tv_usec < t1->tv_usec)
241 {
242 return -1;
243 }
244 else if (t0->tv_usec > t1->tv_usec)
245 {
246 return 1;
247 }
248 return 0;
249}
250
251/* Called with ints off */
252void __INLINE__ start_timer_trig(unsigned long delay_us)
253{
254 reg_timer_rw_ack_intr ack_intr = { 0 };
255 reg_timer_rw_intr_mask intr_mask;
256 reg_timer_rw_trig trig;
257 reg_timer_rw_trig_cfg trig_cfg = { 0 };
258 reg_timer_r_time r_time;
259
260 r_time = REG_RD(timer, regi_timer, r_time);
261
262  D1(printk("start_timer_trig : %lu us\n",
263            delay_us));
264 /* Clear trig irq */
265 intr_mask = REG_RD(timer, regi_timer, rw_intr_mask);
266 intr_mask.trig = 0;
267 REG_WR(timer, regi_timer, rw_intr_mask, intr_mask);
268
269 /* Set timer values */
270 /* r_time is 100MHz (10 ns resolution) */
271 trig = r_time + delay_us*(1000/10);
272
273 timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = trig;
274 timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us;
275
276 /* Ack interrupt */
277 ack_intr.trig = 1;
278 REG_WR(timer, regi_timer, rw_ack_intr, ack_intr);
279
280 /* Start timer */
281 REG_WR(timer, regi_timer, rw_trig, trig);
282 trig_cfg.tmr = regk_timer_time;
283 REG_WR(timer, regi_timer, rw_trig_cfg, trig_cfg);
284
285 /* Check if we have already passed the trig time */
286 r_time = REG_RD(timer, regi_timer, r_time);
287 if (r_time < trig) {
288 /* No, Enable trig irq */
289 intr_mask = REG_RD(timer, regi_timer, rw_intr_mask);
290 intr_mask.trig = 1;
291 REG_WR(timer, regi_timer, rw_intr_mask, intr_mask);
292 fast_timers_started++;
293 fast_timer_running = 1;
294 }
295 else
296 {
297 /* We have passed the time, disable trig point, ack intr */
298 trig_cfg.tmr = regk_timer_off;
299 REG_WR(timer, regi_timer, rw_trig_cfg, trig_cfg);
300 REG_WR(timer, regi_timer, rw_ack_intr, ack_intr);
301 /* call the int routine directly */
302 timer_trig_handler();
303 }
304
305}
306
307/* In version 1.4 this function takes 27 - 50 us */
308void start_one_shot_timer(struct fast_timer *t,
309 fast_timer_function_type *function,
310 unsigned long data,
311 unsigned long delay_us,
312 const char *name)
313{
314 unsigned long flags;
315 struct fast_timer *tmp;
316
317 D1(printk("sft %s %d us\n", name, delay_us));
318
319 local_irq_save(flags);
320
321 do_gettimeofday_fast(&t->tv_set);
322 tmp = fast_timer_list;
323
324 SANITYCHECK({ /* Check so this is not in the list already... */
325 while (tmp != NULL)
326 {
327 if (tmp == t)
328 {
329 printk("timer name: %s data: 0x%08lX already in list!\n", name, data);
330 sanity_failed++;
331        local_irq_restore(flags); return;
332 }
333 else
334 {
335 tmp = tmp->next;
336 }
337 }
338 tmp = fast_timer_list;
339 });
340
341 t->delay_us = delay_us;
342 t->function = function;
343 t->data = data;
344 t->name = name;
345
346 t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000;
347 t->tv_expires.tv_sec = t->tv_set.tv_sec + delay_us / 1000000;
348 if (t->tv_expires.tv_usec > 1000000)
349 {
350 t->tv_expires.tv_usec -= 1000000;
351 t->tv_expires.tv_sec++;
352 }
353#ifdef FAST_TIMER_LOG
354 timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t;
355#endif
356 fast_timers_added++;
357
358 /* Check if this should timeout before anything else */
359 if (tmp == NULL || timeval_cmp(&t->tv_expires, &tmp->tv_expires) < 0)
360 {
361 /* Put first in list and modify the timer value */
362 t->prev = NULL;
363 t->next = fast_timer_list;
364 if (fast_timer_list)
365 {
366 fast_timer_list->prev = t;
367 }
368 fast_timer_list = t;
369#ifdef FAST_TIMER_LOG
370 timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t;
371#endif
372 start_timer_trig(delay_us);
373 } else {
374 /* Put in correct place in list */
375 while (tmp->next &&
376 timeval_cmp(&t->tv_expires, &tmp->next->tv_expires) > 0)
377 {
378 tmp = tmp->next;
379 }
380 /* Insert t after tmp */
381 t->prev = tmp;
382 t->next = tmp->next;
383 if (tmp->next)
384 {
385 tmp->next->prev = t;
386 }
387 tmp->next = t;
388 }
389
390 D2(printk("start_one_shot_timer: %d us done\n", delay_us));
391
392 local_irq_restore(flags);
393} /* start_one_shot_timer */
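A minimal usage sketch of the one-shot API above; the timer struct, callback and
values are illustrative and would be adapted by a real caller. The callback is
invoked from timer_trig_handler() in interrupt context, so it must not sleep, and
the struct fast_timer has to stay valid until the timer expires or is deleted:

  static struct fast_timer example_timer;  /* illustrative */

  static void example_timeout(unsigned long data)
  {
    /* Interrupt context: keep it short and don't sleep. */
    printk("example fast timer expired, data=%lu\n", data);
  }

  /* ... */
  start_one_shot_timer(&example_timer, example_timeout, 0, 200, "example"); /* 200 us */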
394
395static inline int fast_timer_pending (const struct fast_timer * t)
396{
397 return (t->next != NULL) || (t->prev != NULL) || (t == fast_timer_list);
398}
399
400static inline int detach_fast_timer (struct fast_timer *t)
401{
402 struct fast_timer *next, *prev;
403 if (!fast_timer_pending(t))
404 return 0;
405 next = t->next;
406 prev = t->prev;
407 if (next)
408 next->prev = prev;
409 if (prev)
410 prev->next = next;
411 else
412 fast_timer_list = next;
413 fast_timers_deleted++;
414 return 1;
415}
416
417int del_fast_timer(struct fast_timer * t)
418{
419 unsigned long flags;
420 int ret;
421
422 local_irq_save(flags);
423 ret = detach_fast_timer(t);
424 t->next = t->prev = NULL;
425 local_irq_restore(flags);
426 return ret;
427} /* del_fast_timer */
428
429
430/* Interrupt routines or functions called in interrupt context */
431
432/* Timer interrupt handler for trig interrupts */
433
434static irqreturn_t
435timer_trig_interrupt(int irq, void *dev_id, struct pt_regs *regs)
436{
437 reg_timer_r_masked_intr masked_intr;
438
439 /* Check if the timer interrupt is for us (a trig int) */
440 masked_intr = REG_RD(timer, regi_timer, r_masked_intr);
441 if (!masked_intr.trig)
442 return IRQ_NONE;
443 timer_trig_handler();
444 return IRQ_HANDLED;
445}
446
447static void timer_trig_handler(void)
448{
449 reg_timer_rw_ack_intr ack_intr = { 0 };
450 reg_timer_rw_intr_mask intr_mask;
451 reg_timer_rw_trig_cfg trig_cfg = { 0 };
452 struct fast_timer *t;
453 unsigned long flags;
454
455 local_irq_save(flags);
456
457 /* Clear timer trig interrupt */
458 intr_mask = REG_RD(timer, regi_timer, rw_intr_mask);
459 intr_mask.trig = 0;
460 REG_WR(timer, regi_timer, rw_intr_mask, intr_mask);
461
462 /* First stop timer, then ack interrupt */
463 /* Stop timer */
464 trig_cfg.tmr = regk_timer_off;
465 REG_WR(timer, regi_timer, rw_trig_cfg, trig_cfg);
466
467 /* Ack interrupt */
468 ack_intr.trig = 1;
469 REG_WR(timer, regi_timer, rw_ack_intr, ack_intr);
470
471 fast_timer_running = 0;
472 fast_timer_ints++;
473
474 local_irq_restore(flags);
475
476 t = fast_timer_list;
477 while (t)
478 {
479 struct timeval tv;
480
481 /* Has it really expired? */
482 do_gettimeofday_fast(&tv);
483 D1(printk("t: %is %06ius\n", tv.tv_sec, tv.tv_usec));
484
485 if (timeval_cmp(&t->tv_expires, &tv) <= 0)
486 {
487 /* Yes it has expired */
488#ifdef FAST_TIMER_LOG
489 timer_expired_log[fast_timers_expired % NUM_TIMER_STATS] = *t;
490#endif
491 fast_timers_expired++;
492
493 /* Remove this timer before call, since it may reuse the timer */
494 local_irq_save(flags);
495 if (t->prev)
496 {
497 t->prev->next = t->next;
498 }
499 else
500 {
501 fast_timer_list = t->next;
502 }
503 if (t->next)
504 {
505 t->next->prev = t->prev;
506 }
507 t->prev = NULL;
508 t->next = NULL;
509 local_irq_restore(flags);
510
511 if (t->function != NULL)
512 {
513 t->function(t->data);
514 }
515 else
516 {
517        DEBUG_LOG("!timertrig %i function==NULL!\n", fast_timer_ints);
518 }
519 }
520 else
521 {
522      /* Timer is too early, let's set it again using the normal routines */
523 D1(printk(".\n"));
524 }
525
526 local_irq_save(flags);
527 if ((t = fast_timer_list) != NULL)
528 {
529 /* Start next timer.. */
530 long us;
531 struct timeval tv;
532
533 do_gettimeofday_fast(&tv);
534 us = ((t->tv_expires.tv_sec - tv.tv_sec) * 1000000 +
535 t->tv_expires.tv_usec - tv.tv_usec);
536 if (us > 0)
537 {
538 if (!fast_timer_running)
539 {
540#ifdef FAST_TIMER_LOG
541 timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t;
542#endif
543 start_timer_trig(us);
544 }
545 local_irq_restore(flags);
546 break;
547 }
548 else
549 {
550        /* Timer has already expired; handle it, better late than never.
551         * The normal loop handles it.
552         */
553 D1(printk("e! %d\n", us));
554 }
555 }
556 local_irq_restore(flags);
557 }
558
559 if (!t)
560 {
561 D1(printk("ttrig stop!\n"));
562 }
563}
564
565static void wake_up_func(unsigned long data)
566{
567#ifdef DECLARE_WAITQUEUE
568 wait_queue_head_t *sleep_wait_p = (wait_queue_head_t*)data;
569#else
570 struct wait_queue **sleep_wait_p = (struct wait_queue **)data;
571#endif
572 wake_up(sleep_wait_p);
573}
574
575
576/* Useful API */
577
578void schedule_usleep(unsigned long us)
579{
580 struct fast_timer t;
581#ifdef DECLARE_WAITQUEUE
582 wait_queue_head_t sleep_wait;
583 init_waitqueue_head(&sleep_wait);
584 {
585 DECLARE_WAITQUEUE(wait, current);
586#else
587 struct wait_queue *sleep_wait = NULL;
588 struct wait_queue wait = { current, NULL };
589#endif
590
591 D1(printk("schedule_usleep(%d)\n", us));
592 add_wait_queue(&sleep_wait, &wait);
593 set_current_state(TASK_INTERRUPTIBLE);
594 start_one_shot_timer(&t, wake_up_func, (unsigned long)&sleep_wait, us,
595 "usleep");
596 schedule();
597 set_current_state(TASK_RUNNING);
598 remove_wait_queue(&sleep_wait, &wait);
599 D1(printk("done schedule_usleep(%d)\n", us));
600#ifdef DECLARE_WAITQUEUE
601 }
602#endif
603}
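schedule_usleep() above is a blocking microsecond-scale sleep built on the same
one-shot timer; since it sleeps, it may only be called from process context. A
trivial usage sketch (the 500 us value is just an example):

  /* Process context only: block the current task for roughly 500 us. */
  schedule_usleep(500);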
604
605#ifdef CONFIG_PROC_FS
606static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
607#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
608 ,int *eof, void *data_unused
609#else
610 ,int unused
611#endif
612 );
613#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
614static struct proc_dir_entry *fasttimer_proc_entry;
615#else
616static struct proc_dir_entry fasttimer_proc_entry =
617{
618 0, 9, "fasttimer",
619 S_IFREG | S_IRUGO, 1, 0, 0,
620 0, NULL /* ops -- default to array */,
621 &proc_fasttimer_read /* get_info */,
622};
623#endif
624#endif /* CONFIG_PROC_FS */
625
626#ifdef CONFIG_PROC_FS
627
628/* This value is very much based on testing */
629#define BIG_BUF_SIZE (500 + NUM_TIMER_STATS * 300)
630
631static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
632#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
633 ,int *eof, void *data_unused
634#else
635 ,int unused
636#endif
637 )
638{
639 unsigned long flags;
640 int i = 0;
641 int num_to_show;
642 struct timeval tv;
643 struct fast_timer *t, *nextt;
644 static char *bigbuf = NULL;
645 static unsigned long used;
646
647 if (!bigbuf && !(bigbuf = vmalloc(BIG_BUF_SIZE)))
648 {
649    /* Allocation failed; bigbuf is NULL, so don't touch it. */
650    used = 0;
651    return 0;
652 }
653
654 if (!offset || !used)
655 {
656 do_gettimeofday_fast(&tv);
657
658 used = 0;
659 used += sprintf(bigbuf + used, "Fast timers added: %i\n",
660 fast_timers_added);
661 used += sprintf(bigbuf + used, "Fast timers started: %i\n",
662 fast_timers_started);
663 used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n",
664 fast_timer_ints);
665 used += sprintf(bigbuf + used, "Fast timers expired: %i\n",
666 fast_timers_expired);
667 used += sprintf(bigbuf + used, "Fast timers deleted: %i\n",
668 fast_timers_deleted);
669 used += sprintf(bigbuf + used, "Fast timer running: %s\n",
670 fast_timer_running ? "yes" : "no");
671 used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n",
672 (unsigned long)tv.tv_sec,
673 (unsigned long)tv.tv_usec);
674#ifdef FAST_TIMER_SANITY_CHECKS
675 used += sprintf(bigbuf + used, "Sanity failed: %i\n",
676 sanity_failed);
677#endif
678 used += sprintf(bigbuf + used, "\n");
679
680#ifdef DEBUG_LOG_INCLUDED
681 {
682 int end_i = debug_log_cnt;
683 i = 0;
684
685 if (debug_log_cnt_wrapped)
686 {
687 i = debug_log_cnt;
688 }
689
690 while ((i != end_i || (debug_log_cnt_wrapped && !used)) &&
691 used+100 < BIG_BUF_SIZE)
692 {
693 used += sprintf(bigbuf + used, debug_log_string[i],
694 debug_log_value[i]);
695 i = (i+1) % DEBUG_LOG_MAX;
696 }
697 }
698 used += sprintf(bigbuf + used, "\n");
699#endif
700
701 num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started:
702 NUM_TIMER_STATS);
703 used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started);
704 for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++)
705 {
706 int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS;
707
708#if 1 //ndef FAST_TIMER_LOG
709 used += sprintf(bigbuf + used, "div: %i delay: %i"
710 "\n",
711 timer_div_settings[cur],
712 timer_delay_settings[cur]
713 );
714#endif
715#ifdef FAST_TIMER_LOG
716 t = &timer_started_log[cur];
717 used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
718 "d: %6li us data: 0x%08lX"
719 "\n",
720 t->name,
721 (unsigned long)t->tv_set.tv_sec,
722 (unsigned long)t->tv_set.tv_usec,
723 (unsigned long)t->tv_expires.tv_sec,
724 (unsigned long)t->tv_expires.tv_usec,
725 t->delay_us,
726 t->data
727 );
728#endif
729 }
730 used += sprintf(bigbuf + used, "\n");
731
732#ifdef FAST_TIMER_LOG
733 num_to_show = (fast_timers_added < NUM_TIMER_STATS ? fast_timers_added:
734 NUM_TIMER_STATS);
735 used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added);
736 for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++)
737 {
738 t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS];
739 used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
740 "d: %6li us data: 0x%08lX"
741 "\n",
742 t->name,
743 (unsigned long)t->tv_set.tv_sec,
744 (unsigned long)t->tv_set.tv_usec,
745 (unsigned long)t->tv_expires.tv_sec,
746 (unsigned long)t->tv_expires.tv_usec,
747 t->delay_us,
748 t->data
749 );
750 }
751 used += sprintf(bigbuf + used, "\n");
752
753 num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? fast_timers_expired:
754 NUM_TIMER_STATS);
755 used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired);
756 for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++)
757 {
758 t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS];
759 used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
760 "d: %6li us data: 0x%08lX"
761 "\n",
762 t->name,
763 (unsigned long)t->tv_set.tv_sec,
764 (unsigned long)t->tv_set.tv_usec,
765 (unsigned long)t->tv_expires.tv_sec,
766 (unsigned long)t->tv_expires.tv_usec,
767 t->delay_us,
768 t->data
769 );
770 }
771 used += sprintf(bigbuf + used, "\n");
772#endif
773
774 used += sprintf(bigbuf + used, "Active timers:\n");
775 local_irq_save(flags);
776
777 t = fast_timer_list;
778 while (t != NULL && (used+100 < BIG_BUF_SIZE))
779 {
780 nextt = t->next;
781 local_irq_restore(flags);
782 used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
783 "d: %6li us data: 0x%08lX"
784/* " func: 0x%08lX" */
785 "\n",
786 t->name,
787 (unsigned long)t->tv_set.tv_sec,
788 (unsigned long)t->tv_set.tv_usec,
789 (unsigned long)t->tv_expires.tv_sec,
790 (unsigned long)t->tv_expires.tv_usec,
791 t->delay_us,
792 t->data
793/* , t->function */
794 );
795 local_irq_disable();
796 if (t->next != nextt)
797 {
798 printk("timer removed!\n");
799 }
800 t = nextt;
801 }
802 local_irq_restore(flags);
803 }
804
805 if (used - offset < len)
806 {
807 len = used - offset;
808 }
809
810 memcpy(buf, bigbuf + offset, len);
811 *start = buf;
812#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
813 *eof = 1;
814#endif
815
816 return len;
817}
818#endif /* PROC_FS */
819
820#ifdef FAST_TIMER_TEST
821static volatile unsigned long i = 0;
822static volatile int num_test_timeout = 0;
823static struct fast_timer tr[10];
824static int exp_num[10];
825
826static struct timeval tv_exp[100];
827
828static void test_timeout(unsigned long data)
829{
830 do_gettimeofday_fast(&tv_exp[data]);
831 exp_num[data] = num_test_timeout;
832
833 num_test_timeout++;
834}
835
836static void test_timeout1(unsigned long data)
837{
838 do_gettimeofday_fast(&tv_exp[data]);
839 exp_num[data] = num_test_timeout;
840 if (data < 7)
841 {
842 start_one_shot_timer(&tr[i], test_timeout1, i, 1000, "timeout1");
843 i++;
844 }
845 num_test_timeout++;
846}
847
848DP(
849static char buf0[2000];
850static char buf1[2000];
851static char buf2[2000];
852static char buf3[2000];
853static char buf4[2000];
854);
855
856static char buf5[6000];
857static int j_u[1000];
858
859static void fast_timer_test(void)
860{
861 int prev_num;
862 int j;
863
864 struct timeval tv, tv0, tv1, tv2;
865
866 printk("fast_timer_test() start\n");
867 do_gettimeofday_fast(&tv);
868
869 for (j = 0; j < 1000; j++)
870 {
871 j_u[j] = GET_JIFFIES_USEC();
872 }
873 for (j = 0; j < 100; j++)
874 {
875 do_gettimeofday_fast(&tv_exp[j]);
876 }
877 printk("fast_timer_test() %is %06i\n", tv.tv_sec, tv.tv_usec);
878
879 for (j = 0; j < 1000; j++)
880 {
881 printk("%i %i %i %i %i\n",j_u[j], j_u[j+1], j_u[j+2], j_u[j+3], j_u[j+4]);
882 j += 4;
883 }
884 for (j = 0; j < 100; j++)
885 {
886 printk("%i.%i %i.%i %i.%i %i.%i %i.%i\n",
887 tv_exp[j].tv_sec,tv_exp[j].tv_usec,
888 tv_exp[j+1].tv_sec,tv_exp[j+1].tv_usec,
889 tv_exp[j+2].tv_sec,tv_exp[j+2].tv_usec,
890 tv_exp[j+3].tv_sec,tv_exp[j+3].tv_usec,
891 tv_exp[j+4].tv_sec,tv_exp[j+4].tv_usec);
892 j += 4;
893 }
894 do_gettimeofday_fast(&tv0);
895 start_one_shot_timer(&tr[i], test_timeout, i, 50000, "test0");
896 DP(proc_fasttimer_read(buf0, NULL, 0, 0, 0));
897 i++;
898 start_one_shot_timer(&tr[i], test_timeout, i, 70000, "test1");
899 DP(proc_fasttimer_read(buf1, NULL, 0, 0, 0));
900 i++;
901 start_one_shot_timer(&tr[i], test_timeout, i, 40000, "test2");
902 DP(proc_fasttimer_read(buf2, NULL, 0, 0, 0));
903 i++;
904 start_one_shot_timer(&tr[i], test_timeout, i, 60000, "test3");
905 DP(proc_fasttimer_read(buf3, NULL, 0, 0, 0));
906 i++;
907 start_one_shot_timer(&tr[i], test_timeout1, i, 55000, "test4xx");
908 DP(proc_fasttimer_read(buf4, NULL, 0, 0, 0));
909 i++;
910 do_gettimeofday_fast(&tv1);
911
912 proc_fasttimer_read(buf5, NULL, 0, 0, 0);
913
914 prev_num = num_test_timeout;
915 while (num_test_timeout < i)
916 {
917 if (num_test_timeout != prev_num)
918 {
919 prev_num = num_test_timeout;
920 }
921 }
922 do_gettimeofday_fast(&tv2);
923 printk("Timers started %is %06i\n", tv0.tv_sec, tv0.tv_usec);
924 printk("Timers started at %is %06i\n", tv1.tv_sec, tv1.tv_usec);
925 printk("Timers done %is %06i\n", tv2.tv_sec, tv2.tv_usec);
926 DP(printk("buf0:\n");
927 printk(buf0);
928 printk("buf1:\n");
929 printk(buf1);
930 printk("buf2:\n");
931 printk(buf2);
932 printk("buf3:\n");
933 printk(buf3);
934 printk("buf4:\n");
935 printk(buf4);
936 );
937 printk("buf5:\n");
938 printk(buf5);
939
940 printk("timers set:\n");
941 for(j = 0; j<i; j++)
942 {
943 struct fast_timer *t = &tr[j];
944 printk("%-10s set: %6is %06ius exp: %6is %06ius "
945 "data: 0x%08X func: 0x%08X\n",
946 t->name,
947 t->tv_set.tv_sec,
948 t->tv_set.tv_usec,
949 t->tv_expires.tv_sec,
950 t->tv_expires.tv_usec,
951 t->data,
952 t->function
953 );
954
955 printk(" del: %6ius did exp: %6is %06ius as #%i error: %6li\n",
956 t->delay_us,
957 tv_exp[j].tv_sec,
958 tv_exp[j].tv_usec,
959 exp_num[j],
960 (tv_exp[j].tv_sec - t->tv_expires.tv_sec)*1000000 + tv_exp[j].tv_usec - t->tv_expires.tv_usec);
961 }
962 proc_fasttimer_read(buf5, NULL, 0, 0, 0);
963 printk("buf5 after all done:\n");
964 printk(buf5);
965 printk("fast_timer_test() done\n");
966}
967#endif
968
969
970void fast_timer_init(void)
971{
972  /* For some reason, request_irq() hangs when called from time_init() */
973 if (!fast_timer_is_init)
974 {
975 printk("fast_timer_init()\n");
976
977#ifdef CONFIG_PROC_FS
978#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
979 if ((fasttimer_proc_entry = create_proc_entry( "fasttimer", 0, 0 )))
980 fasttimer_proc_entry->read_proc = proc_fasttimer_read;
981#else
982 proc_register_dynamic(&proc_root, &fasttimer_proc_entry);
983#endif
984#endif /* PROC_FS */
985 if(request_irq(TIMER_INTR_VECT, timer_trig_interrupt, SA_INTERRUPT,
986 "fast timer int", NULL))
987 {
988 printk("err: timer1 irq\n");
989 }
990 fast_timer_is_init = 1;
991#ifdef FAST_TIMER_TEST
992 printk("do test\n");
993 fast_timer_test();
994#endif
995 }
996}
diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S
new file mode 100644
index 000000000000..3cfe57dc391d
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/head.S
@@ -0,0 +1,448 @@
1/*
2 * CRISv32 kernel startup code.
3 *
4 * Copyright (C) 2003, Axis Communications AB
5 */
6
7#include <linux/config.h>
8
9#define ASSEMBLER_MACROS_ONLY
10
11/*
 12 * The macros found in mmu_defs_asm.h use the ## concatenation operator, so
13 * -traditional must not be used when assembling this file.
14 */
15#include <asm/arch/hwregs/reg_rdwr.h>
16#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
17#include <asm/arch/hwregs/asm/reg_map_asm.h>
18#include <asm/arch/hwregs/asm/config_defs_asm.h>
19#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>
20
21#define CRAMFS_MAGIC 0x28cd3d45
22#define RAM_INIT_MAGIC 0x56902387
23#define COMMAND_LINE_MAGIC 0x87109563
24
25 ;; NOTE: R8 and R9 carry information from the decompressor (if the
 26	;; NOTE: R8 and R9 carry information from the decompressor (if the
 27	;; kernel was compressed). They must not be clobbered by the code below
28
29 ;; Exported symbols.
30 .global etrax_irv
31 .global romfs_start
32 .global romfs_length
33 .global romfs_in_flash
34 .global swapper_pg_dir
35 .global crisv32_nand_boot
36 .global crisv32_nand_cramfs_offset
37
38 ;; Dummy section to make it bootable with current VCS simulator
39#ifdef CONFIG_ETRAXFS_SIM
40 .section ".boot", "ax"
41 ba tstart
42 nop
43#endif
44
45 .text
46tstart:
47 ;; This is the entry point of the kernel. The CPU is currently in
48 ;; supervisor mode.
49 ;;
50 ;; 0x00000000 if flash.
51 ;; 0x40004000 if DRAM.
52 ;;
53 di
54
55 ;; Start clocks for used blocks.
56 move.d REG_ADDR(config, regi_config, rw_clk_ctrl), $r1
57 move.d [$r1], $r0
58 or.d REG_STATE(config, rw_clk_ctrl, cpu, yes) | \
59 REG_STATE(config, rw_clk_ctrl, bif, yes) | \
60 REG_STATE(config, rw_clk_ctrl, fix_io, yes), $r0
61 move.d $r0, [$r1]
62
63 ;; Set up waitstates etc
64 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp1_cfg), $r0
65 move.d CONFIG_ETRAX_MEM_GRP1_CONFIG, $r1
66 move.d $r1, [$r0]
67 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp2_cfg), $r0
68 move.d CONFIG_ETRAX_MEM_GRP2_CONFIG, $r1
69 move.d $r1, [$r0]
70 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp3_cfg), $r0
71 move.d CONFIG_ETRAX_MEM_GRP3_CONFIG, $r1
72 move.d $r1, [$r0]
73 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp4_cfg), $r0
74 move.d CONFIG_ETRAX_MEM_GRP4_CONFIG, $r1
75 move.d $r1, [$r0]
76
77#ifdef CONFIG_ETRAXFS_SIM
78 ;; Set up minimal flash waitstates
79 move.d 0, $r10
80 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp1_cfg), $r11
81 move.d $r10, [$r11]
82#endif
83
84 ;; Setup and enable the MMU. Use same configuration for both the data
85 ;; and the instruction MMU.
86 ;;
 87	;; Note: 3 cycles are needed for a bank-select to take effect. Further,
88 ;; bank 1 is the instruction MMU, bank 2 is the data MMU.
89#ifndef CONFIG_ETRAXFS_SIM
90 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
91 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \
92 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
93#else
94 ;; Map the virtual DRAM to the RW eprom area at address 0.
95 ;; Also map 0xa for the hook calls,
96 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
97 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0) \
98 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) \
99 | REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa), $r0
100#endif
101
102 ;; Temporary map of 0x40 -> 0x40 and 0x00 -> 0x00.
103 move.d REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 4) \
104 | REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0), $r1
105
106 ;; Enable certain page protections and setup linear mapping
107 ;; for f,e,c,b,4,0.
108#ifndef CONFIG_ETRAXFS_SIM
109 move.d REG_STATE(mmu, rw_mm_cfg, we, on) \
110 | REG_STATE(mmu, rw_mm_cfg, acc, on) \
111 | REG_STATE(mmu, rw_mm_cfg, ex, on) \
112 | REG_STATE(mmu, rw_mm_cfg, inv, on) \
113 | REG_STATE(mmu, rw_mm_cfg, seg_f, linear) \
114 | REG_STATE(mmu, rw_mm_cfg, seg_e, linear) \
115 | REG_STATE(mmu, rw_mm_cfg, seg_d, page) \
116 | REG_STATE(mmu, rw_mm_cfg, seg_c, linear) \
117 | REG_STATE(mmu, rw_mm_cfg, seg_b, linear) \
118 | REG_STATE(mmu, rw_mm_cfg, seg_a, page) \
119 | REG_STATE(mmu, rw_mm_cfg, seg_9, page) \
120 | REG_STATE(mmu, rw_mm_cfg, seg_8, page) \
121 | REG_STATE(mmu, rw_mm_cfg, seg_7, page) \
122 | REG_STATE(mmu, rw_mm_cfg, seg_6, page) \
123 | REG_STATE(mmu, rw_mm_cfg, seg_5, page) \
124 | REG_STATE(mmu, rw_mm_cfg, seg_4, linear) \
125 | REG_STATE(mmu, rw_mm_cfg, seg_3, page) \
126 | REG_STATE(mmu, rw_mm_cfg, seg_2, page) \
127 | REG_STATE(mmu, rw_mm_cfg, seg_1, page) \
128 | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
129#else
130 move.d REG_STATE(mmu, rw_mm_cfg, we, on) \
131 | REG_STATE(mmu, rw_mm_cfg, acc, on) \
132 | REG_STATE(mmu, rw_mm_cfg, ex, on) \
133 | REG_STATE(mmu, rw_mm_cfg, inv, on) \
134 | REG_STATE(mmu, rw_mm_cfg, seg_f, linear) \
135 | REG_STATE(mmu, rw_mm_cfg, seg_e, linear) \
136 | REG_STATE(mmu, rw_mm_cfg, seg_d, page) \
137 | REG_STATE(mmu, rw_mm_cfg, seg_c, linear) \
138 | REG_STATE(mmu, rw_mm_cfg, seg_b, linear) \
139 | REG_STATE(mmu, rw_mm_cfg, seg_a, linear) \
140 | REG_STATE(mmu, rw_mm_cfg, seg_9, page) \
141 | REG_STATE(mmu, rw_mm_cfg, seg_8, page) \
142 | REG_STATE(mmu, rw_mm_cfg, seg_7, page) \
143 | REG_STATE(mmu, rw_mm_cfg, seg_6, page) \
144 | REG_STATE(mmu, rw_mm_cfg, seg_5, page) \
145 | REG_STATE(mmu, rw_mm_cfg, seg_4, linear) \
146 | REG_STATE(mmu, rw_mm_cfg, seg_3, page) \
147 | REG_STATE(mmu, rw_mm_cfg, seg_2, page) \
148 | REG_STATE(mmu, rw_mm_cfg, seg_1, page) \
149 | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
150#endif
151
152 ;; Update instruction MMU.
153 move 1, $srs
154 nop
155 nop
156 nop
157 move $r0, $s2 ; kbase_hi.
158 move $r1, $s1 ; kbase_lo.
159 move $r2, $s0 ; mm_cfg, virtual memory configuration.
160
161 ;; Update data MMU.
162 move 2, $srs
163 nop
164 nop
165 nop
166 move $r0, $s2 ; kbase_hi.
167 move $r1, $s1 ; kbase_lo
168 move $r2, $s0 ; mm_cfg, virtual memory configuration.
169
170 ;; Enable data and instruction MMU.
171 move 0, $srs
172 moveq 0xf, $r0 ; IMMU, DMMU, DCache, Icache on
173 nop
174 nop
175 nop
176 move $r0, $s0
177 nop
178 nop
179 nop
180
181#ifdef CONFIG_SMP
182 ;; Read CPU ID
183 move 0, $srs
184 nop
185 nop
186 nop
187 move $s10, $r0
188 cmpq 0, $r0
189 beq master_cpu
190 nop
191slave_cpu:
192 ; A slave waits for cpu_now_booting to be equal to CPU ID.
193 move.d cpu_now_booting, $r1
194slave_wait:
195 cmp.d [$r1], $r0
196 bne slave_wait
197 nop
198 ; Time to boot-up. Get stack location provided by master CPU.
199 move.d smp_init_current_idle_thread, $r1
200 move.d [$r1], $sp
201 add.d 8192, $sp
202 move.d ebp_start, $r0 ; Defined in linker-script.
203 move $r0, $ebp
204 jsr smp_callin
205 nop
206master_cpu:
207#endif
208#ifndef CONFIG_ETRAXFS_SIM
209 ;; Check if starting from DRAM or flash.
210 lapcq ., $r0
211 and.d 0x7fffffff, $r0 ; Mask off the non-cache bit.
212 cmp.d 0x10000, $r0 ; Arbitrary, something above this code.
213 blo _inflash0
214 nop
215#endif
216
217 jump _inram ; Jump to cached RAM.
218 nop
219
220 ;; Jumpgate.
221_inflash0:
222 jump _inflash
223 nop
224
225 ;; Put the following in a section so that storage for it can be
226 ;; reclaimed after init is finished.
227 .section ".init.text", "ax"
228
229_inflash:
230
231 ;; Initialize DRAM.
232 cmp.d RAM_INIT_MAGIC, $r8 ; Already initialized?
233 beq _dram_initialized
234 nop
235
236#include "../lib/dram_init.S"
237
238_dram_initialized:
239	;; Copy the text and data section to DRAM. This depends on the
240	;; variables used below being correctly set up by the linker script.
241 ;; The calculated value stored in R4 is used below.
242 moveq 0, $r0 ; Source.
243 move.d text_start, $r1 ; Destination.
244 move.d __vmlinux_end, $r2
245 move.d $r2, $r4
246 sub.d $r1, $r4
2471: move.w [$r0+], $r3
248 move.w $r3, [$r1+]
249 cmp.d $r2, $r1
250 blo 1b
251 nop
252
253 ;; Keep CRAMFS in flash.
254 moveq 0, $r0
255 move.d romfs_length, $r1
256 move.d $r0, [$r1]
257 move.d [$r4], $r0 ; cramfs_super.magic
258 cmp.d CRAMFS_MAGIC, $r0
259 bne 1f
260 nop
261
262 addoq +4, $r4, $acr
263 move.d [$acr], $r0
264 move.d romfs_length, $r1
265 move.d $r0, [$r1]
266 add.d 0xf0000000, $r4 ; Add cached flash start in virtual memory.
267 move.d romfs_start, $r1
268 move.d $r4, [$r1]
2691: moveq 1, $r0
270 move.d romfs_in_flash, $r1
271 move.d $r0, [$r1]
272
273 jump _start_it ; Jump to cached code.
274 nop
275
276_inram:
277 ;; Check if booting from NAND flash (in that case we just remember the offset
278 ;; into the flash where cramfs should be).
279 move.d REG_ADDR(config, regi_config, r_bootsel), $r0
280 move.d [$r0], $r0
281 and.d REG_MASK(config, r_bootsel, boot_mode), $r0
282 cmp.d REG_STATE(config, r_bootsel, boot_mode, nand), $r0
283 bne move_cramfs
284 moveq 1,$r0
285 move.d crisv32_nand_boot, $r1
286 move.d $r0, [$r1]
287 move.d crisv32_nand_cramfs_offset, $r1
288 move.d $r9, [$r1]
289 moveq 1, $r0
290 move.d romfs_in_flash, $r1
291 move.d $r0, [$r1]
292 jump _start_it
293 nop
294
295move_cramfs:
296 ;; Move the cramfs after BSS.
297 moveq 0, $r0
298 move.d romfs_length, $r1
299 move.d $r0, [$r1]
300
301#ifndef CONFIG_ETRAXFS_SIM
302 ;; The kernel could have been unpacked to DRAM by the loader, but
303	;; the cramfs image could still be in the flash immediately
304	;; following the compressed kernel image. The loader passes the address
305	;; of the byte succeeding the last compressed byte in the flash in
306 ;; register R9 when starting the kernel.
307 cmp.d 0x0ffffff8, $r9
308 bhs _no_romfs_in_flash ; R9 points outside the flash area.
309 nop
310#else
311 ba _no_romfs_in_flash
312 nop
313#endif
314 move.d [$r9], $r0 ; cramfs_super.magic
315 cmp.d CRAMFS_MAGIC, $r0
316 bne _no_romfs_in_flash
317 nop
318
319 addoq +4, $r9, $acr
320 move.d [$acr], $r0
321 move.d romfs_length, $r1
322 move.d $r0, [$r1]
323 add.d 0xf0000000, $r9 ; Add cached flash start in virtual memory.
324 move.d romfs_start, $r1
325 move.d $r9, [$r1]
326 moveq 1, $r0
327 move.d romfs_in_flash, $r1
328 move.d $r0, [$r1]
329
330 jump _start_it ; Jump to cached code.
331 nop
332
333_no_romfs_in_flash:
334 ;; Look for cramfs.
335#ifndef CONFIG_ETRAXFS_SIM
336 move.d __vmlinux_end, $r0
337#else
338 move.d __end, $r0
339#endif
340 move.d [$r0], $r1
341 cmp.d CRAMFS_MAGIC, $r1
342 bne 2f
343 nop
344
345 addoq +4, $r0, $acr
346 move.d [$acr], $r2
347 move.d _end, $r1
348 move.d romfs_start, $r3
349 move.d $r1, [$r3]
350 move.d romfs_length, $r3
351 move.d $r2, [$r3]
352
353#ifndef CONFIG_ETRAXFS_SIM
354 add.d $r2, $r0
355 add.d $r2, $r1
356
357 lsrq 1, $r2 ; Size is in bytes, we copy words.
358 addq 1, $r2
3591:
360 move.w [$r0], $r3
361 move.w $r3, [$r1]
362 subq 2, $r0
363 subq 2, $r1
364 subq 1, $r2
365 bne 1b
366 nop
367#endif
368
3692:
370 moveq 0, $r0
371 move.d romfs_in_flash, $r1
372 move.d $r0, [$r1]
373
374 jump _start_it ; Jump to cached code.
375 nop
376
377_start_it:
378
379 ;; Check if kernel command line is supplied
380 cmp.d COMMAND_LINE_MAGIC, $r10
381 bne no_command_line
382 nop
383
384 move.d 256, $r13
385 move.d cris_command_line, $r10
386 or.d 0x80000000, $r11 ; Make it virtual
3871:
388 move.b [$r11+], $r12
389 move.b $r12, [$r10+]
390 subq 1, $r13
391 bne 1b
392 nop
393
394no_command_line:
395
396	;; The kernel stack contains a task structure for each task. Thus
397 ;; the initial kernel stack is in the same page as the init_task,
398 ;; but starts at the top of the page, i.e. + 8192 bytes.
399 move.d init_thread_union + 8192, $sp
400 move.d ebp_start, $r0 ; Defined in linker-script.
401 move $r0, $ebp
402 move.d etrax_irv, $r1 ; Set the exception base register and pointer.
403 move.d $r0, [$r1]
404
405#ifndef CONFIG_ETRAXFS_SIM
406	;; Clear the BSS region from __bss_start to _end.
407 move.d __bss_start, $r0
408 move.d _end, $r1
4091: clear.d [$r0+]
410 cmp.d $r1, $r0
411 blo 1b
412 nop
413#endif
414
415#ifdef CONFIG_ETRAXFS_SIM
416 /* Set the watchdog timeout to something big. Will be removed when */
417 /* watchdog can be disabled with command line option */
418 move.d 0x7fffffff, $r10
419 jsr CPU_WATCHDOG_TIMEOUT
420 nop
421#endif
422
423 ; Initialize registers to increase determinism
424 move.d __bss_start, $r0
425 movem [$r0], $r13
426
427 jump start_kernel ; Jump to start_kernel() in init/main.c.
428 nop
429
430 .data
431etrax_irv:
432 .dword 0
433romfs_start:
434 .dword 0
435romfs_length:
436 .dword 0
437romfs_in_flash:
438 .dword 0
439crisv32_nand_boot:
440 .dword 0
441crisv32_nand_cramfs_offset:
442 .dword 0
443
444swapper_pg_dir = 0xc0002000
445
446 .section ".init.data", "aw"
447
448#include "../lib/hw_settings.S"
diff --git a/arch/cris/arch-v32/kernel/io.c b/arch/cris/arch-v32/kernel/io.c
new file mode 100644
index 000000000000..6bc9f263c3d6
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/io.c
@@ -0,0 +1,154 @@
1/*
2 * Helper functions for I/O pins.
3 *
4 * Copyright (c) 2004 Axis Communications AB.
5 */
6
7#include <linux/config.h>
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <asm/io.h>
16#include <asm/arch/pinmux.h>
17#include <asm/arch/hwregs/gio_defs.h>
18
19struct crisv32_ioport crisv32_ioports[] =
20{
21 {
22 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pa_oe),
23 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pa_dout),
24 (unsigned long*)REG_ADDR(gio, regi_gio, r_pa_din),
25 8
26 },
27 {
28 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pb_oe),
29 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pb_dout),
30 (unsigned long*)REG_ADDR(gio, regi_gio, r_pb_din),
31 18
32 },
33 {
34 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pc_oe),
35 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pc_dout),
36 (unsigned long*)REG_ADDR(gio, regi_gio, r_pc_din),
37 18
38 },
39 {
40 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pd_oe),
41 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pd_dout),
42 (unsigned long*)REG_ADDR(gio, regi_gio, r_pd_din),
43 18
44 },
45 {
46 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pe_oe),
47 (unsigned long*)REG_ADDR(gio, regi_gio, rw_pe_dout),
48 (unsigned long*)REG_ADDR(gio, regi_gio, r_pe_din),
49 18
50 }
51};
52
53#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport)
54
55struct crisv32_iopin crisv32_led1_green;
56struct crisv32_iopin crisv32_led1_red;
57struct crisv32_iopin crisv32_led2_green;
58struct crisv32_iopin crisv32_led2_red;
59struct crisv32_iopin crisv32_led3_green;
60struct crisv32_iopin crisv32_led3_red;
61
 62/* Dummy port used when green LED and red LED are on the same bit */
63static unsigned long io_dummy;
64static struct crisv32_ioport dummy_port =
65{
66 &io_dummy,
67 &io_dummy,
68 &io_dummy,
69 18
70};
71static struct crisv32_iopin dummy_led =
72{
73 &dummy_port,
74 0
75};
76
77static int __init crisv32_io_init(void)
78{
79 int ret = 0;
80 /* Initialize LEDs */
81 ret += crisv32_io_get_name(&crisv32_led1_green, CONFIG_ETRAX_LED1G);
82 ret += crisv32_io_get_name(&crisv32_led1_red, CONFIG_ETRAX_LED1R);
83 ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_LED2G);
84 ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_LED2R);
85 ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_LED3G);
86 ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_LED3R);
87 crisv32_io_set_dir(&crisv32_led1_green, crisv32_io_dir_out);
88 crisv32_io_set_dir(&crisv32_led1_red, crisv32_io_dir_out);
89 crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
90 crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
91 crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
92 crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
93
94 if (!strcmp(CONFIG_ETRAX_LED1G, CONFIG_ETRAX_LED1R))
95 crisv32_led1_red = dummy_led;
96 if (!strcmp(CONFIG_ETRAX_LED2G, CONFIG_ETRAX_LED2R))
97 crisv32_led2_red = dummy_led;
98
99 return ret;
100}
101
102__initcall(crisv32_io_init);
103
104int crisv32_io_get(struct crisv32_iopin* iopin,
105 unsigned int port, unsigned int pin)
106{
107	if (port >= NBR_OF_PORTS)
108		return -EINVAL;
109	if (pin > crisv32_ioports[port].pin_count)
110 return -EINVAL;
111
112 iopin->bit = 1 << pin;
113 iopin->port = &crisv32_ioports[port];
114
115 if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
116 return -EIO;
117
118 return 0;
119}
120
121int crisv32_io_get_name(struct crisv32_iopin* iopin,
122 char* name)
123{
124 int port;
125 int pin;
126
127 if (toupper(*name) == 'P')
128 name++;
129
130 if (toupper(*name) < 'A' || toupper(*name) > 'E')
131 return -EINVAL;
132
133 port = toupper(*name) - 'A';
134 name++;
135 pin = simple_strtoul(name, NULL, 10);
136
137 if (pin < 0 || pin > crisv32_ioports[port].pin_count)
138 return -EINVAL;
139
140 iopin->bit = 1 << pin;
141 iopin->port = &crisv32_ioports[port];
142
143 if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
144 return -EIO;
145
146 return 0;
147}
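A sketch of how a driver might claim a pin by name and configure it as an output,
using only helpers already referenced in this file (crisv32_io_get_name() and
crisv32_io_set_dir()); the pin name "pb3" and the function name are illustrative:

  static struct crisv32_iopin example_pin;  /* illustrative */

  static int __init example_pin_setup(void)
  {
    if (crisv32_io_get_name(&example_pin, "pb3"))  /* port B, pin 3 */
      return -EIO;
    crisv32_io_set_dir(&example_pin, crisv32_io_dir_out);
    return 0;
  }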
148
149#ifdef CONFIG_PCI
150/* PCI I/O access stuff */
151struct cris_io_operations* cris_iops = NULL;
152EXPORT_SYMBOL(cris_iops);
153#endif
154
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
new file mode 100644
index 000000000000..c78cc2685133
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -0,0 +1,413 @@
1/*
2 * Copyright (C) 2003, Axis Communications AB.
3 */
4
5#include <asm/irq.h>
6#include <linux/irq.h>
7#include <linux/interrupt.h>
8#include <linux/smp.h>
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/init.h>
13#include <linux/profile.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
16#include <linux/threads.h>
17#include <linux/spinlock.h>
18#include <linux/kernel_stat.h>
19#include <asm/arch/hwregs/reg_map.h>
20#include <asm/arch/hwregs/reg_rdwr.h>
21#include <asm/arch/hwregs/intr_vect.h>
22#include <asm/arch/hwregs/intr_vect_defs.h>
23
24#define CPU_FIXED -1
25
26/* IRQ masks (refer to comment for crisv32_do_multiple) */
27#define TIMER_MASK (1 << (TIMER_INTR_VECT - FIRST_IRQ))
28#ifdef CONFIG_ETRAX_KGDB
29#if defined(CONFIG_ETRAX_KGDB_PORT0)
30#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
31#elif defined(CONFIG_ETRAX_KGDB_PORT1)
32#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
 33#elif defined(CONFIG_ETRAX_KGDB_PORT2)
34#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
35#elif defined(CONFIG_ETRAX_KGDB_PORT3)
36#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
37#endif
38#endif
39
40DEFINE_SPINLOCK(irq_lock);
41
42struct cris_irq_allocation
43{
44 int cpu; /* The CPU to which the IRQ is currently allocated. */
45 cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
46};
47
48struct cris_irq_allocation irq_allocations[NR_IRQS] =
49 {[0 ... NR_IRQS - 1] = {0, CPU_MASK_ALL}};
50
51static unsigned long irq_regs[NR_CPUS] =
52{
53 regi_irq,
54#ifdef CONFIG_SMP
55 regi_irq2,
56#endif
57};
58
59unsigned long cpu_irq_counters[NR_CPUS];
60unsigned long irq_counters[NR_REAL_IRQS];
61
62/* From irq.c. */
63extern void weird_irq(void);
64
65/* From entry.S. */
66extern void system_call(void);
67extern void nmi_interrupt(void);
68extern void multiple_interrupt(void);
69extern void gdb_handle_exception(void);
70extern void i_mmu_refill(void);
71extern void i_mmu_invalid(void);
72extern void i_mmu_access(void);
73extern void i_mmu_execute(void);
74extern void d_mmu_refill(void);
75extern void d_mmu_invalid(void);
76extern void d_mmu_access(void);
77extern void d_mmu_write(void);
78
79/* From kgdb.c. */
80extern void kgdb_init(void);
81extern void breakpoint(void);
82
83/*
84 * Build the IRQ handler stubs using macros from irq.h. First argument is the
85 * IRQ number, the second argument is the corresponding bit in
86 * intr_rw_vect_mask found in asm/arch/hwregs/intr_vect_defs.h.
87 */
88BUILD_IRQ(0x31, (1 << 0)) /* memarb */
89BUILD_IRQ(0x32, (1 << 1)) /* gen_io */
90BUILD_IRQ(0x33, (1 << 2)) /* iop0 */
91BUILD_IRQ(0x34, (1 << 3)) /* iop1 */
92BUILD_IRQ(0x35, (1 << 4)) /* iop2 */
93BUILD_IRQ(0x36, (1 << 5)) /* iop3 */
94BUILD_IRQ(0x37, (1 << 6)) /* dma0 */
95BUILD_IRQ(0x38, (1 << 7)) /* dma1 */
96BUILD_IRQ(0x39, (1 << 8)) /* dma2 */
97BUILD_IRQ(0x3a, (1 << 9)) /* dma3 */
98BUILD_IRQ(0x3b, (1 << 10)) /* dma4 */
99BUILD_IRQ(0x3c, (1 << 11)) /* dma5 */
100BUILD_IRQ(0x3d, (1 << 12)) /* dma6 */
101BUILD_IRQ(0x3e, (1 << 13)) /* dma7 */
102BUILD_IRQ(0x3f, (1 << 14)) /* dma8 */
103BUILD_IRQ(0x40, (1 << 15)) /* dma9 */
104BUILD_IRQ(0x41, (1 << 16)) /* ata */
105BUILD_IRQ(0x42, (1 << 17)) /* sser0 */
106BUILD_IRQ(0x43, (1 << 18)) /* sser1 */
107BUILD_IRQ(0x44, (1 << 19)) /* ser0 */
108BUILD_IRQ(0x45, (1 << 20)) /* ser1 */
109BUILD_IRQ(0x46, (1 << 21)) /* ser2 */
110BUILD_IRQ(0x47, (1 << 22)) /* ser3 */
111BUILD_IRQ(0x48, (1 << 23))
112BUILD_IRQ(0x49, (1 << 24)) /* eth0 */
113BUILD_IRQ(0x4a, (1 << 25)) /* eth1 */
114BUILD_TIMER_IRQ(0x4b, (1 << 26))/* timer */
115BUILD_IRQ(0x4c, (1 << 27)) /* bif_arb */
116BUILD_IRQ(0x4d, (1 << 28)) /* bif_dma */
117BUILD_IRQ(0x4e, (1 << 29)) /* ext */
118BUILD_IRQ(0x4f, (1 << 29)) /* ipi */
119
120/* Pointers to the low-level handlers. */
121static void (*interrupt[NR_IRQS])(void) = {
122 IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
123 IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
124 IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
125 IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
126 IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
127 IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
128 IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
129 IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
130 IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
131 IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
132 IRQ0x4f_interrupt
133};
134
135void
136block_irq(int irq, int cpu)
137{
138 int intr_mask;
139 unsigned long flags;
140
141 spin_lock_irqsave(&irq_lock, flags);
142 intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
143
144 /* Remember; 1 let thru, 0 block. */
145 intr_mask &= ~(1 << (irq - FIRST_IRQ));
146
147 REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask);
148 spin_unlock_irqrestore(&irq_lock, flags);
149}
150
151void
152unblock_irq(int irq, int cpu)
153{
154 int intr_mask;
155 unsigned long flags;
156
157 spin_lock_irqsave(&irq_lock, flags);
158 intr_mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
159
160 /* Remember; 1 let thru, 0 block. */
161 intr_mask |= (1 << (irq - FIRST_IRQ));
162
163 REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, intr_mask);
164 spin_unlock_irqrestore(&irq_lock, flags);
165}
166
167/* Find out which CPU the irq should be allocated to. */
168static int irq_cpu(int irq)
169{
170 int cpu;
171 unsigned long flags;
172
173 spin_lock_irqsave(&irq_lock, flags);
174 cpu = irq_allocations[irq - FIRST_IRQ].cpu;
175
176 /* Fixed interrupts stay on the local CPU. */
177 if (cpu == CPU_FIXED)
178 {
179 spin_unlock_irqrestore(&irq_lock, flags);
180 return smp_processor_id();
181 }
182
183
184 /* Let the interrupt stay if possible */
185 if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask))
186 goto out;
187
188 /* IRQ must be moved to another CPU. */
189 cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask);
190 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
191out:
192 spin_unlock_irqrestore(&irq_lock, flags);
193 return cpu;
194}
195
196void
197mask_irq(int irq)
198{
199 int cpu;
200
201 for (cpu = 0; cpu < NR_CPUS; cpu++)
202 block_irq(irq, cpu);
203}
204
205void
206unmask_irq(int irq)
207{
208 unblock_irq(irq, irq_cpu(irq));
209}
210
211
212static unsigned int startup_crisv32_irq(unsigned int irq)
213{
214 unmask_irq(irq);
215 return 0;
216}
217
218static void shutdown_crisv32_irq(unsigned int irq)
219{
220 mask_irq(irq);
221}
222
223static void enable_crisv32_irq(unsigned int irq)
224{
225 unmask_irq(irq);
226}
227
228static void disable_crisv32_irq(unsigned int irq)
229{
230 mask_irq(irq);
231}
232
233static void ack_crisv32_irq(unsigned int irq)
234{
235}
236
237static void end_crisv32_irq(unsigned int irq)
238{
239}
240
241void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest)
242{
243 unsigned long flags;
244 spin_lock_irqsave(&irq_lock, flags);
245 irq_allocations[irq - FIRST_IRQ].mask = dest;
246 spin_unlock_irqrestore(&irq_lock, flags);
247}
248
249static struct hw_interrupt_type crisv32_irq_type = {
250 .typename = "CRISv32",
251 .startup = startup_crisv32_irq,
252 .shutdown = shutdown_crisv32_irq,
253 .enable = enable_crisv32_irq,
254 .disable = disable_crisv32_irq,
255 .ack = ack_crisv32_irq,
256 .end = end_crisv32_irq,
257 .set_affinity = set_affinity_crisv32_irq
258};
259
260void
261set_exception_vector(int n, irqvectptr addr)
262{
263 etrax_irv->v[n] = (irqvectptr) addr;
264}
265
266extern void do_IRQ(int irq, struct pt_regs * regs);
267
268void
269crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
270{
271 /* Interrupts that may not be moved to another CPU and
272 * are SA_INTERRUPT may skip blocking. This is currently
273 * only valid for the timer IRQ and the IPI and is used
274 * for the timer interrupt to avoid watchdog starvation.
275 */
276 if (!block) {
277 do_IRQ(irq, regs);
278 return;
279 }
280
281 block_irq(irq, smp_processor_id());
282 do_IRQ(irq, regs);
283
284 unblock_irq(irq, irq_cpu(irq));
285}
286
287/* If multiple interrupts occur simultaneously we get a multiple
288 * interrupt from the CPU and software has to sort out which
289 * interrupts happened. There are two special cases here:
290 *
291 * 1. Timer interrupts may never be blocked because of the
292 *    watchdog (refer to comment in include/asm/arch/irq.h)
293 * 2. GDB serial port IRQs are unhandled here and will be handled
294 * as a single IRQ when it strikes again because the GDB
295 *    stub wants to save the registers in its own fashion.
296 */
297void
298crisv32_do_multiple(struct pt_regs* regs)
299{
300 int cpu;
301 int mask;
302 int masked;
303 int bit;
304
305 cpu = smp_processor_id();
306
307	/* An extra irq_enter here to prevent softIRQs from running after
308 * each do_IRQ. This will decrease the interrupt latency.
309 */
310 irq_enter();
311
312	/* Find out which IRQs happened. */
313 masked = REG_RD_INT(intr_vect, irq_regs[cpu], r_masked_vect);
314
315 /* Calculate new IRQ mask with these IRQs disabled. */
316 mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
317 mask &= ~masked;
318
319 /* Timer IRQ is never masked */
320 if (masked & TIMER_MASK)
321 mask |= TIMER_MASK;
322
323 /* Block all the IRQs */
324 REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, mask);
325
326 /* Check for timer IRQ and handle it special. */
327 if (masked & TIMER_MASK) {
328 masked &= ~TIMER_MASK;
329 do_IRQ(TIMER_INTR_VECT, regs);
330 }
331
332#ifdef IGNOREMASK
333	/* Remove IRQs that can't be handled as multiple. */
334	masked &= ~IGNOREMASK;
335#endif
336
337 /* Handle the rest of the IRQs. */
338 for (bit = 0; bit < 32; bit++)
339 {
340 if (masked & (1 << bit))
341 do_IRQ(bit + FIRST_IRQ, regs);
342 }
343
344 /* Unblock all the IRQs. */
345 mask = REG_RD_INT(intr_vect, irq_regs[cpu], rw_mask);
346 mask |= masked;
347 REG_WR_INT(intr_vect, irq_regs[cpu], rw_mask, mask);
348
349 /* This irq_exit() will trigger the soft IRQs. */
350 irq_exit();
351}
352
353/*
354 * This is called by start_kernel. It clears the IRQ masks, points all
355 * vectors at weird_irq and then installs the real IRQ handlers.
356 */
357void __init
358init_IRQ(void)
359{
360 int i;
361 int j;
362 reg_intr_vect_rw_mask vect_mask = {0};
363
364 /* Clear all interrupts masks. */
365 REG_WR(intr_vect, regi_irq, rw_mask, vect_mask);
366
367 for (i = 0; i < 256; i++)
368 etrax_irv->v[i] = weird_irq;
369
370	/* Install the real handlers and exception vectors for all IRQs. */
371 for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
372 irq_desc[j].handler = &crisv32_irq_type;
373 set_exception_vector(i, interrupt[j]);
374 }
375
376 /* Mark Timer and IPI IRQs as CPU local */
377 irq_allocations[TIMER_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
378 irq_desc[TIMER_INTR_VECT].status |= IRQ_PER_CPU;
379 irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
380 irq_desc[IPI_INTR_VECT].status |= IRQ_PER_CPU;
381
382 set_exception_vector(0x00, nmi_interrupt);
383 set_exception_vector(0x30, multiple_interrupt);
384
385 /* Set up handler for various MMU bus faults. */
386 set_exception_vector(0x04, i_mmu_refill);
387 set_exception_vector(0x05, i_mmu_invalid);
388 set_exception_vector(0x06, i_mmu_access);
389 set_exception_vector(0x07, i_mmu_execute);
390 set_exception_vector(0x08, d_mmu_refill);
391 set_exception_vector(0x09, d_mmu_invalid);
392 set_exception_vector(0x0a, d_mmu_access);
393 set_exception_vector(0x0b, d_mmu_write);
394
395 /* The system-call trap is reached by "break 13". */
396 set_exception_vector(0x1d, system_call);
397
398 /* Exception handlers for debugging, both user-mode and kernel-mode. */
399
400 /* Break 8. */
401 set_exception_vector(0x18, gdb_handle_exception);
402 /* Hardware single step. */
403 set_exception_vector(0x3, gdb_handle_exception);
404 /* Hardware breakpoint. */
405 set_exception_vector(0xc, gdb_handle_exception);
406
407#ifdef CONFIG_ETRAX_KGDB
408 kgdb_init();
409 /* Everything is set up; now trap the kernel. */
410 breakpoint();
411#endif
412}
413
diff --git a/arch/cris/arch-v32/kernel/kgdb.c b/arch/cris/arch-v32/kernel/kgdb.c
new file mode 100644
index 000000000000..480e56348be2
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/kgdb.c
@@ -0,0 +1,1660 @@
1/*
2 * arch/cris/arch-v32/kernel/kgdb.c
3 *
4 * CRIS v32 version by Orjan Friberg, Axis Communications AB.
5 *
6 * S390 version
7 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *
10 * Originally written by Glenn Engel, Lake Stevens Instrument Division
11 *
12 * Contributed by HP Systems
13 *
14 * Modified for SPARC by Stu Grossman, Cygnus Support.
15 *
16 * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
17 * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
18 *
19 * Copyright (C) 1995 Andreas Busse
20 */
21
22/* FIXME: Check the documentation. */
23
24/*
25 * kgdb usage notes:
26 * -----------------
27 *
28 * If you select CONFIG_ETRAX_KGDB in the configuration, the kernel will be
29 * built with different gcc flags: "-g" is added to get debug infos, and
 30 * built with different gcc flags: "-g" is added to get debug info, and
31 * resulting kernel will be quite big (approx. > 7 MB), it will be stripped
 32 * before compression. Such a kernel will behave just as usual, except if
33 * given a "debug=<device>" command line option. (Only serial devices are
34 * allowed for <device>, i.e. no printers or the like; possible values are
 35 * machine dependent and are the same as for the usual debug device, the one
36 * for logging kernel messages.) If that option is given and the device can be
37 * initialized, the kernel will connect to the remote gdb in trap_init(). The
 38 * serial parameters are fixed to 8N1 and 115200 bps, for ease of
39 * implementation.
40 *
41 * To start a debugging session, start that gdb with the debugging kernel
42 * image (the one with the symbols, vmlinux.debug) named on the command line.
 43 * This file will be used by gdb to get symbol and debugging info about the
44 * kernel. Next, select remote debug mode by
45 * target remote <device>
46 * where <device> is the name of the serial device over which the debugged
47 * machine is connected. Maybe you have to adjust the baud rate by
48 * set remotebaud <rate>
49 * or also other parameters with stty:
50 * shell stty ... </dev/...
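 *
 * As an illustration only (the device name below is made up; the baud rate is
 * the fixed one mentioned above), a typical host-side session could be:
 *   (gdb) file vmlinux.debug
 *   (gdb) set remotebaud 115200
 *   (gdb) target remote /dev/ttyS0
 *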
51 * If the kernel to debug has already booted, it waited for gdb and now
52 * connects, and you'll see a breakpoint being reported. If the kernel isn't
53 * running yet, start it now. The order of gdb and the kernel doesn't matter.
54 * Another thing worth knowing about in the getting-started phase is how to
55 * debug the remote protocol itself. This is activated with
56 * set remotedebug 1
57 * gdb will then print out each packet sent or received. You'll also get some
58 * messages about the gdb stub on the console of the debugged machine.
59 *
60 * If all that works, you can use lots of the usual debugging techniques on
61 * the kernel, e.g. inspecting and changing variables/memory, setting
62 * breakpoints, single stepping and so on. It's also possible to interrupt the
63 * debugged kernel by pressing C-c in gdb. Have fun! :-)
64 *
65 * The gdb stub is entered (and thus the remote gdb gets control) in the
66 * following situations:
67 *
68 * - If breakpoint() is called. This is just after kgdb initialization, or if
69 * a breakpoint() call has been put somewhere into the kernel source.
70 * (Breakpoints can of course also be set the usual way in gdb.)
71 * In eLinux, we call breakpoint() in init/main.c after IRQ initialization.
72 *
73 * - If there is a kernel exception, i.e. bad_super_trap() or die_if_kernel()
74 * are entered. All the CPU exceptions are mapped to (more or less..., see
75 * the hard_trap_info array below) appropriate signal, which are reported
76 * to gdb. die_if_kernel() is usually called after some kind of access
77 * error and thus is reported as SIGSEGV.
78 *
79 * - When panic() is called. This is reported as SIGABRT.
80 *
81 * - If C-c is received over the serial line, which is treated as
82 * SIGINT.
83 *
84 * Of course, all these signals are just faked for gdb, since there is no
85 * signal concept as such for the kernel. It also isn't possible --obviously--
86 * to set signal handlers from inside gdb, or restart the kernel with a
87 * signal.
88 *
89 * Current limitations:
90 *
91 * - While the kernel is stopped, interrupts are disabled for safety reasons
92 * (i.e., variables not changing magically or the like). But this also
93 * means that the clock isn't running anymore, and that interrupts from the
94 * hardware may get lost/not be served in time. This can cause some device
95 * errors...
96 *
97 * - When single-stepping, only one instruction of the current thread is
98 * executed, but interrupts are allowed for that time and will be serviced
99 * if pending. Be prepared for that.
100 *
101 * - All debugging happens in kernel virtual address space. There's no way to
102 * access physical memory not mapped in kernel space, or to access user
103 * space. A way to work around this is using get_user_long & Co. in gdb
104 * expressions, but only for the current process.
105 *
106 * - Interrupting the kernel only works if interrupts are currently allowed,
107 * and the interrupt of the serial line isn't blocked by some other means
108 * (IPL too high, disabled, ...)
109 *
110 * - The gdb stub is currently not reentrant, i.e. errors that happen therein
111 * (e.g. accessing invalid memory) may not be caught correctly. This could
112 * be removed in future by introducing a stack of struct registers.
113 *
114 */
115
116/*
117 * To enable debugger support, two things need to happen. One, a
118 * call to kgdb_init() is necessary in order to allow any breakpoints
119 * or error conditions to be properly intercepted and reported to gdb.
120 * Two, a breakpoint needs to be generated to begin communication. This
121 * is most easily accomplished by a call to breakpoint().
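 *
 *    For example (illustrative only), architecture setup code typically does
 *
 *        kgdb_init();
 *        breakpoint();
 *
 *    once the chosen debug serial port can be initialized.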
122 *
123 * The following gdb commands are supported:
124 *
125 * command function Return value
126 *
127 * g return the value of the CPU registers hex data or ENN
128 * G set the value of the CPU registers OK or ENN
129 *
130 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
131 * MAA..AA,LLLL: Write LLLL bytes at address AA..AA          OK or ENN
132 *
133 * c                 Resume at current address               SNN (signal NN)
134 * cAA..AA Continue at address AA..AA SNN
135 *
136 * s Step one instruction SNN
137 * sAA..AA Step one instruction from AA..AA SNN
138 *
139 * k kill
140 *
141 * ? What was the last sigval ? SNN (signal NN)
142 *
143 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
144 * baud rate
145 *
146 * All commands and responses are sent with a packet which includes a
147 * checksum. A packet consists of
148 *
149 * $<packet info>#<checksum>.
150 *
151 * where
152 * <packet info> :: <characters representing the command or response>
153 * <checksum>    :: <two hex digits computed as modulo 256 sum of <packet info>>
154 *
155 * When a packet is received, it is first acknowledged with either '+' or '-'.
156 * '+' indicates a successful transfer. '-' indicates a failed transfer.
157 *
158 * Example:
159 *
160 * Host: Reply:
161 * $m0,10#2a +$00010203040506070809101112131415#42
162 *
163 */
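/*
 * Illustrative sketch only (not used by the stub): building a
 * $<data>#<checksum> packet as described above, with the checksum computed
 * as the modulo-256 sum of the payload characters. The real packet I/O is
 * done by getpacket()/putpacket() further down in this file; the function
 * name and buffer handling here are just for illustration.
 */
static void __attribute__((unused))
example_frame_packet(char *dst, const char *payload)
{
	unsigned char sum = 0;

	*dst++ = '$';
	while (*payload) {
		sum += (unsigned char)*payload;
		*dst++ = *payload++;
	}
	*dst++ = '#';
	*dst++ = "0123456789abcdef"[(sum >> 4) & 0xf];
	*dst++ = "0123456789abcdef"[sum & 0xf];
	*dst = '\0';
}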
164
165
166#include <linux/string.h>
167#include <linux/signal.h>
168#include <linux/kernel.h>
169#include <linux/delay.h>
170#include <linux/linkage.h>
171#include <linux/reboot.h>
172
173#include <asm/setup.h>
174#include <asm/ptrace.h>
175
176#include <asm/irq.h>
177#include <asm/arch/hwregs/reg_map.h>
178#include <asm/arch/hwregs/reg_rdwr.h>
179#include <asm/arch/hwregs/intr_vect_defs.h>
180#include <asm/arch/hwregs/ser_defs.h>
181
182/* From entry.S. */
183extern void gdb_handle_exception(void);
184/* From kgdb_asm.S. */
185extern void kgdb_handle_exception(void);
186
187static int kgdb_started = 0;
188
189/********************************* Register image ****************************/
190
191typedef
192struct register_image
193{
194 /* Offset */
195 unsigned int r0; /* 0x00 */
196 unsigned int r1; /* 0x04 */
197 unsigned int r2; /* 0x08 */
198 unsigned int r3; /* 0x0C */
199 unsigned int r4; /* 0x10 */
200 unsigned int r5; /* 0x14 */
201 unsigned int r6; /* 0x18 */
202 unsigned int r7; /* 0x1C */
203 unsigned int r8; /* 0x20; Frame pointer (if any) */
204 unsigned int r9; /* 0x24 */
205 unsigned int r10; /* 0x28 */
206 unsigned int r11; /* 0x2C */
207 unsigned int r12; /* 0x30 */
208 unsigned int r13; /* 0x34 */
209 unsigned int sp; /* 0x38; R14, Stack pointer */
210 unsigned int acr; /* 0x3C; R15, Address calculation register. */
211
212 unsigned char bz; /* 0x40; P0, 8-bit zero register */
213 unsigned char vr; /* 0x41; P1, Version register (8-bit) */
214 unsigned int pid; /* 0x42; P2, Process ID */
215 unsigned char srs; /* 0x46; P3, Support register select (8-bit) */
216 unsigned short wz; /* 0x47; P4, 16-bit zero register */
217 unsigned int exs; /* 0x49; P5, Exception status */
218 unsigned int eda; /* 0x4D; P6, Exception data address */
219 unsigned int mof; /* 0x51; P7, Multiply overflow register */
220 unsigned int dz; /* 0x55; P8, 32-bit zero register */
221 unsigned int ebp; /* 0x59; P9, Exception base pointer */
222 unsigned int erp; /* 0x5D; P10, Exception return pointer. Contains the PC we are interested in. */
223 unsigned int srp; /* 0x61; P11, Subroutine return pointer */
224 unsigned int nrp; /* 0x65; P12, NMI return pointer */
225 unsigned int ccs; /* 0x69; P13, Condition code stack */
226 unsigned int usp; /* 0x6D; P14, User mode stack pointer */
227 unsigned int spc; /* 0x71; P15, Single step PC */
228 unsigned int pc; /* 0x75; Pseudo register (for the most part set to ERP). */
229
230} registers;
231
232typedef
233struct bp_register_image
234{
235 /* Support register bank 0. */
236 unsigned int s0_0;
237 unsigned int s1_0;
238 unsigned int s2_0;
239 unsigned int s3_0;
240 unsigned int s4_0;
241 unsigned int s5_0;
242 unsigned int s6_0;
243 unsigned int s7_0;
244 unsigned int s8_0;
245 unsigned int s9_0;
246 unsigned int s10_0;
247 unsigned int s11_0;
248 unsigned int s12_0;
249 unsigned int s13_0;
250 unsigned int s14_0;
251 unsigned int s15_0;
252
253 /* Support register bank 1. */
254 unsigned int s0_1;
255 unsigned int s1_1;
256 unsigned int s2_1;
257 unsigned int s3_1;
258 unsigned int s4_1;
259 unsigned int s5_1;
260 unsigned int s6_1;
261 unsigned int s7_1;
262 unsigned int s8_1;
263 unsigned int s9_1;
264 unsigned int s10_1;
265 unsigned int s11_1;
266 unsigned int s12_1;
267 unsigned int s13_1;
268 unsigned int s14_1;
269 unsigned int s15_1;
270
271 /* Support register bank 2. */
272 unsigned int s0_2;
273 unsigned int s1_2;
274 unsigned int s2_2;
275 unsigned int s3_2;
276 unsigned int s4_2;
277 unsigned int s5_2;
278 unsigned int s6_2;
279 unsigned int s7_2;
280 unsigned int s8_2;
281 unsigned int s9_2;
282 unsigned int s10_2;
283 unsigned int s11_2;
284 unsigned int s12_2;
285 unsigned int s13_2;
286 unsigned int s14_2;
287 unsigned int s15_2;
288
289 /* Support register bank 3. */
290 unsigned int s0_3; /* BP_CTRL */
291 unsigned int s1_3; /* BP_I0_START */
292 unsigned int s2_3; /* BP_I0_END */
293 unsigned int s3_3; /* BP_D0_START */
294 unsigned int s4_3; /* BP_D0_END */
295 unsigned int s5_3; /* BP_D1_START */
296 unsigned int s6_3; /* BP_D1_END */
297 unsigned int s7_3; /* BP_D2_START */
298 unsigned int s8_3; /* BP_D2_END */
299 unsigned int s9_3; /* BP_D3_START */
300 unsigned int s10_3; /* BP_D3_END */
301 unsigned int s11_3; /* BP_D4_START */
302 unsigned int s12_3; /* BP_D4_END */
303 unsigned int s13_3; /* BP_D5_START */
304 unsigned int s14_3; /* BP_D5_END */
305 unsigned int s15_3; /* BP_RESERVED */
306
307} support_registers;
308
309enum register_name
310{
311 R0, R1, R2, R3,
312 R4, R5, R6, R7,
313 R8, R9, R10, R11,
314 R12, R13, SP, ACR,
315
316 BZ, VR, PID, SRS,
317 WZ, EXS, EDA, MOF,
318 DZ, EBP, ERP, SRP,
319 NRP, CCS, USP, SPC,
320 PC,
321
322 S0, S1, S2, S3,
323 S4, S5, S6, S7,
324 S8, S9, S10, S11,
325 S12, S13, S14, S15
326
327};
328
329/* The register sizes of the registers in register_name. An unimplemented register
330 is designated by size 0 in this array. */
331static int register_size[] =
332{
333 4, 4, 4, 4,
334 4, 4, 4, 4,
335 4, 4, 4, 4,
336 4, 4, 4, 4,
337
338 1, 1, 4, 1,
339 2, 4, 4, 4,
340 4, 4, 4, 4,
341 4, 4, 4, 4,
342
343 4,
344
345 4, 4, 4, 4,
346 4, 4, 4, 4,
347 4, 4, 4, 4,
348 4, 4, 4
349
350};
351
352/* Contains the register image of the kernel.
353 (Global so that they can be reached from assembler code.) */
354registers reg;
355support_registers sreg;
356
357/************** Prototypes for local library functions ***********************/
358
359/* Copy of strcpy from libc. */
360static char *gdb_cris_strcpy(char *s1, const char *s2);
361
362/* Copy of strlen from libc. */
363static int gdb_cris_strlen(const char *s);
364
365/* Copy of memchr from libc. */
366static void *gdb_cris_memchr(const void *s, int c, int n);
367
368/* Copy of strtol from libc. Only supports base 16. */
369static int gdb_cris_strtol(const char *s, char **endptr, int base);
370
371/********************** Prototypes for local functions. **********************/
372
373/* Write a value to a specified register regno in the register image
374 of the current thread. */
375static int write_register(int regno, char *val);
376
377/* Read a value from a specified register in the register image. Returns the
378 status of the read operation. The register value is returned in valptr. */
379static int read_register(char regno, unsigned int *valptr);
380
381/* Serial port, reads one character. ETRAX specific. From debugport.c */
382int getDebugChar(void);
383
384#ifdef CONFIG_ETRAXFS_SIM
385int getDebugChar(void)
386{
387 return socketread();
388}
389#endif
390
391/* Serial port, writes one character. ETRAX specific. From debugport.c */
392void putDebugChar(int val);
393
394#ifdef CONFIG_ETRAXFS_SIM
395void putDebugChar(int val)
396{
397 socketwrite((char *)&val, 1);
398}
399#endif
400
401/* Returns the hex character for the high nibble (bits 7-4) of the byte
402   represented by int x. */
403static char highhex(int x);
404
405/* Returns the hex character for the low nibble (bits 3-0) of the byte
406   represented by int x. */
407static char lowhex(int x);
408
409/* Returns the integer equivalent of a hexadecimal character. */
410static int hex(char ch);
411
412/* Convert the memory pointed to by mem into hexadecimal representation.
413 Put the result in buf, and return a pointer to the last character
414 in buf (null). */
415static char *mem2hex(char *buf, unsigned char *mem, int count);
416
417/* Convert the array, in hexadecimal representation, pointed to by buf into
418 binary representation. Put the result in mem, and return a pointer to
419 the character after the last byte written. */
420static unsigned char *hex2mem(unsigned char *mem, char *buf, int count);
421
422/* Put the content of the array, in binary representation, pointed to by buf
423 into memory pointed to by mem, and return a pointer to
424 the character after the last byte written. */
425static unsigned char *bin2mem(unsigned char *mem, unsigned char *buf, int count);
426
427/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
428 returned. */
429static void getpacket(char *buffer);
430
431/* Send $<data>#<checksum> from the <data> in the array buffer. */
432static void putpacket(char *buffer);
433
434/* Build and send a response packet in order to inform the host the
435 stub is stopped. */
436static void stub_is_stopped(int sigval);
437
438/* All expected commands are sent from remote.c. Send a response according
439 to the description in remote.c. Not static since it needs to be reached
440 from assembler code. */
441void handle_exception(int sigval);
442
443/* Performs a complete re-start from scratch. ETRAX specific. */
444static void kill_restart(void);
445
446/******************** Prototypes for global functions. ***********************/
447
448/* The string str is prepended with the GDB printout token and sent. */
449void putDebugString(const unsigned char *str, int len);
450
451/* A static breakpoint to be used at startup. */
452void breakpoint(void);
453
454/* Avoid warning as the internal_stack is not used in the C-code. */
455#define USEDVAR(name) { if (name) { ; } }
456#define USEDFUN(name) { void (*pf)(void) = (void *)name; USEDVAR(pf) }
457
458/********************************** Packet I/O ******************************/
459/* BUFMAX defines the maximum number of characters in
460 inbound/outbound buffers */
461/* FIXME: How do we know it's enough? */
462#define BUFMAX 512
463
464/* Run-length encoding maximum length. Send 64 at most. */
465#define RUNLENMAX 64
466
467/* Definition of all valid hexadecimal characters */
468static const char hexchars[] = "0123456789abcdef";
469
470/* The inbound/outbound buffers used in packet I/O */
471static char input_buffer[BUFMAX];
472static char output_buffer[BUFMAX];
473
474/* Error and warning messages. */
475enum error_type
476{
477 SUCCESS, E01, E02, E03, E04, E05, E06,
478};
479
480static char *error_message[] =
481{
482 "",
483 "E01 Set current or general thread - H[c,g] - internal error.",
484 "E02 Change register content - P - cannot change read-only register.",
485 "E03 Thread is not alive.", /* T, not used. */
486 "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
487	"E05 Change register content - P - the register is not implemented.",
488 "E06 Change memory content - M - internal error.",
489};
490
491/********************************** Breakpoint *******************************/
492/* Use an internal stack in the breakpoint and interrupt response routines.
493 FIXME: How do we know the size of this stack is enough?
494 Global so it can be reached from assembler code. */
495#define INTERNAL_STACK_SIZE 1024
496char internal_stack[INTERNAL_STACK_SIZE];
497
498/* Due to the breakpoint return pointer, a state variable is needed to keep
499 track of whether it is a static (compiled) or dynamic (gdb-invoked)
500 breakpoint to be handled. A static breakpoint uses the content of register
501   ERP as it is, whereas a dynamic breakpoint requires 2 to be subtracted from ERP
502   so that the instruction replaced by the break gets executed. The first breakpoint is static; all
503 following are assumed to be dynamic. */
504static int dynamic_bp = 0;
505
506/********************************* String library ****************************/
507/* Single-stepping over library functions creates trap loops. */
508
509/* Copy char s2[] to s1[]. */
510static char*
511gdb_cris_strcpy(char *s1, const char *s2)
512{
513 char *s = s1;
514
515 for (s = s1; (*s++ = *s2++) != '\0'; )
516 ;
517 return s1;
518}
519
520/* Find length of s[]. */
521static int
522gdb_cris_strlen(const char *s)
523{
524 const char *sc;
525
526 for (sc = s; *sc != '\0'; sc++)
527 ;
528 return (sc - s);
529}
530
531/* Find first occurrence of c in s[n]. */
532static void*
533gdb_cris_memchr(const void *s, int c, int n)
534{
535 const unsigned char uc = c;
536 const unsigned char *su;
537
538 for (su = s; 0 < n; ++su, --n)
539 if (*su == uc)
540 return (void *)su;
541 return NULL;
542}
543/******************************* Standard library ****************************/
544/* Single-stepping over library functions creates trap loops. */
545/* Convert string to long. */
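/* For example (illustrative), gdb_cris_strtol("1f,a", &end, 16) returns 0x1f
   and leaves end pointing at the ','. Only lowercase hex digits are accepted,
   since the lookup goes through hexchars[]. */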
546static int
547gdb_cris_strtol(const char *s, char **endptr, int base)
548{
549 char *s1;
550 char *sd;
551 int x = 0;
552
553 for (s1 = (char*)s; (sd = gdb_cris_memchr(hexchars, *s1, base)) != NULL; ++s1)
554 x = x * base + (sd - hexchars);
555
556 if (endptr) {
557 /* Unconverted suffix is stored in endptr unless endptr is NULL. */
558 *endptr = s1;
559 }
560
561 return x;
562}
563
564/********************************* Register image ****************************/
565
566/* Write a value to a specified register in the register image of the current
567 thread. Returns status code SUCCESS, E02 or E05. */
568static int
569write_register(int regno, char *val)
570{
571 int status = SUCCESS;
572
573 if (regno >= R0 && regno <= ACR) {
574 /* Consecutive 32-bit registers. */
575 hex2mem((unsigned char *)&reg.r0 + (regno - R0) * sizeof(unsigned int),
576 val, sizeof(unsigned int));
577
578 } else if (regno == BZ || regno == VR || regno == WZ || regno == DZ) {
579 /* Read-only registers. */
580 status = E02;
581
582 } else if (regno == PID) {
583 /* 32-bit register. (Even though we already checked SRS and WZ, we cannot
584	   combine this with the EXS - SPC write since SRS and WZ have different sizes.) */
585 hex2mem((unsigned char *)&reg.pid, val, sizeof(unsigned int));
586
587 } else if (regno == SRS) {
588 /* 8-bit register. */
589 hex2mem((unsigned char *)&reg.srs, val, sizeof(unsigned char));
590
591 } else if (regno >= EXS && regno <= SPC) {
592 /* Consecutive 32-bit registers. */
593 hex2mem((unsigned char *)&reg.exs + (regno - EXS) * sizeof(unsigned int),
594 val, sizeof(unsigned int));
595
596 } else if (regno == PC) {
597 /* Pseudo-register. Treat as read-only. */
598 status = E02;
599
600 } else if (regno >= S0 && regno <= S15) {
601 /* 32-bit registers. */
602 hex2mem((unsigned char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int), val, sizeof(unsigned int));
603 } else {
604 /* Non-existing register. */
605 status = E05;
606 }
607 return status;
608}
609
610/* Read a value from a specified register in the register image. Returns the
611 value in the register or -1 for non-implemented registers. */
612static int
613read_register(char regno, unsigned int *valptr)
614{
615 int status = SUCCESS;
616
617 /* We read the zero registers from the register struct (instead of just returning 0)
618 to catch errors. */
619
620 if (regno >= R0 && regno <= ACR) {
621 /* Consecutive 32-bit registers. */
622 *valptr = *(unsigned int *)((char *)&reg.r0 + (regno - R0) * sizeof(unsigned int));
623
624 } else if (regno == BZ || regno == VR) {
625 /* Consecutive 8-bit registers. */
626 *valptr = (unsigned int)(*(unsigned char *)
627 ((char *)&reg.bz + (regno - BZ) * sizeof(char)));
628
629 } else if (regno == PID) {
630 /* 32-bit register. */
631 *valptr = *(unsigned int *)((char *)&reg.pid);
632
633 } else if (regno == SRS) {
634 /* 8-bit register. */
635 *valptr = (unsigned int)(*(unsigned char *)((char *)&reg.srs));
636
637 } else if (regno == WZ) {
638 /* 16-bit register. */
639 *valptr = (unsigned int)(*(unsigned short *)(char *)&reg.wz);
640
641 } else if (regno >= EXS && regno <= PC) {
642 /* Consecutive 32-bit registers. */
643 *valptr = *(unsigned int *)((char *)&reg.exs + (regno - EXS) * sizeof(unsigned int));
644
645 } else if (regno >= S0 && regno <= S15) {
646 /* Consecutive 32-bit registers, located elsewhere. */
647 *valptr = *(unsigned int *)((char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int));
648
649 } else {
650 /* Non-existing register. */
651 status = E05;
652 }
653 return status;
654
655}
656
657/********************************** Packet I/O ******************************/
658/* Returns the hex character for the high nibble (bits 7-4) of the byte
659   represented by int x. */
660static inline char
661highhex(int x)
662{
663 return hexchars[(x >> 4) & 0xf];
664}
665
666/* Returns the hex character for the low nibble (bits 3-0) of the byte
667   represented by int x. */
668static inline char
669lowhex(int x)
670{
671 return hexchars[x & 0xf];
672}
673
674/* Returns the integer equivalent of a hexadecimal character. */
675static int
676hex(char ch)
677{
678 if ((ch >= 'a') && (ch <= 'f'))
679 return (ch - 'a' + 10);
680 if ((ch >= '0') && (ch <= '9'))
681 return (ch - '0');
682 if ((ch >= 'A') && (ch <= 'F'))
683 return (ch - 'A' + 10);
684 return -1;
685}
686
687/* Convert the memory pointed to by mem into hexadecimal representation.
688 Put the result in buf, and return a pointer to the last character
689 in buf (null). */
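/* For example, the two input bytes { 0x4f, 0x4b } yield the string "4f4b". */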
690
691static char *
692mem2hex(char *buf, unsigned char *mem, int count)
693{
694 int i;
695 int ch;
696
697 if (mem == NULL) {
698 /* Invalid address, caught by 'm' packet handler. */
699 for (i = 0; i < count; i++) {
700 *buf++ = '0';
701 *buf++ = '0';
702 }
703 } else {
704 /* Valid mem address. */
705 for (i = 0; i < count; i++) {
706 ch = *mem++;
707 *buf++ = highhex (ch);
708 *buf++ = lowhex (ch);
709 }
710 }
711 /* Terminate properly. */
712 *buf = '\0';
713 return buf;
714}
715
716/* Same as mem2hex, but emits the bytes in network (big-endian) byte order. */
717static char *
718mem2hex_nbo(char *buf, unsigned char *mem, int count)
719{
720 int i;
721 int ch;
722
723 mem += count - 1;
724 for (i = 0; i < count; i++) {
725 ch = *mem--;
726 *buf++ = highhex (ch);
727 *buf++ = lowhex (ch);
728 }
729
730 /* Terminate properly. */
731 *buf = '\0';
732 return buf;
733}
734
735/* Convert the array, in hexadecimal representation, pointed to by buf into
736 binary representation. Put the result in mem, and return a pointer to
737 the character after the last byte written. */
738static unsigned char*
739hex2mem(unsigned char *mem, char *buf, int count)
740{
741 int i;
742 unsigned char ch;
743 for (i = 0; i < count; i++) {
744 ch = hex (*buf++) << 4;
745 ch = ch + hex (*buf++);
746 *mem++ = ch;
747 }
748 return mem;
749}
750
751/* Put the content of the array, in binary representation, pointed to by buf
752 into memory pointed to by mem, and return a pointer to the character after
753 the last byte written.
754 Gdb will escape $, #, and the escape char (0x7d). */
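/* An escaped byte is transmitted as 0x7d followed by the original byte XORed
   with 0x20, so for the three characters handled below the "+ 0x20" restores
   the original value (0x03 -> '#', 0x04 -> '$', 0x5d -> 0x7d). */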
755static unsigned char*
756bin2mem(unsigned char *mem, unsigned char *buf, int count)
757{
758 int i;
759 unsigned char *next;
760 for (i = 0; i < count; i++) {
761 /* Check for any escaped characters. Be paranoid and
762 only unescape chars that should be escaped. */
763 if (*buf == 0x7d) {
764 next = buf + 1;
765 if (*next == 0x3 || *next == 0x4 || *next == 0x5D) {
766 /* #, $, ESC */
767 buf++;
768 *buf += 0x20;
769 }
770 }
771 *mem++ = *buf++;
772 }
773 return mem;
774}
775
776/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
777 returned. */
778static void
779getpacket(char *buffer)
780{
781 unsigned char checksum;
782 unsigned char xmitcsum;
783 int i;
784 int count;
785 char ch;
786
787 do {
788 while((ch = getDebugChar ()) != '$')
789 /* Wait for the start character $ and ignore all other characters */;
790 checksum = 0;
791 xmitcsum = -1;
792 count = 0;
793 /* Read until a # or the end of the buffer is reached */
794 while (count < BUFMAX) {
795 ch = getDebugChar();
796 if (ch == '#')
797 break;
798 checksum = checksum + ch;
799 buffer[count] = ch;
800 count = count + 1;
801 }
802
803 if (count >= BUFMAX)
804 continue;
805
806 buffer[count] = 0;
807
808 if (ch == '#') {
809 xmitcsum = hex(getDebugChar()) << 4;
810 xmitcsum += hex(getDebugChar());
811 if (checksum != xmitcsum) {
812 /* Wrong checksum */
813 putDebugChar('-');
814 } else {
815 /* Correct checksum */
816 putDebugChar('+');
817 /* If sequence characters are received, reply with them */
818 if (buffer[2] == ':') {
819 putDebugChar(buffer[0]);
820 putDebugChar(buffer[1]);
821 /* Remove the sequence characters from the buffer */
822 count = gdb_cris_strlen(buffer);
823 for (i = 3; i <= count; i++)
824 buffer[i - 3] = buffer[i];
825 }
826 }
827 }
828 } while (checksum != xmitcsum);
829}
830
831/* Send $<data>#<checksum> from the <data> in the array buffer. */
832
833static void
834putpacket(char *buffer)
835{
836 int checksum;
837 int runlen;
838 int encode;
839
840 do {
841 char *src = buffer;
842 putDebugChar('$');
843 checksum = 0;
844 while (*src) {
845 /* Do run length encoding */
846 putDebugChar(*src);
847 checksum += *src;
848 runlen = 0;
849 while (runlen < RUNLENMAX && *src == src[runlen]) {
850 runlen++;
851 }
852 if (runlen > 3) {
853 /* Got a useful amount */
854 putDebugChar ('*');
855 checksum += '*';
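				/* The count sent covers the repetitions beyond the
				   character already transmitted, encoded as count + 29
				   to keep it printable: runlen + ' ' - 4 == (runlen - 1) + 29. */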
856 encode = runlen + ' ' - 4;
857 putDebugChar(encode);
858 checksum += encode;
859 src += runlen;
860 } else {
861 src++;
862 }
863 }
864 putDebugChar('#');
865 putDebugChar(highhex (checksum));
866 putDebugChar(lowhex (checksum));
867 } while(kgdb_started && (getDebugChar() != '+'));
868}
869
870/* The string str is prepended with the GDB printout token and sent. Required
871 in traditional implementations. */
872void
873putDebugString(const unsigned char *str, int len)
874{
875 /* Move SPC forward if we are single-stepping. */
876 asm("spchere:");
877 asm("move $spc, $r10");
878 asm("cmp.d spchere, $r10");
879 asm("bne nosstep");
880 asm("nop");
881 asm("move.d spccont, $r10");
882 asm("move $r10, $spc");
883 asm("nosstep:");
884
885 output_buffer[0] = 'O';
886 mem2hex(&output_buffer[1], (unsigned char *)str, len);
887 putpacket(output_buffer);
888
889 asm("spccont:");
890}
891
892/********************************** Handle exceptions ************************/
893/* Build and send a response packet in order to inform the host the
894 stub is stopped. TAAn...:r...;n...:r...;n...:r...;
895 AA = signal number
896 n... = register number (hex)
897 r... = register contents
898 n... = `thread'
899 r... = thread process ID. This is a hex integer.
900 n... = other string not starting with valid hex digit.
901 gdb should ignore this n,r pair and go on to the next.
902 This way we can extend the protocol. */
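/* For example (illustrative only), a SIGTRAP stop reported with this stub's
   register numbering (PC = 0x20, R8 = 0x08, SP = 0x0e, ERP = 0x1a; see enum
   register_name) looks like
	T0520:xxxxxxxx;08:xxxxxxxx;0e:xxxxxxxx;1a:xxxxxxxx;
   where each xxxxxxxx is the register content as produced by mem2hex,
   i.e. in target byte order. */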
903static void
904stub_is_stopped(int sigval)
905{
906 char *ptr = output_buffer;
907 unsigned int reg_cont;
908
909 /* Send trap type (converted to signal) */
910
911 *ptr++ = 'T';
912 *ptr++ = highhex(sigval);
913 *ptr++ = lowhex(sigval);
914
915 if (((reg.exs & 0xff00) >> 8) == 0xc) {
916
917 /* Some kind of hardware watchpoint triggered. Find which one
918 and determine its type (read/write/access). */
919 int S, bp, trig_bits = 0, rw_bits = 0;
920 int trig_mask = 0;
921 unsigned int *bp_d_regs = &sreg.s3_3;
922 /* In a lot of cases, the stopped data address will simply be EDA.
923 In some cases, we adjust it to match the watched data range.
924 (We don't want to change the actual EDA though). */
925 unsigned int stopped_data_address;
926 /* The S field of EXS. */
927 S = (reg.exs & 0xffff0000) >> 16;
928
929 if (S & 1) {
930 /* Instruction watchpoint. */
931 /* FIXME: Check against, and possibly adjust reported EDA. */
932 } else {
933 /* Data watchpoint. Find the one that triggered. */
934 for (bp = 0; bp < 6; bp++) {
935
936 /* Dx_RD, Dx_WR in the S field of EXS for this BP. */
937 int bitpos_trig = 1 + bp * 2;
938 /* Dx_BPRD, Dx_BPWR in BP_CTRL for this BP. */
939 int bitpos_config = 2 + bp * 4;
940
941 /* Get read/write trig bits for this BP. */
942 trig_bits = (S & (3 << bitpos_trig)) >> bitpos_trig;
943
944 /* Read/write config bits for this BP. */
945 rw_bits = (sreg.s0_3 & (3 << bitpos_config)) >> bitpos_config;
946 if (trig_bits) {
947 /* Sanity check: the BP shouldn't trigger for accesses
948 that it isn't configured for. */
949 if ((rw_bits == 0x1 && trig_bits != 0x1) ||
950 (rw_bits == 0x2 && trig_bits != 0x2))
951						panic("Invalid r/w triggering for this BP");
952
953					/* Mark this BP as triggered for future reference. */
954 trig_mask |= (1 << bp);
955
956 if (reg.eda >= bp_d_regs[bp * 2] &&
957 reg.eda <= bp_d_regs[bp * 2 + 1]) {
958						/* EDA within range for this BP; it must be the one
959 we're looking for. */
960 stopped_data_address = reg.eda;
961 break;
962 }
963 }
964 }
965 if (bp < 6) {
966				/* Found a triggered BP with EDA within its configured data range. */
967 } else if (trig_mask) {
968 /* Something triggered, but EDA doesn't match any BP's range. */
969 for (bp = 0; bp < 6; bp++) {
970 /* Dx_BPRD, Dx_BPWR in BP_CTRL for this BP. */
971 int bitpos_config = 2 + bp * 4;
972
973 /* Read/write config bits for this BP (needed later). */
974 rw_bits = (sreg.s0_3 & (3 << bitpos_config)) >> bitpos_config;
975
976 if (trig_mask & (1 << bp)) {
977 /* EDA within 31 bytes of the configured start address? */
978 if (reg.eda + 31 >= bp_d_regs[bp * 2]) {
979 /* Changing the reported address to match
980 the start address of the first applicable BP. */
981 stopped_data_address = bp_d_regs[bp * 2];
982 break;
983 } else {
984 /* We continue since we might find another useful BP. */
985							printk("EDA doesn't match triggered BP's range");
986 }
987 }
988 }
989 }
990
991 /* No match yet? */
992 BUG_ON(bp >= 6);
993 /* Note that we report the type according to what the BP is configured
994 for (otherwise we'd never report an 'awatch'), not according to how
995		   it triggered. We did check that the triggered bits match what the BP is
996 configured for though. */
997 if (rw_bits == 0x1) {
998 /* read */
999 strncpy(ptr, "rwatch", 6);
1000 ptr += 6;
1001 } else if (rw_bits == 0x2) {
1002 /* write */
1003 strncpy(ptr, "watch", 5);
1004 ptr += 5;
1005 } else if (rw_bits == 0x3) {
1006 /* access */
1007 strncpy(ptr, "awatch", 6);
1008 ptr += 6;
1009 } else {
1010 panic("Invalid r/w bits for this BP.");
1011 }
1012
1013 *ptr++ = ':';
1014 /* Note that we don't read_register(EDA, ...) */
1015 ptr = mem2hex_nbo(ptr, (unsigned char *)&stopped_data_address, register_size[EDA]);
1016 *ptr++ = ';';
1017 }
1018 }
1019 /* Only send PC, frame and stack pointer. */
1020 read_register(PC, &reg_cont);
1021 *ptr++ = highhex(PC);
1022 *ptr++ = lowhex(PC);
1023 *ptr++ = ':';
1024 ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[PC]);
1025 *ptr++ = ';';
1026
1027 read_register(R8, &reg_cont);
1028 *ptr++ = highhex(R8);
1029 *ptr++ = lowhex(R8);
1030 *ptr++ = ':';
1031 ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[R8]);
1032 *ptr++ = ';';
1033
1034 read_register(SP, &reg_cont);
1035 *ptr++ = highhex(SP);
1036 *ptr++ = lowhex(SP);
1037 *ptr++ = ':';
1038 ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[SP]);
1039 *ptr++ = ';';
1040
1041 /* Send ERP as well; this will save us an entire register fetch in some cases. */
1042 read_register(ERP, &reg_cont);
1043 *ptr++ = highhex(ERP);
1044 *ptr++ = lowhex(ERP);
1045 *ptr++ = ':';
1046 ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[ERP]);
1047 *ptr++ = ';';
1048
1049 /* null-terminate and send it off */
1050 *ptr = 0;
1051 putpacket(output_buffer);
1052}
1053
1054/* Returns the size of an instruction that has a delay slot. */
1055
1056int insn_size(unsigned long pc)
1057{
1058 unsigned short opcode = *(unsigned short *)pc;
1059 int size = 0;
1060
1061 switch ((opcode & 0x0f00) >> 8) {
1062 case 0x0:
1063 case 0x9:
1064 case 0xb:
1065 size = 2;
1066 break;
1067 case 0xe:
1068 case 0xf:
1069 size = 6;
1070 break;
1071 case 0xd:
1072 /* Could be 4 or 6; check more bits. */
1073 if ((opcode & 0xff) == 0xff)
1074 size = 4;
1075 else
1076 size = 6;
1077 break;
1078 default:
1079 panic("Couldn't find size of opcode 0x%x at 0x%lx\n", opcode, pc);
1080 }
1081
1082 return size;
1083}
1084
1085void register_fixup(int sigval)
1086{
1087 /* Compensate for ACR push at the beginning of exception handler. */
1088 reg.sp += 4;
1089
1090 /* Standard case. */
1091 reg.pc = reg.erp;
1092 if (reg.erp & 0x1) {
1093 /* Delay slot bit set. Report as stopped on proper instruction. */
1094 if (reg.spc) {
1095 /* Rely on SPC if set. */
1096 reg.pc = reg.spc;
1097 } else {
1098 /* Calculate the PC from the size of the instruction
1099 that the delay slot we're in belongs to. */
1100			reg.pc += insn_size(reg.erp & ~1) - 1;
1101 }
1102 }
1103
1104 if ((reg.exs & 0x3) == 0x0) {
1105 /* Bits 1 - 0 indicate the type of memory operation performed
1106 by the interrupted instruction. 0 means no memory operation,
1107 and EDA is undefined in that case. We zero it to avoid confusion. */
1108 reg.eda = 0;
1109 }
1110
1111 if (sigval == SIGTRAP) {
1112 /* Break 8, single step or hardware breakpoint exception. */
1113
1114 /* Check IDX field of EXS. */
1115 if (((reg.exs & 0xff00) >> 8) == 0x18) {
1116
1117 /* Break 8. */
1118
1119 /* Static (compiled) breakpoints must return to the next instruction
1120 in order to avoid infinite loops (default value of ERP). Dynamic
1121 (gdb-invoked) must subtract the size of the break instruction from
1122 the ERP so that the instruction that was originally in the break
1123 instruction's place will be run when we return from the exception. */
1124 if (!dynamic_bp) {
1125 /* Assuming that all breakpoints are dynamic from now on. */
1126 dynamic_bp = 1;
1127 } else {
1128
1129 /* Only if not in a delay slot. */
1130 if (!(reg.erp & 0x1)) {
1131 reg.erp -= 2;
1132 reg.pc -= 2;
1133 }
1134 }
1135
1136 } else if (((reg.exs & 0xff00) >> 8) == 0x3) {
1137 /* Single step. */
1138 /* Don't fiddle with S1. */
1139
1140 } else if (((reg.exs & 0xff00) >> 8) == 0xc) {
1141
1142 /* Hardware watchpoint exception. */
1143
1144 /* SPC has been updated so that we will get a single step exception
1145 when we return, but we don't want that. */
1146 reg.spc = 0;
1147
1148 /* Don't fiddle with S1. */
1149 }
1150
1151 } else if (sigval == SIGINT) {
1152 /* Nothing special. */
1153 }
1154}
1155
1156static void insert_watchpoint(char type, int addr, int len)
1157{
1158 /* Breakpoint/watchpoint types (GDB terminology):
1159 0 = memory breakpoint for instructions
1160 (not supported; done via memory write instead)
1161 1 = hardware breakpoint for instructions (supported)
1162 2 = write watchpoint (supported)
1163 3 = read watchpoint (supported)
1164 4 = access watchpoint (supported) */
1165
1166 if (type < '1' || type > '4') {
1167 output_buffer[0] = 0;
1168 return;
1169 }
1170
1171 /* Read watchpoints are set as access watchpoints, because of GDB's
1172 inability to deal with pure read watchpoints. */
1173 if (type == '3')
1174 type = '4';
1175
1176 if (type == '1') {
1177 /* Hardware (instruction) breakpoint. */
1178 /* Bit 0 in BP_CTRL holds the configuration for I0. */
1179 if (sreg.s0_3 & 0x1) {
1180 /* Already in use. */
1181 gdb_cris_strcpy(output_buffer, error_message[E04]);
1182 return;
1183 }
1184 /* Configure. */
1185 sreg.s1_3 = addr;
1186 sreg.s2_3 = (addr + len - 1);
1187 sreg.s0_3 |= 1;
1188 } else {
1189 int bp;
1190 unsigned int *bp_d_regs = &sreg.s3_3;
1191
1192 /* The watchpoint allocation scheme is the simplest possible.
1193 For example, if a region is watched for read and
1194 a write watch is requested, a new watchpoint will
1195		   be used. Also, if a watch is requested for a region that is already
1196 covered by one or more existing watchpoints, a new
1197 watchpoint will be used. */
1198
1199 /* First, find a free data watchpoint. */
1200 for (bp = 0; bp < 6; bp++) {
1201 /* Each data watchpoint's control registers occupy 2 bits
1202 (hence the 3), starting at bit 2 for D0 (hence the 2)
1203 with 4 bits between for each watchpoint (yes, the 4). */
1204 if (!(sreg.s0_3 & (0x3 << (2 + (bp * 4))))) {
1205 break;
1206 }
1207 }
1208
1209 if (bp > 5) {
1210 /* We're out of watchpoints. */
1211 gdb_cris_strcpy(output_buffer, error_message[E04]);
1212 return;
1213 }
1214
1215 /* Configure the control register first. */
1216 if (type == '3' || type == '4') {
1217 /* Trigger on read. */
1218 sreg.s0_3 |= (1 << (2 + bp * 4));
1219 }
1220 if (type == '2' || type == '4') {
1221 /* Trigger on write. */
1222 sreg.s0_3 |= (2 << (2 + bp * 4));
1223 }
1224
1225		/* Ugly pointer arithmetic to configure the watched range. */
1226 bp_d_regs[bp * 2] = addr;
1227 bp_d_regs[bp * 2 + 1] = (addr + len - 1);
1228 }
1229
1230 /* Set the S1 flag to enable watchpoints. */
1231 reg.ccs |= (1 << (S_CCS_BITNR + CCS_SHIFT));
1232 gdb_cris_strcpy(output_buffer, "OK");
1233}
1234
1235static void remove_watchpoint(char type, int addr, int len)
1236{
1237 /* Breakpoint/watchpoint types:
1238 0 = memory breakpoint for instructions
1239 (not supported; done via memory write instead)
1240 1 = hardware breakpoint for instructions (supported)
1241 2 = write watchpoint (supported)
1242 3 = read watchpoint (supported)
1243 4 = access watchpoint (supported) */
1244 if (type < '1' || type > '4') {
1245 output_buffer[0] = 0;
1246 return;
1247 }
1248
1249 /* Read watchpoints are set as access watchpoints, because of GDB's
1250 inability to deal with pure read watchpoints. */
1251 if (type == '3')
1252 type = '4';
1253
1254 if (type == '1') {
1255 /* Hardware breakpoint. */
1256 /* Bit 0 in BP_CTRL holds the configuration for I0. */
1257 if (!(sreg.s0_3 & 0x1)) {
1258 /* Not in use. */
1259 gdb_cris_strcpy(output_buffer, error_message[E04]);
1260 return;
1261 }
1262 /* Deconfigure. */
1263 sreg.s1_3 = 0;
1264 sreg.s2_3 = 0;
1265 sreg.s0_3 &= ~1;
1266 } else {
1267 int bp;
1268 unsigned int *bp_d_regs = &sreg.s3_3;
1269 /* Try to find a watchpoint that is configured for the
1270 specified range, then check that read/write also matches. */
1271
1272 /* Ugly pointer arithmetic, since I cannot rely on a
1273 single switch (addr) as there may be several watchpoints with
1274 the same start address for example. */
1275
1276 for (bp = 0; bp < 6; bp++) {
1277 if (bp_d_regs[bp * 2] == addr &&
1278 bp_d_regs[bp * 2 + 1] == (addr + len - 1)) {
1279 /* Matching range. */
1280 int bitpos = 2 + bp * 4;
1281 int rw_bits;
1282
1283 /* Read/write bits for this BP. */
1284 rw_bits = (sreg.s0_3 & (0x3 << bitpos)) >> bitpos;
1285
1286 if ((type == '3' && rw_bits == 0x1) ||
1287 (type == '2' && rw_bits == 0x2) ||
1288 (type == '4' && rw_bits == 0x3)) {
1289 /* Read/write matched. */
1290 break;
1291 }
1292 }
1293 }
1294
1295 if (bp > 5) {
1296 /* No watchpoint matched. */
1297 gdb_cris_strcpy(output_buffer, error_message[E04]);
1298 return;
1299 }
1300
1301 /* Found a matching watchpoint. Now, deconfigure it by
1302 both disabling read/write in bp_ctrl and zeroing its
1303 start/end addresses. */
1304 sreg.s0_3 &= ~(3 << (2 + (bp * 4)));
1305 bp_d_regs[bp * 2] = 0;
1306 bp_d_regs[bp * 2 + 1] = 0;
1307 }
1308
1309 /* Note that we don't clear the S1 flag here. It's done when continuing. */
1310 gdb_cris_strcpy(output_buffer, "OK");
1311}
1312
1313
1314
1315/* All expected commands are sent from remote.c. Send a response according
1316 to the description in remote.c. */
1317void
1318handle_exception(int sigval)
1319{
1320 /* Avoid warning of not used. */
1321
1322 USEDFUN(handle_exception);
1323 USEDVAR(internal_stack[0]);
1324
1325 register_fixup(sigval);
1326
1327 /* Send response. */
1328 stub_is_stopped(sigval);
1329
1330 for (;;) {
1331 output_buffer[0] = '\0';
1332 getpacket(input_buffer);
1333 switch (input_buffer[0]) {
1334 case 'g':
1335 /* Read registers: g
1336 Success: Each byte of register data is described by two hex digits.
1337 Registers are in the internal order for GDB, and the bytes
1338 in a register are in the same order the machine uses.
1339 Failure: void. */
1340 {
1341 char *buf;
1342 /* General and special registers. */
1343 buf = mem2hex(output_buffer, (char *)&reg, sizeof(registers));
1344 /* Support registers. */
1345				/* mem2hex returns a pointer to its terminating null, so the support registers are appended right after the general registers. */
1346 mem2hex(buf,
1347 (char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)),
1348 16 * sizeof(unsigned int));
1349 break;
1350 }
1351 case 'G':
1352 /* Write registers. GXX..XX
1353 Each byte of register data is described by two hex digits.
1354 Success: OK
1355 Failure: void. */
1356 /* General and special registers. */
1357 hex2mem((char *)&reg, &input_buffer[1], sizeof(registers));
1358 /* Support registers. */
1359 hex2mem((char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)),
1360 &input_buffer[1] + sizeof(registers),
1361 16 * sizeof(unsigned int));
1362 gdb_cris_strcpy(output_buffer, "OK");
1363 break;
1364
1365 case 'P':
1366 /* Write register. Pn...=r...
1367 Write register n..., hex value without 0x, with value r...,
1368 which contains a hex value without 0x and two hex digits
1369 for each byte in the register (target byte order). P1f=11223344 means
1370 set register 31 to 44332211.
1371 Success: OK
1372 Failure: E02, E05 */
1373 {
1374 char *suffix;
1375 int regno = gdb_cris_strtol(&input_buffer[1], &suffix, 16);
1376 int status;
1377
1378 status = write_register(regno, suffix+1);
1379
1380 switch (status) {
1381 case E02:
1382 /* Do not support read-only registers. */
1383 gdb_cris_strcpy(output_buffer, error_message[E02]);
1384 break;
1385 case E05:
1386 /* Do not support non-existing registers. */
1387 gdb_cris_strcpy(output_buffer, error_message[E05]);
1388 break;
1389 default:
1390 /* Valid register number. */
1391 gdb_cris_strcpy(output_buffer, "OK");
1392 break;
1393 }
1394 }
1395 break;
1396
1397 case 'm':
1398 /* Read from memory. mAA..AA,LLLL
1399 AA..AA is the address and LLLL is the length.
1400 Success: XX..XX is the memory content. Can be fewer bytes than
1401 requested if only part of the data may be read. m6000120a,6c means
1402		   retrieve 108 bytes from base address 6000120a.
1403 Failure: void. */
1404 {
1405 char *suffix;
1406 unsigned char *addr = (unsigned char *)gdb_cris_strtol(&input_buffer[1],
1407 &suffix, 16);
1408 int len = gdb_cris_strtol(suffix+1, 0, 16);
1409
1410 /* Bogus read (i.e. outside the kernel's
1411			   segment)? */
1412 if (!((unsigned int)addr >= 0xc0000000 &&
1413 (unsigned int)addr < 0xd0000000))
1414 addr = NULL;
1415
1416 mem2hex(output_buffer, addr, len);
1417 }
1418 break;
1419
1420 case 'X':
1421 /* Write to memory. XAA..AA,LLLL:XX..XX
1422 AA..AA is the start address, LLLL is the number of bytes, and
1423 XX..XX is the binary data.
1424 Success: OK
1425 Failure: void. */
1426 case 'M':
1427 /* Write to memory. MAA..AA,LLLL:XX..XX
1428 AA..AA is the start address, LLLL is the number of bytes, and
1429 XX..XX is the hexadecimal data.
1430 Success: OK
1431 Failure: void. */
1432 {
1433 char *lenptr;
1434 char *dataptr;
1435 unsigned char *addr = (unsigned char *)gdb_cris_strtol(&input_buffer[1],
1436 &lenptr, 16);
1437 int len = gdb_cris_strtol(lenptr+1, &dataptr, 16);
1438 if (*lenptr == ',' && *dataptr == ':') {
1439 if (input_buffer[0] == 'M') {
1440 hex2mem(addr, dataptr + 1, len);
1441 } else /* X */ {
1442 bin2mem(addr, dataptr + 1, len);
1443 }
1444 gdb_cris_strcpy(output_buffer, "OK");
1445 }
1446 else {
1447 gdb_cris_strcpy(output_buffer, error_message[E06]);
1448 }
1449 }
1450 break;
1451
1452 case 'c':
1453 /* Continue execution. cAA..AA
1454 AA..AA is the address where execution is resumed. If AA..AA is
1455 omitted, resume at the present address.
1456 Success: return to the executing thread.
1457 Failure: will never know. */
1458
1459 if (input_buffer[1] != '\0') {
1460 /* FIXME: Doesn't handle address argument. */
1461 gdb_cris_strcpy(output_buffer, error_message[E04]);
1462 break;
1463 }
1464
1465 /* Before continuing, make sure everything is set up correctly. */
1466
1467 /* Set the SPC to some unlikely value. */
1468 reg.spc = 0;
1469 /* Set the S1 flag to 0 unless some watchpoint is enabled (since setting
1470 S1 to 0 would also disable watchpoints). (Note that bits 26-31 in BP_CTRL
1471 are reserved, so don't check against those). */
1472 if ((sreg.s0_3 & 0x3fff) == 0) {
1473 reg.ccs &= ~(1 << (S_CCS_BITNR + CCS_SHIFT));
1474 }
1475
1476 return;
1477
1478 case 's':
1479 /* Step. sAA..AA
1480 AA..AA is the address where execution is resumed. If AA..AA is
1481 omitted, resume at the present address. Success: return to the
1482 executing thread. Failure: will never know. */
1483
1484 if (input_buffer[1] != '\0') {
1485 /* FIXME: Doesn't handle address argument. */
1486 gdb_cris_strcpy(output_buffer, error_message[E04]);
1487 break;
1488 }
1489
1490 /* Set the SPC to PC, which is where we'll return
1491 (deduced previously). */
1492 reg.spc = reg.pc;
1493
1494 /* Set the S1 (first stacked, not current) flag, which will
1495 kick into action when we rfe. */
1496 reg.ccs |= (1 << (S_CCS_BITNR + CCS_SHIFT));
1497 return;
1498
1499 case 'Z':
1500
1501 /* Insert breakpoint or watchpoint, Ztype,addr,length.
1502 Remote protocol says: A remote target shall return an empty string
1503 for an unrecognized breakpoint or watchpoint packet type. */
1504 {
1505 char *lenptr;
1506 char *dataptr;
1507 int addr = gdb_cris_strtol(&input_buffer[3], &lenptr, 16);
1508 int len = gdb_cris_strtol(lenptr + 1, &dataptr, 16);
1509 char type = input_buffer[1];
1510
1511 insert_watchpoint(type, addr, len);
1512 break;
1513 }
1514
1515 case 'z':
1516			/* Remove breakpoint or watchpoint, ztype,addr,length.
1517 Remote protocol says: A remote target shall return an empty string
1518 for an unrecognized breakpoint or watchpoint packet type. */
1519 {
1520 char *lenptr;
1521 char *dataptr;
1522 int addr = gdb_cris_strtol(&input_buffer[3], &lenptr, 16);
1523 int len = gdb_cris_strtol(lenptr + 1, &dataptr, 16);
1524 char type = input_buffer[1];
1525
1526 remove_watchpoint(type, addr, len);
1527 break;
1528 }
1529
1530
1531 case '?':
1532 /* The last signal which caused a stop. ?
1533 Success: SAA, where AA is the signal number.
1534 Failure: void. */
1535 output_buffer[0] = 'S';
1536 output_buffer[1] = highhex(sigval);
1537 output_buffer[2] = lowhex(sigval);
1538 output_buffer[3] = 0;
1539 break;
1540
1541 case 'D':
1542 /* Detach from host. D
1543 Success: OK, and return to the executing thread.
1544 Failure: will never know */
1545 putpacket("OK");
1546 return;
1547
1548 case 'k':
1549 case 'r':
1550 /* kill request or reset request.
1551 Success: restart of target.
1552 Failure: will never know. */
1553 kill_restart();
1554 break;
1555
1556 case 'C':
1557 case 'S':
1558 case '!':
1559 case 'R':
1560 case 'd':
1561 /* Continue with signal sig. Csig;AA..AA
1562 Step with signal sig. Ssig;AA..AA
1563 Use the extended remote protocol. !
1564 Restart the target system. R0
1565 Toggle debug flag. d
1566 Search backwards. tAA:PP,MM
1567 Not supported: E04 */
1568
1569 /* FIXME: What's the difference between not supported
1570 and ignored (below)? */
1571 gdb_cris_strcpy(output_buffer, error_message[E04]);
1572 break;
1573
1574 default:
1575			/* The stub should ignore other requests and send an empty
1576 response ($#<checksum>). This way we can extend the protocol and GDB
1577 can tell whether the stub it is talking to uses the old or the new. */
1578 output_buffer[0] = 0;
1579 break;
1580 }
1581 putpacket(output_buffer);
1582 }
1583}
1584
1585void
1586kgdb_init(void)
1587{
1588 reg_intr_vect_rw_mask intr_mask;
1589 reg_ser_rw_intr_mask ser_intr_mask;
1590
1591 /* Configure the kgdb serial port. */
1592#if defined(CONFIG_ETRAX_KGDB_PORT0)
1593 /* Note: no shortcut registered (not handled by multiple_interrupt).
1594 See entry.S. */
1595 set_exception_vector(SER0_INTR_VECT, kgdb_handle_exception);
1596 /* Enable the ser irq in the global config. */
1597 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1598 intr_mask.ser0 = 1;
1599 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1600
1601 ser_intr_mask = REG_RD(ser, regi_ser0, rw_intr_mask);
1602 ser_intr_mask.data_avail = regk_ser_yes;
1603 REG_WR(ser, regi_ser0, rw_intr_mask, ser_intr_mask);
1604#elif defined(CONFIG_ETRAX_KGDB_PORT1)
1605 /* Note: no shortcut registered (not handled by multiple_interrupt).
1606 See entry.S. */
1607 set_exception_vector(SER1_INTR_VECT, kgdb_handle_exception);
1608 /* Enable the ser irq in the global config. */
1609 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1610 intr_mask.ser1 = 1;
1611 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1612
1613 ser_intr_mask = REG_RD(ser, regi_ser1, rw_intr_mask);
1614 ser_intr_mask.data_avail = regk_ser_yes;
1615 REG_WR(ser, regi_ser1, rw_intr_mask, ser_intr_mask);
1616#elif defined(CONFIG_ETRAX_KGDB_PORT2)
1617 /* Note: no shortcut registered (not handled by multiple_interrupt).
1618 See entry.S. */
1619 set_exception_vector(SER2_INTR_VECT, kgdb_handle_exception);
1620 /* Enable the ser irq in the global config. */
1621 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1622 intr_mask.ser2 = 1;
1623 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1624
1625 ser_intr_mask = REG_RD(ser, regi_ser2, rw_intr_mask);
1626 ser_intr_mask.data_avail = regk_ser_yes;
1627 REG_WR(ser, regi_ser2, rw_intr_mask, ser_intr_mask);
1628#elif defined(CONFIG_ETRAX_KGDB_PORT3)
1629 /* Note: no shortcut registered (not handled by multiple_interrupt).
1630 See entry.S. */
1631 set_exception_vector(SER3_INTR_VECT, kgdb_handle_exception);
1632 /* Enable the ser irq in the global config. */
1633 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1634 intr_mask.ser3 = 1;
1635 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1636
1637 ser_intr_mask = REG_RD(ser, regi_ser3, rw_intr_mask);
1638 ser_intr_mask.data_avail = regk_ser_yes;
1639 REG_WR(ser, regi_ser3, rw_intr_mask, ser_intr_mask);
1640#endif
1641
1642}
1643/* Performs a complete re-start from scratch. */
1644static void
1645kill_restart(void)
1646{
1647 machine_restart("");
1648}
1649
1650/* Use this static breakpoint in the start-up only. */
1651
1652void
1653breakpoint(void)
1654{
1655 kgdb_started = 1;
1656 dynamic_bp = 0; /* This is a static, not a dynamic breakpoint. */
1657 __asm__ volatile ("break 8"); /* Jump to kgdb_handle_breakpoint. */
1658}
1659
1660/****************************** End of file **********************************/
diff --git a/arch/cris/arch-v32/kernel/kgdb_asm.S b/arch/cris/arch-v32/kernel/kgdb_asm.S
new file mode 100644
index 000000000000..b350dd279ed2
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/kgdb_asm.S
@@ -0,0 +1,552 @@
1/*
2 * Copyright (C) 2004 Axis Communications AB
3 *
4 * Code for handling break 8, hardware breakpoint, single step, and serial
5 * port exceptions for kernel debugging purposes.
6 */
7
8#include <linux/config.h>
9#include <asm/arch/hwregs/intr_vect.h>
10
11 ;; Exported functions.
12 .globl kgdb_handle_exception
13
14kgdb_handle_exception:
15
16;; Create a register image of the caller.
17;;
18;; First of all, save the ACR on the stack since we need it for address calculations.
19;; We put it into the register struct later.
20
21 subq 4, $sp
22 move.d $acr, [$sp]
23
24;; Now we are free to use ACR all we want.
25;; If we were running this handler with interrupts on, we would have to be careful
26;; to save and restore CCS manually, but since we aren't, we treat it like every other
27;; register.
28
29 move.d reg, $acr
30 move.d $r0, [$acr] ; Save R0 (start of register struct)
31 addq 4, $acr
32 move.d $r1, [$acr] ; Save R1
33 addq 4, $acr
34 move.d $r2, [$acr] ; Save R2
35 addq 4, $acr
36 move.d $r3, [$acr] ; Save R3
37 addq 4, $acr
38 move.d $r4, [$acr] ; Save R4
39 addq 4, $acr
40 move.d $r5, [$acr] ; Save R5
41 addq 4, $acr
42 move.d $r6, [$acr] ; Save R6
43 addq 4, $acr
44 move.d $r7, [$acr] ; Save R7
45 addq 4, $acr
46 move.d $r8, [$acr] ; Save R8
47 addq 4, $acr
48 move.d $r9, [$acr] ; Save R9
49 addq 4, $acr
50 move.d $r10, [$acr] ; Save R10
51 addq 4, $acr
52 move.d $r11, [$acr] ; Save R11
53 addq 4, $acr
54 move.d $r12, [$acr] ; Save R12
55 addq 4, $acr
56 move.d $r13, [$acr] ; Save R13
57 addq 4, $acr
58 move.d $sp, [$acr] ; Save SP (R14)
59 addq 4, $acr
60
61 ;; The ACR register is already saved on the stack, so pop it from there.
62 move.d [$sp],$r0
63 move.d $r0, [$acr]
64 addq 4, $acr
65
66 move $bz, [$acr]
67 addq 1, $acr
68 move $vr, [$acr]
69 addq 1, $acr
70 move $pid, [$acr]
71 addq 4, $acr
72 move $srs, [$acr]
73 addq 1, $acr
74 move $wz, [$acr]
75 addq 2, $acr
76 move $exs, [$acr]
77 addq 4, $acr
78 move $eda, [$acr]
79 addq 4, $acr
80 move $mof, [$acr]
81 addq 4, $acr
82 move $dz, [$acr]
83 addq 4, $acr
84 move $ebp, [$acr]
85 addq 4, $acr
86 move $erp, [$acr]
87 addq 4, $acr
88 move $srp, [$acr]
89 addq 4, $acr
90 move $nrp, [$acr]
91 addq 4, $acr
92 move $ccs, [$acr]
93 addq 4, $acr
94 move $usp, [$acr]
95 addq 4, $acr
96 move $spc, [$acr]
97 addq 4, $acr
98
99;; Skip the pseudo-PC.
100 addq 4, $acr
101
102;; Save the support registers in bank 0 - 3.
103 clear.d $r1 ; Bank counter
104 move.d sreg, $acr
105
106;; Bank 0
107 move $r1, $srs
108 nop
109 nop
110 nop
111 move $s0, $r0
112 move.d $r0, [$acr]
113 addq 4, $acr
114 move $s1, $r0
115 move.d $r0, [$acr]
116 addq 4, $acr
117 move $s2, $r0
118 move.d $r0, [$acr]
119 addq 4, $acr
120 move $s3, $r0
121 move.d $r0, [$acr]
122 addq 4, $acr
123 move $s4, $r0
124 move.d $r0, [$acr]
125 addq 4, $acr
126 move $s5, $r0
127 move.d $r0, [$acr]
128 addq 4, $acr
129 move $s6, $r0
130 move.d $r0, [$acr]
131 addq 4, $acr
132 move $s7, $r0
133 move.d $r0, [$acr]
134 addq 4, $acr
135 move $s8, $r0
136 move.d $r0, [$acr]
137 addq 4, $acr
138 move $s9, $r0
139 move.d $r0, [$acr]
140 addq 4, $acr
141 move $s10, $r0
142 move.d $r0, [$acr]
143 addq 4, $acr
144 move $s11, $r0
145 move.d $r0, [$acr]
146 addq 4, $acr
147 move $s12, $r0
148 move.d $r0, [$acr]
149 addq 4, $acr
150
151 ;; Nothing in S13 - S15, bank 0
152 clear.d [$acr]
153 addq 4, $acr
154 clear.d [$acr]
155 addq 4, $acr
156 clear.d [$acr]
157 addq 4, $acr
158
159;; Bank 1 and bank 2 have the same layout, hence the loop.
160 addq 1, $r1
1611:
162 move $r1, $srs
163 nop
164 nop
165 nop
166 move $s0, $r0
167 move.d $r0, [$acr]
168 addq 4, $acr
169 move $s1, $r0
170 move.d $r0, [$acr]
171 addq 4, $acr
172 move $s2, $r0
173 move.d $r0, [$acr]
174 addq 4, $acr
175 move $s3, $r0
176 move.d $r0, [$acr]
177 addq 4, $acr
178 move $s4, $r0
179 move.d $r0, [$acr]
180 addq 4, $acr
181 move $s5, $r0
182 move.d $r0, [$acr]
183 addq 4, $acr
184 move $s6, $r0
185 move.d $r0, [$acr]
186 addq 4, $acr
187
188 ;; Nothing in S7 - S15, bank 1 and 2
189 clear.d [$acr]
190 addq 4, $acr
191 clear.d [$acr]
192 addq 4, $acr
193 clear.d [$acr]
194 addq 4, $acr
195 clear.d [$acr]
196 addq 4, $acr
197 clear.d [$acr]
198 addq 4, $acr
199 clear.d [$acr]
200 addq 4, $acr
201 clear.d [$acr]
202 addq 4, $acr
203 clear.d [$acr]
204 addq 4, $acr
205 clear.d [$acr]
206 addq 4, $acr
207
208 addq 1, $r1
209 cmpq 3, $r1
210 bne 1b
211 nop
212
213;; Bank 3
214 move $r1, $srs
215 nop
216 nop
217 nop
218 move $s0, $r0
219 move.d $r0, [$acr]
220 addq 4, $acr
221 move $s1, $r0
222 move.d $r0, [$acr]
223 addq 4, $acr
224 move $s2, $r0
225 move.d $r0, [$acr]
226 addq 4, $acr
227 move $s3, $r0
228 move.d $r0, [$acr]
229 addq 4, $acr
230 move $s4, $r0
231 move.d $r0, [$acr]
232 addq 4, $acr
233 move $s5, $r0
234 move.d $r0, [$acr]
235 addq 4, $acr
236 move $s6, $r0
237 move.d $r0, [$acr]
238 addq 4, $acr
239 move $s7, $r0
240 move.d $r0, [$acr]
241 addq 4, $acr
242 move $s8, $r0
243 move.d $r0, [$acr]
244 addq 4, $acr
245 move $s9, $r0
246 move.d $r0, [$acr]
247 addq 4, $acr
248 move $s10, $r0
249 move.d $r0, [$acr]
250 addq 4, $acr
251 move $s11, $r0
252 move.d $r0, [$acr]
253 addq 4, $acr
254 move $s12, $r0
255 move.d $r0, [$acr]
256 addq 4, $acr
257 move $s13, $r0
258 move.d $r0, [$acr]
259 addq 4, $acr
260 move $s14, $r0
261 move.d $r0, [$acr]
262 addq 4, $acr
263;; Nothing in S15, bank 3
264 clear.d [$acr]
265 addq 4, $acr
266
267;; Check what got us here: get IDX field of EXS.
268 move $exs, $r10
269 and.d 0xff00, $r10
270 lsrq 8, $r10
271#if defined(CONFIG_ETRAX_KGDB_PORT0)
272 cmp.d SER0_INTR_VECT, $r10 ; IRQ for serial port 0
273 beq sigint
274 nop
275#elif defined(CONFIG_ETRAX_KGDB_PORT1)
276 cmp.d SER1_INTR_VECT, $r10 ; IRQ for serial port 1
277 beq sigint
278 nop
279#elif defined(CONFIG_ETRAX_KGDB_PORT2)
280 cmp.d SER2_INTR_VECT, $r10 ; IRQ for serial port 2
281 beq sigint
282 nop
283#elif defined(CONFIG_ETRAX_KGDB_PORT3)
284 cmp.d SER3_INTR_VECT, $r10 ; IRQ for serial port 3
285 beq sigint
286 nop
287#endif
288;; Multiple interrupt must be due to serial break.
289 cmp.d 0x30, $r10 ; Multiple interrupt
290 beq sigint
291 nop
292;; Neither of those? Then it's a sigtrap.
293 ba handle_comm
294 moveq 5, $r10 ; Set SIGTRAP (delay slot)
295
296sigint:
297 ;; Serial interrupt; get character
298 jsr getDebugChar
299 nop ; Delay slot
300 cmp.b 3, $r10 ; \003 (Ctrl-C)?
301 bne return ; No, get out of here
302 nop
303 moveq 2, $r10 ; Set SIGINT
304
305;;
306;; Handle the communication
307;;
308handle_comm:
309	move.d internal_stack+1020, $sp	; Use the internal stack; SP starts at its upper end
310 jsr handle_exception ; Interactive routine
311 nop
312
313;;
314;; Return to the caller
315;;
316return:
317
318;; First of all, write the support registers.
319 clear.d $r1 ; Bank counter
320 move.d sreg, $acr
321
322;; Bank 0
323 move $r1, $srs
324 nop
325 nop
326 nop
327 move.d [$acr], $r0
328 move $r0, $s0
329 addq 4, $acr
330 move.d [$acr], $r0
331 move $r0, $s1
332 addq 4, $acr
333 move.d [$acr], $r0
334 move $r0, $s2
335 addq 4, $acr
336 move.d [$acr], $r0
337 move $r0, $s3
338 addq 4, $acr
339 move.d [$acr], $r0
340 move $r0, $s4
341 addq 4, $acr
342 move.d [$acr], $r0
343 move $r0, $s5
344 addq 4, $acr
345
346;; Nothing in S6 - S7, bank 0.
347 addq 4, $acr
348 addq 4, $acr
349
350 move.d [$acr], $r0
351 move $r0, $s8
352 addq 4, $acr
353 move.d [$acr], $r0
354 move $r0, $s9
355 addq 4, $acr
356 move.d [$acr], $r0
357 move $r0, $s10
358 addq 4, $acr
359 move.d [$acr], $r0
360 move $r0, $s11
361 addq 4, $acr
362 move.d [$acr], $r0
363 move $r0, $s12
364 addq 4, $acr
365
366;; Nothing in S13 - S15, bank 0
367 addq 4, $acr
368 addq 4, $acr
369 addq 4, $acr
370
371;; Bank 1 and bank 2 have the same layout, hence the loop.
372 addq 1, $r1
3732:
374 move $r1, $srs
375 nop
376 nop
377 nop
378 move.d [$acr], $r0
379 move $r0, $s0
380 addq 4, $acr
381 move.d [$acr], $r0
382 move $r0, $s1
383 addq 4, $acr
384 move.d [$acr], $r0
385 move $r0, $s2
386 addq 4, $acr
387
388;; S3 (MM_CAUSE) is read-only.
389 addq 4, $acr
390
391 move.d [$acr], $r0
392 move $r0, $s4
393 addq 4, $acr
394
395;; FIXME: Actually write S5/S6? (Affects MM_CAUSE.)
396 addq 4, $acr
397 addq 4, $acr
398
399;; Nothing in S7 - S15, bank 1 and 2
400 addq 4, $acr
401 addq 4, $acr
402 addq 4, $acr
403 addq 4, $acr
404 addq 4, $acr
405 addq 4, $acr
406 addq 4, $acr
407 addq 4, $acr
408 addq 4, $acr
409
410 addq 1, $r1
411 cmpq 3, $r1
412 bne 2b
413 nop
414
415;; Bank 3
416 move $r1, $srs
417 nop
418 nop
419 nop
420 move.d [$acr], $r0
421 move $r0, $s0
422 addq 4, $acr
423 move.d [$acr], $r0
424 move $r0, $s1
425 addq 4, $acr
426 move.d [$acr], $r0
427 move $r0, $s2
428 addq 4, $acr
429 move.d [$acr], $r0
430 move $r0, $s3
431 addq 4, $acr
432 move.d [$acr], $r0
433 move $r0, $s4
434 addq 4, $acr
435 move.d [$acr], $r0
436 move $r0, $s5
437 addq 4, $acr
438 move.d [$acr], $r0
439 move $r0, $s6
440 addq 4, $acr
441 move.d [$acr], $r0
442 move $r0, $s7
443 addq 4, $acr
444 move.d [$acr], $r0
445 move $r0, $s8
446 addq 4, $acr
447 move.d [$acr], $r0
448 move $r0, $s9
449 addq 4, $acr
450 move.d [$acr], $r0
451 move $r0, $s10
452 addq 4, $acr
453 move.d [$acr], $r0
454 move $r0, $s11
455 addq 4, $acr
456 move.d [$acr], $r0
457 move $r0, $s12
458 addq 4, $acr
459 move.d [$acr], $r0
460 move $r0, $s13
461 addq 4, $acr
462 move.d [$acr], $r0
463 move $r0, $s14
464 addq 4, $acr
465
466;; Nothing in S15, bank 3
467 addq 4, $acr
468
469;; Now, move on to the regular register restoration process.
470
471 move.d reg, $acr ; Reset ACR to point at the beginning of the register image
472 move.d [$acr], $r0 ; Restore R0
473 addq 4, $acr
474 move.d [$acr], $r1 ; Restore R1
475 addq 4, $acr
476 move.d [$acr], $r2 ; Restore R2
477 addq 4, $acr
478 move.d [$acr], $r3 ; Restore R3
479 addq 4, $acr
480 move.d [$acr], $r4 ; Restore R4
481 addq 4, $acr
482 move.d [$acr], $r5 ; Restore R5
483 addq 4, $acr
484 move.d [$acr], $r6 ; Restore R6
485 addq 4, $acr
486 move.d [$acr], $r7 ; Restore R7
487 addq 4, $acr
488 move.d [$acr], $r8 ; Restore R8
489 addq 4, $acr
490 move.d [$acr], $r9 ; Restore R9
491 addq 4, $acr
492 move.d [$acr], $r10 ; Restore R10
493 addq 4, $acr
494 move.d [$acr], $r11 ; Restore R11
495 addq 4, $acr
496 move.d [$acr], $r12 ; Restore R12
497 addq 4, $acr
498 move.d [$acr], $r13 ; Restore R13
499
500;;
501;; We restore all registers, even though some of them probably haven't changed.
502;;
503
504 addq 4, $acr
505 move.d [$acr], $sp ; Restore SP (R14)
506
507 ;; ACR cannot be restored just yet.
508 addq 8, $acr
509
510 ;; Skip BZ, VR.
511 addq 2, $acr
512
513 move [$acr], $pid ; Restore PID
514 addq 4, $acr
515 move [$acr], $srs ; Restore SRS
516 nop
517 nop
518 nop
519 addq 1, $acr
520
521 ;; Skip WZ.
522 addq 2, $acr
523
524 move [$acr], $exs ; Restore EXS.
525 addq 4, $acr
526 move [$acr], $eda ; Restore EDA.
527 addq 4, $acr
528 move [$acr], $mof ; Restore MOF.
529
530 ;; Skip DZ.
531 addq 8, $acr
532
533 move [$acr], $ebp ; Restore EBP.
534 addq 4, $acr
535 move [$acr], $erp ; Restore ERP.
536 addq 4, $acr
537 move [$acr], $srp ; Restore SRP.
538 addq 4, $acr
539 move [$acr], $nrp ; Restore NRP.
540 addq 4, $acr
541 move [$acr], $ccs ; Restore CCS like an ordinary register.
542 addq 4, $acr
543 move [$acr], $usp ; Restore USP
544 addq 4, $acr
545 move [$acr], $spc ; Restore SPC
546 ; No restoration of pseudo-PC of course.
547
548 move.d reg, $acr ; Reset ACR to point at the beginning of the register image
549 add.d 15*4, $acr
550 move.d [$acr], $acr ; Finally, restore ACR.
551 rete ; Same as jump ERP
552 rfe ; Shifts CCS
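Editor's note: the save and restore sequences above step a pointer through a flat image of the support registers, one 32-bit slot per register and bank. Below is a minimal C sketch of that layout, assuming (as the stepping implies) that the sreg buffer declared in kgdb.c holds four banks of sixteen registers; the struct and helper names are illustrative only.

#define SREG_BANKS 4		/* SRS banks 0..3 walked by the loops above */
#define SREG_REGS  16		/* S0..S15, one 32-bit slot each */

struct kgdb_sreg_image {
	unsigned int bank[SREG_BANKS][SREG_REGS];	/* 4 * 16 * 4 = 256 bytes */
};

/* Byte offset of support register `reg` in bank `srs`, matching the
 * repeated "addq 4, $acr" stepping in the assembly above. */
static inline unsigned long sreg_offset(int srs, int reg)
{
	return (srs * SREG_REGS + reg) * 4UL;
}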
diff --git a/arch/cris/arch-v32/kernel/pinmux.c b/arch/cris/arch-v32/kernel/pinmux.c
new file mode 100644
index 000000000000..a2b8aa37c1bf
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/pinmux.c
@@ -0,0 +1,229 @@
1/*
2 * Allocator for I/O pins. All pins are allocated to GPIO at bootup.
3 * Unassigned pins and GPIO pins can be allocated to a fixed interface
4 * or the I/O processor instead.
5 *
6 * Copyright (c) 2004 Axis Communications AB.
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/spinlock.h>
14#include <asm/arch/hwregs/reg_map.h>
15#include <asm/arch/hwregs/reg_rdwr.h>
16#include <asm/arch/pinmux.h>
17#include <asm/arch/hwregs/pinmux_defs.h>
18
19#undef DEBUG
20
21#define PORT_PINS 18
22#define PORTS 4
23
24static char pins[PORTS][PORT_PINS];
25static DEFINE_SPINLOCK(pinmux_lock);
26
27static void crisv32_pinmux_set(int port);
28
29int
30crisv32_pinmux_init(void)
31{
32 static int initialized = 0;
33
34 if (!initialized) {
35 reg_pinmux_rw_pa pa = REG_RD(pinmux, regi_pinmux, rw_pa);
36 initialized = 1;
37 pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
38 pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
39 REG_WR(pinmux, regi_pinmux, rw_pa, pa);
40 crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
41 crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
42 crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
43 crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
44 }
45
46 return 0;
47}
48
49int
50crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
51{
52 int i;
53 unsigned long flags;
54
55 crisv32_pinmux_init();
56
57	if (port >= PORTS)
58 return -EINVAL;
59
60 spin_lock_irqsave(&pinmux_lock, flags);
61
62 for (i = first_pin; i <= last_pin; i++)
63 {
64 if ((pins[port][i] != pinmux_none) && (pins[port][i] != pinmux_gpio) &&
65 (pins[port][i] != mode))
66 {
67 spin_unlock_irqrestore(&pinmux_lock, flags);
68#ifdef DEBUG
69 panic("Pinmux alloc failed!\n");
70#endif
71 return -EPERM;
72 }
73 }
74
75 for (i = first_pin; i <= last_pin; i++)
76 pins[port][i] = mode;
77
78 crisv32_pinmux_set(port);
79
80 spin_unlock_irqrestore(&pinmux_lock, flags);
81
82 return 0;
83}
84
85int
86crisv32_pinmux_alloc_fixed(enum fixed_function function)
87{
88 int ret = -EINVAL;
89 char saved[sizeof pins];
90 unsigned long flags;
91
92 spin_lock_irqsave(&pinmux_lock, flags);
93
94 /* Save internal data for recovery */
95 memcpy(saved, pins, sizeof pins);
96
97 reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
98
99 switch(function)
100 {
101 case pinmux_ser1:
102 ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
103 hwprot.ser1 = regk_pinmux_yes;
104 break;
105 case pinmux_ser2:
106 ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
107 hwprot.ser2 = regk_pinmux_yes;
108 break;
109 case pinmux_ser3:
110 ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
111 hwprot.ser3 = regk_pinmux_yes;
112 break;
113 case pinmux_sser0:
114 ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
115 ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
116 hwprot.sser0 = regk_pinmux_yes;
117 break;
118 case pinmux_sser1:
119 ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
120 hwprot.sser1 = regk_pinmux_yes;
121 break;
122 case pinmux_ata0:
123 ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
124 ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
125 hwprot.ata0 = regk_pinmux_yes;
126 break;
127 case pinmux_ata1:
128 ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
129 ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
130 hwprot.ata1 = regk_pinmux_yes;
131 break;
132 case pinmux_ata2:
133 ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
134 ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
135 hwprot.ata2 = regk_pinmux_yes;
136 break;
137 case pinmux_ata3:
138 ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
139 ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
140		hwprot.ata3 = regk_pinmux_yes;
141 break;
142 case pinmux_ata:
143 ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
144 ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
145 hwprot.ata = regk_pinmux_yes;
146 break;
147 case pinmux_eth1:
148 ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
149 hwprot.eth1 = regk_pinmux_yes;
150 hwprot.eth1_mgm = regk_pinmux_yes;
151 break;
152 case pinmux_timer:
153 ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
154 hwprot.timer = regk_pinmux_yes;
155 spin_unlock_irqrestore(&pinmux_lock, flags);
156 return ret;
157 }
158
159 if (!ret)
160 REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
161 else
162 memcpy(pins, saved, sizeof pins);
163
164 spin_unlock_irqrestore(&pinmux_lock, flags);
165
166 return ret;
167}
168
169void
170crisv32_pinmux_set(int port)
171{
172 int i;
173 int gpio_val = 0;
174 int iop_val = 0;
175
176 for (i = 0; i < PORT_PINS; i++)
177 {
178 if (pins[port][i] == pinmux_gpio)
179 gpio_val |= (1 << i);
180 else if (pins[port][i] == pinmux_iop)
181 iop_val |= (1 << i);
182 }
183
184 REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_gio + 8*port, gpio_val);
185 REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_iop + 8*port, iop_val);
186
187#ifdef DEBUG
188 crisv32_pinmux_dump();
189#endif
190}
191
192int
193crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
194{
195 int i;
196 unsigned long flags;
197
198 crisv32_pinmux_init();
199
200	if (port >= PORTS)
201 return -EINVAL;
202
203 spin_lock_irqsave(&pinmux_lock, flags);
204
205 for (i = first_pin; i <= last_pin; i++)
206 pins[port][i] = pinmux_none;
207
208 crisv32_pinmux_set(port);
209 spin_unlock_irqrestore(&pinmux_lock, flags);
210
211 return 0;
212}
213
214void
215crisv32_pinmux_dump(void)
216{
217 int i, j;
218
219 crisv32_pinmux_init();
220
221 for (i = 0; i < PORTS; i++)
222 {
223 printk("Port %c\n", 'B'+i);
224 for (j = 0; j < PORT_PINS; j++)
225 printk(" Pin %d = %d\n", j, pins[i][j]);
226 }
227}
228
229__initcall(crisv32_pinmux_init);
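Editor's note: as a usage illustration only (not part of the patch), a driver combining the allocators above might look like the sketch below. The pin ranges are made up for the example and error handling is abbreviated.

#include <asm/arch/pinmux.h>

static int example_claim_pins(void)
{
	int err;

	/* Hand pins 2..5 on port C to the I/O processor. */
	err = crisv32_pinmux_alloc(PORT_C, 2, 5, pinmux_iop);
	if (err)
		return err;		/* -EPERM if a pin is already taken */

	/* Route serial port 2 onto its fixed pins (port C 8..11) and let
	 * the hardware protect them. */
	err = crisv32_pinmux_alloc_fixed(pinmux_ser2);
	if (err)
		crisv32_pinmux_dealloc(PORT_C, 2, 5);

	return err;
}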
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
new file mode 100644
index 000000000000..882be42114f7
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -0,0 +1,270 @@
1/*
2 * Copyright (C) 2000-2003 Axis Communications AB
3 *
4 * Authors: Bjorn Wesen (bjornw@axis.com)
5 * Mikael Starvik (starvik@axis.com)
6 * Tobias Anderberg (tobiasa@axis.com), CRISv32 port.
7 *
8 * This file handles the architecture-dependent parts of process handling.
9 */
10
11#include <linux/config.h>
12#include <linux/sched.h>
13#include <linux/err.h>
14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <asm/arch/hwregs/reg_rdwr.h>
17#include <asm/arch/hwregs/reg_map.h>
18#include <asm/arch/hwregs/timer_defs.h>
19#include <asm/arch/hwregs/intr_vect_defs.h>
20
21extern void stop_watchdog(void);
22
23#ifdef CONFIG_ETRAX_GPIO
24extern void etrax_gpio_wake_up_check(void); /* Defined in drivers/gpio.c. */
25#endif
26
27extern int cris_hlt_counter;
28
29/* We use this if we don't have any better idle routine. */
30void default_idle(void)
31{
32 local_irq_disable();
33 if (!need_resched() && !cris_hlt_counter) {
34 /* Halt until exception. */
35 __asm__ volatile("ei \n\t"
36 "halt ");
37 }
38 local_irq_enable();
39}
40
41/*
42 * Free current thread data structures etc.
43 */
44
45extern void deconfigure_bp(long pid);
46void exit_thread(void)
47{
48 deconfigure_bp(current->pid);
49}
50
51/*
52 * If the watchdog is enabled, disable interrupts and enter an infinite loop.
53 * The watchdog will reset the CPU after 0.1s. If the watchdog isn't enabled
54 * then enable it and wait.
55 */
56extern void arch_enable_nmi(void);
57
58void
59hard_reset_now(void)
60{
61 /*
62	 * Don't declare this variable elsewhere. We don't want any code other
63	 * than the watchdog handler in entry.S and this code, which implements
64	 * hard reset through the watchdog, to know about it.
65 */
66#if defined(CONFIG_ETRAX_WATCHDOG)
67 extern int cause_of_death;
68#endif
69
70 printk("*** HARD RESET ***\n");
71 local_irq_disable();
72
73#if defined(CONFIG_ETRAX_WATCHDOG)
74 cause_of_death = 0xbedead;
75#else
76{
77 reg_timer_rw_wd_ctrl wd_ctrl = {0};
78
79 stop_watchdog();
80
81 wd_ctrl.key = 16; /* Arbitrary key. */
82 wd_ctrl.cnt = 1; /* Minimum time. */
83 wd_ctrl.cmd = regk_timer_start;
84
85 arch_enable_nmi();
86 REG_WR(timer, regi_timer, rw_wd_ctrl, wd_ctrl);
87}
88#endif
89
90 while (1)
91 ; /* Wait for reset. */
92}
93
94/*
95 * Return saved PC of a blocked thread.
96 */
97unsigned long thread_saved_pc(struct task_struct *t)
98{
99 return (unsigned long)user_regs(t->thread_info)->erp;
100}
101
102static void
103kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
104{
105 fn(arg);
106 do_exit(-1); /* Should never be called, return bad exit value. */
107}
108
109/* Create a kernel thread. */
110int
111kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
112{
113 struct pt_regs regs;
114
115 memset(&regs, 0, sizeof(regs));
116
117 /* Don't use r10 since that is set to 0 in copy_thread. */
118 regs.r11 = (unsigned long) fn;
119 regs.r12 = (unsigned long) arg;
120 regs.erp = (unsigned long) kernel_thread_helper;
121 regs.ccs = 1 << (I_CCS_BITNR + CCS_SHIFT);
122
123 /* Create the new process. */
124 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
125}
126
127/*
128 * Set up the child's kernel stack with a pt_regs and a switch_stack on it.
129 * They will be unwound during _resume and _ret_from_sys_call when the new thread
130 * is scheduled.
131 *
132 * Also set up the thread switching structure which is used to keep
133 * thread-specific data during _resumes.
134 */
135
136extern asmlinkage void ret_from_fork(void);
137
138int
139copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
140 unsigned long unused,
141 struct task_struct *p, struct pt_regs *regs)
142{
143 struct pt_regs *childregs;
144 struct switch_stack *swstack;
145
146 /*
147 * Put the pt_regs structure at the end of the new kernel stack page and
148 * fix it up. Note: the task_struct doubles as the kernel stack for the
149 * task.
150 */
151 childregs = user_regs(p->thread_info);
152 *childregs = *regs; /* Struct copy of pt_regs. */
153 p->set_child_tid = p->clear_child_tid = NULL;
154 childregs->r10 = 0; /* Child returns 0 after a fork/clone. */
155
156	/* Set a new TLS?
157	 * The TLS is in $mof because it is the 5th argument to sys_clone.
158 */
159 if (p->mm && (clone_flags & CLONE_SETTLS)) {
160 p->thread_info->tls = regs->mof;
161 }
162
163 /* Put the switch stack right below the pt_regs. */
164 swstack = ((struct switch_stack *) childregs) - 1;
165
166	/* Parameter to ret_from_sys_call. 0 means don't restart the syscall. */
167 swstack->r9 = 0;
168
169 /*
170 * We want to return into ret_from_sys_call after the _resume.
171 * ret_from_fork will call ret_from_sys_call.
172 */
173 swstack->return_ip = (unsigned long) ret_from_fork;
174
175	/* Fix the user-mode and kernel-mode stack pointers. */
176 p->thread.usp = usp;
177 p->thread.ksp = (unsigned long) swstack;
178
179 return 0;
180}
181
182/*
183 * Be aware of the "magic" 7th argument in the four system-calls below.
184 * They need the latest stackframe, which is put as the 7th argument by
185 * entry.S. The previous arguments are dummies or actually used, but need
186 * to be defined to reach the 7th argument.
187 *
188 * N.B.: Another method to get the stackframe is to use current_regs(). But
189 * it returns the latest stack-frame stacked when going from _user mode_ and
190 * some of these (at least sys_clone) are called from kernel-mode sometimes
191 * (for example during kernel_thread, above) and thus cannot use it. Thus,
192 * to be sure not to get any surprises, we use the method for the other calls
193 * as well.
194 */
195asmlinkage int
196sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
197 struct pt_regs *regs)
198{
199 return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
200}
201
202/* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */
203asmlinkage int
204sys_clone(unsigned long newusp, unsigned long flags, int *parent_tid, int *child_tid,
205 unsigned long tls, long srp, struct pt_regs *regs)
206{
207 if (!newusp)
208 newusp = rdusp();
209
210 return do_fork(flags, newusp, regs, 0, parent_tid, child_tid);
211}
212
213/*
214 * vfork is a system call in i386 because of register-pressure - maybe
215 * we can remove it and handle it in libc but we put it here until then.
216 */
217asmlinkage int
218sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
219 struct pt_regs *regs)
220{
221 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
222}
223
224/* sys_execve() executes a new program. */
225asmlinkage int
226sys_execve(const char *fname, char **argv, char **envp, long r13, long mof, long srp,
227 struct pt_regs *regs)
228{
229 int error;
230 char *filename;
231
232 filename = getname(fname);
233 error = PTR_ERR(filename);
234
235 if (IS_ERR(filename))
236 goto out;
237
238 error = do_execve(filename, argv, envp, regs);
239 putname(filename);
240 out:
241 return error;
242}
243
244unsigned long
245get_wchan(struct task_struct *p)
246{
247 /* TODO */
248 return 0;
249}
250#undef last_sched
251#undef first_sched
252
253void show_regs(struct pt_regs * regs)
254{
255 unsigned long usp = rdusp();
256 printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
257 regs->erp, regs->srp, regs->ccs, usp, regs->mof);
258
259 printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
260 regs->r0, regs->r1, regs->r2, regs->r3);
261
262 printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
263 regs->r4, regs->r5, regs->r6, regs->r7);
264
265 printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
266 regs->r8, regs->r9, regs->r10, regs->r11);
267
268 printk("r12: %08lx r13: %08lx oR10: %08lx\n",
269 regs->r12, regs->r13, regs->orig_r10);
270}
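Editor's note: for orientation, this is how the r11/r12/erp setup in kernel_thread() above is typically consumed by a caller; the new thread resumes in kernel_thread_helper(), which fetches fn from R11 and arg from R12 and calls fn(arg). Illustrative sketch only.

static int example_worker(void *data)
{
	/* ... background work ... */
	return 0;
}

static void example_start_worker(void)
{
	int pid = kernel_thread(example_worker, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "example: kernel_thread failed (%d)\n", pid);
}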
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
new file mode 100644
index 000000000000..208489da2a87
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -0,0 +1,597 @@
1/*
2 * Copyright (C) 2000-2003, Axis Communications AB.
3 */
4
5#include <linux/kernel.h>
6#include <linux/sched.h>
7#include <linux/mm.h>
8#include <linux/smp.h>
9#include <linux/smp_lock.h>
10#include <linux/errno.h>
11#include <linux/ptrace.h>
12#include <linux/user.h>
13#include <linux/signal.h>
14#include <linux/security.h>
15
16#include <asm/uaccess.h>
17#include <asm/page.h>
18#include <asm/pgtable.h>
19#include <asm/system.h>
20#include <asm/processor.h>
21#include <asm/arch/hwregs/supp_reg.h>
22
23/*
24 * Determines which bits in CCS the user has access to.
25 * 1 = access, 0 = no access.
26 */
27#define CCS_MASK 0x00087c00 /* SXNZVC */
28
29#define SBIT_USER (1 << (S_CCS_BITNR + CCS_SHIFT))
30
31static int put_debugreg(long pid, unsigned int regno, long data);
32static long get_debugreg(long pid, unsigned int regno);
33static unsigned long get_pseudo_pc(struct task_struct *child);
34void deconfigure_bp(long pid);
35
36extern unsigned long cris_signal_return_page;
37
38/*
39 * Get contents of register REGNO in task TASK.
40 */
41long get_reg(struct task_struct *task, unsigned int regno)
42{
43	/* USP is a special case: it's not in the pt_regs struct but
44	 * in the task's thread struct.
45 */
46 unsigned long ret;
47
48 if (regno <= PT_EDA)
49 ret = ((unsigned long *)user_regs(task->thread_info))[regno];
50 else if (regno == PT_USP)
51 ret = task->thread.usp;
52 else if (regno == PT_PPC)
53 ret = get_pseudo_pc(task);
54 else if (regno <= PT_MAX)
55 ret = get_debugreg(task->pid, regno);
56 else
57 ret = 0;
58
59 return ret;
60}
61
62/*
63 * Write contents of register REGNO in task TASK.
64 */
65int put_reg(struct task_struct *task, unsigned int regno, unsigned long data)
66{
67 if (regno <= PT_EDA)
68 ((unsigned long *)user_regs(task->thread_info))[regno] = data;
69 else if (regno == PT_USP)
70 task->thread.usp = data;
71 else if (regno == PT_PPC) {
72 /* Write pseudo-PC to ERP only if changed. */
73 if (data != get_pseudo_pc(task))
74 ((unsigned long *)user_regs(task->thread_info))[PT_ERP] = data;
75 } else if (regno <= PT_MAX)
76 return put_debugreg(task->pid, regno, data);
77 else
78 return -1;
79 return 0;
80}
81
82/*
83 * Called by kernel/ptrace.c when detaching.
84 *
85 * Make sure the single step bit is not set.
86 */
87void
88ptrace_disable(struct task_struct *child)
89{
90 unsigned long tmp;
91
92 /* Deconfigure SPC and S-bit. */
93 tmp = get_reg(child, PT_CCS) & ~SBIT_USER;
94 put_reg(child, PT_CCS, tmp);
95 put_reg(child, PT_SPC, 0);
96
97 /* Deconfigure any watchpoints associated with the child. */
98 deconfigure_bp(child->pid);
99}
100
101
102asmlinkage int
103sys_ptrace(long request, long pid, long addr, long data)
104{
105 struct task_struct *child;
106 int ret;
107 unsigned long __user *datap = (unsigned long __user *)data;
108
109 lock_kernel();
110 ret = -EPERM;
111
112 if (request == PTRACE_TRACEME) {
113 /* are we already being traced? */
114 if (current->ptrace & PT_PTRACED)
115 goto out;
116 ret = security_ptrace(current->parent, current);
117 if (ret)
118 goto out;
119 /* set the ptrace bit in the process flags. */
120 current->ptrace |= PT_PTRACED;
121 ret = 0;
122 goto out;
123 }
124
125 ret = -ESRCH;
126 read_lock(&tasklist_lock);
127 child = find_task_by_pid(pid);
128
129 if (child)
130 get_task_struct(child);
131
132 read_unlock(&tasklist_lock);
133
134 if (!child)
135 goto out;
136
137 ret = -EPERM;
138
139 if (pid == 1) /* Leave the init process alone! */
140 goto out_tsk;
141
142 if (request == PTRACE_ATTACH) {
143 ret = ptrace_attach(child);
144 goto out_tsk;
145 }
146
147 ret = ptrace_check_attach(child, request == PTRACE_KILL);
148 if (ret < 0)
149 goto out_tsk;
150
151 switch (request) {
152 /* Read word at location address. */
153 case PTRACE_PEEKTEXT:
154 case PTRACE_PEEKDATA: {
155 unsigned long tmp;
156 int copied;
157
158 ret = -EIO;
159
160 /* The signal trampoline page is outside the normal user-addressable
161		 * space but still accessible. This is a hack to make it possible to
162 * access the signal handler code in GDB.
163 */
164 if ((addr & PAGE_MASK) == cris_signal_return_page) {
165 /* The trampoline page is globally mapped, no page table to traverse.*/
166 tmp = *(unsigned long*)addr;
167 } else {
168 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
169
170 if (copied != sizeof(tmp))
171 break;
172 }
173
174 ret = put_user(tmp,datap);
175 break;
176 }
177
178 /* Read the word at location address in the USER area. */
179 case PTRACE_PEEKUSR: {
180 unsigned long tmp;
181
182 ret = -EIO;
183 if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
184 break;
185
186 tmp = get_reg(child, addr >> 2);
187 ret = put_user(tmp, datap);
188 break;
189 }
190
191 /* Write the word at location address. */
192 case PTRACE_POKETEXT:
193 case PTRACE_POKEDATA:
194 ret = 0;
195
196 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
197 break;
198
199 ret = -EIO;
200 break;
201
202 /* Write the word at location address in the USER area. */
203 case PTRACE_POKEUSR:
204 ret = -EIO;
205 if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
206 break;
207
208 addr >>= 2;
209
210 if (addr == PT_CCS) {
211			/* Don't allow the tracing process to change things like the
212			 * interrupt enable, the kernel/user bit, DMA enables, etc.
213 */
214 data &= CCS_MASK;
215 data |= get_reg(child, PT_CCS) & ~CCS_MASK;
216 }
217 if (put_reg(child, addr, data))
218 break;
219 ret = 0;
220 break;
221
222 case PTRACE_SYSCALL:
223 case PTRACE_CONT:
224 ret = -EIO;
225
226 if (!valid_signal(data))
227 break;
228
229 /* Continue means no single-step. */
230 put_reg(child, PT_SPC, 0);
231
232 if (!get_debugreg(child->pid, PT_BP_CTRL)) {
233 unsigned long tmp;
234 /* If no h/w bp configured, disable S bit. */
235 tmp = get_reg(child, PT_CCS) & ~SBIT_USER;
236 put_reg(child, PT_CCS, tmp);
237 }
238
239 if (request == PTRACE_SYSCALL) {
240 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
241 }
242 else {
243 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
244 }
245
246 child->exit_code = data;
247
248 /* TODO: make sure any pending breakpoint is killed */
249 wake_up_process(child);
250 ret = 0;
251
252 break;
253
254 /* Make the child exit by sending it a sigkill. */
255 case PTRACE_KILL:
256 ret = 0;
257
258 if (child->exit_state == EXIT_ZOMBIE)
259 break;
260
261 child->exit_code = SIGKILL;
262
263 /* Deconfigure single-step and h/w bp. */
264 ptrace_disable(child);
265
266 /* TODO: make sure any pending breakpoint is killed */
267 wake_up_process(child);
268 break;
269
270 /* Set the trap flag. */
271 case PTRACE_SINGLESTEP: {
272 unsigned long tmp;
273 ret = -EIO;
274
275 /* Set up SPC if not set already (in which case we have
276 no other choice but to trust it). */
277 if (!get_reg(child, PT_SPC)) {
278 /* In case we're stopped in a delay slot. */
279 tmp = get_reg(child, PT_ERP) & ~1;
280 put_reg(child, PT_SPC, tmp);
281 }
282 tmp = get_reg(child, PT_CCS) | SBIT_USER;
283 put_reg(child, PT_CCS, tmp);
284
285 if (!valid_signal(data))
286 break;
287
288 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
289
290 /* TODO: set some clever breakpoint mechanism... */
291
292 child->exit_code = data;
293 wake_up_process(child);
294 ret = 0;
295 break;
296
297 }
298 case PTRACE_DETACH:
299 ret = ptrace_detach(child, data);
300 break;
301
302 /* Get all GP registers from the child. */
303 case PTRACE_GETREGS: {
304 int i;
305 unsigned long tmp;
306
307 for (i = 0; i <= PT_MAX; i++) {
308 tmp = get_reg(child, i);
309
310 if (put_user(tmp, datap)) {
311 ret = -EFAULT;
312 goto out_tsk;
313 }
314
315 datap++;
316 }
317
318 ret = 0;
319 break;
320 }
321
322 /* Set all GP registers in the child. */
323 case PTRACE_SETREGS: {
324 int i;
325 unsigned long tmp;
326
327 for (i = 0; i <= PT_MAX; i++) {
328 if (get_user(tmp, datap)) {
329 ret = -EFAULT;
330 goto out_tsk;
331 }
332
333 if (i == PT_CCS) {
334 tmp &= CCS_MASK;
335 tmp |= get_reg(child, PT_CCS) & ~CCS_MASK;
336 }
337
338 put_reg(child, i, tmp);
339 datap++;
340 }
341
342 ret = 0;
343 break;
344 }
345
346 default:
347 ret = ptrace_request(child, request, addr, data);
348 break;
349 }
350out_tsk:
351 put_task_struct(child);
352out:
353 unlock_kernel();
354 return ret;
355}
356
357void do_syscall_trace(void)
358{
359 if (!test_thread_flag(TIF_SYSCALL_TRACE))
360 return;
361
362 if (!(current->ptrace & PT_PTRACED))
363 return;
364
365 /* the 0x80 provides a way for the tracing parent to distinguish
366 between a syscall stop and SIGTRAP delivery */
367 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
368 ? 0x80 : 0));
369
370 /*
371 * This isn't the same as continuing with a signal, but it will do for
372 * normal use.
373 */
374 if (current->exit_code) {
375 send_sig(current->exit_code, current, 1);
376 current->exit_code = 0;
377 }
378}
379
380/* Returns the size of an instruction that has a delay slot. */
381
382static int insn_size(struct task_struct *child, unsigned long pc)
383{
384 unsigned long opcode;
385 int copied;
386 int opsize = 0;
387
388 /* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
389 copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
390 if (copied != sizeof(opcode))
391 return 0;
392
393 switch ((opcode & 0x0f00) >> 8) {
394 case 0x0:
395 case 0x9:
396 case 0xb:
397 opsize = 2;
398 break;
399 case 0xe:
400 case 0xf:
401 opsize = 6;
402 break;
403 case 0xd:
404 /* Could be 4 or 6; check more bits. */
405 if ((opcode & 0xff) == 0xff)
406 opsize = 4;
407 else
408 opsize = 6;
409 break;
410 default:
411 panic("ERROR: Couldn't find size of opcode 0x%lx at 0x%lx\n",
412 opcode, pc);
413 }
414
415 return opsize;
416}
417
418static unsigned long get_pseudo_pc(struct task_struct *child)
419{
420 /* Default value for PC is ERP. */
421 unsigned long pc = get_reg(child, PT_ERP);
422
423 if (pc & 0x1) {
424 unsigned long spc = get_reg(child, PT_SPC);
425 /* Delay slot bit set. Report as stopped on proper
426 instruction. */
427 if (spc) {
428 /* Rely on SPC if set. FIXME: We might want to check
429 that EXS indicates we stopped due to a single-step
430 exception. */
431 pc = spc;
432 } else {
433 /* Calculate the PC from the size of the instruction
434 that the delay slot we're in belongs to. */
435 pc += insn_size(child, pc & ~1) - 1;
436 }
437 }
438 return pc;
439}
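/*
 * Editor's note, not part of the patch: a worked example of the delay-slot
 * case above.  Suppose the tracee stopped in a delay slot, so ERP has bit 0
 * set, say ERP = 0x40001235, and SPC is zero.  The instruction owning the
 * delay slot sits at ERP & ~1 = 0x40001234; if insn_size() decodes it as a
 * 4-byte instruction, the reported PC becomes
 *
 *	0x40001235 + 4 - 1 = 0x40001238
 *
 * which is the address of the instruction in the delay slot itself, i.e. the
 * "proper instruction" the comment in get_pseudo_pc() refers to.
 */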
440
441static long bp_owner = 0;
442
443/* Reachable from exit_thread in signal.c, so not static. */
444void deconfigure_bp(long pid)
445{
446 int bp;
447
448 /* Only deconfigure if the pid is the owner. */
449 if (bp_owner != pid)
450 return;
451
452 for (bp = 0; bp < 6; bp++) {
453 unsigned long tmp;
454 /* Deconfigure start and end address (also gets rid of ownership). */
455 put_debugreg(pid, PT_BP + 3 + (bp * 2), 0);
456 put_debugreg(pid, PT_BP + 4 + (bp * 2), 0);
457
458 /* Deconfigure relevant bits in control register. */
459 tmp = get_debugreg(pid, PT_BP_CTRL) & ~(3 << (2 + (bp * 4)));
460 put_debugreg(pid, PT_BP_CTRL, tmp);
461 }
462 /* No owner now. */
463 bp_owner = 0;
464}
465
466static int put_debugreg(long pid, unsigned int regno, long data)
467{
468 int ret = 0;
469 register int old_srs;
470
471#ifdef CONFIG_ETRAX_KGDB
472 /* Ignore write, but pretend it was ok if value is 0
473	   (we don't want POKEUSR/SETREGS failing unnecessarily). */
474 return (data == 0) ? ret : -1;
475#endif
476
477 /* Simple owner management. */
478 if (!bp_owner)
479 bp_owner = pid;
480 else if (bp_owner != pid) {
481 /* Ignore write, but pretend it was ok if value is 0
482		   (we don't want POKEUSR/SETREGS failing unnecessarily). */
483 return (data == 0) ? ret : -1;
484 }
485
486 /* Remember old SRS. */
487 SPEC_REG_RD(SPEC_REG_SRS, old_srs);
488 /* Switch to BP bank. */
489 SUPP_BANK_SEL(BANK_BP);
490
491 switch (regno - PT_BP) {
492 case 0:
493 SUPP_REG_WR(0, data); break;
494 case 1:
495 case 2:
496 if (data)
497 ret = -1;
498 break;
499 case 3:
500 SUPP_REG_WR(3, data); break;
501 case 4:
502 SUPP_REG_WR(4, data); break;
503 case 5:
504 SUPP_REG_WR(5, data); break;
505 case 6:
506 SUPP_REG_WR(6, data); break;
507 case 7:
508 SUPP_REG_WR(7, data); break;
509 case 8:
510 SUPP_REG_WR(8, data); break;
511 case 9:
512 SUPP_REG_WR(9, data); break;
513 case 10:
514 SUPP_REG_WR(10, data); break;
515 case 11:
516 SUPP_REG_WR(11, data); break;
517 case 12:
518 SUPP_REG_WR(12, data); break;
519 case 13:
520 SUPP_REG_WR(13, data); break;
521 case 14:
522 SUPP_REG_WR(14, data); break;
523 default:
524 ret = -1;
525 break;
526 }
527
528 /* Restore SRS. */
529 SPEC_REG_WR(SPEC_REG_SRS, old_srs);
530 /* Just for show. */
531 NOP();
532 NOP();
533 NOP();
534
535 return ret;
536}
537
538static long get_debugreg(long pid, unsigned int regno)
539{
540 register int old_srs;
541 register long data;
542
543 if (pid != bp_owner) {
544 return 0;
545 }
546
547 /* Remember old SRS. */
548 SPEC_REG_RD(SPEC_REG_SRS, old_srs);
549 /* Switch to BP bank. */
550 SUPP_BANK_SEL(BANK_BP);
551
552 switch (regno - PT_BP) {
553 case 0:
554 SUPP_REG_RD(0, data); break;
555 case 1:
556 case 2:
557 /* error return value? */
558 data = 0;
559 break;
560 case 3:
561 SUPP_REG_RD(3, data); break;
562 case 4:
563 SUPP_REG_RD(4, data); break;
564 case 5:
565 SUPP_REG_RD(5, data); break;
566 case 6:
567 SUPP_REG_RD(6, data); break;
568 case 7:
569 SUPP_REG_RD(7, data); break;
570 case 8:
571 SUPP_REG_RD(8, data); break;
572 case 9:
573 SUPP_REG_RD(9, data); break;
574 case 10:
575 SUPP_REG_RD(10, data); break;
576 case 11:
577 SUPP_REG_RD(11, data); break;
578 case 12:
579 SUPP_REG_RD(12, data); break;
580 case 13:
581 SUPP_REG_RD(13, data); break;
582 case 14:
583 SUPP_REG_RD(14, data); break;
584 default:
585 /* error return value? */
586 data = 0;
587 }
588
589 /* Restore SRS. */
590 SPEC_REG_WR(SPEC_REG_SRS, old_srs);
591 /* Just for show. */
592 NOP();
593 NOP();
594 NOP();
595
596 return data;
597}
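Editor's note: for orientation only, this is roughly what the other side of this interface looks like from user space. PTRACE_PEEKUSR/POKEUSR take byte offsets, which is why sys_ptrace() above rejects unaligned addresses and shifts by two to get the register index; PT_CCS below is the register index from the CRIS <asm/ptrace.h>, and because of CCS_MASK the privileged CCS bits written here are silently preserved by the kernel.

#include <sys/ptrace.h>
#include <sys/types.h>

/* Read and write the tracee's CCS through the USER-area interface.
 * Offsets are register index * 4, matching the addr >> 2 in sys_ptrace(). */
static long read_ccs(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid, (void *)(PT_CCS << 2), 0);
}

static long write_ccs(pid_t pid, long value)
{
	return ptrace(PTRACE_POKEUSER, pid, (void *)(PT_CCS << 2), (void *)value);
}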
diff --git a/arch/cris/arch-v32/kernel/setup.c b/arch/cris/arch-v32/kernel/setup.c
new file mode 100644
index 000000000000..b17a39a2e164
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/setup.c
@@ -0,0 +1,118 @@
1/*
2 * Display CPU info in /proc/cpuinfo.
3 *
4 * Copyright (C) 2003, Axis Communications AB.
5 */
6
7#include <linux/config.h>
8#include <linux/seq_file.h>
9#include <linux/proc_fs.h>
10#include <linux/delay.h>
11#include <linux/param.h>
12
13#ifdef CONFIG_PROC_FS
14
15#define HAS_FPU 0x0001
16#define HAS_MMU 0x0002
17#define HAS_ETHERNET100 0x0004
18#define HAS_TOKENRING 0x0008
19#define HAS_SCSI 0x0010
20#define HAS_ATA 0x0020
21#define HAS_USB 0x0040
22#define HAS_IRQ_BUG 0x0080
23#define HAS_MMU_BUG 0x0100
24
25struct cpu_info {
26 char *cpu_model;
27 unsigned short rev;
28 unsigned short cache_size;
29 unsigned short flags;
30};
31
32/* Some of these models are here for historical reasons only. */
33static struct cpu_info cpinfo[] = {
34 {"ETRAX 1", 0, 0, 0},
35 {"ETRAX 2", 1, 0, 0},
36 {"ETRAX 3", 2, 0, 0},
37 {"ETRAX 4", 3, 0, 0},
38 {"Simulator", 7, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
39 {"ETRAX 100", 8, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_IRQ_BUG},
40 {"ETRAX 100", 9, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
41
42 {"ETRAX 100LX", 10, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
43 | HAS_MMU | HAS_MMU_BUG},
44
45 {"ETRAX 100LX v2", 11, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
46 | HAS_MMU},
47
48 {"ETRAX FS", 32, 32, HAS_ETHERNET100 | HAS_ATA | HAS_MMU},
49
50 {"Unknown", 0, 0, 0}
51};
52
53int
54show_cpuinfo(struct seq_file *m, void *v)
55{
56 int i;
57 int cpu = (int)v - 1;
58 int entries;
59 unsigned long revision;
60 struct cpu_info *info;
61
62 entries = sizeof cpinfo / sizeof(struct cpu_info);
63 info = &cpinfo[entries - 1];
64
65#ifdef CONFIG_SMP
66 if (!cpu_online(cpu))
67 return 0;
68#endif
69
70 revision = rdvr();
71
72 for (i = 0; i < entries; i++) {
73 if (cpinfo[i].rev == revision) {
74 info = &cpinfo[i];
75 break;
76 }
77 }
78
79 return seq_printf(m,
80 "processor\t: %d\n"
81 "cpu\t\t: CRIS\n"
82 "cpu revision\t: %lu\n"
83 "cpu model\t: %s\n"
84 "cache size\t: %d KB\n"
85 "fpu\t\t: %s\n"
86 "mmu\t\t: %s\n"
87 "mmu DMA bug\t: %s\n"
88 "ethernet\t: %s Mbps\n"
89 "token ring\t: %s\n"
90 "scsi\t\t: %s\n"
91 "ata\t\t: %s\n"
92 "usb\t\t: %s\n"
93 "bogomips\t: %lu.%02lu\n\n",
94
95 cpu,
96 revision,
97 info->cpu_model,
98 info->cache_size,
99 info->flags & HAS_FPU ? "yes" : "no",
100 info->flags & HAS_MMU ? "yes" : "no",
101 info->flags & HAS_MMU_BUG ? "yes" : "no",
102 info->flags & HAS_ETHERNET100 ? "10/100" : "10",
103 info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
104 info->flags & HAS_SCSI ? "yes" : "no",
105 info->flags & HAS_ATA ? "yes" : "no",
106 info->flags & HAS_USB ? "yes" : "no",
107 (loops_per_jiffy * HZ + 500) / 500000,
108 ((loops_per_jiffy * HZ + 500) / 5000) % 100);
109}
110
111#endif /* CONFIG_PROC_FS */
112
113void
114show_etrax_copyright(void)
115{
116 printk(KERN_INFO
117 "Linux/CRISv32 port on ETRAX FS (C) 2003, 2004 Axis Communications AB\n");
118}
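Editor's note: a quick standalone check of the BogoMIPS arithmetic printed by show_cpuinfo() above. With the example values below it prints "bogomips: 200.00", i.e. the reported figure is essentially loops_per_jiffy * HZ / 500000.

#include <stdio.h>

int main(void)
{
	unsigned long loops_per_jiffy = 1000000;	/* example value */
	unsigned long hz = 100;				/* example HZ */

	printf("bogomips: %lu.%02lu\n",
	       (loops_per_jiffy * hz + 500) / 500000,
	       ((loops_per_jiffy * hz + 500) / 5000) % 100);
	return 0;
}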
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
new file mode 100644
index 000000000000..fb4c79d5b76b
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -0,0 +1,708 @@
1/*
2 * Copyright (C) 2003, Axis Communications AB.
3 */
4
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <linux/kernel.h>
8#include <linux/signal.h>
9#include <linux/errno.h>
10#include <linux/wait.h>
11#include <linux/ptrace.h>
12#include <linux/unistd.h>
13#include <linux/stddef.h>
14#include <linux/syscalls.h>
15#include <linux/vmalloc.h>
16
17#include <asm/io.h>
18#include <asm/processor.h>
19#include <asm/ucontext.h>
20#include <asm/uaccess.h>
21#include <asm/arch/ptrace.h>
22#include <asm/arch/hwregs/cpu_vect.h>
23
24extern unsigned long cris_signal_return_page;
25
26/* Flag to check if a signal is blockable. */
27#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
28
29/*
30 * A syscall in CRIS is really a "break 13" instruction, which is 2
31 * bytes. The registers are manipulated so that upon return the instruction
32 * will be executed again.
33 *
34 * This relies on the PC pointing to the instruction after the break call.
35 */
36#define RESTART_CRIS_SYS(regs) regs->r10 = regs->orig_r10; regs->erp -= 2;
37
38/* Signal frames. */
39struct signal_frame {
40 struct sigcontext sc;
41 unsigned long extramask[_NSIG_WORDS - 1];
42 unsigned char retcode[8]; /* Trampoline code. */
43};
44
45struct rt_signal_frame {
46 struct siginfo *pinfo;
47 void *puc;
48 struct siginfo info;
49 struct ucontext uc;
50 unsigned char retcode[8]; /* Trampoline code. */
51};
52
53int do_signal(int restart, sigset_t *oldset, struct pt_regs *regs);
54void keep_debug_flags(unsigned long oldccs, unsigned long oldspc,
55 struct pt_regs *regs);
56/*
57 * Swap in the new signal mask, and wait for a signal. Define some
58 * dummy arguments to be able to reach the regs argument.
59 */
60int
61sys_sigsuspend(old_sigset_t mask, long r11, long r12, long r13, long mof,
62 long srp, struct pt_regs *regs)
63{
64 sigset_t saveset;
65
66 mask &= _BLOCKABLE;
67
68 spin_lock_irq(&current->sighand->siglock);
69
70 saveset = current->blocked;
71
72 siginitset(&current->blocked, mask);
73
74 recalc_sigpending();
75 spin_unlock_irq(&current->sighand->siglock);
76
77 regs->r10 = -EINTR;
78
79 while (1) {
80 current->state = TASK_INTERRUPTIBLE;
81 schedule();
82
83 if (do_signal(0, &saveset, regs)) {
84 /*
85 * This point is reached twice: once to call
86 * the signal handler, then again to return
87 * from the sigsuspend system call. When
89			 * calling the signal handler, R10 holds the
90			 * signal number as set by do_signal(). The
91			 * sigsuspend call will always return with
92			 * the restored value above, -EINTR.
92 */
93 return regs->r10;
94 }
95 }
96}
97
98/* Define some dummy arguments to be able to reach the regs argument. */
99int
100sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, long r12, long r13,
101 long mof, long srp, struct pt_regs *regs)
102{
103 sigset_t saveset;
104 sigset_t newset;
105
106 if (sigsetsize != sizeof(sigset_t))
107 return -EINVAL;
108
109 if (copy_from_user(&newset, unewset, sizeof(newset)))
110 return -EFAULT;
111
112 sigdelsetmask(&newset, ~_BLOCKABLE);
113 spin_lock_irq(&current->sighand->siglock);
114
115 saveset = current->blocked;
116 current->blocked = newset;
117
118 recalc_sigpending();
119 spin_unlock_irq(&current->sighand->siglock);
120
121 regs->r10 = -EINTR;
122
123 while (1) {
124 current->state = TASK_INTERRUPTIBLE;
125 schedule();
126
127 if (do_signal(0, &saveset, regs)) {
128 /* See comment in function above. */
129 return regs->r10;
130 }
131 }
132}
133
134int
135sys_sigaction(int signal, const struct old_sigaction *act,
136 struct old_sigaction *oact)
137{
138 int retval;
139 struct k_sigaction newk;
140 struct k_sigaction oldk;
141
142 if (act) {
143 old_sigset_t mask;
144
145 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
146 __get_user(newk.sa.sa_handler, &act->sa_handler) ||
147 __get_user(newk.sa.sa_restorer, &act->sa_restorer))
148 return -EFAULT;
149
150 __get_user(newk.sa.sa_flags, &act->sa_flags);
151 __get_user(mask, &act->sa_mask);
152 siginitset(&newk.sa.sa_mask, mask);
153 }
154
155 retval = do_sigaction(signal, act ? &newk : NULL, oact ? &oldk : NULL);
156
157 if (!retval && oact) {
158 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
159 __put_user(oldk.sa.sa_handler, &oact->sa_handler) ||
160 __put_user(oldk.sa.sa_restorer, &oact->sa_restorer))
161 return -EFAULT;
162
163 __put_user(oldk.sa.sa_flags, &oact->sa_flags);
164 __put_user(oldk.sa.sa_mask.sig[0], &oact->sa_mask);
165 }
166
167 return retval;
168}
169
170int
171sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
172{
173 return do_sigaltstack(uss, uoss, rdusp());
174}
175
176static int
177restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
178{
179 unsigned int err = 0;
180 unsigned long old_usp;
181
182 /* Always make any pending restarted system calls return -EINTR */
183 current_thread_info()->restart_block.fn = do_no_restart_syscall;
184
185 /*
186 * Restore the registers from &sc->regs. sc is already checked
187 * for VERIFY_READ since the signal_frame was previously
188 * checked in sys_sigreturn().
189 */
190 if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
191 goto badframe;
192
193	/* Make sure that the user-mode flag is set. */
194 regs->ccs |= (1 << (U_CCS_BITNR + CCS_SHIFT));
195
196 /* Restore the old USP. */
197 err |= __get_user(old_usp, &sc->usp);
198 wrusp(old_usp);
199
200 return err;
201
202badframe:
203 return 1;
204}
205
206/* Define some dummy arguments to be able to reach the regs argument. */
207asmlinkage int
208sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
209 struct pt_regs *regs)
210{
211 sigset_t set;
212 struct signal_frame __user *frame;
213 unsigned long oldspc = regs->spc;
214 unsigned long oldccs = regs->ccs;
215
216 frame = (struct signal_frame *) rdusp();
217
218 /*
219 * Since the signal is stacked on a dword boundary, the frame
220 * should be dword aligned here as well. It it's not, then the
221 * user is trying some funny business.
222 */
223 if (((long)frame) & 3)
224 goto badframe;
225
226 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
227 goto badframe;
228
229 if (__get_user(set.sig[0], &frame->sc.oldmask) ||
230 (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1],
231 frame->extramask,
232 sizeof(frame->extramask))))
233 goto badframe;
234
235 sigdelsetmask(&set, ~_BLOCKABLE);
236 spin_lock_irq(&current->sighand->siglock);
237
238 current->blocked = set;
239
240 recalc_sigpending();
241 spin_unlock_irq(&current->sighand->siglock);
242
243 if (restore_sigcontext(regs, &frame->sc))
244 goto badframe;
245
246 keep_debug_flags(oldccs, oldspc, regs);
247
248 return regs->r10;
249
250badframe:
251 force_sig(SIGSEGV, current);
252 return 0;
253}
254
255/* Define some dummy variables to be able to reach the regs argument. */
256asmlinkage int
257sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
258 struct pt_regs *regs)
259{
260 sigset_t set;
261 struct rt_signal_frame __user *frame;
262 unsigned long oldspc = regs->spc;
263 unsigned long oldccs = regs->ccs;
264
265 frame = (struct rt_signal_frame *) rdusp();
266
267 /*
268 * Since the signal is stacked on a dword boundary, the frame
269	 * should be dword aligned here as well. If it's not, then the
270 * user is trying some funny business.
271 */
272 if (((long)frame) & 3)
273 goto badframe;
274
275 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
276 goto badframe;
277
278 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
279 goto badframe;
280
281 sigdelsetmask(&set, ~_BLOCKABLE);
282 spin_lock_irq(&current->sighand->siglock);
283
284 current->blocked = set;
285
286 recalc_sigpending();
287 spin_unlock_irq(&current->sighand->siglock);
288
289 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
290 goto badframe;
291
292 if (do_sigaltstack(&frame->uc.uc_stack, NULL, rdusp()) == -EFAULT)
293 goto badframe;
294
295 keep_debug_flags(oldccs, oldspc, regs);
296
297 return regs->r10;
298
299badframe:
300 force_sig(SIGSEGV, current);
301 return 0;
302}
303
304/* Set up a signal frame. */
305static int
306setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
307 unsigned long mask)
308{
309 int err;
310 unsigned long usp;
311
312 err = 0;
313 usp = rdusp();
314
315 /*
316 * Copy the registers. They are located first in sc, so it's
317 * possible to use sc directly.
318 */
319 err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
320
321 err |= __put_user(mask, &sc->oldmask);
322 err |= __put_user(usp, &sc->usp);
323
324 return err;
325}
326
327/* Figure out where to put the new signal frame - usually on the stack. */
328static inline void __user *
329get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
330{
331 unsigned long sp;
332
333 sp = rdusp();
334
335 /* This is the X/Open sanctioned signal stack switching. */
336 if (ka->sa.sa_flags & SA_ONSTACK) {
337 if (!on_sig_stack(sp))
338 sp = current->sas_ss_sp + current->sas_ss_size;
339 }
340
341 /* Make sure the frame is dword-aligned. */
342 sp &= ~3;
343
344 return (void __user *)(sp - frame_size);
345}
346
347/* Grab and set up a signal frame.
348 *
349 * Basically a lot of state info is stacked, and arranged for the
350 * user-mode program to return to the kernel using either a trampoline
351 * which performs the syscall sigreturn(), or a provided user-mode
352 * trampoline.
353 */
354static void
355setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
356 struct pt_regs * regs)
357{
358 int err;
359 unsigned long return_ip;
360 struct signal_frame __user *frame;
361
362 err = 0;
363 frame = get_sigframe(ka, regs, sizeof(*frame));
364
365 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
366 goto give_sigsegv;
367
368 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
369
370 if (err)
371 goto give_sigsegv;
372
373 if (_NSIG_WORDS > 1) {
374 err |= __copy_to_user(frame->extramask, &set->sig[1],
375 sizeof(frame->extramask));
376 }
377
378 if (err)
379 goto give_sigsegv;
380
381 /*
382 * Set up to return from user-space. If provided, use a stub
383 * already located in user-space.
384 */
385 if (ka->sa.sa_flags & SA_RESTORER) {
386 return_ip = (unsigned long)ka->sa.sa_restorer;
387 } else {
388 /* Trampoline - the desired return ip is in the signal return page. */
389 return_ip = cris_signal_return_page;
390
391 /*
392 * This is movu.w __NR_sigreturn, r9; break 13;
393 *
394 * WE DO NOT USE IT ANY MORE! It's only left here for historical
395 * reasons and because gdb uses it as a signature to notice
396 * signal handler stack frames.
397 */
398 err |= __put_user(0x9c5f, (short __user*)(frame->retcode+0));
399 err |= __put_user(__NR_sigreturn, (short __user*)(frame->retcode+2));
400 err |= __put_user(0xe93d, (short __user*)(frame->retcode+4));
401 }
402
403 if (err)
404 goto give_sigsegv;
405
406 /*
407 * Set up registers for signal handler.
408 *
409 * Where the code enters now.
410	 * Where the code enters later.
411 * First argument, signo.
412 */
413 regs->erp = (unsigned long) ka->sa.sa_handler;
414 regs->srp = return_ip;
415 regs->r10 = sig;
416
417 /* Actually move the USP to reflect the stacked frame. */
418 wrusp((unsigned long)frame);
419
420 return;
421
422give_sigsegv:
423 if (sig == SIGSEGV)
424 ka->sa.sa_handler = SIG_DFL;
425
426 force_sig(SIGSEGV, current);
427}
428
429static void
430setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
431 sigset_t *set, struct pt_regs * regs)
432{
433 int err;
434 unsigned long return_ip;
435 struct rt_signal_frame __user *frame;
436
437 err = 0;
438 frame = get_sigframe(ka, regs, sizeof(*frame));
439
440 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
441 goto give_sigsegv;
442
443 /* TODO: what is the current->exec_domain stuff and invmap ? */
444
445 err |= __put_user(&frame->info, &frame->pinfo);
446 err |= __put_user(&frame->uc, &frame->puc);
447 err |= copy_siginfo_to_user(&frame->info, info);
448
449 if (err)
450 goto give_sigsegv;
451
452 /* Clear all the bits of the ucontext we don't use. */
453 err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
454 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
455 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
456
457 if (err)
458 goto give_sigsegv;
459
460 /*
461 * Set up to return from user-space. If provided, use a stub
462 * already located in user-space.
463 */
464 if (ka->sa.sa_flags & SA_RESTORER) {
465 return_ip = (unsigned long) ka->sa.sa_restorer;
466 } else {
467 /* Trampoline - the desired return ip is in the signal return page. */
468 return_ip = cris_signal_return_page + 6;
469
470 /*
471 * This is movu.w __NR_rt_sigreturn, r9; break 13;
472 *
473 * WE DO NOT USE IT ANY MORE! It's only left here for historical
474 * reasons and because gdb uses it as a signature to notice
475 * signal handler stack frames.
476 */
477 err |= __put_user(0x9c5f, (short __user*)(frame->retcode+0));
478
479 err |= __put_user(__NR_rt_sigreturn,
480 (short __user*)(frame->retcode+2));
481
482 err |= __put_user(0xe93d, (short __user*)(frame->retcode+4));
483 }
484
485 if (err)
486 goto give_sigsegv;
487
488 /*
489 * Set up registers for signal handler.
490 *
491 * Where the code enters now.
492 * Where the code enters later.
493 * First argument is signo.
494 * Second argument is (siginfo_t *).
495 * Third argument is unused.
496 */
497 regs->erp = (unsigned long) ka->sa.sa_handler;
498 regs->srp = return_ip;
499 regs->r10 = sig;
500 regs->r11 = (unsigned long) &frame->info;
501 regs->r12 = 0;
502
503 /* Actually move the usp to reflect the stacked frame. */
504 wrusp((unsigned long)frame);
505
506 return;
507
508give_sigsegv:
509 if (sig == SIGSEGV)
510 ka->sa.sa_handler = SIG_DFL;
511
512 force_sig(SIGSEGV, current);
513}
514
515/* Invoke a signal handler to, well, handle the signal. */
516extern inline void
517handle_signal(int canrestart, unsigned long sig,
518 siginfo_t *info, struct k_sigaction *ka,
519 sigset_t *oldset, struct pt_regs * regs)
520{
521 /* Check if this got called from a system call. */
522 if (canrestart) {
523 /* If so, check system call restarting. */
524 switch (regs->r10) {
525 case -ERESTART_RESTARTBLOCK:
526 case -ERESTARTNOHAND:
527 /*
528 * This means that the syscall should
529 * only be restarted if there was no
530 * handler for the signal, and since
531 * this point isn't reached unless
532 * there is a handler, there's no need
533 * to restart.
534 */
535 regs->r10 = -EINTR;
536 break;
537
538 case -ERESTARTSYS:
539 /*
540 * This means restart the syscall if
541 * there is no handler, or the handler
542 * was registered with SA_RESTART.
543 */
544 if (!(ka->sa.sa_flags & SA_RESTART)) {
545 regs->r10 = -EINTR;
546 break;
547 }
548
549 /* Fall through. */
550
551 case -ERESTARTNOINTR:
552 /*
553 * This means that the syscall should
554 * be called again after the signal
555 * handler returns.
556 */
557 RESTART_CRIS_SYS(regs);
558 break;
559 }
560 }
561
562 /* Set up the stack frame. */
563 if (ka->sa.sa_flags & SA_SIGINFO)
564 setup_rt_frame(sig, ka, info, oldset, regs);
565 else
566 setup_frame(sig, ka, oldset, regs);
567
568 if (ka->sa.sa_flags & SA_ONESHOT)
569 ka->sa.sa_handler = SIG_DFL;
570
571 if (!(ka->sa.sa_flags & SA_NODEFER)) {
572 spin_lock_irq(&current->sighand->siglock);
573 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
574 sigaddset(&current->blocked,sig);
575 recalc_sigpending();
576 spin_unlock_irq(&current->sighand->siglock);
577 }
578}
579
580/*
581 * Note that 'init' is a special process: it doesn't get signals it doesn't
582 * want to handle. Thus you cannot kill init even with a SIGKILL even by
583 * mistake.
584 *
585 * Also note that the regs structure given here as an argument is the latest
586 * pushed pt_regs. It may or may not be the same as the first pushed registers
587 * when the initial usermode->kernelmode transition took place. Therefore
588 * we can use user_mode(regs) to see if we came directly from kernel or user
589 * mode below.
590 */
591int
592do_signal(int canrestart, sigset_t *oldset, struct pt_regs *regs)
593{
594 int signr;
595 siginfo_t info;
596 struct k_sigaction ka;
597
598 /*
599 * The common case should go fast, which is why this point is
600 * reached from kernel-mode. If that's the case, just return
601 * without doing anything.
602 */
603 if (!user_mode(regs))
604 return 1;
605
606 if (!oldset)
607 oldset = &current->blocked;
608
609 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
610
611 if (signr > 0) {
612 /* Deliver the signal. */
613 handle_signal(canrestart, signr, &info, &ka, oldset, regs);
614 return 1;
615 }
616
617 /* Got here from a system call? */
618 if (canrestart) {
619 /* Restart the system call - no handlers present. */
620 if (regs->r10 == -ERESTARTNOHAND ||
621 regs->r10 == -ERESTARTSYS ||
622 regs->r10 == -ERESTARTNOINTR) {
623 RESTART_CRIS_SYS(regs);
624 }
625
626 if (regs->r10 == -ERESTART_RESTARTBLOCK){
627 regs->r10 = __NR_restart_syscall;
628 regs->erp -= 2;
629 }
630 }
631
632 return 0;
633}
634
635asmlinkage void
636ugdb_trap_user(struct thread_info *ti, int sig)
637{
638 if (((user_regs(ti)->exs & 0xff00) >> 8) != SINGLE_STEP_INTR_VECT) {
639 /* Zero single-step PC if the reason we stopped wasn't a single
640 step exception. This is to avoid relying on it when it isn't
641 reliable. */
642 user_regs(ti)->spc = 0;
643 }
644 /* FIXME: Filter out false h/w breakpoint hits (i.e. EDA
645	   not within any configured h/w breakpoint range). Synchronize with
646 what already exists for kernel debugging. */
647 if (((user_regs(ti)->exs & 0xff00) >> 8) == BREAK_8_INTR_VECT) {
648 /* Break 8: subtract 2 from ERP unless in a delay slot. */
649 if (!(user_regs(ti)->erp & 0x1))
650 user_regs(ti)->erp -= 2;
651 }
652 sys_kill(ti->task->pid, sig);
653}
654
655void
656keep_debug_flags(unsigned long oldccs, unsigned long oldspc,
657 struct pt_regs *regs)
658{
659 if (oldccs & (1 << Q_CCS_BITNR)) {
660 /* Pending single step due to single-stepping the break 13
661 in the signal trampoline: keep the Q flag. */
662 regs->ccs |= (1 << Q_CCS_BITNR);
663 /* S flag should be set - complain if it's not. */
664 if (!(oldccs & (1 << (S_CCS_BITNR + CCS_SHIFT)))) {
665 printk("Q flag but no S flag?");
666 }
667 regs->ccs |= (1 << (S_CCS_BITNR + CCS_SHIFT));
668 /* Assume the SPC is valid and interesting. */
669 regs->spc = oldspc;
670
671 } else if (oldccs & (1 << (S_CCS_BITNR + CCS_SHIFT))) {
672 /* If a h/w bp was set in the signal handler we need
673 to keep the S flag. */
674 regs->ccs |= (1 << (S_CCS_BITNR + CCS_SHIFT));
675 /* Don't keep the old SPC though; if we got here due to
676 a single-step, the Q flag should have been set. */
677 } else if (regs->spc) {
678 /* If we were single-stepping *before* the signal was taken,
679 we don't want to restore that state now, because GDB will
680 have forgotten all about it. */
681 regs->spc = 0;
682 regs->ccs &= ~(1 << (S_CCS_BITNR + CCS_SHIFT));
683 }
684}
685
686/* Set up the trampolines on the signal return page. */
687int __init
688cris_init_signal(void)
689{
690 u16* data = (u16*)kmalloc(PAGE_SIZE, GFP_KERNEL);
691
692 /* This is movu.w __NR_sigreturn, r9; break 13; */
693 data[0] = 0x9c5f;
694 data[1] = __NR_sigreturn;
695 data[2] = 0xe93d;
696 /* This is movu.w __NR_rt_sigreturn, r9; break 13; */
697 data[3] = 0x9c5f;
698 data[4] = __NR_rt_sigreturn;
699 data[5] = 0xe93d;
700
701 /* Map to userspace with appropriate permissions (no write access...) */
702 cris_signal_return_page = (unsigned long)
703 __ioremap_prot(virt_to_phys(data), PAGE_SIZE, PAGE_SIGNAL_TRAMPOLINE);
704
705 return 0;
706}
707
708__initcall(cris_init_signal);
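Editor's note: a sketch of the signal return page that cris_init_signal() above fills in, and how the two trampolines line up with the return addresses picked by setup_frame() and setup_rt_frame(). The struct is illustrative only; offsets are in bytes.

struct cris_signal_return_page_layout {
	/* +0: movu.w __NR_sigreturn, r9; break 13 -- used by setup_frame() */
	unsigned short sigreturn_tramp[3];
	/* +6: movu.w __NR_rt_sigreturn, r9; break 13 -- used by setup_rt_frame(),
	 * hence return_ip = cris_signal_return_page + 6 there */
	unsigned short rt_sigreturn_tramp[3];
};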
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
new file mode 100644
index 000000000000..2c5cae04a95c
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -0,0 +1,348 @@
1#include <asm/delay.h>
2#include <asm/arch/irq.h>
3#include <asm/arch/hwregs/intr_vect.h>
4#include <asm/arch/hwregs/intr_vect_defs.h>
5#include <asm/tlbflush.h>
6#include <asm/mmu_context.h>
7#include <asm/arch/hwregs/mmu_defs_asm.h>
8#include <asm/arch/hwregs/supp_reg.h>
9#include <asm/atomic.h>
10
11#include <linux/err.h>
12#include <linux/init.h>
13#include <linux/timex.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/cpumask.h>
17#include <linux/interrupt.h>
18
19#define IPI_SCHEDULE 1
20#define IPI_CALL 2
21#define IPI_FLUSH_TLB 4
22
23#define FLUSH_ALL (void*)0xffffffff
24
25/* Vector of locks used for various atomic operations */
26spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
27
28/* CPU masks */
29cpumask_t cpu_online_map = CPU_MASK_NONE;
30cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
31
32/* Variables used during SMP boot */
33volatile int cpu_now_booting = 0;
34volatile struct thread_info *smp_init_current_idle_thread;
35
36/* Variables used during IPI */
37static DEFINE_SPINLOCK(call_lock);
38static DEFINE_SPINLOCK(tlbstate_lock);
39
40struct call_data_struct {
41 void (*func) (void *info);
42 void *info;
43 int wait;
44};
45
46static struct call_data_struct * call_data;
47
48static struct mm_struct* flush_mm;
49static struct vm_area_struct* flush_vma;
50static unsigned long flush_addr;
51
52extern int setup_irq(int, struct irqaction *);
53
54/* Mode registers */
55static unsigned long irq_regs[NR_CPUS] =
56{
57 regi_irq,
58 regi_irq2
59};
60
61static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs);
62static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
63static struct irqaction irq_ipi = { crisv32_ipi_interrupt, SA_INTERRUPT,
64 CPU_MASK_NONE, "ipi", NULL, NULL};
65
66extern void cris_mmu_init(void);
67extern void cris_timer_init(void);
68
69/* SMP initialization */
70void __init smp_prepare_cpus(unsigned int max_cpus)
71{
72 int i;
73
74 /* From now on we can expect IPIs so set them up */
75 setup_irq(IPI_INTR_VECT, &irq_ipi);
76
77 /* Mark all possible CPUs as present */
78 for (i = 0; i < max_cpus; i++)
79 cpu_set(i, phys_cpu_present_map);
80}
81
82void __devinit smp_prepare_boot_cpu(void)
83{
84 /* PGD pointer has moved after per_cpu initialization so
85 * update the MMU.
86 */
87 pgd_t **pgd;
88 pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
89
90 SUPP_BANK_SEL(1);
91 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
92 SUPP_BANK_SEL(2);
93 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
94
95 cpu_set(0, cpu_online_map);
96 cpu_set(0, phys_cpu_present_map);
97}
98
99void __init smp_cpus_done(unsigned int max_cpus)
100{
101}
102
103/* Bring one cpu online.*/
104static int __init
105smp_boot_one_cpu(int cpuid)
106{
107 unsigned timeout;
108 struct task_struct *idle;
109
110 idle = fork_idle(cpuid);
111 if (IS_ERR(idle))
112 panic("SMP: fork failed for CPU:%d", cpuid);
113
114 idle->thread_info->cpu = cpuid;
115
116 /* Information to the CPU that is about to boot */
117 smp_init_current_idle_thread = idle->thread_info;
118 cpu_now_booting = cpuid;
119
120 /* Wait for CPU to come online */
121 for (timeout = 0; timeout < 10000; timeout++) {
122 if(cpu_online(cpuid)) {
123 cpu_now_booting = 0;
124 smp_init_current_idle_thread = NULL;
125 return 0; /* CPU online */
126 }
127 udelay(100);
128 barrier();
129 }
130
131 put_task_struct(idle);
132 idle = NULL;
133
134 printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
135 return -1;
136}
137
138/* Secondary CPUs start executing C code here. Here we need to set up
139 * CPU-specific state such as the local timer and the MMU. */
140void __init smp_callin(void)
141{
142 extern void cpu_idle(void);
143
144 int cpu = cpu_now_booting;
145 reg_intr_vect_rw_mask vect_mask = {0};
146
147 /* Initialise the idle task for this CPU */
148 atomic_inc(&init_mm.mm_count);
149 current->active_mm = &init_mm;
150
151 /* Set up MMU */
152 cris_mmu_init();
153 __flush_tlb_all();
154
155 /* Setup local timer. */
156 cris_timer_init();
157
158 /* Enable IRQ and idle */
159 REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
160 unmask_irq(IPI_INTR_VECT);
161 unmask_irq(TIMER_INTR_VECT);
162 local_irq_enable();
163
164 cpu_set(cpu, cpu_online_map);
165 cpu_idle();
166}
167
168/* Stop execution on this CPU.*/
169void stop_this_cpu(void* dummy)
170{
171 local_irq_disable();
172 asm volatile("halt");
173}
174
175/* Other calls */
176void smp_send_stop(void)
177{
178 smp_call_function(stop_this_cpu, NULL, 1, 0);
179}
180
181int setup_profiling_timer(unsigned int multiplier)
182{
183 return -EINVAL;
184}
185
186
187/* cache_decay_ticks is used by the scheduler to decide if a process
188 * is "hot" on one CPU. A higher value means a higher penalty to move
189 * a process to another CPU. Our cache is rather small so we report
190 * 1 tick.
191 */
192unsigned long cache_decay_ticks = 1;
193
194int __devinit __cpu_up(unsigned int cpu)
195{
196 smp_boot_one_cpu(cpu);
197 return cpu_online(cpu) ? 0 : -ENOSYS;
198}
199
200void smp_send_reschedule(int cpu)
201{
202 cpumask_t cpu_mask = CPU_MASK_NONE;
203 cpu_set(cpu, cpu_mask);
204 send_ipi(IPI_SCHEDULE, 0, cpu_mask);
205}
206
207/* TLB flushing
208 *
209 * Flush needs to be done on the local CPU and on any other CPU that
210 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
211 * of which CPUs a specific process has executed on.
212 */
213void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
214{
215 unsigned long flags;
216 cpumask_t cpu_mask;
217
218 spin_lock_irqsave(&tlbstate_lock, flags);
219 cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
220 cpu_clear(smp_processor_id(), cpu_mask);
221 flush_mm = mm;
222 flush_vma = vma;
223 flush_addr = addr;
224 send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
225 spin_unlock_irqrestore(&tlbstate_lock, flags);
226}
227
228void flush_tlb_all(void)
229{
230 __flush_tlb_all();
231 flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
232}
233
234void flush_tlb_mm(struct mm_struct *mm)
235{
236 __flush_tlb_mm(mm);
237 flush_tlb_common(mm, FLUSH_ALL, 0);
238 /* No more mappings in other CPUs */
239 cpus_clear(mm->cpu_vm_mask);
240 cpu_set(smp_processor_id(), mm->cpu_vm_mask);
241}
242
243void flush_tlb_page(struct vm_area_struct *vma,
244 unsigned long addr)
245{
246 __flush_tlb_page(vma, addr);
247 flush_tlb_common(vma->vm_mm, vma, addr);
248}
249
250/* Inter processor interrupts
251 *
252 * The IPIs are used for:
253 * * Force a schedule on a CPU
254 * Flush the TLB on other CPUs
255 * * Call a function on other CPUs
256 */
257
258int send_ipi(int vector, int wait, cpumask_t cpu_mask)
259{
260 int i = 0;
261 reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
262 int ret = 0;
263
264 /* Calculate CPUs to send to. */
265 cpus_and(cpu_mask, cpu_mask, cpu_online_map);
266
267 /* Send the IPI. */
268 for_each_cpu_mask(i, cpu_mask)
269 {
270 ipi.vector |= vector;
271 REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
272 }
273
274 /* Wait for the IPI to finish on the other CPUs */
275 if (wait) {
276 for_each_cpu_mask(i, cpu_mask) {
277 int j;
278 for (j = 0 ; j < 1000; j++) {
279 ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
280 if (!ipi.vector)
281 break;
282 udelay(100);
283 }
284
285 /* Timeout? */
286 if (ipi.vector) {
287 printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
288 ret = -ETIMEDOUT;
289 dump_stack();
290 }
291 }
292 }
293 return ret;
294}
295
296/*
297 * You must not call this function with disabled interrupts or from a
298 * hardware interrupt handler or from a bottom half handler.
299 */
300int smp_call_function(void (*func)(void *info), void *info,
301 int nonatomic, int wait)
302{
303 cpumask_t cpu_mask = CPU_MASK_ALL;
304 struct call_data_struct data;
305 int ret;
306
307 cpu_clear(smp_processor_id(), cpu_mask);
308
309 WARN_ON(irqs_disabled());
310
311 data.func = func;
312 data.info = info;
313 data.wait = wait;
314
315 spin_lock(&call_lock);
316 call_data = &data;
317 ret = send_ipi(IPI_CALL, wait, cpu_mask);
318 spin_unlock(&call_lock);
319
320 return ret;
321}
322
323irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
324{
325 void (*func) (void *info) = call_data->func;
326 void *info = call_data->info;
327 reg_intr_vect_rw_ipi ipi;
328
329 ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
330
331 if (ipi.vector & IPI_CALL) {
332 func(info);
333 }
334 if (ipi.vector & IPI_FLUSH_TLB) {
335 if (flush_mm == FLUSH_ALL)
336 __flush_tlb_all();
337 else if (flush_vma == FLUSH_ALL)
338 __flush_tlb_mm(flush_mm);
339 else
340 __flush_tlb_page(flush_vma, flush_addr);
341 }
342
343 ipi.vector = 0;
344 REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);
345
346 return IRQ_HANDLED;
347}
348
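A rough usage sketch for the cross-call primitive defined above: a hypothetical caller passes a callback and waits for the other core to run it. The callback and its argument are made-up names for illustration; the calling constraints are the ones stated in the comment above smp_call_function (no disabled interrupts, no interrupt context).

/* Hypothetical caller of smp_call_function() as defined above. */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *) info);	/* runs on each other online CPU */
}

static void example_cross_call(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* nonatomic = 1, wait = 1: block until the other CPU has run the hook. */
	if (smp_call_function(bump_counter, &hits, 1, 1))
		printk("cross call timed out\n");
}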
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
new file mode 100644
index 000000000000..d48e397f5fa4
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -0,0 +1,341 @@
1/* $Id: time.c,v 1.19 2005/04/29 05:40:09 starvik Exp $
2 *
3 * linux/arch/cris/arch-v32/kernel/time.c
4 *
5 * Copyright (C) 2003 Axis Communications AB
6 *
7 */
8
9#include <linux/config.h>
10#include <linux/timex.h>
11#include <linux/time.h>
12#include <linux/jiffies.h>
13#include <linux/interrupt.h>
14#include <linux/swap.h>
15#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/threads.h>
18#include <asm/types.h>
19#include <asm/signal.h>
20#include <asm/io.h>
21#include <asm/delay.h>
22#include <asm/rtc.h>
23#include <asm/irq.h>
24
25#include <asm/arch/hwregs/reg_map.h>
26#include <asm/arch/hwregs/reg_rdwr.h>
27#include <asm/arch/hwregs/timer_defs.h>
28#include <asm/arch/hwregs/intr_vect_defs.h>
29
30/* Watchdog defines */
31#define ETRAX_WD_KEY_MASK 0x7F /* key is 7 bit */
32#define ETRAX_WD_HZ 763 /* watchdog counts at 763 Hz */
33#define ETRAX_WD_CNT ((2*ETRAX_WD_HZ)/HZ + 1) /* Number of 763 counts before watchdog bites */
34
35unsigned long timer_regs[NR_CPUS] =
36{
37 regi_timer,
38#ifdef CONFIG_SMP
39 regi_timer2
40#endif
41};
42
43extern void update_xtime_from_cmos(void);
44extern int set_rtc_mmss(unsigned long nowtime);
45extern int setup_irq(int, struct irqaction *);
46extern int have_rtc;
47
48unsigned long get_ns_in_jiffie(void)
49{
50 reg_timer_r_tmr0_data data;
51 unsigned long ns;
52
53 data = REG_RD(timer, regi_timer, r_tmr0_data);
54 ns = (TIMER0_DIV - data) * 10;
55 return ns;
56}
57
58unsigned long do_slow_gettimeoffset(void)
59{
60 unsigned long count;
61 unsigned long usec_count = 0;
62
63 static unsigned long count_p = TIMER0_DIV;/* for the first call after boot */
64 static unsigned long jiffies_p = 0;
65
66 /*
67 * cache volatile jiffies temporarily; we have IRQs turned off.
68 */
69 unsigned long jiffies_t;
70
71 /* The timer interrupt comes from Etrax timer 0. In order to get
72 * better precision, we check the current value. It might have
73 * underflowed already though.
74 */
75
76 count = REG_RD(timer, regi_timer, r_tmr0_data);
77 jiffies_t = jiffies;
78
79 /*
80 * avoiding timer inconsistencies (they are rare, but they happen)...
81 * there is one problem that must be avoided here:
82 * 1. the timer counter underflows
83 */
84 if( jiffies_t == jiffies_p ) {
85 if( count > count_p ) {
86 /* The timer wrapped, so account for the time
87 * corresponding to one full jiffy
88 */
89 usec_count = 1000000/HZ;
90 }
91 } else
92 jiffies_p = jiffies_t;
93 count_p = count;
94 /* Convert timer value to usec */
95 /* 100 MHz timer, divide by 100 to get usec */
96 usec_count += (TIMER0_DIV - count) / 100;
97 return usec_count;
98}
99
100/* From timer MDS describing the hardware watchdog:
101 * 4.3.1 Watchdog Operation
102 * The watchdog timer is an 8-bit timer with a configurable start value.
103 * Once started, the watchdog counts downwards with a frequency of 763 Hz
104 * (100/131072 MHz). When the watchdog counts down to 1, it generates an
105 * NMI (Non Maskable Interrupt), and when it counts down to 0, it resets the
106 * chip.
107 */
108/* This gives us 1.3 ms to do something useful when the NMI comes */
109
110/* right now, starting the watchdog is the same as resetting it */
111#define start_watchdog reset_watchdog
112
113#if defined(CONFIG_ETRAX_WATCHDOG)
114static short int watchdog_key = 42; /* arbitrary 7 bit number */
115#endif
116
117/* Number of free pages below which we consider ourselves "out of memory".
118 * It is normal that most memory is in use, so put this really low.
119 */
120
121#define WATCHDOG_MIN_FREE_PAGES 8
122
123void
124reset_watchdog(void)
125{
126#if defined(CONFIG_ETRAX_WATCHDOG)
127 reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
128
129 /* only keep watchdog happy as long as we have memory left! */
130 if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
131 /* reset the watchdog with the inverse of the old key */
132 watchdog_key ^= ETRAX_WD_KEY_MASK; /* invert key, which is 7 bits */
133 wd_ctrl.cnt = ETRAX_WD_CNT;
134 wd_ctrl.cmd = regk_timer_start;
135 wd_ctrl.key = watchdog_key;
136 REG_WR(timer, regi_timer, rw_wd_ctrl, wd_ctrl);
137 }
138#endif
139}
140
141/* stop the watchdog - we still need the correct key */
142
143void
144stop_watchdog(void)
145{
146#if defined(CONFIG_ETRAX_WATCHDOG)
147 reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
148 watchdog_key ^= ETRAX_WD_KEY_MASK; /* invert key, which is 7 bits */
149 wd_ctrl.cnt = ETRAX_WD_CNT;
150 wd_ctrl.cmd = regk_timer_stop;
151 wd_ctrl.key = watchdog_key;
152 REG_WR(timer, regi_timer, rw_wd_ctrl, wd_ctrl);
153#endif
154}
155
156extern void show_registers(struct pt_regs *regs);
157
158void
159handle_watchdog_bite(struct pt_regs* regs)
160{
161#if defined(CONFIG_ETRAX_WATCHDOG)
162 extern int cause_of_death;
163
164 raw_printk("Watchdog bite\n");
165
166 /* Check if forced restart or unexpected watchdog */
167 if (cause_of_death == 0xbedead) {
168 while(1);
169 }
170
171 /* Unexpected watchdog, stop the watchdog and dump registers*/
172 stop_watchdog();
173 raw_printk("Oops: bitten by watchdog\n");
174 show_registers(regs);
175#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
176 reset_watchdog();
177#endif
178 while(1) /* nothing */;
179#endif
180}
181
182/* last time the cmos clock got updated */
183static long last_rtc_update = 0;
184
185/*
186 * timer_interrupt() needs to keep up the real-time clock,
187 * as well as call the "do_timer()" routine every clocktick
188 */
189
190//static unsigned short myjiff; /* used by our debug routine print_timestamp */
191
192extern void cris_do_profile(struct pt_regs *regs);
193
194static inline irqreturn_t
195timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
196{
197 int cpu = smp_processor_id();
198 reg_timer_r_masked_intr masked_intr;
199 reg_timer_rw_ack_intr ack_intr = { 0 };
200
201 /* Check if the timer interrupt is for us (a tmr0 int) */
202 masked_intr = REG_RD(timer, timer_regs[cpu], r_masked_intr);
203 if (!masked_intr.tmr0)
204 return IRQ_NONE;
205
206 /* acknowledge the timer irq */
207 ack_intr.tmr0 = 1;
208 REG_WR(timer, timer_regs[cpu], rw_ack_intr, ack_intr);
209
210 /* reset watchdog otherwise it resets us! */
211 reset_watchdog();
212
213 /* Update statistics. */
214 update_process_times(user_mode(regs));
215
216 cris_do_profile(regs); /* Save profiling information */
217
218 /* The master CPU is responsible for the time keeping. */
219 if (cpu != 0)
220 return IRQ_HANDLED;
221
222 /* call the real timer interrupt handler */
223 do_timer(regs);
224
225 /*
226 * If we have an externally synchronized Linux clock, then update
227 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
228 * called as close as possible to 500 ms before the new second starts.
229 *
230 * The division here is not time critical since it will run once in
231 * 11 minutes
232 */
233 if ((time_status & STA_UNSYNC) == 0 &&
234 xtime.tv_sec > last_rtc_update + 660 &&
235 (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
236 (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
237 if (set_rtc_mmss(xtime.tv_sec) == 0)
238 last_rtc_update = xtime.tv_sec;
239 else
240 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
241 }
242 return IRQ_HANDLED;
243}
244
245/* The timer is SA_SHIRQ so drivers can add stuff to the timer irq chain;
246 * it needs to be SA_INTERRUPT to make the jiffies update work properly.
247 */
248
249static struct irqaction irq_timer = { timer_interrupt, SA_SHIRQ | SA_INTERRUPT,
250 CPU_MASK_NONE, "timer", NULL, NULL};
251
252void __init
253cris_timer_init(void)
254{
255 int cpu = smp_processor_id();
256 reg_timer_rw_tmr0_ctrl tmr0_ctrl = { 0 };
257 reg_timer_rw_tmr0_div tmr0_div = TIMER0_DIV;
258 reg_timer_rw_intr_mask timer_intr_mask;
259
260 /* Set up the ETRAX timers.
261 * Base frequency is 100 MHz, divider 1000000 -> 100 Hz.
262 * We use timer0, so timer1 is free.
263 * The trig timer is used by the fasttimer API if enabled.
264 */
265
266 tmr0_ctrl.op = regk_timer_ld;
267 tmr0_ctrl.freq = regk_timer_f100;
268 REG_WR(timer, timer_regs[cpu], rw_tmr0_div, tmr0_div);
269 REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Load */
270 tmr0_ctrl.op = regk_timer_run;
271 REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Start */
272
273 /* enable the timer irq */
274 timer_intr_mask = REG_RD(timer, timer_regs[cpu], rw_intr_mask);
275 timer_intr_mask.tmr0 = 1;
276 REG_WR(timer, timer_regs[cpu], rw_intr_mask, timer_intr_mask);
277}
278
279void __init
280time_init(void)
281{
282 reg_intr_vect_rw_mask intr_mask;
283
284 /* probe for the RTC and read it if it exists
285 * Before the RTC can be probed the loops_per_usec variable needs
286 * to be initialized to make usleep work. A better value for
287 * loops_per_usec is calculated by the kernel later once the
288 * clock has started.
289 */
290 loops_per_usec = 50;
291
292 if(RTC_INIT() < 0) {
293 /* no RTC, start at the epoch (1970) */
294 xtime.tv_sec = 0;
295 xtime.tv_nsec = 0;
296 have_rtc = 0;
297 } else {
298 /* get the current time */
299 have_rtc = 1;
300 update_xtime_from_cmos();
301 }
302
303 /*
304 * Initialize wall_to_monotonic such that adding it to xtime will yield zero; the
305 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
306 */
307 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
308
309 /* Start CPU local timer */
310 cris_timer_init();
311
312 /* enable the timer irq in global config */
313 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
314 intr_mask.timer = 1;
315 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
316
317 /* now actually register the timer irq handler that calls timer_interrupt() */
318
319 setup_irq(TIMER_INTR_VECT, &irq_timer);
320
321 /* enable watchdog if we should use one */
322
323#if defined(CONFIG_ETRAX_WATCHDOG)
324 printk("Enabling watchdog...\n");
325 start_watchdog();
326
327 /* If we use the hardware watchdog, we want to trap it as an NMI
328 and dump registers before it resets us. For this to happen, we
329 must set the "m" NMI enable flag (which once set, is unset only
330 when an NMI is taken).
331
332 The same goes for the external NMI, but that doesn't have any
333 driver or infrastructure support yet. */
334 {
335 unsigned long flags;
336 local_save_flags(flags);
337 flags |= (1<<30); /* NMI M flag is at bit 30 */
338 local_irq_restore(flags);
339 }
340#endif
341}
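A quick host-side check of the timing arithmetic used in this file (an illustrative standalone program; HZ = 100 and the 100 MHz / 1000000 divider are taken from the comments above):

#include <stdio.h>

int main(void)
{
	const int hz = 100;			/* kernel tick rate assumed above */
	const int wd_hz = 763;			/* watchdog counts at 763 Hz */
	const long timer0_div = 1000000;	/* 100 MHz / 1000000 -> 100 Hz */
	int wd_cnt = (2 * wd_hz) / hz + 1;	/* same expression as ETRAX_WD_CNT */

	printf("watchdog reload count: %d (~%.1f ms until bite)\n",
	       wd_cnt, wd_cnt * 1000.0 / wd_hz);
	printf("NMI margin (one count): %.2f ms\n", 1000.0 / wd_hz);
	printf("timer0 rate: %ld Hz\n", 100000000L / timer0_div);
	return 0;
}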
diff --git a/arch/cris/arch-v32/kernel/traps.c b/arch/cris/arch-v32/kernel/traps.c
new file mode 100644
index 000000000000..6e3787045560
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/traps.c
@@ -0,0 +1,160 @@
1/*
2 * Copyright (C) 2003, Axis Communications AB.
3 */
4
5#include <linux/config.h>
6#include <linux/ptrace.h>
7#include <asm/uaccess.h>
8
9#include <asm/arch/hwregs/supp_reg.h>
10
11extern void reset_watchdog(void);
12extern void stop_watchdog(void);
13
14extern int raw_printk(const char *fmt, ...);
15
16void
17show_registers(struct pt_regs *regs)
18{
19 /*
20 * It's possible to use either the USP register or current->thread.usp.
21 * USP might not correspond to the current process in all cases where this
22 * function is called, and current->thread.usp isn't up to date for the
23 * current process. Experience shows that using USP is the way to go.
24 */
25 unsigned long usp;
26 unsigned long d_mmu_cause;
27 unsigned long i_mmu_cause;
28
29 usp = rdusp();
30
31 raw_printk("CPU: %d\n", smp_processor_id());
32
33 raw_printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
34 regs->erp, regs->srp, regs->ccs, usp, regs->mof);
35
36 raw_printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
37 regs->r0, regs->r1, regs->r2, regs->r3);
38
39 raw_printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
40 regs->r4, regs->r5, regs->r6, regs->r7);
41
42 raw_printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
43 regs->r8, regs->r9, regs->r10, regs->r11);
44
45 raw_printk("r12: %08lx r13: %08lx oR10: %08lx acr: %08lx\n",
46 regs->r12, regs->r13, regs->orig_r10, regs->acr);
47
48 raw_printk("sp: %08lx\n", regs);
49
50 SUPP_BANK_SEL(BANK_IM);
51 SUPP_REG_RD(RW_MM_CAUSE, i_mmu_cause);
52
53 SUPP_BANK_SEL(BANK_DM);
54 SUPP_REG_RD(RW_MM_CAUSE, d_mmu_cause);
55
56 raw_printk(" Data MMU Cause: %08lx\n", d_mmu_cause);
57 raw_printk("Instruction MMU Cause: %08lx\n", i_mmu_cause);
58
59 raw_printk("Process %s (pid: %d, stackpage: %08lx)\n",
60 current->comm, current->pid, (unsigned long) current);
61
62 /* Show additional info if in kernel-mode. */
63 if (!user_mode(regs)) {
64 int i;
65 unsigned char c;
66
67 show_stack(NULL, (unsigned long *) usp);
68
69 /*
70 * If the previous stack-dump wasn't a kernel one, dump the
71 * kernel stack now.
72 */
73 if (usp != 0)
74 show_stack(NULL, NULL);
75
76 raw_printk("\nCode: ");
77
78 if (regs->erp < PAGE_OFFSET)
79 goto bad_value;
80
81 /*
82 * Quite often the value at regs->erp doesn't point to the
83 * interesting instruction, which often is the previous
84 * instruction. So dump at an offset large enough that the
85 * instruction decoding should be in sync at the interesting
86 * point, but small enough to fit on a row. The regs->erp
87 * location is pointed out in a ksymoops-friendly way by
88 * wrapping the byte for that address in parenthesis.
89 */
90 for (i = -12; i < 12; i++) {
91 if (__get_user(c, &((unsigned char *) regs->erp)[i])) {
92bad_value:
93 raw_printk(" Bad IP value.");
94 break;
95 }
96
97 if (i == 0)
98 raw_printk("(%02x) ", c);
99 else
100 raw_printk("%02x ", c);
101 }
102
103 raw_printk("\n");
104 }
105}
106
107/*
108 * This gets called from entry.S when the watchdog has bitten. Show something
109 * similar to an Oops dump, and if the kernel is configured to be a nice doggy,
110 * halt instead of reboot.
111 */
112void
113watchdog_bite_hook(struct pt_regs *regs)
114{
115#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
116 local_irq_disable();
117 stop_watchdog();
118 show_registers(regs);
119
120 while (1)
121 ; /* Do nothing. */
122#else
123 show_registers(regs);
124#endif
125}
126
127/* This is normally the Oops function. */
128void
129die_if_kernel(const char *str, struct pt_regs *regs, long err)
130{
131 if (user_mode(regs))
132 return;
133
134#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
135 /*
136 * This printout might take too long and could trigger
137 * the watchdog normally. If NICE_DOGGY is set, simply
138 * stop the watchdog during the printout.
139 */
140 stop_watchdog();
141#endif
142
143 raw_printk("%s: %04lx\n", str, err & 0xffff);
144
145 show_registers(regs);
146
147#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
148 reset_watchdog();
149#endif
150
151 do_exit(SIGSEGV);
152}
153
154void arch_enable_nmi(void)
155{
156 unsigned long flags;
157 local_save_flags(flags);
158 flags |= (1<<30); /* NMI M flag is at bit 30 */
159 local_irq_restore(flags);
160}
diff --git a/arch/cris/arch-v32/kernel/vcs_hook.c b/arch/cris/arch-v32/kernel/vcs_hook.c
new file mode 100644
index 000000000000..64d71c54c22c
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/vcs_hook.c
@@ -0,0 +1,96 @@
1// $Id: vcs_hook.c,v 1.2 2003/08/12 12:01:06 starvik Exp $
2//
3// Call simulator hook. This is the part running in the
4// simulated program.
5//
6
7#include "vcs_hook.h"
8#include <stdarg.h>
9#include <asm/arch-v32/hwregs/reg_map.h>
10#include <asm/arch-v32/hwregs/intr_vect_defs.h>
11
12#define HOOK_TRIG_ADDR 0xb7000000 /* hook cvlog model reg address */
13#define HOOK_MEM_BASE_ADDR 0xa0000000 /* csp4 (shared mem) base addr */
14
15#define HOOK_DATA(offset) ((unsigned*) HOOK_MEM_BASE_ADDR)[offset]
16#define VHOOK_DATA(offset) ((volatile unsigned*) HOOK_MEM_BASE_ADDR)[offset]
17#define HOOK_TRIG(funcid) do { *((unsigned *) HOOK_TRIG_ADDR) = funcid; } while(0)
18#define HOOK_DATA_BYTE(offset) ((unsigned char*) HOOK_MEM_BASE_ADDR)[offset]
19
20
21// ------------------------------------------------------------------ hook_call
22int hook_call( unsigned id, unsigned pcnt, ...) {
23 va_list ap;
24 unsigned i;
25 unsigned ret;
26#ifdef USING_SOS
27 PREEMPT_OFF_SAVE();
28#endif
29
30 // pass parameters
31 HOOK_DATA(0) = id;
32
33 /* Have to make hook_print_str a special case since we call with a
34 parameter of byte type. Should perhaps be a separate
35 hook_call. */
36
37 if (id == hook_print_str) {
38 int i;
39 char *str;
40
41 HOOK_DATA(1) = pcnt;
42
43 va_start(ap, pcnt);
44 str = (char*)va_arg(ap,unsigned);
45
46 for (i=0; i!=pcnt; i++) {
47 HOOK_DATA_BYTE(8+i) = str[i];
48 }
49 HOOK_DATA_BYTE(8+i) = 0; /* null byte */
50 }
51 else {
52 va_start(ap, pcnt);
53 for( i = 1; i <= pcnt; i++ ) HOOK_DATA(i) = va_arg(ap,unsigned);
54 va_end(ap);
55 }
56
57 // read from mem to make sure data has propagated to memory before triggering
58 *((volatile unsigned*) HOOK_MEM_BASE_ADDR);
59
60 // trigger hook
61 HOOK_TRIG(id);
62
63 // wait for call to finish
64 while( VHOOK_DATA(0) > 0 ) {}
65
66 // extract return value
67
68 ret = VHOOK_DATA(1);
69
70#ifdef USING_SOS
71 PREEMPT_RESTORE();
72#endif
73 return ret;
74}
75
76unsigned
77hook_buf(unsigned i)
78{
79 return (HOOK_DATA(i));
80}
81
82void print_str( const char *str ) {
83 int i;
84 for (i=1; str[i]; i++); /* find null at end of string */
85 hook_call(hook_print_str, i, str);
86}
87
88// --------------------------------------------------------------- CPU_KICK_DOG
89void CPU_KICK_DOG(void) {
90 (void) hook_call( hook_kick_dog, 0 );
91}
92
93// ------------------------------------------------------- CPU_WATCHDOG_TIMEOUT
94void CPU_WATCHDOG_TIMEOUT( unsigned t ) {
95 (void) hook_call( hook_dog_timeout, 1, t );
96}
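For reference, this is roughly how the hooks above would be driven from simulated code. The function below is a hypothetical example; the calls only do something meaningful when the code runs under the simulator that implements the trigger address.

/* Hypothetical simulated-code snippet exercising the hooks above. */
static void report_progress(void)
{
	print_str("booted OK");		/* goes through hook_print_str */
	CPU_KICK_DOG();			/* keep the simulated watchdog calm */
	CPU_WATCHDOG_TIMEOUT(10);	/* hypothetical 10-unit timeout */
}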
diff --git a/arch/cris/arch-v32/kernel/vcs_hook.h b/arch/cris/arch-v32/kernel/vcs_hook.h
new file mode 100644
index 000000000000..7d73709e3cc6
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/vcs_hook.h
@@ -0,0 +1,42 @@
1// $Id: vcs_hook.h,v 1.1 2003/08/12 12:01:06 starvik Exp $
2//
3// Call simulator hook functions
4
5#ifndef HOOK_H
6#define HOOK_H
7
8int hook_call( unsigned id, unsigned pcnt, ...);
9
10enum hook_ids {
11 hook_debug_on = 1,
12 hook_debug_off,
13 hook_stop_sim_ok,
14 hook_stop_sim_fail,
15 hook_alloc_shared,
16 hook_ptr_shared,
17 hook_free_shared,
18 hook_file2shared,
19 hook_cmp_shared,
20 hook_print_params,
21 hook_sim_time,
22 hook_stop_sim,
23 hook_kick_dog,
24 hook_dog_timeout,
25 hook_rand,
26 hook_srand,
27 hook_rand_range,
28 hook_print_str,
29 hook_print_hex,
30 hook_cmp_offset_shared,
31 hook_fill_random_shared,
32 hook_alloc_random_data,
33 hook_calloc_random_data,
34 hook_print_int,
35 hook_print_uint,
36 hook_fputc,
37 hook_init_fd,
38 hook_sbrk
39
40};
41
42#endif
diff --git a/arch/cris/arch-v32/lib/Makefile b/arch/cris/arch-v32/lib/Makefile
new file mode 100644
index 000000000000..05b3ec6978d6
--- /dev/null
+++ b/arch/cris/arch-v32/lib/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for Etrax-specific library files..
3#
4
5lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o spinlock.o
6
diff --git a/arch/cris/arch-v32/lib/checksum.S b/arch/cris/arch-v32/lib/checksum.S
new file mode 100644
index 000000000000..32e66181b826
--- /dev/null
+++ b/arch/cris/arch-v32/lib/checksum.S
@@ -0,0 +1,111 @@
1/*
2 * A fast checksum routine using movem
3 * Copyright (c) 1998-2001, 2003 Axis Communications AB
4 *
5 * csum_partial(const unsigned char * buff, int len, unsigned int sum)
6 */
7
8 .globl csum_partial
9csum_partial:
10
11 ;; r10 - src
12 ;; r11 - length
13 ;; r12 - checksum
14
15 ;; check for breakeven length between movem and normal word looping versions
16 ;; we also do _NOT_ want to compute a checksum over more than the
17 ;; actual length when length < 40
18
19 cmpu.w 80,$r11
20 blo _word_loop
21 nop
22
23 ;; need to save the registers we use below in the movem loop
24 ;; this overhead is why we have a check above for breakeven length
25 ;; only r0 - r8 have to be saved, the other ones are clobber-able
26 ;; according to the ABI
27
28 subq 9*4,$sp
29 subq 10*4,$r11 ; update length for the first loop
30 movem $r8,[$sp]
31
32 ;; do a movem checksum
33
34_mloop: movem [$r10+],$r9 ; read 10 longwords
35
36 ;; perform dword checksumming on the 10 longwords
37
38 add.d $r0,$r12
39 addc $r1,$r12
40 addc $r2,$r12
41 addc $r3,$r12
42 addc $r4,$r12
43 addc $r5,$r12
44 addc $r6,$r12
45 addc $r7,$r12
46 addc $r8,$r12
47 addc $r9,$r12
48
49 ;; fold the carry into the checksum, to avoid having to loop the carry
50 ;; back into the top
51
52 addc 0,$r12
53 addc 0,$r12 ; do it again, since we might have generated a carry
54
55 subq 10*4,$r11
56 bge _mloop
57 nop
58
59 addq 10*4,$r11 ; compensate for last loop underflowing length
60
61 movem [$sp+],$r8 ; restore regs
62
63_word_loop:
64 ;; only fold if there is anything to fold.
65
66 cmpq 0,$r12
67 beq _no_fold
68
69 ;; fold 32-bit checksum into a 16-bit checksum, to avoid carries below.
70 ;; r9 and r13 can be used as temporaries.
71
72 moveq -1,$r9 ; put 0xffff in r9, faster than move.d 0xffff,r9
73 lsrq 16,$r9
74
75 move.d $r12,$r13
76 lsrq 16,$r13 ; r13 = checksum >> 16
77 and.d $r9,$r12 ; checksum = checksum & 0xffff
78 add.d $r13,$r12 ; checksum += r13
79 move.d $r12,$r13 ; do the same again, maybe we got a carry last add
80 lsrq 16,$r13
81 and.d $r9,$r12
82 add.d $r13,$r12
83
84_no_fold:
85 cmpq 2,$r11
86 blt _no_words
87 nop
88
89 ;; checksum the rest of the words
90
91 subq 2,$r11
92
93_wloop: subq 2,$r11
94 bge _wloop
95 addu.w [$r10+],$r12
96
97 addq 2,$r11
98
99_no_words:
100 ;; see if we have one odd byte more
101 cmpq 1,$r11
102 beq _do_byte
103 nop
104 ret
105 move.d $r12,$r10
106
107_do_byte:
108 ;; copy and checksum the last byte
109 addu.b [$r10],$r12
110 ret
111 move.d $r12,$r10
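The fold at _word_loop collapses the 32-bit accumulator into 16 bits twice, so that a carry produced by the first fold cannot survive. An equivalent C rendition of just that step, handy for checking the logic on a host (illustrative, not part of the patch):

#include <stdio.h>

/* Fold a 32-bit checksum accumulator into 16 bits, twice, exactly as the
 * assembly above does, so a carry out of the first fold is absorbed. */
static unsigned int fold32(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* 0xffff0001 folds to 0x10000 on the first pass; the second pass fixes it. */
	printf("%#x\n", fold32(0xffff0001u));	/* prints 0x1 */
	return 0;
}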
diff --git a/arch/cris/arch-v32/lib/checksumcopy.S b/arch/cris/arch-v32/lib/checksumcopy.S
new file mode 100644
index 000000000000..9303ccbadc6d
--- /dev/null
+++ b/arch/cris/arch-v32/lib/checksumcopy.S
@@ -0,0 +1,120 @@
1/*
2 * A fast checksum+copy routine using movem
3 * Copyright (c) 1998, 2001, 2003 Axis Communications AB
4 *
5 * Authors: Bjorn Wesen
6 *
7 * csum_partial_copy_nocheck(const char *src, char *dst,
8 * int len, unsigned int sum)
9 */
10
11 .globl csum_partial_copy_nocheck
12csum_partial_copy_nocheck:
13
14 ;; r10 - src
15 ;; r11 - dst
16 ;; r12 - length
17 ;; r13 - checksum
18
19 ;; check for breakeven length between movem and normal word looping versions
20 ;; we also do _NOT_ want to compute a checksum over more than the
21 ;; actual length when length < 40
22
23 cmpu.w 80,$r12
24 blo _word_loop
25 nop
26
27 ;; need to save the registers we use below in the movem loop
28 ;; this overhead is why we have a check above for breakeven length
29 ;; only r0 - r8 have to be saved, the other ones are clobber-able
30 ;; according to the ABI
31
32 subq 9*4,$sp
33 subq 10*4,$r12 ; update length for the first loop
34 movem $r8,[$sp]
35
36 ;; do a movem copy and checksum
37
381: ;; A failing userspace access (the read) will have this as PC.
39_mloop: movem [$r10+],$r9 ; read 10 longwords
40 movem $r9,[$r11+] ; write 10 longwords
41
42 ;; perform dword checksumming on the 10 longwords
43
44 add.d $r0,$r13
45 addc $r1,$r13
46 addc $r2,$r13
47 addc $r3,$r13
48 addc $r4,$r13
49 addc $r5,$r13
50 addc $r6,$r13
51 addc $r7,$r13
52 addc $r8,$r13
53 addc $r9,$r13
54
55 ;; fold the carry into the checksum, to avoid having to loop the carry
56 ;; back into the top
57
58 addc 0,$r13
59 addc 0,$r13 ; do it again, since we might have generated a carry
60
61 subq 10*4,$r12
62 bge _mloop
63 nop
64
65 addq 10*4,$r12 ; compensate for last loop underflowing length
66
67 movem [$sp+],$r8 ; restore regs
68
69_word_loop:
70 ;; only fold if there is anything to fold.
71
72 cmpq 0,$r13
73 beq _no_fold
74
75 ;; fold 32-bit checksum into a 16-bit checksum, to avoid carries below
76 ;; r9 can be used as temporary.
77
78 move.d $r13,$r9
79 lsrq 16,$r9 ; r9 = checksum >> 16
80 and.d 0xffff,$r13 ; checksum = checksum & 0xffff
81 add.d $r9,$r13 ; checksum += r9
82 move.d $r13,$r9 ; do the same again, maybe we got a carry last add
83 lsrq 16,$r9
84 and.d 0xffff,$r13
85 add.d $r9,$r13
86
87_no_fold:
88 cmpq 2,$r12
89 blt _no_words
90 nop
91
92 ;; copy and checksum the rest of the words
93
94 subq 2,$r12
95
962: ;; A failing userspace access for the read below will have this as PC.
97_wloop: move.w [$r10+],$r9
98 addu.w $r9,$r13
99 subq 2,$r12
100 bge _wloop
101 move.w $r9,[$r11+]
102
103 addq 2,$r12
104
105_no_words:
106 ;; see if we have one odd byte more
107 cmpq 1,$r12
108 beq _do_byte
109 nop
110 ret
111 move.d $r13,$r10
112
113_do_byte:
114 ;; copy and checksum the last byte
1153: ;; A failing userspace access for the read below will have this as PC.
116 move.b [$r10],$r9
117 addu.b $r9,$r13
118 move.b $r9,[$r11]
119 ret
120 move.d $r13,$r10
diff --git a/arch/cris/arch-v32/lib/csumcpfruser.S b/arch/cris/arch-v32/lib/csumcpfruser.S
new file mode 100644
index 000000000000..600ec16b9f28
--- /dev/null
+++ b/arch/cris/arch-v32/lib/csumcpfruser.S
@@ -0,0 +1,69 @@
1/*
2 * Add-on to transform csum_partial_copy_nocheck in checksumcopy.S into
3 * csum_partial_copy_from_user by adding exception records.
4 *
5 * Copyright (C) 2001, 2003 Axis Communications AB.
6 *
7 * Author: Hans-Peter Nilsson.
8 */
9
10#include <asm/errno.h>
11
12/* Same function body, but a different name. If we just added exception
13 records to _csum_partial_copy_nocheck and made it generic, we wouldn't
14 know a user fault from a kernel fault and we would have overhead in
15 each kernel caller for the error-pointer argument.
16
17 unsigned int csum_partial_copy_from_user
18 (const char *src, char *dst, int len, unsigned int sum, int *errptr);
19
20 Note that the errptr argument is only set if we encounter an error.
21 It is conveniently located on the stack, so the normal function body
22 does not have to handle it. */
23
24#define csum_partial_copy_nocheck csum_partial_copy_from_user
25
26/* There are local labels numbered 1, 2 and 3 present to mark the
27 different from-user accesses. */
28#include "checksumcopy.S"
29
30 .section .fixup,"ax"
31
32;; Here from the movem loop; restore stack.
334:
34 movem [$sp+],$r8
35;; r12 is already decremented. Add back chunk_size-2.
36 addq 40-2,$r12
37
38;; Here from the word loop; r12 is off by 2; add it back.
395:
40 addq 2,$r12
41
42;; Here from a failing single byte.
436:
44
45;; Signal in *errptr that we had a failing access.
46 move.d [$sp],$acr
47 moveq -EFAULT,$r9
48 subq 4,$sp
49 move.d $r9,[$acr]
50
51;; Clear the rest of the destination area using memset. Preserve the
52;; checksum for the readable bytes.
53 move.d $r13,[$sp]
54 subq 4,$sp
55 move.d $r11,$r10
56 move $srp,[$sp]
57 jsr memset
58 clear.d $r11
59
60 move [$sp+],$srp
61 ret
62 move.d [$sp+],$r10
63
64 .previous
65 .section __ex_table,"a"
66 .dword 1b,4b
67 .dword 2b,5b
68 .dword 3b,6b
69 .previous
diff --git a/arch/cris/arch-v32/lib/dram_init.S b/arch/cris/arch-v32/lib/dram_init.S
new file mode 100644
index 000000000000..47b6cf5f4afd
--- /dev/null
+++ b/arch/cris/arch-v32/lib/dram_init.S
@@ -0,0 +1,120 @@
1/* $Id: dram_init.S,v 1.4 2005/04/24 18:48:32 starvik Exp $
2 *
3 * DRAM/SDRAM initialization - alter with care
4 * This file is intended to be included from other assembler files
5 *
6 * Note: This file may not modify r8 or r9 because they are used to
7 * carry information from the decompressor to the kernel
8 *
9 * Copyright (C) 2000-2003 Axis Communications AB
10 *
11 * Authors: Mikael Starvik (starvik@axis.com)
12 */
13
14/* Just to be certain the config file is included, we include it here
15 * explicitly instead of depending on it being included in the file that
16 * uses this code.
17 */
18
19#include <linux/config.h>
20#include <asm/arch/hwregs/asm/reg_map_asm.h>
21#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>
22
23 ;; WARNING! The registers r8 and r9 are used as parameters carrying
24 ;; information from the decompressor (if the kernel was compressed).
25 ;; They should not be used in the code below.
26
27 ; Refer to BIF MDS for a description of SDRAM initialization
28
29 ; Bank configuration
30 move.d REG_ADDR(bif_core, regi_bif_core, rw_sdram_cfg_grp0), $r0
31 move.d CONFIG_ETRAX_SDRAM_GRP0_CONFIG, $r1
32 move.d $r1, [$r0]
33 move.d REG_ADDR(bif_core, regi_bif_core, rw_sdram_cfg_grp1), $r0
34 move.d CONFIG_ETRAX_SDRAM_GRP1_CONFIG, $r1
35 move.d $r1, [$r0]
36
37 ; Calculate value of mrs_data
38 ; CAS latency = 2 && bus_width = 32 => 0x40
39 ; CAS latency = 3 && bus_width = 32 => 0x60
40 ; CAS latency = 2 && bus_width = 16 => 0x20
41 ; CAS latency = 3 && bus_width = 16 => 0x30
42
43 ; Check if value is already supplied in kernel config
44 move.d CONFIG_ETRAX_SDRAM_COMMAND, $r2
45 bne _set_timing
46 nop
47
48 move.d 0x40, $r4 ; Assume 32 bits and CAS latency = 2
49 move.d CONFIG_ETRAX_SDRAM_TIMING, $r1
50 and.d 0x07, $r1 ; Get CAS latency
51 cmpq 2, $r1 ; CL = 2 ?
52 beq _bw_check
53 nop
54 move.d 0x60, $r4
55
56_bw_check:
57 ; Assume that group 0 width is equal to group 1. This assumption
58 ; is wrong for a group 1 only hardware (such as the grand old
59 ; StorPoint+).
60 move.d CONFIG_ETRAX_SDRAM_GRP0_CONFIG, $r1
61 and.d 0x200, $r1 ; DRAM width is bit 9
62 beq _set_timing
63 lslq 2, $r4 ; mrs_data starts at bit 2
64 lsrq 1, $r4 ; 16 bits. Shift down value.
65
66 ; Set timing parameters (refresh off to avoid Guinness TR 83)
67_set_timing:
68 move.d CONFIG_ETRAX_SDRAM_TIMING, $r1
69 and.d ~(3 << reg_bif_core_rw_sdram_timing___ref___lsb), $r1
70 move.d REG_ADDR(bif_core, regi_bif_core, rw_sdram_timing), $r0
71 move.d $r1, [$r0]
72
73 ; Issue NOP command
74 move.d REG_ADDR(bif_core, regi_bif_core, rw_sdram_cmd), $r5
75 moveq regk_bif_core_nop, $r1
76 move.d $r1, [$r5]
77
78 ; Wait 200us
79 move.d 10000, $r2
801: bne 1b
81 subq 1, $r2
82
83 ; Issue initialization command sequence
84 move.d _sdram_commands_start, $r2
85 and.d 0x000fffff, $r2 ; Make sure commands are read from flash
86 move.d _sdram_commands_end, $r3
87 and.d 0x000fffff, $r3
881: clear.d $r6
89 move.b [$r2+], $r6 ; Load command
90 or.d $r4, $r6 ; Add calculated mrs
91 move.d $r6, [$r5] ; Write rw_sdram_cmd
92 ; Wait 80 ns between each command
93 move.d 4000, $r7
942: bne 2b
95 subq 1, $r7
96 cmp.d $r2, $r3 ; Last command?
97 bne 1b
98 nop
99
100 ; Start refresh
101 move.d CONFIG_ETRAX_SDRAM_TIMING, $r1
102 move.d REG_ADDR(bif_core, regi_bif_core, rw_sdram_timing), $r0
103 move.d $r1, [$r0]
104
105 ; Initialization finished
106 ba _sdram_commands_end
107 nop
108
109_sdram_commands_start:
110 .byte regk_bif_core_pre ; Precharge
111 .byte regk_bif_core_ref ; refresh
112 .byte regk_bif_core_ref ; refresh
113 .byte regk_bif_core_ref ; refresh
114 .byte regk_bif_core_ref ; refresh
115 .byte regk_bif_core_ref ; refresh
116 .byte regk_bif_core_ref ; refresh
117 .byte regk_bif_core_ref ; refresh
118 .byte regk_bif_core_ref ; refresh
119 .byte regk_bif_core_mrs ; mrs
120_sdram_commands_end:
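The mrs_data value computed above depends only on CAS latency and bus width, as listed in the comment near the top of the file. A small host-side table check of those four encodings (illustrative; it mirrors the "assume 0x40, use 0x60 for CL=3, halve for 16-bit" logic):

#include <stdio.h>

/* Reproduce the mrs_data encoding from the comment above:
 * CL=2/32-bit -> 0x40, CL=3/32-bit -> 0x60, CL=2/16-bit -> 0x20, CL=3/16-bit -> 0x30 */
static unsigned int mrs_data(int cas_latency, int bus_width_bits)
{
	unsigned int v = (cas_latency == 2) ? 0x40 : 0x60;	/* CL assumed 2 or 3 */

	if (bus_width_bits == 16)
		v >>= 1;	/* same halving the assembly does for 16-bit groups */
	return v;
}

int main(void)
{
	printf("%#x %#x %#x %#x\n",
	       mrs_data(2, 32), mrs_data(3, 32), mrs_data(2, 16), mrs_data(3, 16));
	return 0;
}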
diff --git a/arch/cris/arch-v32/lib/hw_settings.S b/arch/cris/arch-v32/lib/hw_settings.S
new file mode 100644
index 000000000000..5182e8c2cff2
--- /dev/null
+++ b/arch/cris/arch-v32/lib/hw_settings.S
@@ -0,0 +1,73 @@
1/*
2 * $Id: hw_settings.S,v 1.3 2005/04/24 18:36:57 starvik Exp $
3 *
4 * This table is used by some tools to extract hardware parameters.
5 * The table should be included in the kernel and the decompressor.
6 * Don't forget to update the tools if you change this table.
7 *
8 * Copyright (C) 2001 Axis Communications AB
9 *
10 * Authors: Mikael Starvik (starvik@axis.com)
11 */
12
13#include <linux/config.h>
14#include <asm/arch/hwregs/asm/reg_map_asm.h>
15#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>
16#include <asm/arch/hwregs/asm/gio_defs_asm.h>
17
18 .ascii "HW_PARAM_MAGIC" ; Magic number
19 .dword 0xc0004000 ; Kernel start address
20
21 ; Debug port
22#ifdef CONFIG_ETRAX_DEBUG_PORT0
23 .dword 0
24#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
25 .dword 1
26#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
27 .dword 2
28#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
29 .dword 3
30#else
31 .dword 4 ; No debug
32#endif
33
34 ; Register values
35 .dword REG_ADDR(bif_core, regi_bif_core, rw_grp1_cfg)
36 .dword CONFIG_ETRAX_MEM_GRP1_CONFIG
37 .dword REG_ADDR(bif_core, regi_bif_core, rw_grp2_cfg)
38 .dword CONFIG_ETRAX_MEM_GRP2_CONFIG
39 .dword REG_ADDR(bif_core, regi_bif_core, rw_grp3_cfg)
40 .dword CONFIG_ETRAX_MEM_GRP3_CONFIG
41 .dword REG_ADDR(bif_core, regi_bif_core, rw_grp4_cfg)
42 .dword CONFIG_ETRAX_MEM_GRP4_CONFIG
43 .dword REG_ADDR(bif_core, regi_bif_core, rw_sdram_cfg_grp0)
44 .dword CONFIG_ETRAX_SDRAM_GRP0_CONFIG
45 .dword REG_ADDR(bif_core, regi_bif_core, rw_sdram_cfg_grp1)
46 .dword CONFIG_ETRAX_SDRAM_GRP1_CONFIG
47 .dword REG_ADDR(bif_core, regi_bif_core, rw_sdram_timing)
48 .dword CONFIG_ETRAX_SDRAM_TIMING
49 .dword REG_ADDR(bif_core, regi_bif_core, rw_sdram_cmd)
50 .dword CONFIG_ETRAX_SDRAM_COMMAND
51
52 .dword REG_ADDR(gio, regi_gio, rw_pa_dout)
53 .dword CONFIG_ETRAX_DEF_GIO_PA_OUT
54 .dword REG_ADDR(gio, regi_gio, rw_pa_oe)
55 .dword CONFIG_ETRAX_DEF_GIO_PA_OE
56 .dword REG_ADDR(gio, regi_gio, rw_pb_dout)
57 .dword CONFIG_ETRAX_DEF_GIO_PB_OUT
58 .dword REG_ADDR(gio, regi_gio, rw_pb_oe)
59 .dword CONFIG_ETRAX_DEF_GIO_PB_OE
60 .dword REG_ADDR(gio, regi_gio, rw_pc_dout)
61 .dword CONFIG_ETRAX_DEF_GIO_PC_OUT
62 .dword REG_ADDR(gio, regi_gio, rw_pc_oe)
63 .dword CONFIG_ETRAX_DEF_GIO_PC_OE
64 .dword REG_ADDR(gio, regi_gio, rw_pd_dout)
65 .dword CONFIG_ETRAX_DEF_GIO_PD_OUT
66 .dword REG_ADDR(gio, regi_gio, rw_pd_oe)
67 .dword CONFIG_ETRAX_DEF_GIO_PD_OE
68 .dword REG_ADDR(gio, regi_gio, rw_pe_dout)
69 .dword CONFIG_ETRAX_DEF_GIO_PE_OUT
70 .dword REG_ADDR(gio, regi_gio, rw_pe_oe)
71 .dword CONFIG_ETRAX_DEF_GIO_PE_OE
72
73 .dword 0 ; No more register values
diff --git a/arch/cris/arch-v32/lib/memset.c b/arch/cris/arch-v32/lib/memset.c
new file mode 100644
index 000000000000..ffca1214674e
--- /dev/null
+++ b/arch/cris/arch-v32/lib/memset.c
@@ -0,0 +1,253 @@
1/*#************************************************************************#*/
2/*#-------------------------------------------------------------------------*/
3/*# */
4/*# FUNCTION NAME: memset() */
5/*# */
6/*# PARAMETERS: void* dst; Destination address. */
7/*# int c; Value of byte to write. */
8/*# int len; Number of bytes to write. */
9/*# */
10/*# RETURNS: dst. */
11/*# */
12/*# DESCRIPTION: Sets the memory dst of length len bytes to c, as standard. */
13/*# Framework taken from memcpy. This routine is */
14/*# very sensitive to compiler changes in register allocation. */
15/*# Should really be rewritten to avoid this problem. */
16/*# */
17/*#-------------------------------------------------------------------------*/
18/*# */
19/*# HISTORY */
20/*# */
21/*# DATE NAME CHANGES */
22/*# ---- ---- ------- */
23/*# 990713 HP Tired of watching this function (or */
24/*# really, the nonoptimized generic */
25/*# implementation) take up 90% of simulator */
26/*# output. Measurements needed. */
27/*# */
28/*#-------------------------------------------------------------------------*/
29
30#include <linux/types.h>
31
32/* No, there's no macro saying 12*4, since it is "hard" to get it into
33 the asm in a good way. Thus better to expose the problem everywhere.
34 */
35
36/* Assuming 1 cycle per dword written or read (ok, not really true), and
37 one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
38 so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */
39
40#define ZERO_BLOCK_SIZE (1*12*4)
41
42void *memset(void *pdst,
43 int c,
44 size_t plen)
45{
46 /* Ok. Now we want the parameters put in special registers.
47 Make sure the compiler is able to make something useful of this. */
48
49 register char *return_dst __asm__ ("r10") = pdst;
50 register int n __asm__ ("r12") = plen;
51 register int lc __asm__ ("r11") = c;
52
53 /* Most apps use memset sanely. Only those memsetting about 3..4
54 bytes or less get penalized compared to the generic implementation
55 - and that's not really sane use. */
56
57 /* Ugh. This is fragile at best. Check with newer GCC releases, if
58 they compile cascaded "x |= x << 8" sanely! */
59 __asm__("movu.b %0,$r13 \n\
60 lslq 8,$r13 \n\
61 move.b %0,$r13 \n\
62 move.d $r13,%0 \n\
63 lslq 16,$r13 \n\
64 or.d $r13,%0"
65 : "=r" (lc) : "0" (lc) : "r13");
66
67 {
68 register char *dst __asm__ ("r13") = pdst;
69
70 /* This is NONPORTABLE, but since this whole routine is */
71 /* grossly nonportable that doesn't matter. */
72
73 if (((unsigned long) pdst & 3) != 0
74 /* Oops! n=0 must be a legal call, regardless of alignment. */
75 && n >= 3)
76 {
77 if ((unsigned long)dst & 1)
78 {
79 *dst = (char) lc;
80 n--;
81 dst++;
82 }
83
84 if ((unsigned long)dst & 2)
85 {
86 *(short *)dst = lc;
87 n -= 2;
88 dst += 2;
89 }
90 }
91
92 /* Now the fun part. For the threshold value of this, check the equation
93 above. */
94 /* Decide which copying method to use. */
95 if (n >= ZERO_BLOCK_SIZE)
96 {
97 /* For large copies we use 'movem' */
98
99 /* It is not optimal to tell the compiler about clobbering any
100 registers; that will move the saving/restoring of those registers
101 to the function prologue/epilogue, and make non-movem sizes
102 suboptimal.
103
104 This method is not foolproof; it assumes that the "asm reg"
105 declarations at the beginning of the function really are used
106 here (beware: they may be moved to temporary registers).
107 This way, we do not have to save/move the registers around into
108 temporaries; we can safely use them straight away.
109
110 If you want to check that the allocation was right; then
111 check the equalities in the first comment. It should say
112 "r13=r13, r12=r12, r11=r11" */
113 __asm__ volatile (" \n\
114 ;; Check that the register asm declaration got right. \n\
115 ;; The GCC manual says it will work, but there *has* been bugs. \n\
116 .ifnc %0-%1-%4,$r13-$r12-$r11 \n\
117 .err \n\
118 .endif \n\
119 \n\
120 ;; Save the registers we'll clobber in the movem process \n\
121 ;; on the stack. Don't mention them to gcc, it will only be \n\
122 ;; upset. \n\
123 subq 11*4,$sp \n\
124 movem $r10,[$sp] \n\
125 \n\
126 move.d $r11,$r0 \n\
127 move.d $r11,$r1 \n\
128 move.d $r11,$r2 \n\
129 move.d $r11,$r3 \n\
130 move.d $r11,$r4 \n\
131 move.d $r11,$r5 \n\
132 move.d $r11,$r6 \n\
133 move.d $r11,$r7 \n\
134 move.d $r11,$r8 \n\
135 move.d $r11,$r9 \n\
136 move.d $r11,$r10 \n\
137 \n\
138 ;; Now we've got this: \n\
139 ;; r13 - dst \n\
140 ;; r12 - n \n\
141 \n\
142 ;; Update n for the first loop \n\
143 subq 12*4,$r12 \n\
1440: \n\
145 subq 12*4,$r12 \n\
146 bge 0b \n\
147 movem $r11,[$r13+] \n\
148 \n\
149 addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
150 \n\
151 ;; Restore registers from stack \n\
152 movem [$sp+],$r10"
153
154 /* Outputs */ : "=r" (dst), "=r" (n)
155 /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
156 }
157
158 /* Either we start copying directly, using dword copying
159 in a loop, or we copy as much as possible with 'movem'
160 and then the last block (<48 bytes) is copied here.
161 This will work since 'movem' will have updated dst and n. */
162
163 while ( n >= 16 )
164 {
165 *((long*)dst)++ = lc;
166 *((long*)dst)++ = lc;
167 *((long*)dst)++ = lc;
168 *((long*)dst)++ = lc;
169 n -= 16;
170 }
171
172 /* A switch() is definitely the fastest although it takes a LOT of code.
173 * Particularly if you inline code this.
174 */
175 switch (n)
176 {
177 case 0:
178 break;
179 case 1:
180 *(char*)dst = (char) lc;
181 break;
182 case 2:
183 *(short*)dst = (short) lc;
184 break;
185 case 3:
186 *((short*)dst)++ = (short) lc;
187 *(char*)dst = (char) lc;
188 break;
189 case 4:
190 *((long*)dst)++ = lc;
191 break;
192 case 5:
193 *((long*)dst)++ = lc;
194 *(char*)dst = (char) lc;
195 break;
196 case 6:
197 *((long*)dst)++ = lc;
198 *(short*)dst = (short) lc;
199 break;
200 case 7:
201 *((long*)dst)++ = lc;
202 *((short*)dst)++ = (short) lc;
203 *(char*)dst = (char) lc;
204 break;
205 case 8:
206 *((long*)dst)++ = lc;
207 *((long*)dst)++ = lc;
208 break;
209 case 9:
210 *((long*)dst)++ = lc;
211 *((long*)dst)++ = lc;
212 *(char*)dst = (char) lc;
213 break;
214 case 10:
215 *((long*)dst)++ = lc;
216 *((long*)dst)++ = lc;
217 *(short*)dst = (short) lc;
218 break;
219 case 11:
220 *((long*)dst)++ = lc;
221 *((long*)dst)++ = lc;
222 *((short*)dst)++ = (short) lc;
223 *(char*)dst = (char) lc;
224 break;
225 case 12:
226 *((long*)dst)++ = lc;
227 *((long*)dst)++ = lc;
228 *((long*)dst)++ = lc;
229 break;
230 case 13:
231 *((long*)dst)++ = lc;
232 *((long*)dst)++ = lc;
233 *((long*)dst)++ = lc;
234 *(char*)dst = (char) lc;
235 break;
236 case 14:
237 *((long*)dst)++ = lc;
238 *((long*)dst)++ = lc;
239 *((long*)dst)++ = lc;
240 *(short*)dst = (short) lc;
241 break;
242 case 15:
243 *((long*)dst)++ = lc;
244 *((long*)dst)++ = lc;
245 *((long*)dst)++ = lc;
246 *((short*)dst)++ = (short) lc;
247 *(char*)dst = (char) lc;
248 break;
249 }
250 }
251
252 return return_dst; /* destination pointer. */
253} /* memset() */
diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S
new file mode 100644
index 000000000000..aba5c751c282
--- /dev/null
+++ b/arch/cris/arch-v32/lib/nand_init.S
@@ -0,0 +1,179 @@
1##=============================================================================
2##
3## nand_init.S
4##
5## The bootrom copies data from the NAND flash to the internal RAM but
6## due to a bug/feature we can only trust the first 256 bytes. So this
7## code copies more data from NAND flash to internal RAM. Obviously this
8## code must fit in the first 256 bytes, so alter with care.
9##
10## Some notes about the bug/feature for future reference:
11## The bootrom copies the first 127 KB from NAND flash to internal
12## memory. The problem is that it does a bytewise copy. NAND flashes
13## autoincrement the address, so for a 16-bit device each
14## read/write increases the address by two. So the copy loop in the
15## bootrom will discard every second byte. This is solved by inserting
16## zeroes in every second byte in the first erase block.
17##
18## The bootrom also incorrectly assumes that it can read the flash
19## linear with only one read command but the flash will actually
20## switch between normal area and spare area if you do that so we
21## can't trust more than the first 256 bytes.
22##
23##=============================================================================
24
25#include <asm/arch/hwregs/asm/reg_map_asm.h>
26#include <asm/arch/hwregs/asm/gio_defs_asm.h>
27#include <asm/arch/hwregs/asm/pinmux_defs_asm.h>
28#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>
29#include <asm/arch/hwregs/asm/config_defs_asm.h>
30#include <linux/config.h>
31
32;; There are 8-bit NAND flashes and 16-bit NAND flashes.
33;; We need to treat them slightly different.
34#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
35#define PAGE_SIZE 256
36#else
37#error 2
38#define PAGE_SIZE 512
39#endif
40#define ERASE_BLOCK 16384
41
42;; GPIO pins connected to NAND flash
43#define CE 4
44#define CLE 5
45#define ALE 6
46#define BY 7
47
48;; Address space for NAND flash
49#define NAND_RD_ADDR 0x90000000
50#define NAND_WR_ADDR 0x94000000
51
52#define READ_CMD 0x00
53
54;; Readability macros
55#define CSP_MASK \
56 REG_MASK(bif_core, rw_grp3_cfg, gated_csp0) | \
57 REG_MASK(bif_core, rw_grp3_cfg, gated_csp1)
58#define CSP_VAL \
59 REG_STATE(bif_core, rw_grp3_cfg, gated_csp0, rd) | \
60 REG_STATE(bif_core, rw_grp3_cfg, gated_csp1, wr)
61
62;;----------------------------------------------------------------------------
63;; Macros to set/clear GPIO bits
64
65.macro SET x
66 or.b (1<<\x),$r9
67 move.d $r9, [$r2]
68.endm
69
70.macro CLR x
71 and.b ~(1<<\x),$r9
72 move.d $r9, [$r2]
73.endm
74
75;;----------------------------------------------------------------------------
76
77nand_boot:
78 ;; Check if nand boot was selected
79 move.d REG_ADDR(config, regi_config, r_bootsel), $r0
80 move.d [$r0], $r0
81 and.d REG_MASK(config, r_bootsel, boot_mode), $r0
82 cmp.d REG_STATE(config, r_bootsel, boot_mode, nand), $r0
83 bne normal_boot ; No NAND boot
84 nop
85
86copy_nand_to_ram:
87 ;; copy_nand_to_ram
88 ;; Arguments
89 ;; r10 - destination
90 ;; r11 - source offset
91 ;; r12 - size
92 ;; r13 - Address to jump to after completion
93 ;; Note : r10-r12 are clobbered on return
94 ;; Registers used:
95 ;; r0 - NAND_RD_ADDR
96 ;; r1 - NAND_WR_ADDR
97 ;; r2 - reg_gio_rw_pa_dout
98 ;; r3 - reg_gio_r_pa_din
99 ;; r4 - tmp
100 ;; r5 - byte counter within a page
101 ;; r6 - reg_pinmux_rw_pa
102 ;; r7 - reg_gio_rw_pa_oe
103 ;; r8 - reg_bif_core_rw_grp3_cfg
104 ;; r9 - reg_gio_rw_pa_dout shadow
105 move.d 0x90000000, $r0
106 move.d 0x94000000, $r1
107 move.d REG_ADDR(gio, regi_gio, rw_pa_dout), $r2
108 move.d REG_ADDR(gio, regi_gio, r_pa_din), $r3
109 move.d REG_ADDR(pinmux, regi_pinmux, rw_pa), $r6
110 move.d REG_ADDR(gio, regi_gio, rw_pa_oe), $r7
111 move.d REG_ADDR(bif_core, regi_bif_core, rw_grp3_cfg), $r8
112
113#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
114 lsrq 1, $r11
115#endif
116 ;; Set up GPIO
117 move.d [$r2], $r9
118 move.d [$r7], $r4
119 or.b (1<<ALE) | (1 << CLE) | (1<<CE), $r4
120 move.d $r4, [$r7]
121
122 ;; Set up bif
123 move.d [$r8], $r4
124 and.d CSP_MASK, $r4
125 or.d CSP_VAL, $r4
126 move.d $r4, [$r8]
127
1281: ;; Copy one page
129 CLR CE
130 SET CLE
131 moveq READ_CMD, $r4
132 move.b $r4, [$r1]
133 moveq 20, $r4
1342: bne 2b
135 subq 1, $r4
136 CLR CLE
137 SET ALE
138 clear.w [$r1] ; Column address = 0
139 move.d $r11, $r4
140 lsrq 8, $r4
141 move.b $r4, [$r1] ; Row address
142 lsrq 8, $r4
143 move.b $r4, [$r1] ; Row address
144 moveq 20, $r4
1452: bne 2b
146 subq 1, $r4
147 CLR ALE
1482: move.d [$r3], $r4
149 and.d 1 << BY, $r4
150 beq 2b
151 movu.w PAGE_SIZE, $r5
1522: ; Copy one byte/word
153#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
154 move.w [$r0], $r4
155#else
156 move.b [$r0], $r4
157#endif
158 subq 1, $r5
159 bne 2b
160#if CONFIG_ETRAX_FLASH_BUSWIDTH==2
161 move.w $r4, [$r10+]
162 subu.w PAGE_SIZE*2, $r12
163#else
164 move.b $r4, [$r10+]
165 subu.w PAGE_SIZE, $r12
166#endif
167 bpl 1b
168 addu.w PAGE_SIZE, $r11
169
170 ;; End of copy
171 jump $r13
172 nop
173
174 ;; This will warn if the code above is too large. If you are considering
175 ;; removing this, you don't understand the bug/feature.
176 .org 256
177 .org ERASE_BLOCK
178
179normal_boot:
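The address phase in the copy loop above writes a zero 16-bit column address followed by two row-address bytes derived from the page offset (which has already been halved for a 16-bit device). A host-side sketch of that byte sequence, useful when cross-checking against a flash datasheet (illustrative only):

#include <stdio.h>

/* Row-address bytes emitted for a given offset, mirroring the
 * "lsrq 8 / move.b" sequence in copy_nand_to_ram above. */
static void nand_row_address(unsigned long offset, unsigned char row[2])
{
	row[0] = (offset >> 8) & 0xff;	/* first row-address byte */
	row[1] = (offset >> 16) & 0xff;	/* second row-address byte */
}

int main(void)
{
	unsigned char row[2];

	nand_row_address(0x4000 >> 1, row);	/* start of erase block 1, 16-bit part */
	printf("column = 0x0000, row bytes = %02x %02x\n", row[0], row[1]);
	return 0;
}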
diff --git a/arch/cris/arch-v32/lib/spinlock.S b/arch/cris/arch-v32/lib/spinlock.S
new file mode 100644
index 000000000000..2437ae7f6ed2
--- /dev/null
+++ b/arch/cris/arch-v32/lib/spinlock.S
@@ -0,0 +1,33 @@
1;; Core of the spinlock implementation
2;;
3;; Copyright (C) 2004 Axis Communications AB.
4;;
5;; Author: Mikael Starvik
6
7
8 .global cris_spin_lock
9 .global cris_spin_trylock
10
11 .text
12
13cris_spin_lock:
14 clearf p
151: test.d [$r10]
16 beq 1b
17 clearf p
18 ax
19 clear.d [$r10]
20 bcs 1b
21 clearf p
22 ret
23 nop
24
25cris_spin_trylock:
26 clearf p
271: move.d [$r10], $r11
28 ax
29 clear.d [$r10]
30 bcs 1b
31 clearf p
32 ret
33 move.d $r11,$r10
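In C terms, the two routines above implement a test-and-clear lock in which a nonzero word means "free": cris_spin_lock spins until the word is nonzero and then clears it, and cris_spin_trylock returns the old value (nonzero on success). A non-atomic pseudo-C model of that behaviour (illustrative only; the real atomicity comes from the ax/clear.d sequence and the retry on interruption):

/* Pseudo-C model of the assembly above. NOT actually atomic. */
static void model_spin_lock(volatile int *lock)
{
	while (*lock == 0)
		;		/* busy-wait while held (0 = taken, nonzero = free) */
	*lock = 0;		/* the real code clears this atomically, retrying if interrupted */
}

static int model_spin_trylock(volatile int *lock)
{
	int old = *lock;	/* nonzero means the lock was free */

	*lock = 0;		/* claim it (atomically in the real code) */
	return old;
}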
diff --git a/arch/cris/arch-v32/lib/string.c b/arch/cris/arch-v32/lib/string.c
new file mode 100644
index 000000000000..98e282ac824a
--- /dev/null
+++ b/arch/cris/arch-v32/lib/string.c
@@ -0,0 +1,219 @@
1/*#************************************************************************#*/
2/*#-------------------------------------------------------------------------*/
3/*# */
4/*# FUNCTION NAME: memcpy() */
5/*# */
6/*# PARAMETERS: void* dst; Destination address. */
7/*# void* src; Source address. */
8/*# int len; Number of bytes to copy. */
9/*# */
10/*# RETURNS: dst. */
11/*# */
12/*# DESCRIPTION: Copies len bytes of memory from src to dst. No guarantees */
13/*# about copying of overlapping memory areas. This routine is */
14/*# very sensitive to compiler changes in register allocation. */
15/*# Should really be rewritten to avoid this problem. */
16/*# */
17/*#-------------------------------------------------------------------------*/
18/*# */
19/*# HISTORY */
20/*# */
21/*# DATE NAME CHANGES */
22/*# ---- ---- ------- */
23/*# 941007 Kenny R Creation */
24/*# 941011 Kenny R Lots of optimizations and inlining. */
25/*# 941129 Ulf A Adapted for use in libc. */
26/*# 950216 HP N==0 forgotten if non-aligned src/dst. */
27/*# Added some optimizations. */
28/*# 001025 HP Make src and dst char *. Align dst to */
29/*# dword, not just word-if-both-src-and-dst- */
30/*# are-misaligned. */
31/*# */
32/*#-------------------------------------------------------------------------*/
33
34#include <linux/types.h>
35
36void *memcpy(void *pdst,
37 const void *psrc,
38 size_t pn)
39{
40 /* Ok. Now we want the parameters put in special registers.
41 Make sure the compiler is able to make something useful of this.
42 As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
43
44 If gcc were all right, it really would need no temporaries, and no
45 stack space to save stuff on. */
46
47 register void *return_dst __asm__ ("r10") = pdst;
48 register char *dst __asm__ ("r13") = pdst;
49 register const char *src __asm__ ("r11") = psrc;
50 register int n __asm__ ("r12") = pn;
51
52
53 /* When src is aligned but not dst, this makes a few extra needless
54 cycles. I believe it would take as many to check that the
55 re-alignment was unnecessary. */
56 if (((unsigned long) dst & 3) != 0
57 /* Don't align if we wouldn't copy more than a few bytes; so we
58 don't have to check further for overflows. */
59 && n >= 3)
60 {
61 if ((unsigned long) dst & 1)
62 {
63 n--;
64 *(char*)dst = *(char*)src;
65 src++;
66 dst++;
67 }
68
69 if ((unsigned long) dst & 2)
70 {
71 n -= 2;
72 *(short*)dst = *(short*)src;
73 src += 2;
74 dst += 2;
75 }
76 }
77
78 /* Decide which copying method to use. Movem is dirt cheap, so the
79 overhead is low enough to always use the minimum block size as the
80 threshold. */
81 if (n >= 44)
82 {
83 /* For large copies we use 'movem' */
84
85 /* It is not optimal to tell the compiler about clobbering any
86 registers; that will move the saving/restoring of those registers
87 to the function prologue/epilogue, and make non-movem sizes
88 suboptimal. */
89 __asm__ volatile (" \n\
90 ;; Check that the register asm declaration got right. \n\
91 ;; The GCC manual explicitly says TRT will happen. \n\
92 .ifnc %0-%1-%2,$r13-$r11-$r12 \n\
93 .err \n\
94 .endif \n\
95 \n\
96 ;; Save the registers we'll use in the movem process \n\
97 \n\
98 ;; on the stack. \n\
99 subq 11*4,$sp \n\
100 movem $r10,[$sp] \n\
101 \n\
102 ;; Now we've got this: \n\
103 ;; r11 - src \n\
104 ;; r13 - dst \n\
105 ;; r12 - n \n\
106 \n\
107 ;; Update n for the first loop \n\
108 subq 44,$r12 \n\
1090: \n\
110 movem [$r11+],$r10 \n\
111 subq 44,$r12 \n\
112 bge 0b \n\
113 movem $r10,[$r13+] \n\
114 \n\
115 addq 44,$r12 ;; compensate for last loop underflowing n \n\
116 \n\
117 ;; Restore registers from stack \n\
118 movem [$sp+],$r10"
119
120 /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n)
121 /* Inputs */ : "0" (dst), "1" (src), "2" (n));
122
123 }
124
125 /* Either we start copying directly, using dword copying
126 in a loop, or we copy as much as possible with 'movem'
127 and then the last block (<44 bytes) is copied here.
128 This will work since 'movem' will have updated src,dst,n. */
129
130 while ( n >= 16 )
131 {
132 *((long*)dst)++ = *((long*)src)++;
133 *((long*)dst)++ = *((long*)src)++;
134 *((long*)dst)++ = *((long*)src)++;
135 *((long*)dst)++ = *((long*)src)++;
136 n -= 16;
137 }
138
139 /* A switch() is definitely the fastest although it takes a LOT of code.
140 * Particularly if you inline this code.
141 */
142 switch (n)
143 {
144 case 0:
145 break;
146 case 1:
147 *(char*)dst = *(char*)src;
148 break;
149 case 2:
150 *(short*)dst = *(short*)src;
151 break;
152 case 3:
153 *((short*)dst)++ = *((short*)src)++;
154 *(char*)dst = *(char*)src;
155 break;
156 case 4:
157 *((long*)dst)++ = *((long*)src)++;
158 break;
159 case 5:
160 *((long*)dst)++ = *((long*)src)++;
161 *(char*)dst = *(char*)src;
162 break;
163 case 6:
164 *((long*)dst)++ = *((long*)src)++;
165 *(short*)dst = *(short*)src;
166 break;
167 case 7:
168 *((long*)dst)++ = *((long*)src)++;
169 *((short*)dst)++ = *((short*)src)++;
170 *(char*)dst = *(char*)src;
171 break;
172 case 8:
173 *((long*)dst)++ = *((long*)src)++;
174 *((long*)dst)++ = *((long*)src)++;
175 break;
176 case 9:
177 *((long*)dst)++ = *((long*)src)++;
178 *((long*)dst)++ = *((long*)src)++;
179 *(char*)dst = *(char*)src;
180 break;
181 case 10:
182 *((long*)dst)++ = *((long*)src)++;
183 *((long*)dst)++ = *((long*)src)++;
184 *(short*)dst = *(short*)src;
185 break;
186 case 11:
187 *((long*)dst)++ = *((long*)src)++;
188 *((long*)dst)++ = *((long*)src)++;
189 *((short*)dst)++ = *((short*)src)++;
190 *(char*)dst = *(char*)src;
191 break;
192 case 12:
193 *((long*)dst)++ = *((long*)src)++;
194 *((long*)dst)++ = *((long*)src)++;
195 *((long*)dst)++ = *((long*)src)++;
196 break;
197 case 13:
198 *((long*)dst)++ = *((long*)src)++;
199 *((long*)dst)++ = *((long*)src)++;
200 *((long*)dst)++ = *((long*)src)++;
201 *(char*)dst = *(char*)src;
202 break;
203 case 14:
204 *((long*)dst)++ = *((long*)src)++;
205 *((long*)dst)++ = *((long*)src)++;
206 *((long*)dst)++ = *((long*)src)++;
207 *(short*)dst = *(short*)src;
208 break;
209 case 15:
210 *((long*)dst)++ = *((long*)src)++;
211 *((long*)dst)++ = *((long*)src)++;
212 *((long*)dst)++ = *((long*)src)++;
213 *((short*)dst)++ = *((short*)src)++;
214 *(char*)dst = *(char*)src;
215 break;
216 }
217
218 return return_dst; /* destination pointer. */
219} /* memcpy() */
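
Editor's note: the routine above proceeds in three phases that the comments describe but the asm obscures. The sketch below is an illustrative reimplementation in portable C (not part of this file, and deliberately without the 'movem' block or the unrolled tail switch) just to make the phase structure visible: align dst to a dword boundary, copy large blocks, then finish the 0..15 byte tail.

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch only: same phases as the CRIS memcpy above, minus the
 * movem block and the unrolled tail switch. */
static void *sketch_memcpy(void *pdst, const void *psrc, size_t n)
{
	unsigned char *dst = pdst;
	const unsigned char *src = psrc;

	/* Phase 1: align dst to a 4-byte boundary (only worth doing if n >= 3). */
	if (((uintptr_t)dst & 3) != 0 && n >= 3) {
		if ((uintptr_t)dst & 1) {
			*dst++ = *src++;
			n--;
		}
		if ((uintptr_t)dst & 2) {
			*dst++ = *src++;
			*dst++ = *src++;
			n -= 2;
		}
	}

	/* Phase 2: large blocks; the asm uses 44-byte movem chunks, this
	 * portable fallback just moves 16 bytes per iteration. */
	while (n >= 16) {
		for (int i = 0; i < 16; i++)
			dst[i] = src[i];
		dst += 16;
		src += 16;
		n -= 16;
	}

	/* Phase 3: 0..15 remaining bytes (the switch above unrolls these). */
	while (n--)
		*dst++ = *src++;

	return pdst;
}
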
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
new file mode 100644
index 000000000000..f0b08460c1be
--- /dev/null
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -0,0 +1,470 @@
1/*
2 * User address space access functions.
3 * The non-inlined parts of asm-cris/uaccess.h are here.
4 *
5 * Copyright (C) 2000, 2003 Axis Communications AB.
6 *
7 * Written by Hans-Peter Nilsson.
8 * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
9 */
10
11#include <asm/uaccess.h>
12
13/* Asm:s have been tweaked (within the domain of correctness) to give
14 satisfactory results for "gcc version 3.2.1 Axis release R53/1.53-v32".
15
16 Check regularly...
17
18 Note that for CRISv32, the PC saved at a bus-fault is the address
19 *at* the faulting instruction, with a special case for instructions
20 in delay slots: then it's the address of the branch. Note also that
21 in contrast to v10, a postincrement in the instruction is *not*
22 performed at a bus-fault; the register is seen having the original
23 value in fault handlers. */
24
25
26/* Copy to userspace. This is based on the memcpy used for
27 kernel-to-kernel copying; see "string.c". */
28
29unsigned long
30__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
31{
32 /* We want the parameters put in special registers.
33 Make sure the compiler is able to make something useful of this.
34 As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
35
36 FIXME: Comment for old gcc version. Check.
37 If gcc was all right, it really would need no temporaries, and no
38 stack space to save stuff on. */
39
40 register char *dst __asm__ ("r13") = pdst;
41 register const char *src __asm__ ("r11") = psrc;
42 register int n __asm__ ("r12") = pn;
43 register int retn __asm__ ("r10") = 0;
44
45
46 /* When src is aligned but not dst, this costs a few extra needless
47 cycles. I believe it would take as many to check that the
48 re-alignment was unnecessary. */
49 if (((unsigned long) dst & 3) != 0
50 /* Don't align if we wouldn't copy more than a few bytes; so we
51 don't have to check further for overflows. */
52 && n >= 3)
53 {
54 if ((unsigned long) dst & 1)
55 {
56 __asm_copy_to_user_1 (dst, src, retn);
57 n--;
58 }
59
60 if ((unsigned long) dst & 2)
61 {
62 __asm_copy_to_user_2 (dst, src, retn);
63 n -= 2;
64 }
65 }
66
67 /* Movem is dirt cheap. The overhead is low enough to always use the
68 minimum possible block size as the threshold. */
69 if (n >= 44)
70 {
71 /* For large copies we use 'movem'. */
72
73 /* It is not optimal to tell the compiler about clobbering any
74 registers; that will move the saving/restoring of those registers
75 to the function prologue/epilogue, and make non-movem sizes
76 suboptimal. */
77 __asm__ volatile ("\
78 ;; Check that the register asm declaration got right. \n\
79 ;; The GCC manual explicitly says TRT will happen. \n\
80 .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
81 .err \n\
82 .endif \n\
83 \n\
84 ;; Save the registers we'll use in the movem process \n\
85 ;; on the stack. \n\
86 subq 11*4,$sp \n\
87 movem $r10,[$sp] \n\
88 \n\
89 ;; Now we've got this: \n\
90 ;; r11 - src \n\
91 ;; r13 - dst \n\
92 ;; r12 - n \n\
93 \n\
94 ;; Update n for the first loop \n\
95 subq 44,$r12 \n\
960: \n\
97 movem [$r11+],$r10 \n\
98 subq 44,$r12 \n\
991: bge 0b \n\
100 movem $r10,[$r13+] \n\
1013: \n\
102 addq 44,$r12 ;; compensate for last loop underflowing n \n\
103 \n\
104 ;; Restore registers from stack \n\
105 movem [$sp+],$r10 \n\
1062: \n\
107 .section .fixup,\"ax\" \n\
1084: \n\
109; When failing on any of the 1..44 bytes in a chunk, we adjust back the \n\
110; source pointer and just drop through to the by-16 and by-4 loops to \n\
111; get the correct number of failing bytes. This necessarily means a \n\
112; few extra exceptions, but invalid user pointers shouldn't happen in \n\
113; time-critical code anyway. \n\
114 jump 3b \n\
115 subq 44,$r11 \n\
116 \n\
117 .previous \n\
118 .section __ex_table,\"a\" \n\
119 .dword 1b,4b \n\
120 .previous"
121
122 /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
123 /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
124
125 }
126
127 while (n >= 16)
128 {
129 __asm_copy_to_user_16 (dst, src, retn);
130 n -= 16;
131 }
132
133 /* Having separate by-four loops cuts down on cache footprint.
134 FIXME: Test with and without; increase the switch to cover 0..15. */
135 while (n >= 4)
136 {
137 __asm_copy_to_user_4 (dst, src, retn);
138 n -= 4;
139 }
140
141 switch (n)
142 {
143 case 0:
144 break;
145 case 1:
146 __asm_copy_to_user_1 (dst, src, retn);
147 break;
148 case 2:
149 __asm_copy_to_user_2 (dst, src, retn);
150 break;
151 case 3:
152 __asm_copy_to_user_3 (dst, src, retn);
153 break;
154 }
155
156 return retn;
157}
158
159/* Copy from user to kernel, zeroing the bytes that were inaccessible in
160 userland. The return-value is the number of bytes that were
161 inaccessible. */
162
163unsigned long
164__copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn)
165{
166 /* We want the parameters put in special registers.
167 Make sure the compiler is able to make something useful of this.
168 As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
169
170 FIXME: Comment for old gcc version. Check.
171 If gcc was all right, it really would need no temporaries, and no
172 stack space to save stuff on. */
173
174 register char *dst __asm__ ("r13") = pdst;
175 register const char *src __asm__ ("r11") = psrc;
176 register int n __asm__ ("r12") = pn;
177 register int retn __asm__ ("r10") = 0;
178
179 /* The best reason to align src is that we then know that a read-fault
180 was for aligned bytes; there are no 1..3 remaining good bytes to
181 pickle. */
182 if (((unsigned long) src & 3) != 0)
183 {
184 if (((unsigned long) src & 1) && n != 0)
185 {
186 __asm_copy_from_user_1 (dst, src, retn);
187 n--;
188 }
189
190 if (((unsigned long) src & 2) && n >= 2)
191 {
192 __asm_copy_from_user_2 (dst, src, retn);
193 n -= 2;
194 }
195
196 /* We only need one check after the unalignment-adjustments, because
197 if both adjustments were done, either both or neither reference
198 had an exception. */
199 if (retn != 0)
200 goto copy_exception_bytes;
201 }
202
203 /* Movem is dirt cheap. The overhead is low enough to always use the
204 minimum possible block size as the threshold. */
205 if (n >= 44)
206 {
207 /* It is not optimal to tell the compiler about clobbering any
208 registers; that will move the saving/restoring of those registers
209 to the function prologue/epilogue, and make non-movem sizes
210 suboptimal. */
211 __asm__ volatile ("\
212 .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
213 .err \n\
214 .endif \n\
215 \n\
216 ;; Save the registers we'll use in the movem process \n\
217 ;; on the stack. \n\
218 subq 11*4,$sp \n\
219 movem $r10,[$sp] \n\
220 \n\
221 ;; Now we've got this: \n\
222 ;; r11 - src \n\
223 ;; r13 - dst \n\
224 ;; r12 - n \n\
225 \n\
226 ;; Update n for the first loop \n\
227 subq 44,$r12 \n\
2280: \n\
229 movem [$r11+],$r10 \n\
230 \n\
231 subq 44,$r12 \n\
232 bge 0b \n\
233 movem $r10,[$r13+] \n\
234 \n\
2354: \n\
236 addq 44,$r12 ;; compensate for last loop underflowing n \n\
237 \n\
238 ;; Restore registers from stack \n\
239 movem [$sp+],$r10 \n\
240 .section .fixup,\"ax\" \n\
241 \n\
242;; Do not jump back into the loop if we fail. For some uses, we get a \n\
243;; page fault somewhere on the line. Without checking for page limits, \n\
244;; we don't know where, but we need to copy accurately and keep an \n\
245;; accurate count; not just clear the whole line. To do that, we fall \n\
246;; down in the code below, proceeding with smaller amounts. It should \n\
247;; be kept in mind that we have to cater to code like what at one time \n\
248;; was in fs/super.c: \n\
249;; i = size - copy_from_user((void *)page, data, size); \n\
250;; which would cause repeated faults while clearing the remainder of \n\
251;; the SIZE bytes at PAGE after the first fault. \n\
252;; A caveat here is that we must not fall through from a failing page \n\
253;; to a valid page. \n\
254 \n\
2553: \n\
256 jump 4b ;; Fall through, pretending the fault didn't happen. \n\
257 nop \n\
258 \n\
259 .previous \n\
260 .section __ex_table,\"a\" \n\
261 .dword 0b,3b \n\
262 .previous"
263
264 /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
265 /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
266 }
267
268 /* Either we directly start copying here, using dword copying in a loop,
269 or we copy as much as possible with 'movem' and then the last block
270 (<44 bytes) is copied here. This will work since 'movem' will have
271 updated src, dst and n. (Except with failing src.)
272
273 Since we want to keep src accurate, we can't use
274 __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and
275 retn, but not src (by design; its value is ignored elsewhere). */
276
277 while (n >= 4)
278 {
279 __asm_copy_from_user_4 (dst, src, retn);
280 n -= 4;
281
282 if (retn)
283 goto copy_exception_bytes;
284 }
285
286 /* If we get here, there were no memory read faults. */
287 switch (n)
288 {
289 /* These copies are at least "naturally aligned" (so we don't have
290 to check each byte), due to the src alignment code before the
291 movem loop. The *_3 case *will* get the correct count for retn. */
292 case 0:
293 /* This case deliberately left in (if you have doubts check the
294 generated assembly code). */
295 break;
296 case 1:
297 __asm_copy_from_user_1 (dst, src, retn);
298 break;
299 case 2:
300 __asm_copy_from_user_2 (dst, src, retn);
301 break;
302 case 3:
303 __asm_copy_from_user_3 (dst, src, retn);
304 break;
305 }
306
307 /* If we get here, retn correctly reflects the number of failing
308 bytes. */
309 return retn;
310
311copy_exception_bytes:
312 /* We already have "retn" bytes cleared, and need to clear the
313 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
314 memset is preferred here, since this isn't speed-critical code and
315 we'd rather have this a leaf-function than calling memset. */
316 {
317 char *endp;
318 for (endp = dst + n; dst < endp; dst++)
319 *dst = 0;
320 }
321
322 return retn + n;
323}
324
325/* Zero userspace. */
326
327unsigned long
328__do_clear_user (void __user *pto, unsigned long pn)
329{
330 /* We want the parameters put in special registers.
331 Make sure the compiler is able to make something useful of this.
332 As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
333
334 FIXME: Comment for old gcc version. Check.
335 If gcc was all right, it really would need no temporaries, and no
336 stack space to save stuff on. */
337
338 register char *dst __asm__ ("r13") = pto;
339 register int n __asm__ ("r12") = pn;
340 register int retn __asm__ ("r10") = 0;
341
342
343 if (((unsigned long) dst & 3) != 0
344 /* Don't align if we wouldn't copy more than a few bytes. */
345 && n >= 3)
346 {
347 if ((unsigned long) dst & 1)
348 {
349 __asm_clear_1 (dst, retn);
350 n--;
351 }
352
353 if ((unsigned long) dst & 2)
354 {
355 __asm_clear_2 (dst, retn);
356 n -= 2;
357 }
358 }
359
360 /* Decide which clearing method to use.
361 FIXME: This number is from the "ordinary" kernel memset. */
362 if (n >= 48)
363 {
364 /* For large clears we use 'movem' */
365
366 /* It is not optimal to tell the compiler about clobbering any
367 call-saved registers; that will move the saving/restoring of
368 those registers to the function prologue/epilogue, and make
369 non-movem sizes suboptimal.
370
371 This method is not foolproof; it assumes that the "asm reg"
372 declarations at the beginning of the function really are used
373 here (beware: they may be moved to temporary registers).
374 This way, we do not have to save/move the registers around into
375 temporaries; we can safely use them straight away.
376
377 If you want to check that the allocation was right, then
378 check the equalities in the first comment. It should say
379 something like "r13=r13, r11=r11, r12=r12". */
380 __asm__ volatile ("\
381 .ifnc %0%1%2,$r13$r12$r10 \n\
382 .err \n\
383 .endif \n\
384 \n\
385 ;; Save the registers we'll clobber in the movem process \n\
386 ;; on the stack. Don't mention them to gcc, it will only be \n\
387 ;; upset. \n\
388 subq 11*4,$sp \n\
389 movem $r10,[$sp] \n\
390 \n\
391 clear.d $r0 \n\
392 clear.d $r1 \n\
393 clear.d $r2 \n\
394 clear.d $r3 \n\
395 clear.d $r4 \n\
396 clear.d $r5 \n\
397 clear.d $r6 \n\
398 clear.d $r7 \n\
399 clear.d $r8 \n\
400 clear.d $r9 \n\
401 clear.d $r10 \n\
402 clear.d $r11 \n\
403 \n\
404 ;; Now we've got this: \n\
405 ;; r13 - dst \n\
406 ;; r12 - n \n\
407 \n\
408 ;; Update n for the first loop \n\
409 subq 12*4,$r12 \n\
4100: \n\
411 subq 12*4,$r12 \n\
4121: \n\
413 bge 0b \n\
414 movem $r11,[$r13+] \n\
415 \n\
416 addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
417 \n\
418 ;; Restore registers from stack \n\
419 movem [$sp+],$r10 \n\
4202: \n\
421 .section .fixup,\"ax\" \n\
4223: \n\
423 movem [$sp],$r10 \n\
424 addq 12*4,$r10 \n\
425 addq 12*4,$r13 \n\
426 movem $r10,[$sp] \n\
427 jump 0b \n\
428 clear.d $r10 \n\
429 \n\
430 .previous \n\
431 .section __ex_table,\"a\" \n\
432 .dword 1b,3b \n\
433 .previous"
434
435 /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
436 /* Inputs */ : "0" (dst), "1" (n), "2" (retn)
437 /* Clobber */ : "r11");
438 }
439
440 while (n >= 16)
441 {
442 __asm_clear_16 (dst, retn);
443 n -= 16;
444 }
445
446 /* Having separate by-four loops cuts down on cache footprint.
447 FIXME: Test with and without; increase the switch to cover 0..15. */
448 while (n >= 4)
449 {
450 __asm_clear_4 (dst, retn);
451 n -= 4;
452 }
453
454 switch (n)
455 {
456 case 0:
457 break;
458 case 1:
459 __asm_clear_1 (dst, retn);
460 break;
461 case 2:
462 __asm_clear_2 (dst, retn);
463 break;
464 case 3:
465 __asm_clear_3 (dst, retn);
466 break;
467 }
468
469 return retn;
470}
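
Editor's note: all three routines above return the number of bytes that could not be accessed, never an error code, and __copy_user_zeroing additionally clears the unreadable tail. Below is a hypothetical caller sketch (the function name and error handling are invented for illustration) of the fs/super.c-style pattern quoted in the asm comment: the amount of valid data is the requested size minus the routine's return value.

#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical example: copy_from_user() (which on this port ends up in
 * __copy_user_zeroing above) returns how many bytes it could NOT copy, so
 * the valid amount in 'page' is size minus that, and the inaccessible tail
 * has already been zeroed. */
static ssize_t example_read_user(void *page, const void __user *data, size_t size)
{
	size_t not_copied = copy_from_user(page, data, size);

	if (not_copied == size)
		return -EFAULT;		/* nothing at all was accessible */

	return size - not_copied;	/* number of bytes actually copied */
}
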
diff --git a/arch/cris/arch-v32/mm/Makefile b/arch/cris/arch-v32/mm/Makefile
new file mode 100644
index 000000000000..9146f88484b1
--- /dev/null
+++ b/arch/cris/arch-v32/mm/Makefile
@@ -0,0 +1,3 @@
1# Makefile for the Linux/cris parts of the memory manager.
2
3obj-y := mmu.o init.o tlb.o intmem.o
diff --git a/arch/cris/arch-v32/mm/init.c b/arch/cris/arch-v32/mm/init.c
new file mode 100644
index 000000000000..f2fba27d822c
--- /dev/null
+++ b/arch/cris/arch-v32/mm/init.c
@@ -0,0 +1,174 @@
1/*
2 * Set up paging and the MMU.
3 *
4 * Copyright (C) 2000-2003, Axis Communications AB.
5 *
6 * Authors: Bjorn Wesen <bjornw@axis.com>
7 * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
8 */
9#include <linux/config.h>
10#include <linux/mmzone.h>
11#include <linux/init.h>
12#include <linux/bootmem.h>
13#include <linux/mm.h>
14#include <linux/config.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/types.h>
18#include <asm/mmu.h>
19#include <asm/io.h>
20#include <asm/mmu_context.h>
21#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
22#include <asm/arch/hwregs/supp_reg.h>
23
24extern void tlb_init(void);
25
26/*
27 * The kernel is already mapped with a linear mapping at kseg_c, so there's no
28 * need to map it with a page table. However, head.S also temporarily mapped it
29 * at kseg_4, so the ksegs are set up again here. Also clear the TLB and do various
30 * other paging stuff.
31 */
32void __init
33cris_mmu_init(void)
34{
35 unsigned long mmu_config;
36 unsigned long mmu_kbase_hi;
37 unsigned long mmu_kbase_lo;
38 unsigned short mmu_page_id;
39
40 /*
41 * Make sure the current pgd table points to something sane, even if it
42 * is most probably not used until the next switch_mm.
43 */
44 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
45
46#ifdef CONFIG_SMP
47 {
48 pgd_t **pgd;
49 pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
50 SUPP_BANK_SEL(1);
51 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
52 SUPP_BANK_SEL(2);
53 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
54 }
55#endif
56
57 /* Initialise the TLB. Function found in tlb.c. */
58 tlb_init();
59
60 /* Enable exceptions and initialize the kernel segments. */
61 mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
62 REG_STATE(mmu, rw_mm_cfg, acc, on) |
63 REG_STATE(mmu, rw_mm_cfg, ex, on) |
64 REG_STATE(mmu, rw_mm_cfg, inv, on) |
65 REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
66 REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
67 REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
68 REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
69 REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
70#ifndef CONFIG_ETRAXFS_SIM
71 REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
72#else
73 REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
74#endif
75 REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
76 REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
77 REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
78 REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
79 REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
80 REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
81 REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
82 REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
83 REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
84 REG_STATE(mmu, rw_mm_cfg, seg_0, page));
85
86 mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
87 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
88 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
89#ifndef CONFIG_ETRAXFS_SIM
90 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
91#else
92 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
93#endif
94 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
95#ifndef CONFIG_ETRAXFS_SIM
96 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
97#else
98 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
99#endif
100 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
101 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
102
103 mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
104 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
105 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
106 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
107 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
108 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
109 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
110 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));
111
112 mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);
113
114 /* Update the instruction MMU. */
115 SUPP_BANK_SEL(BANK_IM);
116 SUPP_REG_WR(RW_MM_CFG, mmu_config);
117 SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
118 SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
119 SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
120
121 /* Update the data MMU. */
122 SUPP_BANK_SEL(BANK_DM);
123 SUPP_REG_WR(RW_MM_CFG, mmu_config);
124 SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
125 SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
126 SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
127
128 SPEC_REG_WR(SPEC_REG_PID, 0);
129
130 /*
131 * The MMU has been enabled ever since head.S, but just to make it
132 * totally obvious, enable it here as well.
133 */
134 SUPP_BANK_SEL(BANK_GC);
135 SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
136}
137
138void __init
139paging_init(void)
140{
141 int i;
142 unsigned long zones_size[MAX_NR_ZONES];
143
144 printk("Setting up paging and the MMU.\n");
145
146 /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
147 for(i = 0; i < PTRS_PER_PGD; i++)
148 swapper_pg_dir[i] = __pgd(0);
149
150 cris_mmu_init();
151
152 /*
153 * Initialize the bad page table and bad page to point to a couple of
154 * allocated pages.
155 */
156 empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
157 memset((void *) empty_zero_page, 0, PAGE_SIZE);
158
159 /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
160 zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
161
162 for (i = 1; i < MAX_NR_ZONES; i++)
163 zones_size[i] = 0;
164
165 /*
166 * Use free_area_init_node instead of free_area_init, because it is
167 * designed for systems where the DRAM starts at an address
168 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
169 * saves space in the mem_map page array.
170 */
171 free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
172
173 mem_map = contig_page_data.node_mem_map;
174}
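
Editor's note: the kbase_hi/kbase_lo values above give each 256 MB kseg a 4-bit physical base nibble (for example base_c = 0x4 puts kseg_c on top of DRAM at 0x40000000 in the non-simulator case). The sketch below is an editor's illustration, under the assumption that a linearly mapped kseg simply substitutes that nibble for the top four bits of the virtual address.

#include <stdint.h>

/* Editor's sketch, not part of the file.  Assumes a linearly mapped kseg
 * translates an address by replacing its top nibble with the configured
 * 4-bit base from rw_mm_kbase_hi/lo. */
static uint32_t kseg_virt_to_phys(uint32_t vaddr, uint32_t base_nibble)
{
	return (base_nibble << 28) | (vaddr & 0x0fffffffu);
}

/* Example: with base_c = 0x4 as above, 0xc0004000 would map to 0x40004000. */
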
diff --git a/arch/cris/arch-v32/mm/intmem.c b/arch/cris/arch-v32/mm/intmem.c
new file mode 100644
index 000000000000..41ee7f7997fd
--- /dev/null
+++ b/arch/cris/arch-v32/mm/intmem.c
@@ -0,0 +1,139 @@
1/*
2 * Simple allocator for internal RAM in ETRAX FS
3 *
4 * Copyright (c) 2004 Axis Communications AB.
5 */
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <asm/io.h>
10#include <asm/arch/memmap.h>
11
12#define STATUS_FREE 0
13#define STATUS_ALLOCATED 1
14
15struct intmem_allocation {
16 struct list_head entry;
17 unsigned int size;
18 unsigned offset;
19 char status;
20};
21
22
23static struct list_head intmem_allocations;
24static void* intmem_virtual;
25
26static void crisv32_intmem_init(void)
27{
28 static int initiated = 0;
29 if (!initiated) {
30 struct intmem_allocation* alloc =
31 (struct intmem_allocation*)kmalloc(sizeof *alloc, GFP_KERNEL);
32 INIT_LIST_HEAD(&intmem_allocations);
33 intmem_virtual = ioremap(MEM_INTMEM_START, MEM_INTMEM_SIZE);
34 initiated = 1;
35 alloc->size = MEM_INTMEM_SIZE;
36 alloc->offset = 0;
37 alloc->status = STATUS_FREE;
38 list_add_tail(&alloc->entry, &intmem_allocations);
39 }
40}
41
42void* crisv32_intmem_alloc(unsigned size, unsigned align)
43{
44 struct intmem_allocation* allocation;
45 struct intmem_allocation* tmp;
46 void* ret = NULL;
47
48 preempt_disable();
49 crisv32_intmem_init();
50
51 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
52 int alignment = allocation->offset % align;
53 alignment = alignment ? align - alignment : alignment;
54
55 if (allocation->status == STATUS_FREE &&
56 allocation->size >= size + alignment) {
57 if (allocation->size > size + alignment) {
58 struct intmem_allocation* alloc =
59 (struct intmem_allocation*)
60 kmalloc(sizeof *alloc, GFP_ATOMIC);
61 alloc->status = STATUS_FREE;
62 alloc->size = allocation->size - size - alignment;
63 alloc->offset = allocation->offset + size;
64 list_add(&alloc->entry, &allocation->entry);
65
66 if (alignment) {
67 struct intmem_allocation* tmp;
68 tmp = (struct intmem_allocation*)
69 kmalloc(sizeof *tmp, GFP_ATOMIC);
70 tmp->offset = allocation->offset;
71 tmp->size = alignment;
72 tmp->status = STATUS_FREE;
73 allocation->offset += alignment;
74 list_add_tail(&tmp->entry, &allocation->entry);
75 }
76 }
77 allocation->status = STATUS_ALLOCATED;
78 allocation->size = size;
79 ret = (void*)((int)intmem_virtual + allocation->offset);
80 }
81 }
82 preempt_enable();
83 return ret;
84}
85
86void crisv32_intmem_free(void* addr)
87{
88 struct intmem_allocation* allocation;
89 struct intmem_allocation* tmp;
90
91 if (addr == NULL)
92 return;
93
94 preempt_disable();
95 crisv32_intmem_init();
96
97 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
98 if (allocation->offset == (int)(addr - intmem_virtual)) {
99 struct intmem_allocation* prev =
100 list_entry(allocation->entry.prev,
101 struct intmem_allocation, entry);
102 struct intmem_allocation* next =
103 list_entry(allocation->entry.next,
104 struct intmem_allocation, entry);
105
106 allocation->status = STATUS_FREE;
107 /* Join with prev and/or next if also free */
108 if (prev->status == STATUS_FREE) {
109 prev->size += allocation->size;
110 list_del(&allocation->entry);
111 kfree(allocation);
112 allocation = prev;
113 }
114 if (next->status == STATUS_FREE) {
115 allocation->size += next->size;
116 list_del(&next->entry);
117 kfree(next);
118 }
119 preempt_enable();
120 return;
121 }
122 }
123 preempt_enable();
124}
125
126void* crisv32_intmem_phys_to_virt(unsigned long addr)
127{
128 return (void*)(addr - MEM_INTMEM_START+
129 (unsigned long)intmem_virtual);
130}
131
132unsigned long crisv32_intmem_virt_to_phys(void* addr)
133{
134 return (unsigned long)((unsigned long )addr -
135 (unsigned long)intmem_virtual + MEM_INTMEM_START);
136}
137
138
139
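
Editor's note: the allocator above is a simple first-fit allocator over a list of [offset, size] regions of on-chip RAM, and it disables preemption internally around the list walk. A minimal usage sketch follows; the caller is hypothetical and invented purely for illustration, but the declarations match the functions defined above.

/* Declarations as defined in intmem.c above. */
void *crisv32_intmem_alloc(unsigned size, unsigned align);
void crisv32_intmem_free(void *addr);
unsigned long crisv32_intmem_virt_to_phys(void *addr);

/* Hypothetical example: carve out a 256-byte, 32-byte-aligned block of
 * internal RAM for a descriptor area, obtain the physical address a DMA
 * unit would need, then release the block again. */
static void example_intmem_usage(void)
{
	void *desc = crisv32_intmem_alloc(256, 32);
	unsigned long phys;

	if (!desc)
		return;			/* internal RAM exhausted */

	phys = crisv32_intmem_virt_to_phys(desc);
	(void)phys;			/* would be programmed into the hardware */

	crisv32_intmem_free(desc);
}
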
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
new file mode 100644
index 000000000000..27b70e5006af
--- /dev/null
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2003 Axis Communications AB
3 *
4 * Authors: Mikael Starvik (starvik@axis.com)
5 *
6 * Code for the fault low-level handling routines.
7 *
8 */
9
10#include <asm/page.h>
11#include <asm/pgtable.h>
12
13; Save all registers. Must save in the same order as struct pt_regs.
14.macro SAVE_ALL
15 subq 12, $sp
16 move $erp, [$sp]
17 subq 4, $sp
18 move $srp, [$sp]
19 subq 4, $sp
20 move $ccs, [$sp]
21 subq 4, $sp
22 move $spc, [$sp]
23 subq 4, $sp
24 move $mof, [$sp]
25 subq 4, $sp
26 move $srs, [$sp]
27 subq 4, $sp
28 move.d $acr, [$sp]
29 subq 14*4, $sp
30 movem $r13, [$sp]
31 subq 4, $sp
32 move.d $r10, [$sp]
33.endm
34
35; Bus fault handler. Extracts relevant information and calls mm subsystem
36; to handle the fault.
37.macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex
38 .globl \handler
39\handler:
40 SAVE_ALL
41 move \mmu, $srs ; Select MMU support register bank
42 move.d $sp, $r11 ; regs
43 moveq 1, $r12 ; protection fault
44 moveq \we, $r13 ; write exception?
45 orq \ex << 1, $r13 ; execute?
46 move $s3, $r10 ; rw_mm_cause
47 and.d ~8191, $r10 ; Get faulting page start address
48
49 jsr do_page_fault
50 nop
51 ba ret_from_intr
52 nop
53.endm
54
55; Refill handler. Three cases may occur:
56; 1. PMD and PTE exists in mm subsystem but not in TLB
57; 2. PMD exists but not PTE
58; 3. PMD doesn't exist
59; The code below handles case 1 and calls the mm subsystem for case 2 and 3.
60; Do not touch this code without very good reasons and extensive testing.
61; Note that the code is optimized to minimize stalls (makes the code harder
62; to read).
63;
64; Each page is 8 KB. Each PMD holds 8192/4 PTEs (each PTE is 4 bytes) so each
65; PMD holds 16 MB of virtual memory.
66; Bits 0-12 : Offset within a page
67; Bits 13-23 : PTE offset within a PMD
68; Bits 24-31 : PMD offset within the PGD
69
70.macro MMU_REFILL_HANDLER handler, mmu
71 .globl \handler
72\handler:
73 subq 4, $sp
74; (The pipeline stalls for one cycle; $sp used as address in the next cycle.)
75 move $srs, [$sp]
76 subq 4, $sp
77 move \mmu, $srs ; Select MMU support register bank
78 move.d $acr, [$sp]
79 subq 4, $sp
80 move.d $r0, [$sp]
81#ifdef CONFIG_SMP
82 move $s7, $acr ; PGD
83#else
84 move.d per_cpu__current_pgd, $acr ; PGD
85#endif
86 ; Look up PMD in PGD
87 move $s3, $r0 ; rw_mm_cause
88 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
89 move.d [$acr], $acr ; PGD for the current process
90 addi $r0.d, $acr, $acr
91 move $s3, $r0 ; rw_mm_cause
92 move.d [$acr], $acr ; Get PMD
93 beq 1f
94 ; Look up PTE in PMD
95 lsrq PAGE_SHIFT, $r0
96 and.w PAGE_MASK, $acr ; Remove PMD flags
97 and.d 0x7ff, $r0 ; Get PTE index into PMD (bit 13-23)
98 addi $r0.d, $acr, $acr
99 move.d [$acr], $acr ; Get PTE
100 beq 2f
101 move.d [$sp+], $r0 ; Pop r0 in delayslot
102 ; Store in TLB
103 move $acr, $s5
104 ; Return
105 move.d [$sp+], $acr
106 move [$sp], $srs
107 addq 4, $sp
108 rete
109 rfe
1101: ; PMD missing, let the mm subsystem fix it up.
111 move.d [$sp+], $r0 ; Pop r0
1122: ; PTE missing, let the mm subsystem fix it up.
113 move.d [$sp+], $acr
114 move [$sp], $srs
115 addq 4, $sp
116 SAVE_ALL
117 move \mmu, $srs
118 move.d $sp, $r11 ; regs
119 clear.d $r12 ; Not a protection fault
120 move.w PAGE_MASK, $acr
121 move $s3, $r10 ; rw_mm_cause
122 btstq 9, $r10 ; Check if write access
123 smi $r13
124 and.w PAGE_MASK, $r10 ; Get VPN (virtual address)
125 jsr do_page_fault
126 and.w $acr, $r10
127 ; Return
128 ba ret_from_intr
129 nop
130.endm
131
132 ; These are the MMU bus fault and refill handlers.
133
134MMU_REFILL_HANDLER i_mmu_refill, 1
135MMU_BUS_FAULT_HANDLER i_mmu_invalid, 1, 0, 0
136MMU_BUS_FAULT_HANDLER i_mmu_access, 1, 0, 0
137MMU_BUS_FAULT_HANDLER i_mmu_execute, 1, 0, 1
138MMU_REFILL_HANDLER d_mmu_refill, 2
139MMU_BUS_FAULT_HANDLER d_mmu_invalid, 2, 0, 0
140MMU_BUS_FAULT_HANDLER d_mmu_access, 2, 0, 0
141MMU_BUS_FAULT_HANDLER d_mmu_write, 2, 1, 0
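
Editor's note: the refill handler relies on the address split documented in the comment above (8 KB pages, 2048 PTEs per PMD, 256 PMDs per PGD). The sketch below redoes the same index arithmetic in C, mirroring the lsrq/and sequence in MMU_REFILL_HANDLER; it is illustrative only and not part of the file.

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 13	/* 8 KB pages: bits 0-12 are the page offset */

/* Split a faulting virtual address the way MMU_REFILL_HANDLER does:
 * bits 24-31 select the PMD within the PGD, bits 13-23 select the PTE
 * within the PMD (each PMD therefore covers 2048 * 8 KB = 16 MB). */
static void split_vaddr(uint32_t vaddr, uint32_t *pmd_idx,
			uint32_t *pte_idx, uint32_t *offset)
{
	*pmd_idx = vaddr >> 24;					/* 0..255  */
	*pte_idx = (vaddr >> SKETCH_PAGE_SHIFT) & 0x7ff;	/* 0..2047 */
	*offset  = vaddr & ((1u << SKETCH_PAGE_SHIFT) - 1);	/* 0..8191 */
}
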
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
new file mode 100644
index 000000000000..8233406798d3
--- /dev/null
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -0,0 +1,208 @@
1/*
2 * Low level TLB handling.
3 *
4 * Copyright (C) 2000-2003, Axis Communications AB.
5 *
6 * Authors: Bjorn Wesen <bjornw@axis.com>
7 * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
8 */
9
10#include <asm/tlb.h>
11#include <asm/mmu_context.h>
12#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
13#include <asm/arch/hwregs/supp_reg.h>
14
15#define UPDATE_TLB_SEL_IDX(val) \
16do { \
17 unsigned long tlb_sel; \
18 \
19 tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
20 SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
21} while(0)
22
23#define UPDATE_TLB_HILO(tlb_hi, tlb_lo) \
24do { \
25 SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi); \
26 SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo); \
27} while(0)
28
29/*
30 * The TLB can host up to 256 different mm contexts at the same time. The running
31 * context is found in the PID register. Each TLB entry contains a page_id that
32 * has to match the PID register to give a hit. page_id_map keeps track of which
33 * mm is assigned to which page_id, making sure it's known when to
34 * invalidate TLB entries.
35 *
36 * The last page_id is never running, it is used as an invalid page_id so that
37 * it's possible to make TLB entries that will never match.
38 *
39 * Note: the flushes need to be atomic, otherwise an interrupt handler that uses
40 * vmalloc'ed memory might cause a TLB load in the middle of a flush.
41 */
42
43/* Flush all TLB entries. */
44void
45__flush_tlb_all(void)
46{
47 int i;
48 int mmu;
49 unsigned long flags;
50 unsigned long mmu_tlb_hi;
51 unsigned long mmu_tlb_sel;
52
53 /*
54 * Mask with 0xf so similar TLB entries aren't written in the same 4-way
55 * entry group.
56 */
57 local_save_flags(flags);
58 local_irq_disable();
59
60 for (mmu = 1; mmu <= 2; mmu++) {
61 SUPP_BANK_SEL(mmu); /* Select the MMU */
62 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
63 /* Store invalid entry */
64 mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);
65
66 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
67 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));
68
69 SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
70 SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
71 SUPP_REG_WR(RW_MM_TLB_LO, 0);
72 }
73 }
74
75 local_irq_restore(flags);
76}
77
78/* Flush an entire user address space. */
79void
80__flush_tlb_mm(struct mm_struct *mm)
81{
82 int i;
83 int mmu;
84 unsigned long flags;
85 unsigned long page_id;
86 unsigned long tlb_hi;
87 unsigned long mmu_tlb_hi;
88
89 page_id = mm->context.page_id;
90
91 if (page_id == NO_CONTEXT)
92 return;
93
94 /* Mark the TLB entries that match the page_id as invalid. */
95 local_save_flags(flags);
96 local_irq_disable();
97
98 for (mmu = 1; mmu <= 2; mmu++) {
99 SUPP_BANK_SEL(mmu);
100 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
101 UPDATE_TLB_SEL_IDX(i);
102
103 /* Get the page_id */
104 SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
105
106 /* Check if the page_id matches. */
107 if ((tlb_hi & 0xff) == page_id) {
108 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
109 INVALID_PAGEID)
110 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
111 i & 0xf));
112
113 UPDATE_TLB_HILO(mmu_tlb_hi, 0);
114 }
115 }
116 }
117
118 local_irq_restore(flags);
119}
120
121/* Invalidate a single page. */
122void
123__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
124{
125 int i;
126 int mmu;
127 unsigned long page_id;
128 unsigned long flags;
129 unsigned long tlb_hi;
130 unsigned long mmu_tlb_hi;
131
132 page_id = vma->vm_mm->context.page_id;
133
134 if (page_id == NO_CONTEXT)
135 return;
136
137 addr &= PAGE_MASK;
138
139 /*
140 * Invalidate those TLB entries that match both the mm context and the
141 * requested virtual address.
142 */
143 local_save_flags(flags);
144 local_irq_disable();
145
146 for (mmu = 1; mmu <= 2; mmu++) {
147 SUPP_BANK_SEL(mmu);
148 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
149 UPDATE_TLB_SEL_IDX(i);
150 SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
151
152 /* Check if page_id and address match */
153 if (((tlb_hi & 0xff) == page_id) &&
154 ((tlb_hi & PAGE_MASK) == addr)) {
155 mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
156 INVALID_PAGEID) | addr;
157
158 UPDATE_TLB_HILO(mmu_tlb_hi, 0);
159 }
160 }
161 }
162
163 local_irq_restore(flags);
164}
165
166/*
167 * Initialize the context related info for a new mm_struct
168 * instance.
169 */
170
171int
172init_new_context(struct task_struct *tsk, struct mm_struct *mm)
173{
174 mm->context.page_id = NO_CONTEXT;
175 return 0;
176}
177
178/* Called in schedule() just before actually doing the switch_to. */
179void
180switch_mm(struct mm_struct *prev, struct mm_struct *next,
181 struct task_struct *tsk)
182{
183 int cpu = smp_processor_id();
184
185 /* Make sure there is a MMU context. */
186 spin_lock(&next->page_table_lock);
187 get_mmu_context(next);
188 cpu_set(cpu, next->cpu_vm_mask);
189 spin_unlock(&next->page_table_lock);
190
191 /*
192 * Remember the pgd for the fault handlers. Keep a separate copy of it
193 * because current and active_mm might be invalid at points where
194 * there's still a need to dereference the pgd.
195 */
196 per_cpu(current_pgd, cpu) = next->pgd;
197
198 /* Switch context in the MMU. */
199 if (tsk && tsk->thread_info)
200 {
201 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
202 }
203 else
204 {
205 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
206 }
207}
208
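
Editor's note: every flush loop above reads RW_MM_TLB_HI back and decides whether to invalidate an entry from two fields of that value: the low 8 bits hold the page_id and the page-aligned upper bits hold the virtual page number. The sketch below restates those predicates in C, assuming the 8 KB page size used elsewhere in this port; it is illustrative only.

#include <stdint.h>

#define SKETCH_PAGE_MASK 0xffffe000u	/* assumes 8 KB pages */

/* Mirror the tests in __flush_tlb_mm() and __flush_tlb_page() above. */
static int entry_matches_mm(uint32_t tlb_hi, uint32_t page_id)
{
	return (tlb_hi & 0xff) == page_id;
}

static int entry_matches_page(uint32_t tlb_hi, uint32_t page_id, uint32_t addr)
{
	return entry_matches_mm(tlb_hi, page_id) &&
	       (tlb_hi & SKETCH_PAGE_MASK) == (addr & SKETCH_PAGE_MASK);
}
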
diff --git a/arch/cris/arch-v32/output_arch.ld b/arch/cris/arch-v32/output_arch.ld
new file mode 100644
index 000000000000..d60a57db0ec2
--- /dev/null
+++ b/arch/cris/arch-v32/output_arch.ld
@@ -0,0 +1,2 @@
1/* At the time of this writing, there's no equivalent ld option. */
2OUTPUT_ARCH (crisv32)
diff --git a/arch/cris/arch-v32/vmlinux.lds.S b/arch/cris/arch-v32/vmlinux.lds.S
new file mode 100644
index 000000000000..adb94605d92a
--- /dev/null
+++ b/arch/cris/arch-v32/vmlinux.lds.S
@@ -0,0 +1,134 @@
1/* ld script to make the Linux/CRIS kernel
2 * Authors: Bjorn Wesen (bjornw@axis.com)
3 *
4 * It is VERY DANGEROUS to fiddle around with the symbols in this
5 * script. It is, for example, quite vital that all generated sections
6 * that are used are actually named here; otherwise the linker will
7 * put them at the end, where the init stuff is, which is FREED after
8 * the kernel has booted.
9 */
10
11#include <linux/config.h>
12#include <asm-generic/vmlinux.lds.h>
13
14jiffies = jiffies_64;
15SECTIONS
16{
17 . = DRAM_VIRTUAL_BASE;
18 dram_start = .;
19 ebp_start = .;
20
21 /* The boot section is only necessary until the VCS top level testbench */
22 /* includes both flash and DRAM. */
23 .boot : { *(.boot) }
24
25 . = DRAM_VIRTUAL_BASE + 0x4000; /* See head.S and pages reserved at the start. */
26
27 _text = .; /* Text and read-only data. */
28 text_start = .; /* Lots of aliases. */
29 _stext = .;
30 __stext = .;
31 .text : {
32 *(.text)
33 SCHED_TEXT
34 LOCK_TEXT
35 *(.fixup)
36 *(.text.__*)
37 }
38
39 _etext = . ; /* End of text section. */
40 __etext = .;
41
42 . = ALIGN(4); /* Exception table. */
43 __start___ex_table = .;
44 __ex_table : { *(__ex_table) }
45 __stop___ex_table = .;
46
47 RODATA
48
49 . = ALIGN (4);
50 ___data_start = . ;
51 __Sdata = . ;
52 .data : { /* Data */
53 *(.data)
54 }
55 __edata = . ; /* End of data section. */
56 _edata = . ;
57
58 . = ALIGN(8192); /* init_task and stack, must be aligned. */
59 .data.init_task : { *(.data.init_task) }
60
61 . = ALIGN(8192); /* Init code and data. */
62 __init_begin = .;
63 .init.text : {
64 _sinittext = .;
65 *(.init.text)
66 _einittext = .;
67 }
68 .init.data : { *(.init.data) }
69 . = ALIGN(16);
70 __setup_start = .;
71 .init.setup : { *(.init.setup) }
72 __setup_end = .;
73 __start___param = .;
74 __param : { *(__param) }
75 __stop___param = .;
76 .initcall.init : {
77 __initcall_start = .;
78 *(.initcall1.init);
79 *(.initcall2.init);
80 *(.initcall3.init);
81 *(.initcall4.init);
82 *(.initcall5.init);
83 *(.initcall6.init);
84 *(.initcall7.init);
85 __initcall_end = .;
86 }
87
88 .con_initcall.init : {
89 __con_initcall_start = .;
90 *(.con_initcall.init)
91 __con_initcall_end = .;
92 }
93 SECURITY_INIT
94
95 __per_cpu_start = .;
96 .data.percpu : { *(.data.percpu) }
97 __per_cpu_end = .;
98
99 .init.ramfs : {
100 __initramfs_start = .;
101 *(.init.ramfs)
102 __initramfs_end = .;
103 /*
104 * We fill to the next page, so we can discard all init
105 * pages without needing to consider what payload might be
106 * appended to the kernel image.
107 */
108 FILL (0);
109 . = ALIGN (8192);
110 }
111
112 __vmlinux_end = .; /* Last address of the physical file. */
113 __init_end = .;
114
115 __data_end = . ; /* Move to _edata? */
116 __bss_start = .; /* BSS. */
117 .bss : {
118 *(COMMON)
119 *(.bss)
120 }
121
122 . = ALIGN (0x20);
123 _end = .;
124 __end = .;
125
126 /* Sections to be discarded */
127 /DISCARD/ : {
128 *(.text.exit)
129 *(.data.exit)
130 *(.exitcall.exit)
131 }
132
133 dram_end = dram_start + CONFIG_ETRAX_DRAM_SIZE*1024*1024;
134}