aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc')
-rw-r--r--arch/sparc/Kconfig393
-rw-r--r--arch/sparc/Kconfig.debug14
-rw-r--r--arch/sparc/Makefile78
-rw-r--r--arch/sparc/boot/Makefile58
-rw-r--r--arch/sparc/boot/btfixupprep.c386
-rw-r--r--arch/sparc/boot/piggyback.c137
-rw-r--r--arch/sparc/defconfig644
-rw-r--r--arch/sparc/kernel/Makefile27
-rw-r--r--arch/sparc/kernel/apc.c186
-rw-r--r--arch/sparc/kernel/asm-offsets.c45
-rw-r--r--arch/sparc/kernel/auxio.c138
-rw-r--r--arch/sparc/kernel/cpu.c168
-rw-r--r--arch/sparc/kernel/devices.c160
-rw-r--r--arch/sparc/kernel/ebus.c361
-rw-r--r--arch/sparc/kernel/entry.S1956
-rw-r--r--arch/sparc/kernel/errtbls.c276
-rw-r--r--arch/sparc/kernel/etrap.S321
-rw-r--r--arch/sparc/kernel/head.S1326
-rw-r--r--arch/sparc/kernel/idprom.c108
-rw-r--r--arch/sparc/kernel/init_task.c28
-rw-r--r--arch/sparc/kernel/ioport.c731
-rw-r--r--arch/sparc/kernel/irq.c614
-rw-r--r--arch/sparc/kernel/module.c159
-rw-r--r--arch/sparc/kernel/muldiv.c240
-rw-r--r--arch/sparc/kernel/pcic.c1041
-rw-r--r--arch/sparc/kernel/pmc.c99
-rw-r--r--arch/sparc/kernel/process.c746
-rw-r--r--arch/sparc/kernel/ptrace.c632
-rw-r--r--arch/sparc/kernel/rtrap.S319
-rw-r--r--arch/sparc/kernel/sclow.S86
-rw-r--r--arch/sparc/kernel/semaphore.c155
-rw-r--r--arch/sparc/kernel/setup.c476
-rw-r--r--arch/sparc/kernel/signal.c1181
-rw-r--r--arch/sparc/kernel/smp.c295
-rw-r--r--arch/sparc/kernel/sparc-stub.c724
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c334
-rw-r--r--arch/sparc/kernel/sun4c_irq.c250
-rw-r--r--arch/sparc/kernel/sun4d_irq.c594
-rw-r--r--arch/sparc/kernel/sun4d_smp.c486
-rw-r--r--arch/sparc/kernel/sun4m_irq.c399
-rw-r--r--arch/sparc/kernel/sun4m_smp.c451
-rw-r--r--arch/sparc/kernel/sun4setup.c75
-rw-r--r--arch/sparc/kernel/sunos_asm.S67
-rw-r--r--arch/sparc/kernel/sunos_ioctl.c231
-rw-r--r--arch/sparc/kernel/sys_solaris.c37
-rw-r--r--arch/sparc/kernel/sys_sparc.c485
-rw-r--r--arch/sparc/kernel/sys_sunos.c1194
-rw-r--r--arch/sparc/kernel/systbls.S186
-rw-r--r--arch/sparc/kernel/tadpole.c126
-rw-r--r--arch/sparc/kernel/tick14.c85
-rw-r--r--arch/sparc/kernel/time.c641
-rw-r--r--arch/sparc/kernel/trampoline.S162
-rw-r--r--arch/sparc/kernel/traps.c515
-rw-r--r--arch/sparc/kernel/unaligned.c548
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S103
-rw-r--r--arch/sparc/kernel/windows.c127
-rw-r--r--arch/sparc/kernel/wof.S428
-rw-r--r--arch/sparc/kernel/wuf.S360
-rw-r--r--arch/sparc/lib/COPYING.LIB481
-rw-r--r--arch/sparc/lib/Makefile13
-rw-r--r--arch/sparc/lib/ashldi3.S34
-rw-r--r--arch/sparc/lib/ashrdi3.S36
-rw-r--r--arch/sparc/lib/atomic.S100
-rw-r--r--arch/sparc/lib/atomic32.c53
-rw-r--r--arch/sparc/lib/bitext.c132
-rw-r--r--arch/sparc/lib/bitops.S110
-rw-r--r--arch/sparc/lib/blockops.S89
-rw-r--r--arch/sparc/lib/checksum.S583
-rw-r--r--arch/sparc/lib/copy_user.S492
-rw-r--r--arch/sparc/lib/debuglocks.c202
-rw-r--r--arch/sparc/lib/divdi3.S295
-rw-r--r--arch/sparc/lib/locks.S72
-rw-r--r--arch/sparc/lib/lshrdi3.S27
-rw-r--r--arch/sparc/lib/memcmp.S312
-rw-r--r--arch/sparc/lib/memcpy.S1150
-rw-r--r--arch/sparc/lib/memscan.S133
-rw-r--r--arch/sparc/lib/memset.S203
-rw-r--r--arch/sparc/lib/mul.S135
-rw-r--r--arch/sparc/lib/muldi3.S76
-rw-r--r--arch/sparc/lib/rem.S382
-rw-r--r--arch/sparc/lib/rwsem.S205
-rw-r--r--arch/sparc/lib/sdiv.S379
-rw-r--r--arch/sparc/lib/strlen.S81
-rw-r--r--arch/sparc/lib/strlen_user.S109
-rw-r--r--arch/sparc/lib/strncmp.S118
-rw-r--r--arch/sparc/lib/strncpy_from_user.S47
-rw-r--r--arch/sparc/lib/udiv.S355
-rw-r--r--arch/sparc/lib/udivdi3.S258
-rw-r--r--arch/sparc/lib/umul.S169
-rw-r--r--arch/sparc/lib/urem.S355
-rw-r--r--arch/sparc/math-emu/Makefile8
-rw-r--r--arch/sparc/math-emu/ashldi3.S36
-rw-r--r--arch/sparc/math-emu/math.c521
-rw-r--r--arch/sparc/math-emu/sfp-util.h115
-rw-r--r--arch/sparc/mm/Makefile23
-rw-r--r--arch/sparc/mm/btfixup.c336
-rw-r--r--arch/sparc/mm/extable.c77
-rw-r--r--arch/sparc/mm/fault.c596
-rw-r--r--arch/sparc/mm/generic.c154
-rw-r--r--arch/sparc/mm/highmem.c120
-rw-r--r--arch/sparc/mm/hypersparc.S413
-rw-r--r--arch/sparc/mm/init.c515
-rw-r--r--arch/sparc/mm/io-unit.c318
-rw-r--r--arch/sparc/mm/iommu.c475
-rw-r--r--arch/sparc/mm/loadmmu.c46
-rw-r--r--arch/sparc/mm/nosrmmu.c59
-rw-r--r--arch/sparc/mm/nosun4c.c77
-rw-r--r--arch/sparc/mm/srmmu.c2274
-rw-r--r--arch/sparc/mm/sun4c.c2276
-rw-r--r--arch/sparc/mm/swift.S256
-rw-r--r--arch/sparc/mm/tsunami.S133
-rw-r--r--arch/sparc/mm/viking.S284
-rw-r--r--arch/sparc/prom/Makefile9
-rw-r--r--arch/sparc/prom/bootstr.c63
-rw-r--r--arch/sparc/prom/console.c220
-rw-r--r--arch/sparc/prom/devmap.c54
-rw-r--r--arch/sparc/prom/devops.c89
-rw-r--r--arch/sparc/prom/init.c95
-rw-r--r--arch/sparc/prom/memory.c216
-rw-r--r--arch/sparc/prom/misc.c139
-rw-r--r--arch/sparc/prom/mp.c121
-rw-r--r--arch/sparc/prom/palloc.c44
-rw-r--r--arch/sparc/prom/printf.c46
-rw-r--r--arch/sparc/prom/ranges.c118
-rw-r--r--arch/sparc/prom/segment.c29
-rw-r--r--arch/sparc/prom/sun4prom.c161
-rw-r--r--arch/sparc/prom/tree.c364
127 files changed, 40258 insertions, 0 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
new file mode 100644
index 000000000000..237f922520fd
--- /dev/null
+++ b/arch/sparc/Kconfig
@@ -0,0 +1,393 @@
1# $Id: config.in,v 1.113 2002/01/24 22:14:44 davem Exp $
2# For a description of the syntax of this configuration file,
3# see Documentation/kbuild/kconfig-language.txt.
4#
5
6mainmenu "Linux/SPARC Kernel Configuration"
7
8config MMU
9 bool
10 default y
11
12config UID16
13 bool
14 default y
15
16config HIGHMEM
17 bool
18 default y
19
20config GENERIC_ISA_DMA
21 bool
22 default y
23
24source "init/Kconfig"
25
26menu "General machine setup"
27
28config VT
29 bool
30 select INPUT
31 default y
32 ---help---
33 If you say Y here, you will get support for terminal devices with
34 display and keyboard devices. These are called "virtual" because you
35 can run several virtual terminals (also called virtual consoles) on
36 one physical terminal. This is rather useful, for example one
37 virtual terminal can collect system messages and warnings, another
38 one can be used for a text-mode user session, and a third could run
39 an X session, all in parallel. Switching between virtual terminals
40 is done with certain key combinations, usually Alt-<function key>.
41
42 The setterm command ("man setterm") can be used to change the
43 properties (such as colors or beeping) of a virtual terminal. The
44 man page console_codes(4) ("man console_codes") contains the special
45 character sequences that can be used to change those properties
46 directly. The fonts used on virtual terminals can be changed with
47 the setfont ("man setfont") command and the key bindings are defined
48 with the loadkeys ("man loadkeys") command.
49
50 You need at least one virtual terminal device in order to make use
51 of your keyboard and monitor. Therefore, only people configuring an
52 embedded system would want to say N here in order to save some
53 memory; the only way to log into such a system is then via a serial
54 or network connection.
55
56 If unsure, say Y, or else you won't be able to do much with your new
57 shiny Linux system :-)
58
59config VT_CONSOLE
60 bool
61 default y
62 ---help---
63 The system console is the device which receives all kernel messages
64 and warnings and which allows logins in single user mode. If you
65 answer Y here, a virtual terminal (the device used to interact with
66 a physical terminal) can be used as system console. This is the most
67 common mode of operations, so you should say Y here unless you want
68 the kernel messages be output only to a serial port (in which case
69 you should say Y to "Console on serial port", below).
70
71 If you do say Y here, by default the currently visible virtual
72 terminal (/dev/tty0) will be used as system console. You can change
73 that with a kernel command line option such as "console=tty3" which
74 would use the third virtual terminal as system console. (Try "man
75 bootparam" or see the documentation of your boot loader (lilo or
76 loadlin) about how to pass options to the kernel at boot time.)
77
78 If unsure, say Y.
79
80config HW_CONSOLE
81 bool
82 default y
83
84config SMP
85 bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
86 depends on BROKEN
87 ---help---
88 This enables support for systems with more than one CPU. If you have
89 a system with only one CPU, say N. If you have a system with more
90 than one CPU, say Y.
91
92 If you say N here, the kernel will run on single and multiprocessor
93 machines, but will use only one CPU of a multiprocessor machine. If
94 you say Y here, the kernel will run on many, but not all,
95 singleprocessor machines. On a singleprocessor machine, the kernel
96 will run faster if you say N here.
97
98 People using multiprocessor machines who say Y here should also say
99 Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
100 Management" code will be disabled if you say Y here.
101
102 See also the <file:Documentation/smp.txt>,
103 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
104 <http://www.tldp.org/docs.html#howto>.
105
106 If you don't know what to do here, say N.
107
108config NR_CPUS
109 int "Maximum number of CPUs (2-32)"
110 range 2 32
111 depends on SMP
112 default "32"
113
114# Identify this as a Sparc32 build
115config SPARC32
116 bool
117 default y
118 help
119 SPARC is a family of RISC microprocessors designed and marketed by
120 Sun Microsystems, incorporated. They are very widely found in Sun
121 workstations and clones. This port covers the original 32-bit SPARC;
122 it is old and stable and usually considered one of the "big three"
123 along with the Intel and Alpha ports. The UltraLinux project
124 maintains both the SPARC32 and SPARC64 ports; its web page is
125 available at <http://www.ultralinux.org/>.
126
127# Global things across all Sun machines.
128config ISA
129 bool
130 help
131 ISA is found on Espresso only and is not supported currently.
132 Say N
133
134config EISA
135 bool
136 help
137 EISA is not supported.
138 Say N
139
140config MCA
141 bool
142 help
143 MCA is not supported.
144 Say N
145
146config PCMCIA
147 tristate
148 ---help---
149 Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
150 computer. These are credit-card size devices such as network cards,
151 modems or hard drives often used with laptops computers. There are
152 actually two varieties of these cards: the older 16 bit PCMCIA cards
153 and the newer 32 bit CardBus cards. If you want to use CardBus
154 cards, you need to say Y here and also to "CardBus support" below.
155
156 To use your PC-cards, you will need supporting software from David
157 Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
158 for location). Please also read the PCMCIA-HOWTO, available from
159 <http://www.tldp.org/docs.html#howto>.
160
161 To compile this driver as modules, choose M here: the
162 modules will be called pcmcia_core and ds.
163
164config SBUS
165 bool
166 default y
167
168config SBUSCHAR
169 bool
170 default y
171
172config SERIAL_CONSOLE
173 bool
174 default y
175 ---help---
176 If you say Y here, it will be possible to use a serial port as the
177 system console (the system console is the device which receives all
178 kernel messages and warnings and which allows logins in single user
179 mode). This could be useful if some terminal or printer is connected
180 to that serial port.
181
182 Even if you say Y here, the currently visible virtual console
183 (/dev/tty0) will still be used as the system console by default, but
184 you can alter that using a kernel command line option such as
185 "console=ttyS1". (Try "man bootparam" or see the documentation of
186 your boot loader (silo) about how to pass options to the kernel at
187 boot time.)
188
189 If you don't have a graphics card installed and you say Y here, the
190 kernel will automatically use the first serial line, /dev/ttyS0, as
191 system console.
192
193 If unsure, say N.
194
195config SUN_AUXIO
196 bool
197 default y
198
199config SUN_IO
200 bool
201 default y
202
203config RWSEM_GENERIC_SPINLOCK
204 bool
205 default y
206
207config RWSEM_XCHGADD_ALGORITHM
208 bool
209
210config GENERIC_CALIBRATE_DELAY
211 bool
212 default y
213
214config SUN_PM
215 bool
216 default y
217 help
218 Enable power management and CPU standby features on supported
219 SPARC platforms.
220
221config SUN4
222 bool "Support for SUN4 machines (disables SUN4[CDM] support)"
223 depends on !SMP
224 default n
225 help
226 Say Y here if, and only if, your machine is a sun4. Note that
227 a kernel compiled with this option will run only on sun4.
228 (And the current version will probably work only on sun4/330.)
229
230if !SUN4
231
232config PCI
233 bool "Support for PCI and PS/2 keyboard/mouse"
234 help
235 CONFIG_PCI is needed for all JavaStation's (including MrCoffee),
236 CP-1200, JavaEngine-1, Corona, Red October, and Serengeti SGSC.
237 All of these platforms are extremely obscure, so say N if unsure.
238
239source "drivers/pci/Kconfig"
240
241endif
242
243config SUN_OPENPROMFS
244 tristate "Openprom tree appears in /proc/openprom"
245 help
246 If you say Y, the OpenPROM device tree will be available as a
247 virtual file system, which you can mount to /proc/openprom by "mount
248 -t openpromfs none /proc/openprom".
249
250 To compile the /proc/openprom support as a module, choose M here: the
251 module will be called openpromfs.
252
253 Only choose N if you know in advance that you will not need to modify
254 OpenPROM settings on the running system.
255
256source "fs/Kconfig.binfmt"
257
258config SUNOS_EMUL
259 bool "SunOS binary emulation"
260 help
261 This allows you to run most SunOS binaries. If you want to do this,
262 say Y here and place appropriate files in /usr/gnemul/sunos. See
263 <http://www.ultralinux.org/faq.html> for more information. If you
264 want to run SunOS binaries on an Ultra you must also say Y to
265 "Kernel support for 32-bit a.out binaries" above.
266
267source "drivers/parport/Kconfig"
268
269config PRINTER
270 tristate "Parallel printer support"
271 depends on PARPORT
272 ---help---
273 If you intend to attach a printer to the parallel port of your Linux
274 box (as opposed to using a serial printer; if the connector at the
275 printer has 9 or 25 holes ["female"], then it's serial), say Y.
276 Also read the Printing-HOWTO, available from
277 <http://www.tldp.org/docs.html#howto>.
278
279 It is possible to share one parallel port among several devices
280 (e.g. printer and ZIP drive) and it is safe to compile the
281 corresponding drivers into the kernel. If you want to compile this
282 driver as a module however, choose M here and read
283 <file:Documentation/parport.txt>. The module will be called lp.
284
285 If you have several parallel ports, you can specify which ports to
286 use with the "lp" kernel command line option. (Try "man bootparam"
287 or see the documentation of your boot loader (silo) about how to pass
288 options to the kernel at boot time.) The syntax of the "lp" command
289 line option can be found in <file:drivers/char/lp.c>.
290
291 If you have more than 8 printers, you need to increase the LP_NO
292 macro in lp.c and the PARPORT_MAX macro in parport.h.
293
294endmenu
295
296source "drivers/base/Kconfig"
297
298source "drivers/video/Kconfig"
299
300source "drivers/mtd/Kconfig"
301
302source "drivers/serial/Kconfig"
303
304if !SUN4
305source "drivers/sbus/char/Kconfig"
306endif
307
308source "drivers/block/Kconfig"
309
310# Don't frighten a common SBus user
311if PCI
312
313source "drivers/ide/Kconfig"
314
315endif
316
317source "drivers/isdn/Kconfig"
318
319source "drivers/scsi/Kconfig"
320
321source "drivers/fc4/Kconfig"
322
323source "drivers/md/Kconfig"
324
325source "net/Kconfig"
326
327# This one must be before the filesystem configs. -DaveM
328
329menu "Unix98 PTY support"
330
331config UNIX98_PTYS
332 bool "Unix98 PTY support"
333 ---help---
334 A pseudo terminal (PTY) is a software device consisting of two
335 halves: a master and a slave. The slave device behaves identical to
336 a physical terminal; the master device is used by a process to
337 read data from and write data to the slave, thereby emulating a
338 terminal. Typical programs for the master side are telnet servers
339 and xterms.
340
341 Linux has traditionally used the BSD-like names /dev/ptyxx for
342 masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
343 has a number of problems. The GNU C library glibc 2.1 and later,
344 however, supports the Unix98 naming standard: in order to acquire a
345 pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
346 terminal is then made available to the process and the pseudo
347 terminal slave can be accessed as /dev/pts/<number>. What was
348 traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
349
350 The entries in /dev/pts/ are created on the fly by a virtual
351 file system; therefore, if you say Y here you should say Y to
352 "/dev/pts file system for Unix98 PTYs" as well.
353
354 If you want to say Y here, you need to have the C library glibc 2.1
355 or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
356 Read the instructions in <file:Documentation/Changes> pertaining to
357 pseudo terminals. It's safe to say N.
358
359config UNIX98_PTY_COUNT
360 int "Maximum number of Unix98 PTYs in use (0-2048)"
361 depends on UNIX98_PTYS
362 default "256"
363 help
364 The maximum number of Unix98 PTYs that can be used at any one time.
365 The default is 256, and should be enough for desktop systems. Server
366 machines which support incoming telnet/rlogin/ssh connections and/or
367 serve several X terminals may want to increase this: every incoming
368 connection and every xterm uses up one PTY.
369
370 When not in use, each additional set of 256 PTYs occupy
371 approximately 8 KB of kernel memory on 32-bit architectures.
372
373endmenu
374
375source "drivers/input/Kconfig"
376
377source "fs/Kconfig"
378
379source "sound/Kconfig"
380
381source "drivers/usb/Kconfig"
382
383source "drivers/infiniband/Kconfig"
384
385source "drivers/char/watchdog/Kconfig"
386
387source "arch/sparc/Kconfig.debug"
388
389source "security/Kconfig"
390
391source "crypto/Kconfig"
392
393source "lib/Kconfig"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
new file mode 100644
index 000000000000..120f6b529348
--- /dev/null
+++ b/arch/sparc/Kconfig.debug
@@ -0,0 +1,14 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config DEBUG_STACK_USAGE
6 bool "Enable stack utilization instrumentation"
7 depends on DEBUG_KERNEL
8 help
9 Enables the display of the minimum amount of free stack which each
10 task has ever had available in the sysrq-T and sysrq-P debug output.
11
12 This option will slow down process creation somewhat.
13
14endmenu
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
new file mode 100644
index 000000000000..7b3bbaf083a6
--- /dev/null
+++ b/arch/sparc/Makefile
@@ -0,0 +1,78 @@
1#
2# sparc/Makefile
3#
4# Makefile for the architecture dependent flags and dependencies on the
5# Sparc.
6#
7# Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
8#
9
10#
11# Uncomment the first CFLAGS if you are doing kgdb source level
12# debugging of the kernel to get the proper debugging information.
13
14AS := $(AS) -32
15LDFLAGS := -m elf32_sparc
16CHECKFLAGS += -D__sparc__
17
18#CFLAGS := $(CFLAGS) -g -pipe -fcall-used-g5 -fcall-used-g7
19CFLAGS := $(CFLAGS) -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
20AFLAGS := $(AFLAGS) -m32
21
22#LDFLAGS_vmlinux = -N -Ttext 0xf0004000
23# Since 2.5.40, the first stage is left not btfix-ed.
24# Actual linking is done with "make image".
25LDFLAGS_vmlinux = -r
26
27head-y := arch/sparc/kernel/head.o arch/sparc/kernel/init_task.o
28HEAD_Y := $(head-y)
29
30core-y += arch/sparc/kernel/ arch/sparc/mm/ arch/sparc/math-emu/
31libs-y += arch/sparc/prom/ arch/sparc/lib/
32
33# Export what is needed by arch/sparc/boot/Makefile
34# Renaming is done to avoid confusing pattern matching rules in 2.5.45 (multy-)
35INIT_Y := $(patsubst %/, %/built-in.o, $(init-y))
36CORE_Y := $(core-y)
37CORE_Y += kernel/ mm/ fs/ ipc/ security/ crypto/
38CORE_Y := $(patsubst %/, %/built-in.o, $(CORE_Y))
39DRIVERS_Y := $(patsubst %/, %/built-in.o, $(drivers-y))
40NET_Y := $(patsubst %/, %/built-in.o, $(net-y))
41LIBS_Y1 := $(patsubst %/, %/lib.a, $(libs-y))
42LIBS_Y2 := $(patsubst %/, %/built-in.o, $(libs-y))
43LIBS_Y := $(LIBS_Y1) $(LIBS_Y2)
44
45ifdef CONFIG_KALLSYMS
46kallsyms.o := .tmp_kallsyms2.o
47endif
48
49export INIT_Y CORE_Y DRIVERS_Y NET_Y LIBS_Y HEAD_Y kallsyms.o
50
51# Default target
52all: image
53
54boot := arch/sparc/boot
55
56image tftpboot.img: vmlinux
57 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
58
59archclean:
60 $(Q)$(MAKE) $(clean)=$(boot)
61
62prepare: include/asm-$(ARCH)/asm_offsets.h
63
64arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
65 include/config/MARKER
66
67include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
68 $(call filechk,gen-asm-offsets)
69
70CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h \
71 arch/$(ARCH)/kernel/asm-offsets.s \
72 arch/$(ARCH)/boot/System.map
73
74# Don't use tabs in echo arguments.
75define archhelp
76 echo '* image - kernel image ($(boot)/image)'
77 echo ' tftpboot.img - image prepared for tftp'
78endef
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
new file mode 100644
index 000000000000..b365084316ac
--- /dev/null
+++ b/arch/sparc/boot/Makefile
@@ -0,0 +1,58 @@
1# $Id: Makefile,v 1.10 2000/02/23 08:17:46 jj Exp $
2# Makefile for the Sparc boot stuff.
3#
4# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5# Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
6
7ROOT_IMG := /usr/src/root.img
8ELFTOAOUT := elftoaout
9
10hostprogs-y := piggyback btfixupprep
11targets := tftpboot.img btfix.o btfix.S image
12
13quiet_cmd_elftoaout = ELFTOAOUT $@
14 cmd_elftoaout = $(ELFTOAOUT) $(obj)/image -o $@
15quiet_cmd_piggy = PIGGY $@
16 cmd_piggy = $(obj)/piggyback $@ $(obj)/System.map $(ROOT_IMG)
17quiet_cmd_btfix = BTFIX $@
18 cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
19quiet_cmd_sysmap = SYSMAP $(obj)/System.map
20 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
21quiet_cmd_image = LD $@
22 cmd_image = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) -o $@
23
24define rule_image
25 $(if $($(quiet)cmd_image), \
26 echo ' $($(quiet)cmd_image)' &&) \
27 $(cmd_image); \
28 $(if $($(quiet)cmd_sysmap), \
29 echo ' $($(quiet)cmd_sysmap)' &&) \
30 $(cmd_sysmap) $@ $(obj)/System.map; \
31 if [ $$? -ne 0 ]; then \
32 rm -f $@; \
33 /bin/false; \
34 fi; \
35 echo 'cmd_$@ := $(cmd_image)' > $(@D)/.$(@F).cmd
36endef
37
38BTOBJS := $(HEAD_Y) $(INIT_Y)
39BTLIBS := $(CORE_Y) $(LIBS_Y) $(DRIVERS_Y) $(NET_Y)
40LDFLAGS_image := -T arch/sparc/kernel/vmlinux.lds $(BTOBJS) \
41 --start-group $(BTLIBS) --end-group \
42 $(kallsyms.o) $(obj)/btfix.o
43
44# Link the final image including btfixup'ed symbols.
45# This is a replacement for the link done in the top-level Makefile.
46# Note: No dependency on the prerequisite files since that would require
47# make to try check if they are updated - and due to changes
48# in gcc options (path for example) this would result in
49# these files being recompiled for each build.
50$(obj)/image: $(obj)/btfix.o FORCE
51 $(call if_changed_rule,image)
52
53$(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
54 $(call if_changed,elftoaout)
55 $(call if_changed,piggy)
56
57$(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
58 $(call if_changed,btfix)
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
new file mode 100644
index 000000000000..dc7b0546e3bb
--- /dev/null
+++ b/arch/sparc/boot/btfixupprep.c
@@ -0,0 +1,386 @@
1/* $Id: btfixupprep.c,v 1.6 2001/08/22 15:27:47 davem Exp $
2 Simple utility to prepare vmlinux image for sparc.
3 Resolves all BTFIXUP uses and settings and creates
4 a special .s object to link to the image.
5
6 Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
21
22#include <stdio.h>
23#include <string.h>
24#include <ctype.h>
25#include <errno.h>
26#include <unistd.h>
27#include <stdlib.h>
28#include <malloc.h>
29
30#define MAXSYMS 1024
31
32static char *symtab = "SYMBOL TABLE:";
33static char *relrec = "RELOCATION RECORDS FOR [";
34static int rellen;
35static int symlen;
36int mode;
37
struct _btfixup;

/* One relocation site that references a BTFIXUP symbol. */
typedef struct _btfixuprel {
	char *sect;		/* name of the section the relocation lives in */
	unsigned long offset;	/* relocation address within that section */
	struct _btfixup *f;	/* CALL ('f') fixup this record piggy-backs on, or NULL */
	int frel;		/* index of that CALL fixup's relocation record */
	struct _btfixuprel *next;	/* singly-linked list, in objdump order */
} btfixuprel;

/* One distinct BTFIXUP symbol collected from the objdump output. */
typedef struct _btfixup {
	int type;		/* kind character: 'f' CALL, 'b' BLACKBOX, 's' SIMM13,
				 * 'a' HALF, 'h' SETHI, 'i' INT */
	int setinitval;		/* non-zero once initval/initvalstr have been recorded */
	unsigned int initval;	/* pre-initialized value parsed from a __btset_0x... suffix */
	char *initvalstr;	/* literal hex digits of that suffix (for exact-match checks) */
	char *name;		/* symbol name with prefix/suffix stripped (strdup'ed) */
	btfixuprel *rel;	/* list of relocation sites using this symbol */
} btfixup;
56
btfixup array[MAXSYMS];		/* table of all distinct BTFIXUP symbols seen */
int last = 0;			/* number of entries used in array[] */
char buffer[1024];		/* current objdump output line being parsed */
unsigned long lastfoffset = -1;	/* offset of the most recent CALL ('f') relocation */
unsigned long lastfrelno;	/* relocation index of that CALL within its fixup */
btfixup *lastf;			/* fixup that CALL belonged to */
63
void fatal(void) __attribute__((noreturn));
/* Abort with a diagnostic showing the objdump line that could not be
 * parsed (the global line buffer). Never returns. */
void fatal(void)
{
	fprintf(stderr, "Malformed output from objdump\n%s\n", buffer);
	exit(1);
}
70
71btfixup *find(int type, char *name)
72{
73 int i;
74 for (i = 0; i < last; i++) {
75 if (array[i].type == type && !strcmp(array[i].name, name))
76 return array + i;
77 }
78 array[last].type = type;
79 array[last].name = strdup(name);
80 array[last].setinitval = 0;
81 if (!array[last].name) fatal();
82 array[last].rel = NULL;
83 last++;
84 if (last >= MAXSYMS) {
85 fprintf(stderr, "Ugh. Something strange. More than %d different BTFIXUP symbols\n", MAXSYMS);
86 exit(1);
87 }
88 return array + last - 1;
89}
90
91void set_mode (char *buffer)
92{
93 for (mode = 0;; mode++)
94 if (buffer[mode] < '0' || buffer[mode] > '9')
95 break;
96 if (mode != 8 && mode != 16)
97 fatal();
98}
99
100
101int main(int argc,char **argv)
102{
103 char *p, *q;
104 char *sect;
105 int i, j, k;
106 unsigned int initval;
107 int shift;
108 btfixup *f;
109 btfixuprel *r, **rr;
110 unsigned long offset;
111 char *initvalstr;
112
113 symlen = strlen(symtab);
114 while (fgets (buffer, 1024, stdin) != NULL)
115 if (!strncmp (buffer, symtab, symlen))
116 goto main0;
117 fatal();
118main0:
119 rellen = strlen(relrec);
120 while (fgets (buffer, 1024, stdin) != NULL)
121 if (!strncmp (buffer, relrec, rellen))
122 goto main1;
123 fatal();
124main1:
125 sect = malloc(strlen (buffer + rellen) + 1);
126 if (!sect) fatal();
127 strcpy (sect, buffer + rellen);
128 p = strchr (sect, ']');
129 if (!p) fatal();
130 *p = 0;
131 if (fgets (buffer, 1024, stdin) == NULL)
132 fatal();
133 while (fgets (buffer, 1024, stdin) != NULL) {
134 int nbase;
135 if (!strncmp (buffer, relrec, rellen))
136 goto main1;
137 if (mode == 0)
138 set_mode (buffer);
139 p = strchr (buffer, '\n');
140 if (p) *p = 0;
141 if (strlen (buffer) < 22+mode)
142 continue;
143 if (strncmp (buffer + mode, " R_SPARC_", 9))
144 continue;
145 nbase = 27 - 8 + mode;
146 if (buffer[nbase] != '_' || buffer[nbase+1] != '_' || buffer[nbase+2] != '_')
147 continue;
148 switch (buffer[nbase+3]) {
149 case 'f': /* CALL */
150 case 'b': /* BLACKBOX */
151 case 's': /* SIMM13 */
152 case 'a': /* HALF */
153 case 'h': /* SETHI */
154 case 'i': /* INT */
155 break;
156 default:
157 continue;
158 }
159 p = strchr (buffer + nbase+5, '+');
160 if (p) *p = 0;
161 shift = nbase + 5;
162 if (buffer[nbase+4] == 's' && buffer[nbase+5] == '_') {
163 shift = nbase + 6;
164 if (strcmp (sect, ".init.text")) {
165 fprintf(stderr,
166 "Wrong use of '%s' BTFIXUPSET in '%s' section.\n"
167 "BTFIXUPSET_CALL can be used only in"
168 " __init sections\n",
169 buffer + shift, sect);
170 exit(1);
171 }
172 } else if (buffer[nbase+4] != '_')
173 continue;
174 if (!strcmp (sect, ".text.exit"))
175 continue;
176 if (strcmp (sect, ".text") &&
177 strcmp (sect, ".init.text") &&
178 strcmp (sect, ".fixup") &&
179 (strcmp (sect, "__ksymtab") || buffer[nbase+3] != 'f')) {
180 if (buffer[nbase+3] == 'f')
181 fprintf(stderr,
182 "Wrong use of '%s' in '%s' section.\n"
183 " It can be used only in .text, .init.text,"
184 " .fixup and __ksymtab\n",
185 buffer + shift, sect);
186 else
187 fprintf(stderr,
188 "Wrong use of '%s' in '%s' section.\n"
189 " It can be only used in .text, .init.text,"
190 " and .fixup\n", buffer + shift, sect);
191 exit(1);
192 }
193 p = strstr (buffer + shift, "__btset_");
194 if (p && buffer[nbase+4] == 's') {
195 fprintf(stderr, "__btset_ in BTFIXUP name can only be used when defining the variable, not for setting\n%s\n", buffer);
196 exit(1);
197 }
198 initval = 0;
199 initvalstr = NULL;
200 if (p) {
201 if (p[8] != '0' || p[9] != 'x') {
202 fprintf(stderr, "Pre-initialized values can be only initialized with hexadecimal constants starting 0x\n%s\n", buffer);
203 exit(1);
204 }
205 initval = strtoul(p + 10, &q, 16);
206 if (*q || !initval) {
207 fprintf(stderr, "Pre-initialized values can be only in the form name__btset_0xXXXXXXXX where X are hex digits.\nThey cannot be name__btset_0x00000000 though. Use BTFIXUPDEF_XX instead of BTFIXUPDEF_XX_INIT then.\n%s\n", buffer);
208 exit(1);
209 }
210 initvalstr = p + 10;
211 *p = 0;
212 }
213 f = find(buffer[nbase+3], buffer + shift);
214 if (buffer[nbase+4] == 's')
215 continue;
216 switch (buffer[nbase+3]) {
217 case 'f':
218 if (initval) {
219 fprintf(stderr, "Cannot use pre-initalized fixups for calls\n%s\n", buffer);
220 exit(1);
221 }
222 if (!strcmp (sect, "__ksymtab")) {
223 if (strncmp (buffer + mode+9, "32 ", 10)) {
224 fprintf(stderr, "BTFIXUP_CALL in EXPORT_SYMBOL results in relocation other than R_SPARC_32\n\%s\n", buffer);
225 exit(1);
226 }
227 } else if (strncmp (buffer + mode+9, "WDISP30 ", 10) &&
228 strncmp (buffer + mode+9, "HI22 ", 10) &&
229 strncmp (buffer + mode+9, "LO10 ", 10)) {
230 fprintf(stderr, "BTFIXUP_CALL results in relocation other than R_SPARC_WDISP30, R_SPARC_HI22 or R_SPARC_LO10\n%s\n", buffer);
231 exit(1);
232 }
233 break;
234 case 'b':
235 if (initval) {
236 fprintf(stderr, "Cannot use pre-initialized fixups for blackboxes\n%s\n", buffer);
237 exit(1);
238 }
239 if (strncmp (buffer + mode+9, "HI22 ", 10)) {
240 fprintf(stderr, "BTFIXUP_BLACKBOX results in relocation other than R_SPARC_HI22\n%s\n", buffer);
241 exit(1);
242 }
243 break;
244 case 's':
245 if (initval + 0x1000 >= 0x2000) {
246 fprintf(stderr, "Wrong initializer for SIMM13. Has to be from $fffff000 to $00000fff\n%s\n", buffer);
247 exit(1);
248 }
249 if (strncmp (buffer + mode+9, "13 ", 10)) {
250 fprintf(stderr, "BTFIXUP_SIMM13 results in relocation other than R_SPARC_13\n%s\n", buffer);
251 exit(1);
252 }
253 break;
254 case 'a':
255 if (initval + 0x1000 >= 0x2000 && (initval & 0x3ff)) {
256 fprintf(stderr, "Wrong initializer for HALF.\n%s\n", buffer);
257 exit(1);
258 }
259 if (strncmp (buffer + mode+9, "13 ", 10)) {
260 fprintf(stderr, "BTFIXUP_HALF results in relocation other than R_SPARC_13\n%s\n", buffer);
261 exit(1);
262 }
263 break;
264 case 'h':
265 if (initval & 0x3ff) {
266 fprintf(stderr, "Wrong initializer for SETHI. Cannot have set low 10 bits\n%s\n", buffer);
267 exit(1);
268 }
269 if (strncmp (buffer + mode+9, "HI22 ", 10)) {
270 fprintf(stderr, "BTFIXUP_SETHI results in relocation other than R_SPARC_HI22\n%s\n", buffer);
271 exit(1);
272 }
273 break;
274 case 'i':
275 if (initval) {
276 fprintf(stderr, "Cannot use pre-initalized fixups for INT\n%s\n", buffer);
277 exit(1);
278 }
279 if (strncmp (buffer + mode+9, "HI22 ", 10) && strncmp (buffer + mode+9, "LO10 ", 10)) {
280 fprintf(stderr, "BTFIXUP_INT results in relocation other than R_SPARC_HI22 and R_SPARC_LO10\n%s\n", buffer);
281 exit(1);
282 }
283 break;
284 }
285 if (!f->setinitval) {
286 f->initval = initval;
287 if (initvalstr) {
288 f->initvalstr = strdup(initvalstr);
289 if (!f->initvalstr) fatal();
290 }
291 f->setinitval = 1;
292 } else if (f->initval != initval) {
293 fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer\n%s\n",
294 f->name, f->initvalstr ? : "0x00000000", buffer);
295 exit(1);
296 } else if (initval && strcmp(f->initvalstr, initvalstr)) {
297 fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer.\n"
298 "Initializers have to match literally as well.\n%s\n",
299 f->name, f->initvalstr, buffer);
300 exit(1);
301 }
302 offset = strtoul(buffer, &q, 16);
303 if (q != buffer + mode || (!offset && (mode == 8 ? strncmp (buffer, "00000000 ", 9) : strncmp (buffer, "0000000000000000 ", 17)))) {
304 fprintf(stderr, "Malformed relocation address in\n%s\n", buffer);
305 exit(1);
306 }
307 for (k = 0, r = f->rel, rr = &f->rel; r; rr = &r->next, r = r->next, k++)
308 if (r->offset == offset && !strcmp(r->sect, sect)) {
309 fprintf(stderr, "Ugh. One address has two relocation records\n");
310 exit(1);
311 }
312 *rr = malloc(sizeof(btfixuprel));
313 if (!*rr) fatal();
314 (*rr)->offset = offset;
315 (*rr)->f = NULL;
316 if (buffer[nbase+3] == 'f') {
317 lastf = f;
318 lastfoffset = offset;
319 lastfrelno = k;
320 } else if (lastfoffset + 4 == offset) {
321 (*rr)->f = lastf;
322 (*rr)->frel = lastfrelno;
323 }
324 (*rr)->sect = sect;
325 (*rr)->next = NULL;
326 }
327 printf("! Generated by btfixupprep. Do not edit.\n\n");
328 printf("\t.section\t\".data.init\",#alloc,#write\n\t.align\t4\n\n");
329 printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
330 for (i = 0; i < last; i++) {
331 f = array + i;
332 printf("\t.global\t___%cs_%s\n", f->type, f->name);
333 if (f->type == 'f')
334 printf("___%cs_%s:\n\t.word 0x%08x,0,0,", f->type, f->name, f->type << 24);
335 else
336 printf("___%cs_%s:\n\t.word 0x%08x,0,", f->type, f->name, f->type << 24);
337 for (j = 0, r = f->rel; r != NULL; j++, r = r->next);
338 if (j)
339 printf("%d\n\t.word\t", j * 2);
340 else
341 printf("0\n");
342 for (r = f->rel, j--; r != NULL; j--, r = r->next) {
343 if (!strcmp (r->sect, ".text"))
344 printf ("_stext+0x%08lx", r->offset);
345 else if (!strcmp (r->sect, ".init.text"))
346 printf ("__init_begin+0x%08lx", r->offset);
347 else if (!strcmp (r->sect, "__ksymtab"))
348 printf ("__start___ksymtab+0x%08lx", r->offset);
349 else if (!strcmp (r->sect, ".fixup"))
350 printf ("__start___fixup+0x%08lx", r->offset);
351 else
352 fatal();
353 if (f->type == 'f' || !r->f)
354 printf (",0");
355 else
356 printf (",___fs_%s+0x%08x", r->f->name, (4 + r->frel*2)*4 + 4);
357 if (j) printf (",");
358 else printf ("\n");
359 }
360 printf("\n");
361 }
362 printf("\n\t.global\t___btfixup_end\n___btfixup_end:\n");
363 printf("\n\n! Define undefined references\n\n");
364 for (i = 0; i < last; i++) {
365 f = array + i;
366 if (f->type == 'f') {
367 printf("\t.global\t___f_%s\n", f->name);
368 printf("___f_%s:\n", f->name);
369 }
370 }
371 printf("\tretl\n\t nop\n\n");
372 for (i = 0; i < last; i++) {
373 f = array + i;
374 if (f->type != 'f') {
375 if (!f->initval) {
376 printf("\t.global\t___%c_%s\n", f->type, f->name);
377 printf("___%c_%s = 0\n", f->type, f->name);
378 } else {
379 printf("\t.global\t___%c_%s__btset_0x%s\n", f->type, f->name, f->initvalstr);
380 printf("___%c_%s__btset_0x%s = 0x%08x\n", f->type, f->name, f->initvalstr, f->initval);
381 }
382 }
383 }
384 printf("\n\n");
385 exit(0);
386}
diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c
new file mode 100644
index 000000000000..6962cc68ed5b
--- /dev/null
+++ b/arch/sparc/boot/piggyback.c
@@ -0,0 +1,137 @@
1/* $Id: piggyback.c,v 1.4 2000/12/05 00:48:57 anton Exp $
2 Simple utility to make a single-image install kernel with initial ramdisk
3 for Sparc tftpbooting without need to set up nfs.
4
5 Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 Pete Zaitcev <zaitcev@yahoo.com> endian fixes for cross-compiles, 2000.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
21
22#include <stdio.h>
23#include <string.h>
24#include <ctype.h>
25#include <errno.h>
26#include <fcntl.h>
27#include <dirent.h>
28#include <unistd.h>
29#include <stdlib.h>
30#include <sys/types.h>
31#include <sys/stat.h>
32
33/*
34 * Note: run this on an a.out kernel (use elftoaout for it),
35 * as PROM looks for a.out image only.
36 */
37
/*
 * Load a big-endian 16-bit value from p.
 *
 * The bytes must be masked through unsigned char before combining:
 * on hosts where plain char is signed, a byte >= 0x80 would be
 * sign-extended and the OR would smear 1-bits into the high byte.
 */
unsigned short ld2(char *p)
{
	return ((unsigned char)p[0] << 8) | (unsigned char)p[1];
}
42
/*
 * Load a big-endian 32-bit value from p.
 *
 * Each byte is masked through unsigned char so a signed char host
 * cannot sign-extend bytes >= 0x80 into the OR.  The top byte is
 * additionally widened to unsigned int before the << 24 so a value
 * >= 0x80 is never shifted into the sign bit of a plain int.
 */
unsigned int ld4(char *p)
{
	return ((unsigned int)(unsigned char)p[0] << 24) |
	       ((unsigned int)(unsigned char)p[1] << 16) |
	       ((unsigned char)p[2] << 8) |
	       (unsigned char)p[3];
}
47
/* Store x into p in big-endian byte order (most significant byte
 * first).  Inverse of ld4(). */
void st4(char *p, unsigned int x)
{
	int idx;

	for (idx = 3; idx >= 0; idx--) {
		p[idx] = x & 0xff;
		x >>= 8;
	}
}
55
/* Print usage information to stderr and terminate with failure.
 * fs_img.gz is an image of the initial ramdisk. */
void usage(void)
{
	fputs("Usage: piggyback vmlinux.aout System.map fs_img.gz\n"
	      "\tKernel image will be modified in place.\n", stderr);
	exit(1);
}
63
/* Report the last system error (prefixed with str) and exit with
 * failure status. */
void die(char *str)
{
	perror(str);
	exit(1);
}
69
/*
 * Piggyback an initial-ramdisk image onto an a.out (or ELF-wrapped
 * a.out) kernel image in place:
 *   argv[1] = kernel image (modified in place)
 *   argv[2] = System.map  (source of the "start"/"end" symbols)
 *   argv[3] = gzipped ramdisk image (appended to the kernel file)
 * The "HdrS" header inside the kernel is patched with the ramdisk's
 * load address and size so the boot code can find it.
 */
int main(int argc,char **argv)
{
	static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
	unsigned char buffer[1024], *q, *r;
	unsigned int i, j, k, start, end, offset;
	FILE *map;
	struct stat s;
	int image, tail;

	if (argc != 4) usage();
	start = end = 0;
	/* Stat the ramdisk first: its size goes into the HdrS header. */
	if (stat (argv[3], &s) < 0) die (argv[3]);
	map = fopen (argv[2], "r");
	if (!map) die(argv[2]);
	/* Scan System.map for "start" (text) and "end" (absolute).  The
	 * symbol value column is 8 or 16 hex digits wide depending on
	 * whether the map came from a 32- or 64-bit toolchain, hence the
	 * two offsets tried per line.  buffer is unsigned char but fgets/
	 * strcmp take char * -- relies on the implicit conversion. */
	while (fgets (buffer, 1024, map)) {
		if (!strcmp (buffer + 8, " T start\n") || !strcmp (buffer + 16, " T start\n"))
			start = strtoul (buffer, NULL, 16);
		else if (!strcmp (buffer + 8, " A end\n") || !strcmp (buffer + 16, " A end\n"))
			end = strtoul (buffer, NULL, 16);
	}
	fclose (map);
	if (!start || !end) {
		fprintf (stderr, "Could not determine start and end from System.map\n");
		exit(1);
	}
	if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);
	if (read(image,buffer,512) != 512) die(argv[1]);
	if (memcmp (buffer, "\177ELF", 4) == 0) {
		/* ELF wrapper: walk e_phoff (offset 28) to the program
		 * header and compute the file offset of the a.out image
		 * from e_entry (offset 24), p_vaddr and p_offset.
		 * NOTE(review): assumes 32-bit big-endian ELF layout --
		 * offsets match Elf32_Ehdr/Elf32_Phdr only. */
		q = buffer + ld4(buffer + 28);
		i = ld4(q + 4) + ld4(buffer + 24) - ld4(q + 8);
		if (lseek(image,i,0) < 0) die("lseek");
		if (read(image,buffer,512) != 512) die(argv[1]);
		j = 0;
	} else if (memcmp(buffer, aout_magic, 4) == 0) {
		i = j = 32;
	} else {
		fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
		exit(1);
	}
	/* k = file offset of the a.out image; advance i past the a.out
	 * header (text size field at j+2, in 32-bit words) minus the 512
	 * bytes already consumed. */
	k = i;
	i += (ld2(buffer + j + 2)<<2) - 512;
	if (lseek(image,i,0) < 0) die("lseek");
	if (read(image,buffer,1024) != 1024) die(argv[1]);
	/* Locate the "HdrS" signature on a 4-byte boundary within the
	 * first 512 bytes of this window. */
	for (q = buffer, r = q + 512; q < r; q += 4) {
		if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
			break;
	}
	if (q == r) {
		fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
		exit(1);
	}
	offset = i + (q - buffer) + 10;
	if (lseek(image, offset, 0) < 0) die ("lseek");

	/* Build the 16-byte header patch: flags, magic 0x01000000, the
	 * page-aligned address just past the kernel "end" symbol (where
	 * the ramdisk will live), and the ramdisk size.  Only bytes 2..15
	 * are written back (the write below skips the first two). */
	st4(buffer, 0);
	st4(buffer + 4, 0x01000000);
	st4(buffer + 8, (end + 32 + 4095) & ~4095);
	st4(buffer + 12, s.st_size);

	if (write(image,buffer+2,14) != 14) die (argv[1]);
	/* Seek to the file position matching the chosen load address and
	 * append the ramdisk there. */
	if (lseek(image, k - start + ((end + 32 + 4095) & ~4095), 0) < 0) die ("lseek");
	if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
	/* NOTE(review): i is unsigned int but read() returns ssize_t; a
	 * read error (-1) would wrap to a huge positive value instead of
	 * terminating this loop -- verify before relying on it. */
	while ((i = read (tail,buffer,1024)) > 0)
		if (write(image,buffer,i) != i) die (argv[1]);
	if (close(image) < 0) die("close");
	if (close(tail) < 0) die("close");
	return 0;
}
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
new file mode 100644
index 000000000000..a69856263009
--- /dev/null
+++ b/arch/sparc/defconfig
@@ -0,0 +1,644 @@
1#
2# Automatically generated make config: don't edit
3#
4CONFIG_MMU=y
5CONFIG_UID16=y
6CONFIG_HIGHMEM=y
7CONFIG_GENERIC_ISA_DMA=y
8
9#
10# Code maturity level options
11#
12CONFIG_EXPERIMENTAL=y
13CONFIG_CLEAN_COMPILE=y
14CONFIG_STANDALONE=y
15CONFIG_BROKEN_ON_SMP=y
16
17#
18# General setup
19#
20CONFIG_SWAP=y
21CONFIG_SYSVIPC=y
22CONFIG_POSIX_MQUEUE=y
23# CONFIG_BSD_PROCESS_ACCT is not set
24CONFIG_SYSCTL=y
25# CONFIG_AUDIT is not set
26CONFIG_LOG_BUF_SHIFT=14
27# CONFIG_HOTPLUG is not set
28# CONFIG_IKCONFIG is not set
29# CONFIG_EMBEDDED is not set
30CONFIG_KALLSYMS=y
31# CONFIG_KALLSYMS_ALL is not set
32CONFIG_FUTEX=y
33CONFIG_EPOLL=y
34CONFIG_IOSCHED_NOOP=y
35CONFIG_IOSCHED_AS=y
36CONFIG_IOSCHED_DEADLINE=y
37CONFIG_IOSCHED_CFQ=y
38# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
39
40#
41# Loadable module support
42#
43CONFIG_MODULES=y
44CONFIG_MODULE_UNLOAD=y
45# CONFIG_MODULE_FORCE_UNLOAD is not set
46CONFIG_OBSOLETE_MODPARM=y
47# CONFIG_MODVERSIONS is not set
48CONFIG_KMOD=y
49
50#
51# General setup
52#
53CONFIG_VT=y
54CONFIG_VT_CONSOLE=y
55CONFIG_HW_CONSOLE=y
56# CONFIG_SMP is not set
57CONFIG_SPARC32=y
58CONFIG_SBUS=y
59CONFIG_SBUSCHAR=y
60CONFIG_SERIAL_CONSOLE=y
61CONFIG_SUN_AUXIO=y
62CONFIG_SUN_IO=y
63CONFIG_RWSEM_GENERIC_SPINLOCK=y
64CONFIG_SUN_PM=y
65# CONFIG_SUN4 is not set
66CONFIG_PCI=y
67# CONFIG_PCI_LEGACY_PROC is not set
68# CONFIG_PCI_NAMES is not set
69CONFIG_SUN_OPENPROMFS=m
70CONFIG_BINFMT_ELF=y
71CONFIG_BINFMT_AOUT=y
72CONFIG_BINFMT_MISC=m
73CONFIG_SUNOS_EMUL=y
74
75#
76# Parallel port support
77#
78# CONFIG_PARPORT is not set
79
80#
81# Generic Driver Options
82#
83# CONFIG_DEBUG_DRIVER is not set
84
85#
86# Graphics support
87#
88# CONFIG_FB is not set
89
90#
91# Console display driver support
92#
93# CONFIG_MDA_CONSOLE is not set
94# CONFIG_PROM_CONSOLE is not set
95CONFIG_DUMMY_CONSOLE=y
96
97#
98# Memory Technology Devices (MTD)
99#
100# CONFIG_MTD is not set
101
102#
103# Serial drivers
104#
105# CONFIG_SERIAL_8250 is not set
106
107#
108# Non-8250 serial port support
109#
110CONFIG_SERIAL_SUNCORE=y
111CONFIG_SERIAL_SUNZILOG=y
112CONFIG_SERIAL_SUNZILOG_CONSOLE=y
113CONFIG_SERIAL_SUNSU=y
114CONFIG_SERIAL_SUNSU_CONSOLE=y
115# CONFIG_SERIAL_SUNSAB is not set
116CONFIG_SERIAL_CORE=y
117CONFIG_SERIAL_CORE_CONSOLE=y
118
119#
120# Misc Linux/SPARC drivers
121#
122CONFIG_SUN_OPENPROMIO=m
123CONFIG_SUN_MOSTEK_RTC=m
124# CONFIG_SUN_BPP is not set
125# CONFIG_SUN_VIDEOPIX is not set
126# CONFIG_SUN_AURORA is not set
127# CONFIG_TADPOLE_TS102_UCTRL is not set
128# CONFIG_SUN_JSFLASH is not set
129CONFIG_APM_RTC_IS_GMT=y
130CONFIG_RTC=m
131
132#
133# Block devices
134#
135# CONFIG_BLK_DEV_FD is not set
136# CONFIG_BLK_CPQ_DA is not set
137# CONFIG_BLK_CPQ_CISS_DA is not set
138# CONFIG_BLK_DEV_DAC960 is not set
139# CONFIG_BLK_DEV_UMEM is not set
140CONFIG_BLK_DEV_LOOP=m
141CONFIG_BLK_DEV_CRYPTOLOOP=m
142# CONFIG_BLK_DEV_NBD is not set
143# CONFIG_BLK_DEV_CARMEL is not set
144CONFIG_BLK_DEV_RAM=y
145CONFIG_BLK_DEV_RAM_SIZE=4096
146CONFIG_BLK_DEV_INITRD=y
147
148#
149# ATA/ATAPI/MFM/RLL support
150#
151# CONFIG_IDE is not set
152
153#
154# ISDN subsystem
155#
156# CONFIG_ISDN is not set
157
158#
159# SCSI device support
160#
161CONFIG_SCSI=y
162CONFIG_SCSI_PROC_FS=y
163
164#
165# SCSI support type (disk, tape, CD-ROM)
166#
167CONFIG_BLK_DEV_SD=y
168# CONFIG_CHR_DEV_ST is not set
169# CONFIG_CHR_DEV_OSST is not set
170CONFIG_BLK_DEV_SR=m
171# CONFIG_BLK_DEV_SR_VENDOR is not set
172CONFIG_CHR_DEV_SG=m
173
174#
175# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
176#
177# CONFIG_SCSI_MULTI_LUN is not set
178# CONFIG_SCSI_CONSTANTS is not set
179# CONFIG_SCSI_LOGGING is not set
180
181#
182# SCSI Transport Attributes
183#
184CONFIG_SCSI_SPI_ATTRS=m
185# CONFIG_SCSI_FC_ATTRS is not set
186
187#
188# SCSI low-level drivers
189#
190# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
191# CONFIG_SCSI_ACARD is not set
192# CONFIG_SCSI_AACRAID is not set
193# CONFIG_SCSI_AIC7XXX is not set
194# CONFIG_SCSI_AIC7XXX_OLD is not set
195# CONFIG_SCSI_AIC79XX is not set
196# CONFIG_SCSI_DPT_I2O is not set
197# CONFIG_SCSI_ADVANSYS is not set
198# CONFIG_SCSI_MEGARAID is not set
199# CONFIG_SCSI_SATA is not set
200# CONFIG_SCSI_BUSLOGIC is not set
201# CONFIG_SCSI_DMX3191D is not set
202# CONFIG_SCSI_EATA is not set
203# CONFIG_SCSI_EATA_PIO is not set
204# CONFIG_SCSI_FUTURE_DOMAIN is not set
205# CONFIG_SCSI_GDTH is not set
206# CONFIG_SCSI_IPS is not set
207# CONFIG_SCSI_INIA100 is not set
208# CONFIG_SCSI_SYM53C8XX_2 is not set
209# CONFIG_SCSI_IPR is not set
210# CONFIG_SCSI_QLOGIC_ISP is not set
211# CONFIG_SCSI_QLOGIC_FC is not set
212# CONFIG_SCSI_QLOGIC_1280 is not set
213CONFIG_SCSI_QLOGICPTI=m
214CONFIG_SCSI_QLA2XXX=y
215# CONFIG_SCSI_QLA21XX is not set
216# CONFIG_SCSI_QLA22XX is not set
217# CONFIG_SCSI_QLA2300 is not set
218# CONFIG_SCSI_QLA2322 is not set
219# CONFIG_SCSI_QLA6312 is not set
220# CONFIG_SCSI_QLA6322 is not set
221# CONFIG_SCSI_DC395x is not set
222# CONFIG_SCSI_DC390T is not set
223# CONFIG_SCSI_NSP32 is not set
224# CONFIG_SCSI_DEBUG is not set
225CONFIG_SCSI_SUNESP=y
226
227#
228# Fibre Channel support
229#
230# CONFIG_FC4 is not set
231
232#
233# Multi-device support (RAID and LVM)
234#
235# CONFIG_MD is not set
236
237#
238# Networking support
239#
240CONFIG_NET=y
241
242#
243# Networking options
244#
245CONFIG_PACKET=y
246# CONFIG_PACKET_MMAP is not set
247CONFIG_NETLINK_DEV=y
248CONFIG_UNIX=y
249CONFIG_NET_KEY=m
250CONFIG_INET=y
251# CONFIG_IP_MULTICAST is not set
252# CONFIG_IP_ADVANCED_ROUTER is not set
253CONFIG_IP_PNP=y
254CONFIG_IP_PNP_DHCP=y
255# CONFIG_IP_PNP_BOOTP is not set
256# CONFIG_IP_PNP_RARP is not set
257# CONFIG_NET_IPIP is not set
258# CONFIG_NET_IPGRE is not set
259# CONFIG_ARPD is not set
260# CONFIG_SYN_COOKIES is not set
261CONFIG_INET_AH=y
262CONFIG_INET_ESP=y
263CONFIG_INET_IPCOMP=y
264CONFIG_IPV6=m
265CONFIG_IPV6_PRIVACY=y
266CONFIG_INET6_AH=m
267CONFIG_INET6_ESP=m
268CONFIG_INET6_IPCOMP=m
269CONFIG_IPV6_TUNNEL=m
270# CONFIG_NETFILTER is not set
271CONFIG_XFRM=y
272CONFIG_XFRM_USER=m
273
274#
275# SCTP Configuration (EXPERIMENTAL)
276#
277CONFIG_IP_SCTP=m
278# CONFIG_SCTP_DBG_MSG is not set
279CONFIG_SCTP_DBG_OBJCNT=y
280# CONFIG_SCTP_HMAC_NONE is not set
281# CONFIG_SCTP_HMAC_SHA1 is not set
282CONFIG_SCTP_HMAC_MD5=y
283# CONFIG_ATM is not set
284# CONFIG_BRIDGE is not set
285# CONFIG_VLAN_8021Q is not set
286# CONFIG_DECNET is not set
287# CONFIG_LLC2 is not set
288# CONFIG_IPX is not set
289# CONFIG_ATALK is not set
290# CONFIG_X25 is not set
291# CONFIG_LAPB is not set
292# CONFIG_NET_DIVERT is not set
293# CONFIG_ECONET is not set
294# CONFIG_WAN_ROUTER is not set
295# CONFIG_NET_HW_FLOWCONTROL is not set
296
297#
298# QoS and/or fair queueing
299#
300# CONFIG_NET_SCHED is not set
301
302#
303# Network testing
304#
305CONFIG_NET_PKTGEN=m
306# CONFIG_NETPOLL is not set
307# CONFIG_NET_POLL_CONTROLLER is not set
308# CONFIG_HAMRADIO is not set
309# CONFIG_IRDA is not set
310# CONFIG_BT is not set
311CONFIG_NETDEVICES=y
312CONFIG_DUMMY=m
313# CONFIG_BONDING is not set
314# CONFIG_EQUALIZER is not set
315CONFIG_TUN=m
316# CONFIG_ETHERTAP is not set
317
318#
319# ARCnet devices
320#
321# CONFIG_ARCNET is not set
322
323#
324# Ethernet (10 or 100Mbit)
325#
326CONFIG_NET_ETHERNET=y
327CONFIG_MII=m
328CONFIG_SUNLANCE=y
329CONFIG_HAPPYMEAL=m
330CONFIG_SUNBMAC=m
331CONFIG_SUNQE=m
332# CONFIG_SUNGEM is not set
333# CONFIG_NET_VENDOR_3COM is not set
334
335#
336# Tulip family network device support
337#
338# CONFIG_NET_TULIP is not set
339# CONFIG_HP100 is not set
340# CONFIG_NET_PCI is not set
341
342#
343# Ethernet (1000 Mbit)
344#
345# CONFIG_ACENIC is not set
346# CONFIG_DL2K is not set
347# CONFIG_E1000 is not set
348# CONFIG_MYRI_SBUS is not set
349# CONFIG_NS83820 is not set
350# CONFIG_HAMACHI is not set
351# CONFIG_YELLOWFIN is not set
352# CONFIG_R8169 is not set
353# CONFIG_SK98LIN is not set
354# CONFIG_TIGON3 is not set
355
356#
357# Ethernet (10000 Mbit)
358#
359# CONFIG_IXGB is not set
360# CONFIG_S2IO is not set
361
362#
363# Token Ring devices
364#
365# CONFIG_TR is not set
366
367#
368# Wireless LAN (non-hamradio)
369#
370# CONFIG_NET_RADIO is not set
371
372#
373# Wan interfaces
374#
375# CONFIG_WAN is not set
376# CONFIG_FDDI is not set
377# CONFIG_HIPPI is not set
378# CONFIG_PPP is not set
379# CONFIG_SLIP is not set
380# CONFIG_NET_FC is not set
381# CONFIG_SHAPER is not set
382# CONFIG_NETCONSOLE is not set
383
384#
385# Unix98 PTY support
386#
387CONFIG_UNIX98_PTYS=y
388CONFIG_UNIX98_PTY_COUNT=256
389
390#
391# Input device support
392#
393CONFIG_INPUT=y
394
395#
396# Userland interfaces
397#
398CONFIG_INPUT_MOUSEDEV=y
399CONFIG_INPUT_MOUSEDEV_PSAUX=y
400CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
401CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
402CONFIG_INPUT_JOYDEV=m
403# CONFIG_INPUT_TSDEV is not set
404CONFIG_INPUT_EVDEV=m
405CONFIG_INPUT_EVBUG=m
406
407#
408# Input I/O drivers
409#
410# CONFIG_GAMEPORT is not set
411CONFIG_SOUND_GAMEPORT=y
412CONFIG_SERIO=m
413# CONFIG_SERIO_I8042 is not set
414CONFIG_SERIO_SERPORT=m
415# CONFIG_SERIO_CT82C710 is not set
416# CONFIG_SERIO_PCIPS2 is not set
417
418#
419# Input Device Drivers
420#
421CONFIG_INPUT_KEYBOARD=y
422CONFIG_KEYBOARD_ATKBD=m
423CONFIG_KEYBOARD_SUNKBD=m
424# CONFIG_KEYBOARD_LKKBD is not set
425# CONFIG_KEYBOARD_XTKBD is not set
426# CONFIG_KEYBOARD_NEWTON is not set
427CONFIG_INPUT_MOUSE=y
428CONFIG_MOUSE_PS2=m
429CONFIG_MOUSE_SERIAL=m
430# CONFIG_MOUSE_VSXXXAA is not set
431# CONFIG_INPUT_JOYSTICK is not set
432# CONFIG_INPUT_TOUCHSCREEN is not set
433# CONFIG_INPUT_MISC is not set
434
435#
436# File systems
437#
438CONFIG_EXT2_FS=y
439CONFIG_EXT2_FS_XATTR=y
440CONFIG_EXT2_FS_POSIX_ACL=y
441CONFIG_EXT2_FS_SECURITY=y
442# CONFIG_EXT3_FS is not set
443# CONFIG_JBD is not set
444CONFIG_FS_MBCACHE=y
445# CONFIG_REISERFS_FS is not set
446# CONFIG_JFS_FS is not set
447CONFIG_FS_POSIX_ACL=y
448CONFIG_XFS_FS=m
449CONFIG_XFS_RT=y
450CONFIG_XFS_QUOTA=y
451CONFIG_XFS_SECURITY=y
452CONFIG_XFS_POSIX_ACL=y
453# CONFIG_MINIX_FS is not set
454CONFIG_ROMFS_FS=m
455# CONFIG_QUOTA is not set
456CONFIG_QUOTACTL=y
457CONFIG_AUTOFS_FS=m
458CONFIG_AUTOFS4_FS=m
459
460#
461# CD-ROM/DVD Filesystems
462#
463CONFIG_ISO9660_FS=m
464# CONFIG_JOLIET is not set
465# CONFIG_ZISOFS is not set
466# CONFIG_UDF_FS is not set
467
468#
469# DOS/FAT/NT Filesystems
470#
471# CONFIG_FAT_FS is not set
472# CONFIG_NTFS_FS is not set
473
474#
475# Pseudo filesystems
476#
477CONFIG_PROC_FS=y
478CONFIG_PROC_KCORE=y
479CONFIG_SYSFS=y
480# CONFIG_DEVFS_FS is not set
481CONFIG_DEVPTS_FS_XATTR=y
482# CONFIG_DEVPTS_FS_SECURITY is not set
483# CONFIG_TMPFS is not set
484# CONFIG_HUGETLB_PAGE is not set
485CONFIG_RAMFS=y
486
487#
488# Miscellaneous filesystems
489#
490# CONFIG_ADFS_FS is not set
491# CONFIG_AFFS_FS is not set
492# CONFIG_HFS_FS is not set
493# CONFIG_HFSPLUS_FS is not set
494CONFIG_BEFS_FS=m
495# CONFIG_BEFS_DEBUG is not set
496# CONFIG_BFS_FS is not set
497# CONFIG_EFS_FS is not set
498# CONFIG_CRAMFS is not set
499# CONFIG_VXFS_FS is not set
500# CONFIG_HPFS_FS is not set
501# CONFIG_QNX4FS_FS is not set
502# CONFIG_SYSV_FS is not set
503# CONFIG_UFS_FS is not set
504
505#
506# Network File Systems
507#
508CONFIG_NFS_FS=y
509# CONFIG_NFS_V3 is not set
510# CONFIG_NFS_V4 is not set
511# CONFIG_NFS_DIRECTIO is not set
512# CONFIG_NFSD is not set
513CONFIG_ROOT_NFS=y
514CONFIG_LOCKD=y
515# CONFIG_EXPORTFS is not set
516CONFIG_SUNRPC=y
517CONFIG_SUNRPC_GSS=m
518CONFIG_RPCSEC_GSS_KRB5=m
519# CONFIG_SMB_FS is not set
520CONFIG_CIFS=m
521# CONFIG_CIFS_STATS is not set
522# CONFIG_NCP_FS is not set
523# CONFIG_CODA_FS is not set
524CONFIG_AFS_FS=m
525CONFIG_RXRPC=m
526
527#
528# Partition Types
529#
530# CONFIG_PARTITION_ADVANCED is not set
531CONFIG_MSDOS_PARTITION=y
532CONFIG_SUN_PARTITION=y
533
534#
535# Native Language Support
536#
537CONFIG_NLS=y
538CONFIG_NLS_DEFAULT="iso8859-1"
539# CONFIG_NLS_CODEPAGE_437 is not set
540# CONFIG_NLS_CODEPAGE_737 is not set
541# CONFIG_NLS_CODEPAGE_775 is not set
542# CONFIG_NLS_CODEPAGE_850 is not set
543# CONFIG_NLS_CODEPAGE_852 is not set
544# CONFIG_NLS_CODEPAGE_855 is not set
545# CONFIG_NLS_CODEPAGE_857 is not set
546# CONFIG_NLS_CODEPAGE_860 is not set
547# CONFIG_NLS_CODEPAGE_861 is not set
548# CONFIG_NLS_CODEPAGE_862 is not set
549# CONFIG_NLS_CODEPAGE_863 is not set
550# CONFIG_NLS_CODEPAGE_864 is not set
551# CONFIG_NLS_CODEPAGE_865 is not set
552# CONFIG_NLS_CODEPAGE_866 is not set
553# CONFIG_NLS_CODEPAGE_869 is not set
554# CONFIG_NLS_CODEPAGE_936 is not set
555# CONFIG_NLS_CODEPAGE_950 is not set
556# CONFIG_NLS_CODEPAGE_932 is not set
557# CONFIG_NLS_CODEPAGE_949 is not set
558# CONFIG_NLS_CODEPAGE_874 is not set
559# CONFIG_NLS_ISO8859_8 is not set
560# CONFIG_NLS_CODEPAGE_1250 is not set
561# CONFIG_NLS_CODEPAGE_1251 is not set
562# CONFIG_NLS_ISO8859_1 is not set
563# CONFIG_NLS_ISO8859_2 is not set
564# CONFIG_NLS_ISO8859_3 is not set
565# CONFIG_NLS_ISO8859_4 is not set
566# CONFIG_NLS_ISO8859_5 is not set
567# CONFIG_NLS_ISO8859_6 is not set
568# CONFIG_NLS_ISO8859_7 is not set
569# CONFIG_NLS_ISO8859_9 is not set
570# CONFIG_NLS_ISO8859_13 is not set
571# CONFIG_NLS_ISO8859_14 is not set
572# CONFIG_NLS_ISO8859_15 is not set
573# CONFIG_NLS_KOI8_R is not set
574# CONFIG_NLS_KOI8_U is not set
575# CONFIG_NLS_UTF8 is not set
576
577#
578# Sound
579#
580# CONFIG_SOUND is not set
581
582#
583# USB support
584#
585# CONFIG_USB is not set
586
587#
588# USB Gadget Support
589#
590# CONFIG_USB_GADGET is not set
591
592#
593# Watchdog Cards
594#
595# CONFIG_WATCHDOG is not set
596
597#
598# Kernel hacking
599#
600CONFIG_DEBUG_KERNEL=y
601# CONFIG_DEBUG_STACK_USAGE is not set
602# CONFIG_DEBUG_SLAB is not set
603CONFIG_MAGIC_SYSRQ=y
604# CONFIG_DEBUG_SPINLOCK is not set
605# CONFIG_DEBUG_HIGHMEM is not set
606# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
607# CONFIG_DEBUG_BUGVERBOSE is not set
608
609#
610# Security options
611#
612# CONFIG_SECURITY is not set
613
614#
615# Cryptographic options
616#
617CONFIG_CRYPTO=y
618CONFIG_CRYPTO_HMAC=y
619CONFIG_CRYPTO_NULL=m
620CONFIG_CRYPTO_MD4=y
621CONFIG_CRYPTO_MD5=y
622CONFIG_CRYPTO_SHA1=y
623CONFIG_CRYPTO_SHA256=m
624CONFIG_CRYPTO_SHA512=m
625CONFIG_CRYPTO_DES=y
626CONFIG_CRYPTO_BLOWFISH=m
627CONFIG_CRYPTO_TWOFISH=m
628CONFIG_CRYPTO_SERPENT=m
629CONFIG_CRYPTO_AES=m
630CONFIG_CRYPTO_CAST5=m
631CONFIG_CRYPTO_CAST6=m
632CONFIG_CRYPTO_ARC4=m
633CONFIG_CRYPTO_DEFLATE=y
634CONFIG_CRYPTO_MICHAEL_MIC=m
635CONFIG_CRYPTO_CRC32C=m
636# CONFIG_CRYPTO_TEST is not set
637
638#
639# Library routines
640#
641CONFIG_CRC32=y
642CONFIG_LIBCRC32C=m
643CONFIG_ZLIB_INFLATE=y
644CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
new file mode 100644
index 000000000000..3d22ba2af01c
--- /dev/null
+++ b/arch/sparc/kernel/Makefile
@@ -0,0 +1,27 @@
# $Id: Makefile,v 1.62 2000/12/15 00:41:17 davem Exp $
# Makefile for the linux kernel.
#

# Objects built unconditionally but linked specially (not part of obj-y).
extra-y := head.o init_task.o vmlinux.lds

# Assemble .S files in strict ANSI mode.
EXTRA_AFLAGS := -ansi

# Interrupt handling: common code plus one handler file per sun4 variant.
IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o sun4d_irq.o
obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
	    process.o signal.o ioport.o setup.o idprom.o \
	    sys_sparc.o sunos_asm.o systbls.o \
	    time.o windows.o cpu.o devices.o sclow.o \
	    tadpole.o tick14.o ptrace.o sys_solaris.o \
	    unaligned.o muldiv.o semaphore.o

# Optional subsystems selected by Kconfig symbols.
obj-$(CONFIG_PCI) += pcic.o
obj-$(CONFIG_SUN4) += sun4setup.o
obj-$(CONFIG_SMP) += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
obj-$(CONFIG_SUN_AUXIO) += auxio.o
obj-$(CONFIG_PCI) += ebus.o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o

# SunOS binary emulation (bool option, so plain obj-y inside ifdef).
ifdef CONFIG_SUNOS_EMUL
obj-y += sys_sunos.o sunos_ioctl.o
endif
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
new file mode 100644
index 000000000000..406dd94afb45
--- /dev/null
+++ b/arch/sparc/kernel/apc.c
@@ -0,0 +1,186 @@
1/* apc - Driver implementation for power management functions
2 * of Aurora Personality Chip (APC) on SPARCstation-4/5 and
3 * derivatives.
4 *
5 * Copyright (c) 2002 Eric Brower (ebrower@usa.net)
6 */
7
8#include <linux/kernel.h>
9#include <linux/fs.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/miscdevice.h>
13#include <linux/pm.h>
14
15#include <asm/io.h>
16#include <asm/sbus.h>
17#include <asm/oplib.h>
18#include <asm/uaccess.h>
19#include <asm/auxio.h>
20#include <asm/apc.h>
21
22/* Debugging
23 *
24 * #define APC_DEBUG_LED
25 */
26
27#define APC_MINOR MISC_DYNAMIC_MINOR
28#define APC_OBPNAME "power-management"
29#define APC_DEVNAME "apc"
30
31volatile static u8 __iomem *regs;
32static int apc_regsize;
33static int apc_no_idle __initdata = 0;
34
35#define apc_readb(offs) (sbus_readb(regs+offs))
36#define apc_writeb(val, offs) (sbus_writeb(val, regs+offs))
37
38/* Specify "apc=noidle" on the kernel command line to
39 * disable APC CPU standby support. Certain prototype
40 * systems (SPARCstation-Fox) do not play well with APC
41 * CPU idle, so disable this if your system has APC and
42 * crashes randomly.
43 */
44static int __init apc_setup(char *str)
45{
46 if(!strncmp(str, "noidle", strlen("noidle"))) {
47 apc_no_idle = 1;
48 return 1;
49 }
50 return 0;
51}
52__setup("apc=", apc_setup);
53
/*
 * CPU idle callback function
 * See .../arch/sparc/kernel/process.c
 *
 * Read-modify-writes the APC idle register to set APC_IDLE_ON, which
 * puts the CPU into standby until the next wakeup event.  With
 * APC_DEBUG_LED defined, the auxio LED bit is toggled around the idle
 * window as a visual marker (presumably LED off while idle -- confirm
 * against set_auxio()'s (bits_on, bits_off) argument convention).
 */
void apc_swift_idle(void)
{
#ifdef APC_DEBUG_LED
	set_auxio(0x00, AUXIO_LED);
#endif

	apc_writeb(apc_readb(APC_IDLE_REG) | APC_IDLE_ON, APC_IDLE_REG);

#ifdef APC_DEBUG_LED
	set_auxio(AUXIO_LED, 0x00);
#endif
}
70
/* Unmap the APC register window that apc_probe() ioremapped. */
static inline void apc_free(void)
{
	sbus_iounmap(regs, apc_regsize);
}
75
/* open() handler for /dev/apc: no per-open state, always succeeds. */
static int apc_open(struct inode *inode, struct file *f)
{
	(void)inode;
	(void)f;
	return 0;
}
80
/* release() handler for /dev/apc: nothing to tear down. */
static int apc_release(struct inode *inode, struct file *f)
{
	(void)inode;
	(void)f;
	return 0;
}
85
86static int apc_ioctl(struct inode *inode, struct file *f,
87 unsigned int cmd, unsigned long __arg)
88{
89 __u8 inarg, __user *arg;
90
91 arg = (__u8 __user *) __arg;
92 switch (cmd) {
93 case APCIOCGFANCTL:
94 if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg))
95 return -EFAULT;
96 break;
97
98 case APCIOCGCPWR:
99 if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg))
100 return -EFAULT;
101 break;
102
103 case APCIOCGBPORT:
104 if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg))
105 return -EFAULT;
106 break;
107
108 case APCIOCSFANCTL:
109 if (get_user(inarg, arg))
110 return -EFAULT;
111 apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG);
112 break;
113 case APCIOCSCPWR:
114 if (get_user(inarg, arg))
115 return -EFAULT;
116 apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG);
117 break;
118 case APCIOCSBPORT:
119 if (get_user(inarg, arg))
120 return -EFAULT;
121 apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG);
122 break;
123 default:
124 return -EINVAL;
125 };
126
127 return 0;
128}
129
130static struct file_operations apc_fops = {
131 .ioctl = apc_ioctl,
132 .open = apc_open,
133 .release = apc_release,
134};
135
136static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
137
/*
 * Probe for the APC node on the SBus, map its registers, register the
 * misc device, and -- unless "apc=noidle" was given -- install
 * apc_swift_idle() as the pm_idle handler.
 *
 * Returns 0 on success, -ENODEV if the node is absent or any setup
 * step fails.
 */
static int __init apc_probe(void)
{
	struct sbus_bus *sbus = NULL;
	struct sbus_dev *sdev = NULL;
	int iTmp = 0;

	/* Scan every device on every SBus for the "power-management"
	 * node; jump out with sdev pointing at the match. */
	for_each_sbus(sbus) {
		for_each_sbusdev(sdev, sbus) {
			if (!strcmp(sdev->prom_name, APC_OBPNAME)) {
				goto sbus_done;
			}
		}
	}

sbus_done:
	/* No match: relies on for_each_sbusdev() leaving sdev NULL when
	 * the loops run to completion -- TODO confirm the macro's exit
	 * state. */
	if (!sdev) {
		return -ENODEV;
	}

	apc_regsize = sdev->reg_addrs[0].reg_size;
	regs = sbus_ioremap(&sdev->resource[0], 0,
				   apc_regsize, APC_OBPNAME);
	if(!regs) {
		printk(KERN_ERR "%s: unable to map registers\n", APC_DEVNAME);
		return -ENODEV;
	}

	iTmp = misc_register(&apc_miscdev);
	if (iTmp != 0) {
		/* Undo the ioremap before bailing out. */
		printk(KERN_ERR "%s: unable to register device\n", APC_DEVNAME);
		apc_free();
		return -ENODEV;
	}

	/* Assign power management IDLE handler */
	if(!apc_no_idle)
		pm_idle = apc_swift_idle;

	printk(KERN_INFO "%s: power management initialized%s\n",
		APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
	return 0;
}
180
181/* This driver is not critical to the boot process
182 * and is easiest to ioremap when SBus is already
183 * initialized, so we install ourselves thusly:
184 */
185__initcall(apc_probe);
186
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1f55231f07de
--- /dev/null
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -0,0 +1,45 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * On sparc, thread_info data is static and TI_XXX offsets are computed by hand.
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15// #include <linux/mm.h>
16
17#define DEFINE(sym, val) \
18 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
19
20#define BLANK() asm volatile("\n->" : : )
21
/*
 * Emit "->SYM value" markers into the generated assembly; the build
 * machinery extracts them to produce asm-offsets.h for assembly code.
 * Only ever compiled to .s, never linked -- the return value is unused.
 */
int foo(void)
{
	/* Offset of the thread_struct inside task_struct. */
	DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
	BLANK();
	/* XXX This is the stuff for sclow.S, kill it. */
	DEFINE(AOFF_task_pid, offsetof(struct task_struct, pid));
	DEFINE(AOFF_task_uid, offsetof(struct task_struct, uid));
	DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid));
	DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid));
	DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid));
	/* DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); */
	/* Sizes of the credential fields, for assembly load/store width. */
	DEFINE(ASIZ_task_uid, sizeof(current->uid));
	DEFINE(ASIZ_task_gid, sizeof(current->gid));
	DEFINE(ASIZ_task_euid, sizeof(current->euid));
	DEFINE(ASIZ_task_egid, sizeof(current->egid));
	BLANK();
	/* Saved kernel PSR at fork time, consumed by the trap entry code. */
	DEFINE(AOFF_thread_fork_kpsr,
			offsetof(struct thread_struct, fork_kpsr));
	BLANK();
	DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));

	/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
	return 0;
}
diff --git a/arch/sparc/kernel/auxio.c b/arch/sparc/kernel/auxio.c
new file mode 100644
index 000000000000..d3b3648362c0
--- /dev/null
+++ b/arch/sparc/kernel/auxio.c
@@ -0,0 +1,138 @@
1/* auxio.c: Probing for the Sparc AUXIO register at boot time.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/stddef.h>
7#include <linux/init.h>
8#include <linux/config.h>
9#include <linux/spinlock.h>
10#include <asm/oplib.h>
11#include <asm/io.h>
12#include <asm/auxio.h>
13#include <asm/string.h> /* memset(), Linux has no bzero() */
14
15/* Probe and map in the Auxiliary I/O register */
16
17/* auxio_register is not static because it is referenced
18 * in entry.S::floppy_tdone
19 */
20void __iomem *auxio_register = NULL;
21static DEFINE_SPINLOCK(auxio_lock);
22
/* Locate the AUXIO node in the PROM device tree and map its single
 * register.  On success the mapped (byte-aligned) address is left in
 * the global auxio_register and the AUXIO_LED bit is set.
 */
void __init auxio_probe(void)
{
	int node, auxio_nd;
	struct linux_prom_registers auxregs[1];
	struct resource r;

	/* sun4 and sun4d have no AUXIO register; leave auxio_register NULL. */
	switch (sparc_cpu_model) {
	case sun4d:
	case sun4:
		return;
	default:
		break;
	}
	node = prom_getchild(prom_root_node);
	auxio_nd = prom_searchsiblings(node, "auxiliary-io");
	if(!auxio_nd) {
		/* Not at the root: look under the "obio" bus node instead. */
		node = prom_searchsiblings(node, "obio");
		node = prom_getchild(node);
		auxio_nd = prom_searchsiblings(node, "auxio");
		if(!auxio_nd) {
#ifdef CONFIG_PCI
			/* There may be auxio on Ebus */
			return;
#else
			if(prom_searchsiblings(node, "leds")) {
				/* VME chassis sun4m machine, no auxio exists. */
				return;
			}
			prom_printf("Cannot find auxio node, cannot continue...\n");
			prom_halt();
#endif
		}
	}
	if(prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0)
		return;
	prom_apply_obio_ranges(auxregs, 0x1);
	/* Map the register both read and write */
	r.flags = auxregs[0].which_io & 0xF;
	r.start = auxregs[0].phys_addr;
	r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
	auxio_register = sbus_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
	/* Fix the address on sun4m and sun4c.
	 * The register sits at byte offset 3 within the mapped word there. */
	if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
	   sparc_cpu_model == sun4c)
		auxio_register += (3 - ((unsigned long)auxio_register & 3));

	/* Probe succeeded: set the LED bit. */
	set_auxio(AUXIO_LED, 0);
}
71
72unsigned char get_auxio(void)
73{
74 if(auxio_register)
75 return sbus_readb(auxio_register);
76 return 0;
77}
78
79void set_auxio(unsigned char bits_on, unsigned char bits_off)
80{
81 unsigned char regval;
82 unsigned long flags;
83 spin_lock_irqsave(&auxio_lock, flags);
84 switch(sparc_cpu_model) {
85 case sun4c:
86 regval = sbus_readb(auxio_register);
87 sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN,
88 auxio_register);
89 break;
90 case sun4m:
91 if(!auxio_register)
92 break; /* VME chassic sun4m, no auxio. */
93 regval = sbus_readb(auxio_register);
94 sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M,
95 auxio_register);
96 break;
97 case sun4d:
98 break;
99 default:
100 panic("Can't set AUXIO register on this machine.");
101 };
102 spin_unlock_irqrestore(&auxio_lock, flags);
103}
104
105
106/* sun4m power control register (AUXIO2) */
107
108volatile unsigned char * auxio_power_register = NULL;
109
/* Look for the sun4m power control node ("power" under "obio") and map
 * its register into auxio_power_register so the machine can be powered
 * off in software.  Silently does nothing if the node is absent.
 */
void __init auxio_power_probe(void)
{
	struct linux_prom_registers regs;
	int node;
	struct resource r;

	/* Attempt to find the sun4m power control node. */
	node = prom_getchild(prom_root_node);
	node = prom_searchsiblings(node, "obio");
	node = prom_getchild(node);
	node = prom_searchsiblings(node, "power");
	if (node == 0 || node == -1)
		return;

	/* Map the power control register. */
	if (prom_getproperty(node, "reg", (char *)&regs, sizeof(regs)) <= 0)
		return;
	prom_apply_obio_ranges(&regs, 1);
	memset(&r, 0, sizeof(r));
	r.flags = regs.which_io & 0xF;
	r.start = regs.phys_addr;
	r.end = regs.phys_addr + regs.reg_size - 1;
	auxio_power_register = (unsigned char *) sbus_ioremap(&r, 0,
	    regs.reg_size, "auxpower");

	/* Display a quick message on the console. */
	if (auxio_power_register)
		printk(KERN_INFO "Power off control detected.\n");
}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
new file mode 100644
index 000000000000..6a4ebc62193e
--- /dev/null
+++ b/arch/sparc/kernel/cpu.c
@@ -0,0 +1,168 @@
1/* cpu.c: Dinky routines to look for the kind of Sparc cpu
2 * we are on.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/smp.h>
11#include <linux/threads.h>
12#include <asm/oplib.h>
13#include <asm/page.h>
14#include <asm/head.h>
15#include <asm/psr.h>
16#include <asm/mbus.h>
17#include <asm/cpudata.h>
18
19DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
20
/* One row of the integer-unit identification table, keyed by the
 * implementation/version fields of the PSR. */
struct cpu_iu_info {
	int psr_impl;
	int psr_vers;
	char* cpu_name;   /* should be enough I hope... */
};

/* One row of the FPU identification table, keyed by the PSR
 * implementation field and the FSR version field. */
struct cpu_fp_info {
	int psr_impl;
	int fp_vers;
	char* fp_name;
};
32
/* In order to get the fpu type correct, you need to take the IDPROM's
 * machine type value into consideration too.  I will fix this.
 */
/* FPU lookup table: {psr_impl, fsr_vers, name}.  Scanned linearly by
 * cpu_probe(); first matching row wins.
 */
struct cpu_fp_info linux_sparc_fpu[] = {
	{ 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
	{ 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
	{ 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
	/* SparcStation SLC, SparcStation1 */
	{ 0, 3, "Weitek WTL3170/2"},
	/* SPARCstation-5 */
	{ 0, 4, "Lsi Logic/Meiko L64804 or compatible"},
	{ 0, 5, "reserved"},
	{ 0, 6, "reserved"},
	{ 0, 7, "No FPU"},
	{ 1, 0, "ROSS HyperSparc combined IU/FPU"},
	{ 1, 1, "Lsi Logic L64814"},
	{ 1, 2, "Texas Instruments TMS390-C602A"},
	{ 1, 3, "Cypress CY7C602 FPU"},
	{ 1, 4, "reserved"},
	{ 1, 5, "reserved"},
	{ 1, 6, "reserved"},
	{ 1, 7, "No FPU"},
	{ 2, 0, "BIT B5010 or B5110/20 or B5210"},
	{ 2, 1, "reserved"},
	{ 2, 2, "reserved"},
	{ 2, 3, "reserved"},
	{ 2, 4, "reserved"},
	{ 2, 5, "reserved"},
	{ 2, 6, "reserved"},
	{ 2, 7, "No FPU"},
	/* SuperSparc 50 module */
	{ 4, 0, "SuperSparc on-chip FPU"},
	/* SparcClassic */
	{ 4, 4, "TI MicroSparc on chip FPU"},
	{ 5, 0, "Matsushita MN10501"},
	{ 5, 1, "reserved"},
	{ 5, 2, "reserved"},
	{ 5, 3, "reserved"},
	{ 5, 4, "reserved"},
	{ 5, 5, "reserved"},
	{ 5, 6, "reserved"},
	{ 5, 7, "No FPU"},
	{ 9, 3, "Fujitsu or Weitek on-chip FPU"},
};

/* Number of rows in the FPU table. */
#define NSPARCFPU  (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
79
/* IU lookup table: {psr_impl, psr_vers, name}.  Scanned linearly by
 * cpu_probe(); first matching row wins.
 */
struct cpu_iu_info linux_sparc_chips[] = {
	/* Sun4/100, 4/200, SLC */
	{ 0, 0, "Fujitsu  MB86900/1A or LSI L64831 SparcKIT-40"},
	/* borned STP1012PGA */
	{ 0, 4, "Fujitsu  MB86904"},
	{ 0, 5, "Fujitsu TurboSparc MB86907"},
	/* SparcStation2, SparcServer 490 & 690 */
	{ 1, 0, "LSI Logic Corporation - L64811"},
	/* SparcStation2 */
	{ 1, 1, "Cypress/ROSS CY7C601"},
	/* Embedded controller */
	{ 1, 3, "Cypress/ROSS CY7C611"},
	/* Ross Technologies HyperSparc */
	{ 1, 0xf, "ROSS HyperSparc RT620"},
	{ 1, 0xe, "ROSS HyperSparc RT625 or RT626"},
	/* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
	/* Someone please write the code to support this beast! ;) */
	{ 2, 0, "Bipolar Integrated Technology - B5010"},
	{ 3, 0, "LSI Logic Corporation - unknown-type"},
	{ 4, 0, "Texas Instruments, Inc. - SuperSparc-(II)"},
	/* SparcClassic  --  borned STP1010TAB-50*/
	{ 4, 1, "Texas Instruments, Inc. - MicroSparc"},
	{ 4, 2, "Texas Instruments, Inc. - MicroSparc II"},
	{ 4, 3, "Texas Instruments, Inc. - SuperSparc 51"},
	{ 4, 4, "Texas Instruments, Inc. - SuperSparc 61"},
	{ 4, 5, "Texas Instruments, Inc. - unknown"},
	{ 5, 0, "Matsushita - MN10501"},
	{ 6, 0, "Philips Corporation - unknown"},
	{ 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
	/* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
	{ 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
	{ 9, 0, "Fujitsu or Weitek Power-UP"},
	{ 9, 1, "Fujitsu or Weitek Power-UP"},
	{ 9, 2, "Fujitsu or Weitek Power-UP"},
	{ 9, 3, "Fujitsu or Weitek Power-UP"},
	{ 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
	{ 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
	{ 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
	{ 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
	{ 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
	{ 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
};

/* Number of rows in the IU table. */
#define NSPARCCHIPS  (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
124
125char *sparc_cpu_type;
126char *sparc_fpu_type;
127
128unsigned int fsr_storage;
129
130void __init cpu_probe(void)
131{
132 int psr_impl, psr_vers, fpu_vers;
133 int i, psr;
134
135 psr_impl = ((get_psr()>>28)&0xf);
136 psr_vers = ((get_psr()>>24)&0xf);
137
138 psr = get_psr();
139 put_psr(psr | PSR_EF);
140 fpu_vers = ((get_fsr()>>17)&0x7);
141 put_psr(psr);
142
143 for(i = 0; i<NSPARCCHIPS; i++) {
144 if(linux_sparc_chips[i].psr_impl == psr_impl)
145 if(linux_sparc_chips[i].psr_vers == psr_vers) {
146 sparc_cpu_type = linux_sparc_chips[i].cpu_name;
147 break;
148 }
149 }
150
151 if(i==NSPARCCHIPS)
152 printk("DEBUG: psr.impl = 0x%x psr.vers = 0x%x\n", psr_impl,
153 psr_vers);
154
155 for(i = 0; i<NSPARCFPU; i++) {
156 if(linux_sparc_fpu[i].psr_impl == psr_impl)
157 if(linux_sparc_fpu[i].fp_vers == fpu_vers) {
158 sparc_fpu_type = linux_sparc_fpu[i].fp_name;
159 break;
160 }
161 }
162
163 if(i == NSPARCFPU) {
164 printk("DEBUG: psr.impl = 0x%x fsr.vers = 0x%x\n", psr_impl,
165 fpu_vers);
166 sparc_fpu_type = linux_sparc_fpu[31].fp_name;
167 }
168}
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
new file mode 100644
index 000000000000..fcb0c049c3fe
--- /dev/null
+++ b/arch/sparc/kernel/devices.c
@@ -0,0 +1,160 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * This is based on the sparc64 version, but sun4m doesn't always use
5 * the hardware MIDs, so be careful.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/string.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16
17#include <asm/page.h>
18#include <asm/oplib.h>
19#include <asm/smp.h>
20#include <asm/system.h>
21#include <asm/cpudata.h>
22
23extern void cpu_probe(void);
24extern void clock_stop_probe(void); /* tadpole.c */
25extern void sun4c_probe_memerr_reg(void);
26
27static char *cpu_mid_prop(void)
28{
29 if (sparc_cpu_model == sun4d)
30 return "cpu-id";
31 return "mid";
32}
33
34static int check_cpu_node(int nd, int *cur_inst,
35 int (*compare)(int, int, void *), void *compare_arg,
36 int *prom_node, int *mid)
37{
38 char node_str[128];
39
40 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
41 if (strcmp(node_str, "cpu"))
42 return -ENODEV;
43
44 if (!compare(nd, *cur_inst, compare_arg)) {
45 if (prom_node)
46 *prom_node = nd;
47 if (mid) {
48 *mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
49 if (sparc_cpu_model == sun4m)
50 *mid &= 3;
51 }
52 return 0;
53 }
54
55 (*cur_inst)++;
56
57 return -ENODEV;
58}
59
60static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
61 int *prom_node, int *mid)
62{
63 int nd, cur_inst, err;
64
65 nd = prom_root_node;
66 cur_inst = 0;
67
68 err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
69 prom_node, mid);
70 if (!err)
71 return 0;
72
73 nd = prom_getchild(nd);
74 while ((nd = prom_getsibling(nd)) != 0) {
75 err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
76 prom_node, mid);
77 if (!err)
78 return 0;
79 }
80
81 return -ENODEV;
82}
83
/* Match callback: succeed when this is the Nth cpu node encountered,
 * where N is carried in @_arg as a casted integer.
 */
static int cpu_instance_compare(int nd, int instance, void *_arg)
{
	int wanted = (int)(long)_arg;

	return (instance == wanted) ? 0 : -ENODEV;
}
92
93int cpu_find_by_instance(int instance, int *prom_node, int *mid)
94{
95 return __cpu_find_by(cpu_instance_compare, (void *)instance,
96 prom_node, mid);
97}
98
99static int cpu_mid_compare(int nd, int instance, void *_arg)
100{
101 int desired_mid = (int) _arg;
102 int this_mid;
103
104 this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
105 if (this_mid == desired_mid
106 || (sparc_cpu_model == sun4m && (this_mid & 3) == desired_mid))
107 return 0;
108 return -ENODEV;
109}
110
111int cpu_find_by_mid(int mid, int *prom_node)
112{
113 return __cpu_find_by(cpu_mid_compare, (void *)mid,
114 prom_node, NULL);
115}
116
/* sun4m uses truncated mids since we base the cpuid on the ttable/irqset
 * address (0-3).  This gives us the true hardware mid, which might have
 * some other bits set.  On 4d hardware and software mids are the same.
 */
int cpu_get_hwmid(int prom_node)
{
	/* -ENODEV doubles as the "property missing" default value. */
	return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
}
125
126void __init device_scan(void)
127{
128 prom_printf("Booting Linux...\n");
129
130#ifndef CONFIG_SMP
131 {
132 int err, cpu_node;
133 err = cpu_find_by_instance(0, &cpu_node, NULL);
134 if (err) {
135 /* Probably a sun4e, Sun is trying to trick us ;-) */
136 prom_printf("No cpu nodes, cannot continue\n");
137 prom_halt();
138 }
139 cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
140 "clock-frequency",
141 0);
142 }
143#endif /* !CONFIG_SMP */
144
145 cpu_probe();
146#ifdef CONFIG_SUN_AUXIO
147 {
148 extern void auxio_probe(void);
149 extern void auxio_power_probe(void);
150 auxio_probe();
151 auxio_power_probe();
152 }
153#endif
154 clock_stop_probe();
155
156 if (ARCH_SUN4C_SUN4)
157 sun4c_probe_memerr_reg();
158
159 return;
160}
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
new file mode 100644
index 000000000000..1754192c69d0
--- /dev/null
+++ b/arch/sparc/kernel/ebus.c
@@ -0,0 +1,361 @@
1/* $Id: ebus.c,v 1.20 2002/01/05 01:13:43 davem Exp $
2 * ebus.c: PCI to EBus bridge device.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 *
6 * Adopted for sparc by V. Roganov and G. Raiko.
7 * Fixes for different platforms by Pete Zaitcev.
8 */
9
10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16
17#include <asm/system.h>
18#include <asm/page.h>
19#include <asm/pbm.h>
20#include <asm/ebus.h>
21#include <asm/io.h>
22#include <asm/oplib.h>
23#include <asm/bpp.h>
24
25struct linux_ebus *ebus_chain = 0;
26
27/* We are together with pcic.c under CONFIG_PCI. */
28extern unsigned int pcic_pin_to_irq(unsigned int, char *name);
29
30/*
31 * IRQ Blacklist
32 * Here we list PROMs and systems that are known to supply crap as IRQ numbers.
33 */
34struct ebus_device_irq {
35 char *name;
36 unsigned int pin;
37};
38
39struct ebus_system_entry {
40 char *esname;
41 struct ebus_device_irq *ipt;
42};
43
44static struct ebus_device_irq je1_1[] = {
45 { "8042", 3 },
46 { "SUNW,CS4231", 0 },
47 { "parallel", 0 },
48 { "se", 2 },
49 { 0, 0 }
50};
51
52/*
53 * Gleb's JE1 supplied reasonable pin numbers, but mine did not (OBP 2.32).
54 * Blacklist the sucker... Note that Gleb's system will work.
55 */
56static struct ebus_system_entry ebus_blacklist[] = {
57 { "SUNW,JavaEngine1", je1_1 },
58 { 0, 0 }
59};
60
61static struct ebus_device_irq *ebus_blackp = NULL;
62
63/*
64 */
65static inline unsigned long ebus_alloc(size_t size)
66{
67 return (unsigned long)kmalloc(size, GFP_ATOMIC);
68}
69
70/*
71 */
72int __init ebus_blacklist_irq(char *name)
73{
74 struct ebus_device_irq *dp;
75
76 if ((dp = ebus_blackp) != NULL) {
77 for (; dp->name != NULL; dp++) {
78 if (strcmp(name, dp->name) == 0) {
79 return pcic_pin_to_irq(dp->pin, name);
80 }
81 }
82 }
83 return 0;
84}
85
/* Fill in one child of an EBus device: name, addresses and irq.
 * A child's "reg" entries are indices into the parent's address list,
 * not addresses of their own.  The irq comes from, in priority order:
 * the blacklist, the node's "interrupts" property, or the parent.
 */
void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
			    struct linux_ebus_child *dev)
{
	int regs[PROMREG_MAX];
	int irqs[PROMREG_MAX];
	char lbuf[128];
	int i, len;

	dev->prom_node = node;
	prom_getstring(node, "name", lbuf, sizeof(lbuf));
	strcpy(dev->prom_name, lbuf);

	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
	if (len == -1) len = 0;		/* no "reg": zero addresses */
	dev->num_addrs = len / sizeof(regs[0]);

	for (i = 0; i < dev->num_addrs; i++) {
		if (regs[i] >= dev->parent->num_addrs) {
			prom_printf("UGH: property for %s was %d, need < %d\n",
				    dev->prom_name, len, dev->parent->num_addrs);
			panic(__FUNCTION__);
		}
		dev->resource[i].start = dev->parent->resource[regs[i]].start; /* XXX resource */
	}

	for (i = 0; i < PROMINTR_MAX; i++)
		dev->irqs[i] = PCI_IRQ_NONE;

	if ((dev->irqs[0] = ebus_blacklist_irq(dev->prom_name)) != 0) {
		dev->num_irqs = 1;
	} else if ((len = prom_getproperty(node, "interrupts",
	    (char *)&irqs, sizeof(irqs))) == -1 || len == 0) {
		/* No "interrupts" property: inherit the parent's irq. */
		dev->num_irqs = 0;
		dev->irqs[0] = 0;
		if (dev->parent->num_irqs != 0) {
			dev->num_irqs = 1;
			dev->irqs[0] = dev->parent->irqs[0];
/* P3 */ /* printk("EBUS: dev %s irq %d from parent\n", dev->prom_name, dev->irqs[0]); */
		}
	} else {
		dev->num_irqs = len / sizeof(irqs[0]);
		if (irqs[0] == 0 || irqs[0] >= 8) {
			/*
			 * XXX Zero is a valid pin number...
			 * This works as long as Ebus is not wired to INTA#.
			 */
			printk("EBUS: %s got bad irq %d from PROM\n",
			       dev->prom_name, irqs[0]);
			dev->num_irqs = 0;
			dev->irqs[0] = 0;
		} else {
			dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
		}
	}
}
141
/* Fill in one top-level EBus device: name, ioremap()ed register
 * addresses, irq, and (recursively via fill_ebus_child) its children.
 * Each "reg" entry's which_io selects one of the bridge's PCI
 * resources; phys_addr is an offset within it.
 */
void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
{
	struct linux_prom_registers regs[PROMREG_MAX];
	struct linux_ebus_child *child;
	int irqs[PROMINTR_MAX];
	char lbuf[128];
	int i, n, len;
	unsigned long baseaddr;

	dev->prom_node = node;
	prom_getstring(node, "name", lbuf, sizeof(lbuf));
	strcpy(dev->prom_name, lbuf);

	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
	if (len % sizeof(struct linux_prom_registers)) {
		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
			    dev->prom_name, len,
			    (int)sizeof(struct linux_prom_registers));
		panic(__FUNCTION__);
	}
	dev->num_addrs = len / sizeof(struct linux_prom_registers);

	for (i = 0; i < dev->num_addrs; i++) {
		/*
		 * XXX Collect JE-1 PROM
		 *
		 * Example - JS-E with 3.11:
		 *  /ebus
		 *      regs
		 *        0x00000000, 0x0, 0x00000000, 0x0, 0x00000000,
		 *        0x82000010, 0x0, 0xf0000000, 0x0, 0x01000000,
		 *        0x82000014, 0x0, 0x38800000, 0x0, 0x00800000,
		 *      ranges
		 *        0x00, 0x00000000, 0x02000010, 0x0, 0x0, 0x01000000,
		 *        0x01, 0x01000000, 0x02000014, 0x0, 0x0, 0x00800000,
		 *  /ebus/8042
		 *      regs
		 *        0x00000001, 0x00300060, 0x00000008,
		 *        0x00000001, 0x00300060, 0x00000008,
		 */
		n = regs[i].which_io;
		if (n >= 4) {
			/* XXX This is copied from old JE-1 by Gleb. */
			n = (regs[i].which_io - 0x10) >> 2;
		} else {
			;
		}

/*
 * XXX Now as we have regions, why don't we make an on-demand allocation...
 */
		dev->resource[i].start = 0;
		if ((baseaddr = dev->bus->self->resource[n].start +
		    regs[i].phys_addr) != 0) {
			/* dev->resource[i].name = dev->prom_name; */
			if ((baseaddr = (unsigned long) ioremap(baseaddr,
			    regs[i].reg_size)) == 0) {
				panic("ebus: unable to remap dev %s",
				      dev->prom_name);
			}
		}
		dev->resource[i].start = baseaddr;	/* XXX Unaligned */
	}

	for (i = 0; i < PROMINTR_MAX; i++)
		dev->irqs[i] = PCI_IRQ_NONE;

	/* irq priority: blacklist override, then "interrupts" property,
	 * then the bridge's own PCI irq. */
	if ((dev->irqs[0] = ebus_blacklist_irq(dev->prom_name)) != 0) {
		dev->num_irqs = 1;
	} else if ((len = prom_getproperty(node, "interrupts",
	    (char *)&irqs, sizeof(irqs))) == -1 || len == 0) {
		dev->num_irqs = 0;
		if ((dev->irqs[0] = dev->bus->self->irq) != 0) {
			dev->num_irqs = 1;
/* P3 */ /* printk("EBUS: child %s irq %d from parent\n", dev->prom_name, dev->irqs[0]); */
		}
	} else {
		dev->num_irqs = 1;  /* dev->num_irqs = len / sizeof(irqs[0]); */
		if (irqs[0] == 0 || irqs[0] >= 8) {
			/* See above for the parent. XXX */
			printk("EBUS: %s got bad irq %d from PROM\n",
			       dev->prom_name, irqs[0]);
			dev->num_irqs = 0;
			dev->irqs[0] = 0;
		} else {
			dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
		}
	}

	/* Build the singly-linked list of this device's children. */
	if ((node = prom_getchild(node))) {
		dev->children = (struct linux_ebus_child *)
			ebus_alloc(sizeof(struct linux_ebus_child));

		child = dev->children;
		child->next = 0;
		child->parent = dev;
		child->bus = dev->bus;
		fill_ebus_child(node, &regs[0], child);

		while ((node = prom_getsibling(node)) != 0) {
			child->next = (struct linux_ebus_child *)
				ebus_alloc(sizeof(struct linux_ebus_child));

			child = child->next;
			child->next = 0;
			child->parent = dev;
			child->bus = dev->bus;
			fill_ebus_child(node, &regs[0], child);
		}
	}
}
253
/* Probe every PCI->EBus bridge in the system and build the global
 * ebus_chain list of bridges, their devices and child devices.
 * Does nothing when no bridge is found.
 */
void __init ebus_init(void)
{
	struct linux_prom_pci_registers regs[PROMREG_MAX];
	struct linux_pbm_info *pbm;
	struct linux_ebus_device *dev;
	struct linux_ebus *ebus;
	struct ebus_system_entry *sp;
	struct pci_dev *pdev;
	struct pcidev_cookie *cookie;
	char lbuf[128];
	unsigned long addr, *base;
	unsigned short pci_command;
	int nd, len, ebusnd;
	int reg, nreg;
	int num_ebus = 0;

	/* Activate the IRQ blacklist if this machine is on it. */
	prom_getstring(prom_root_node, "name", lbuf, sizeof(lbuf));
	for (sp = ebus_blacklist; sp->esname != NULL; sp++) {
		if (strcmp(lbuf, sp->esname) == 0) {
			ebus_blackp = sp->ipt;
			break;
		}
	}

	pdev = pci_get_device(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_EBUS, 0);
	if (!pdev) {
		return;
	}
	cookie = pdev->sysdata;
	ebusnd = cookie->prom_node;

	ebus_chain = ebus = (struct linux_ebus *)
			ebus_alloc(sizeof(struct linux_ebus));
	ebus->next = 0;

	while (ebusnd) {

		prom_getstring(ebusnd, "name", lbuf, sizeof(lbuf));
		ebus->prom_node = ebusnd;
		strcpy(ebus->prom_name, lbuf);
		ebus->self = pdev;
		ebus->parent = pbm = cookie->pbm;

		/* Enable BUS Master. */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
		pci_command |= PCI_COMMAND_MASTER;
		pci_write_config_word(pdev, PCI_COMMAND, pci_command);

		len = prom_getproperty(ebusnd, "reg", (void *)regs,
				       sizeof(regs));
		if (len == 0 || len == -1) {
			prom_printf("%s: can't find reg property\n",
				    __FUNCTION__);
			prom_halt();
		}
		nreg = len / sizeof(struct linux_prom_pci_registers);

		/* Record the bridge's memory-space bases in its pci_dev
		 * resources; fill_ebus_device() indexes them by which_io. */
		base = &ebus->self->resource[0].start;
		for (reg = 0; reg < nreg; reg++) {
			if (!(regs[reg].which_io & 0x03000000))
				continue;

			addr = regs[reg].phys_lo;
			*base++ = addr;
		}

		nd = prom_getchild(ebusnd);
		if (!nd)
			goto next_ebus;

		ebus->devices = (struct linux_ebus_device *)
				ebus_alloc(sizeof(struct linux_ebus_device));

		dev = ebus->devices;
		dev->next = 0;
		dev->children = 0;
		dev->bus = ebus;
		fill_ebus_device(nd, dev);

		while ((nd = prom_getsibling(nd)) != 0) {
			dev->next = (struct linux_ebus_device *)
					ebus_alloc(sizeof(struct linux_ebus_device));

			dev = dev->next;
			dev->next = 0;
			dev->children = 0;
			dev->bus = ebus;
			fill_ebus_device(nd, dev);
		}

	next_ebus:
		/* Any further bridges of the same PCI id? */
		pdev = pci_get_device(PCI_VENDOR_ID_SUN,
				      PCI_DEVICE_ID_SUN_EBUS, pdev);
		if (!pdev)
			break;

		cookie = pdev->sysdata;
		ebusnd = cookie->prom_node;

		ebus->next = (struct linux_ebus *)
			ebus_alloc(sizeof(struct linux_ebus));
		ebus = ebus->next;
		ebus->next = 0;
		++num_ebus;
	}
	if (pdev)
		pci_dev_put(pdev);
}
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
new file mode 100644
index 000000000000..b448166f5da9
--- /dev/null
+++ b/arch/sparc/kernel/entry.S
@@ -0,0 +1,1956 @@
1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
2 * arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
9 */
10
11#include <linux/config.h>
12#include <linux/errno.h>
13
14#include <asm/head.h>
15#include <asm/asi.h>
16#include <asm/smp.h>
17#include <asm/kgdb.h>
18#include <asm/contregs.h>
19#include <asm/ptrace.h>
20#include <asm/asm_offsets.h>
21#include <asm/psr.h>
22#include <asm/vaddrs.h>
23#include <asm/memreg.h>
24#include <asm/page.h>
25#ifdef CONFIG_SUN4
26#include <asm/pgtsun4.h>
27#else
28#include <asm/pgtsun4c.h>
29#endif
30#include <asm/winmacro.h>
31#include <asm/signal.h>
32#include <asm/obio.h>
33#include <asm/mxcc.h>
34#include <asm/thread_info.h>
35#include <asm/param.h>
36
37#include <asm/asmmacro.h>
38
/* Register alias: %g6 always holds the current task pointer. */
#define curptr      g6

#define NR_SYSCALLS 284      /* Each OS is different... */

/* These are just handy. */
#define _SV	save	%sp, -STACKFRAME_SZ, %sp
#define _RS	restore

/* Seven save/restore pairs force every kernel register window out
 * to the stack (the register file holds at most 7 usable windows). */
#define FLUSH_ALL_KERNEL_WINDOWS \
	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
50
51/* First, KGDB low level things. This is a rewrite
52 * of the routines found in the sparc-stub.c asm() statement
53 * from the gdb distribution. This is also dual-purpose
54 * as a software trap for userlevel programs.
55 */
56 .data
57 .align 4
58
59in_trap_handler:
60 .word 0
61
62 .text
63 .align 4
64
65#if 0 /* kgdb is dropped from 2.5.33 */
66! This function is called when any SPARC trap (except window overflow or
67! underflow) occurs. It makes sure that the invalid register window is still
68! available before jumping into C code. It will also restore the world if you
69! return from handle_exception.
70
71 .globl trap_low
72trap_low:
73 rd %wim, %l3
74 SAVE_ALL
75
76 sethi %hi(in_trap_handler), %l4
77 ld [%lo(in_trap_handler) + %l4], %l5
78 inc %l5
79 st %l5, [%lo(in_trap_handler) + %l4]
80
81 /* Make sure kgdb sees the same state we just saved. */
82 LOAD_PT_GLOBALS(sp)
83 LOAD_PT_INS(sp)
84 ld [%sp + STACKFRAME_SZ + PT_Y], %l4
85 ld [%sp + STACKFRAME_SZ + PT_WIM], %l3
86 ld [%sp + STACKFRAME_SZ + PT_PSR], %l0
87 ld [%sp + STACKFRAME_SZ + PT_PC], %l1
88 ld [%sp + STACKFRAME_SZ + PT_NPC], %l2
89 rd %tbr, %l5 /* Never changes... */
90
91 /* Make kgdb exception frame. */
92 sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
93 ! + hidden arg + arg spill
94 ! + doubleword alignment
95 ! + registers[72] local var
96 SAVE_KGDB_GLOBALS(sp)
97 SAVE_KGDB_INS(sp)
98 SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
99
100 /* We are increasing PIL, so two writes. */
101 or %l0, PSR_PIL, %l0
102 wr %l0, 0, %psr
103 WRITE_PAUSE
104 wr %l0, PSR_ET, %psr
105 WRITE_PAUSE
106
107 call handle_exception
108 add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers
109
110 /* Load new kgdb register set. */
111 LOAD_KGDB_GLOBALS(sp)
112 LOAD_KGDB_INS(sp)
113 LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
114 wr %l4, 0x0, %y
115
116 sethi %hi(in_trap_handler), %l4
117 ld [%lo(in_trap_handler) + %l4], %l5
118 dec %l5
119 st %l5, [%lo(in_trap_handler) + %l4]
120
121 add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.
122
123 /* Now take what kgdb did and place it into the pt_regs
124 * frame which SparcLinux RESTORE_ALL understands.,
125 */
126 STORE_PT_INS(sp)
127 STORE_PT_GLOBALS(sp)
128 STORE_PT_YREG(sp, g2)
129 STORE_PT_PRIV(sp, l0, l1, l2)
130
131 RESTORE_ALL
132#endif
133
#ifdef CONFIG_BLK_DEV_FD
	.text
	.align	4
	.globl	floppy_hardint
floppy_hardint:
	/*
	 * This code cannot touch registers %l0 %l1 and %l2
	 * because SAVE_ALL depends on their values. It depends
	 * on %l3 also, but we regenerate it before a call.
	 * Other registers are:
	 * %l3 -- base address of fdc registers
	 * %l4 -- pdma_vaddr
	 * %l5 -- scratch for ld/st address
	 * %l6 -- pdma_size
	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
	 */

	/* Do we have work to do? */
	sethi	%hi(doing_pdma), %l7
	ld	[%l7 + %lo(doing_pdma)], %l7
	cmp	%l7, 0
	be	floppy_dosoftint
	 nop

	/* Load fdc register base */
	sethi	%hi(fdc_status), %l3
	ld	[%l3 + %lo(fdc_status)], %l3

	/* Setup register addresses */
	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
	ld	[%l5 + %lo(pdma_vaddr)], %l4
	sethi	%hi(pdma_size), %l5	! bytes to go
	ld	[%l5 + %lo(pdma_size)], %l6
next_byte:
	ldub	[%l3], %l7		! read fdc status byte

	andcc	%l7, 0x80, %g0		! Does fifo still have data
	bz	floppy_fifo_emptied	! fifo has been emptied...
	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
	bz	floppy_overrun		! nope, overrun
	 andcc	%l7, 0x40, %g0		! 0=write 1=read
	bz	floppy_write
	 sub	%l6, 0x1, %l6

	/* Ok, actually read this byte */
	ldub	[%l3 + 1], %l7
	orcc	%g0, %l6, %g0		! any bytes left?
	stb	%l7, [%l4]
	bne	next_byte
	 add	%l4, 0x1, %l4

	b	floppy_tdone
	 nop

floppy_write:
	/* Ok, actually write this byte */
	ldub	[%l4], %l7
	orcc	%g0, %l6, %g0		! any bytes left?
	stb	%l7, [%l3 + 1]
	bne	next_byte
	 add	%l4, 0x1, %l4

	/* fall through... */
floppy_tdone:
	/* Transfer complete: store final buffer position and count. */
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Flip terminal count pin */
	set	auxio_register, %l7
	ld	[%l7], %l7

	set	sparc_cpu_model, %l5
	ld	[%l5], %l5
	subcc	%l5, 1, %g0		/* enum { sun4c = 1 }; */
	be	1f			! sun4c uses different TC bits
	 ldub	[%l7], %l5

	or	%l5, 0xc2, %l5		! non-sun4c AUXIO TC bits
	stb	%l5, [%l7]
	andn	%l5, 0x02, %l5
	b	2f
	 nop

1:
	or	%l5, 0xf4, %l5		! sun4c AUXIO TC bits
	stb	%l5, [%l7]
	andn	%l5, 0x04, %l5

2:
	/* Kill some time so the bits set */
	WRITE_PAUSE
	WRITE_PAUSE

	stb	%l5, [%l7]		! lower the TC bit again

	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	b	floppy_dosoftint
	 st	%g0, [%l7 + %lo(doing_pdma)]

	/* We emptied the FIFO, but we haven't read everything
	 * as of yet. Store the current transfer address and
	 * bytes left to read so we can continue when the next
	 * fast IRQ comes in.
	 */
floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l7
	st	%l6, [%l7 + %lo(pdma_size)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE

	jmp	%l1			! return from fast trap without
	 rett	%l2			! building a full pt_regs frame

floppy_overrun:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	st	%g0, [%l7 + %lo(doing_pdma)]

	/* fall through... */
floppy_dosoftint:
	/* Slow path: build a real trap frame and call the C handler. */
	rd	%wim, %l3
	SAVE_ALL

	/* Set all IRQs off. */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	mov	11, %o0			! floppy irq level (unused anyway)
	mov	%g0, %o1		! devid is not used in fast interrupts
	call	sparc_floppy_irq
	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs

	RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */
282
	/* Bad trap handler: build a pt_regs frame, re-enable traps and
	 * hand the trap number to the generic C handler.
	 */
	.globl	bad_trap_handler
bad_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	call	do_hw_interrupt
	 mov	%l7, %o1		! trap number

	RESTORE_ALL
296
/* For now all IRQ's not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and if not
 * it will say so on the console.
 */

	.align	4
	.globl	real_irq_entry, patch_handler_irq
real_irq_entry:
	SAVE_ALL

#ifdef CONFIG_SMP
	.globl	patchme_maybe_smp_msg

	cmp	%l7, 12			! levels above 12 may carry IPIs
patchme_maybe_smp_msg:
	bgu	maybe_smp4m_msg
	 nop
#endif

real_irq_continue:
	or	%l0, PSR_PIL, %g2	! mask interrupts...
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr	! ...but keep traps enabled
	WRITE_PAUSE
	mov	%l7, %o0		! irq level
patch_handler_irq:
	call	handler_irq
	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
	wr	%g2, PSR_ET, %psr	! keep ET up
	WRITE_PAUSE

	RESTORE_ALL
331
332#ifdef CONFIG_SMP
333 /* SMP per-cpu ticker interrupts are handled specially. */
334smp4m_ticker:
	/* Entered from maybe_smp4m_msg with the condition codes set by
	 * "cmp %l7, 14" in its delay slot.  On no-match, branch past the
	 * first instruction of real_irq_continue because the delay slot
	 * below duplicates it (or %l0, PSR_PIL, %g2). */
335 bne real_irq_continue+4
336 or %l0, PSR_PIL, %g2
337 wr %g2, 0x0, %psr
338 WRITE_PAUSE
339 wr %g2, PSR_ET, %psr
340 WRITE_PAUSE
341 call smp4m_percpu_timer_interrupt
342 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
343 wr %l0, PSR_ET, %psr
344 WRITE_PAUSE
345 RESTORE_ALL
346
347 /* Here is where we check for possible SMP IPI passed to us
348 * on some level other than 15 which is the NMI and only used
349 * for cross calls. That has a separate entry point below.
350 */
351maybe_smp4m_msg:
	/* Index this CPU's slot (4KB apart, hence the <<12) in the
	 * sun4m_interrupts area and test the soft-interrupt bit. */
352 GET_PROCESSOR4M_ID(o3)
353 set sun4m_interrupts, %l5
354 ld [%l5], %o5
355 sethi %hi(0x40000000), %o2
356 sll %o3, 12, %o3
357 ld [%o5 + %o3], %o1
358 andcc %o1, %o2, %g0
	/* Bit clear: not an IPI; annulled delay slot sets up the level-14
	 * comparison that smp4m_ticker's bne consumes. */
359 be,a smp4m_ticker
360 cmp %l7, 14
	/* Acknowledge the IPI, with a dummy read-back to flush the write. */
361 st %o2, [%o5 + 0x4]
362 WRITE_PAUSE
363 ld [%o5], %g0
364 WRITE_PAUSE
365 or %l0, PSR_PIL, %l4
366 wr %l4, 0x0, %psr
367 WRITE_PAUSE
368 wr %l4, PSR_ET, %psr
369 WRITE_PAUSE
370 call smp_reschedule_irq
371 nop
372
373 RESTORE_ALL
374
375 .align 4
376 .globl linux_trap_ipi15_sun4m
	/* Level-15 (NMI) trap on sun4m: either a cross-call IPI or an
	 * async memory error; the interrupt-pending bit distinguishes. */
377linux_trap_ipi15_sun4m:
378 SAVE_ALL
379 sethi %hi(0x80000000), %o2
380 GET_PROCESSOR4M_ID(o0)
381 set sun4m_interrupts, %l5
382 ld [%l5], %o5
383 sll %o0, 12, %o0
384 add %o5, %o0, %o5
385 ld [%o5], %o3
386 andcc %o3, %o2, %g0
387 be 1f ! Must be an NMI async memory error
388 st %o2, [%o5 + 4] ! (delay slot) ack the level-15 bit
389 WRITE_PAUSE
390 ld [%o5], %g0 ! dummy read-back flushes the ack
391 WRITE_PAUSE
392 or %l0, PSR_PIL, %l4
393 wr %l4, 0x0, %psr
394 WRITE_PAUSE
395 wr %l4, PSR_ET, %psr
396 WRITE_PAUSE
397 call smp4m_cross_call_irq
398 nop
399 b ret_trap_lockless_ipi
400 clr %l6
4011:
402 /* NMI async memory error handling. */
403 sethi %hi(0x80000000), %l4
404 sethi %hi(0x4000), %o3
	/* Undo the per-CPU offset, then address the system (0x4000) regs. */
405 sub %o5, %o0, %o5
406 add %o5, %o3, %l5
407 st %l4, [%l5 + 0xc]
408 WRITE_PAUSE
409 ld [%l5], %g0
410 WRITE_PAUSE
411 or %l0, PSR_PIL, %l4
412 wr %l4, 0x0, %psr
413 WRITE_PAUSE
414 wr %l4, PSR_ET, %psr
415 WRITE_PAUSE
416 call sun4m_nmi
417 nop
418 st %l4, [%l5 + 0x8]
419 WRITE_PAUSE
420 ld [%l5], %g0
421 WRITE_PAUSE
422 RESTORE_ALL
423
424 .globl smp4d_ticker
425 /* SMP per-cpu ticker interrupts are handled specially. */
426smp4d_ticker:
427 SAVE_ALL
428 or %l0, PSR_PIL, %g2
429 sethi %hi(CC_ICLR), %o0
430 sethi %hi(1 << 14), %o1
431 or %o0, %lo(CC_ICLR), %o0
432 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */
433 wr %g2, 0x0, %psr
434 WRITE_PAUSE
435 wr %g2, PSR_ET, %psr
436 WRITE_PAUSE
437 call smp4d_percpu_timer_interrupt
438 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
439 wr %l0, PSR_ET, %psr
440 WRITE_PAUSE
441 RESTORE_ALL
442
443 .align 4
444 .globl linux_trap_ipi15_sun4d
	/* Level-15 trap on sun4d: check MXCC error register and boot-bus
	 * status first; only a clean state is treated as a cross-call IPI. */
445linux_trap_ipi15_sun4d:
446 SAVE_ALL
447 sethi %hi(CC_BASE), %o4
448 sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
449 or %o4, (CC_EREG - CC_BASE), %o0
450 ldda [%o0] ASI_M_MXCC, %o0
451 andcc %o0, %o2, %g0
452 bne 1f
453 sethi %hi(BB_STAT2), %o2 ! (delay slot)
454 lduba [%o2] ASI_M_CTL, %o2
455 andcc %o2, BB_STAT2_MASK, %g0
456 bne 2f
457 or %o4, (CC_ICLR - CC_BASE), %o0 ! (delay slot)
458 sethi %hi(1 << 15), %o1
459 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */
460 or %l0, PSR_PIL, %l4
461 wr %l4, 0x0, %psr
462 WRITE_PAUSE
463 wr %l4, PSR_ET, %psr
464 WRITE_PAUSE
465 call smp4d_cross_call_irq
466 nop
467 b ret_trap_lockless_ipi
468 clr %l6
469
4701: /* MXCC error */
4712: /* BB error */
472 /* Disable PIL 15 */
473 set CC_IMSK, %l4
474 lduha [%l4] ASI_M_MXCC, %l5
475 sethi %hi(1 << 15), %l7
476 or %l5, %l7, %l5
477 stha %l5, [%l4] ASI_M_MXCC
478 /* FIXME */
	/* Error recovery not implemented: spin forever. */
4791: b,a 1b
480
481#endif /* CONFIG_SMP */
482
483 /* This routine handles illegal instructions and privileged
484 * instruction attempts from user code.
485 */
486 .align 4
487 .globl bad_instruction
488bad_instruction:
	/* Fetch the trapping instruction and mask/compare it against the
	 * opcode pattern handled at 1: below (unimplemented flush, per the
	 * comment there); everything else goes to C. */
489 sethi %hi(0xc1f80000), %l4
490 ld [%l1], %l5
491 sethi %hi(0x81d80000), %l7
492 and %l5, %l4, %l5
493 cmp %l5, %l7
494 be 1f
495 SAVE_ALL
496
497 wr %l0, PSR_ET, %psr ! re-enable traps
498 WRITE_PAUSE
499
500 add %sp, STACKFRAME_SZ, %o0
501 mov %l1, %o1
502 mov %l2, %o2
503 call do_illegal_instruction
504 mov %l0, %o3 ! (delay slot) %psr at trap time
505
506 RESTORE_ALL
507
5081: /* unimplemented flush - just skip */
509 jmpl %l2, %g0
510 rett %l2 + 4
511
512 .align 4
513 .globl priv_instruction
	/* Privileged-instruction trap: save state, re-enable traps, and
	 * pass pt_regs, pc, npc and psr to the C handler. */
514priv_instruction:
515 SAVE_ALL
516
517 wr %l0, PSR_ET, %psr
518 WRITE_PAUSE
519
520 add %sp, STACKFRAME_SZ, %o0
521 mov %l1, %o1
522 mov %l2, %o2
523 call do_priv_instruction
524 mov %l0, %o3 ! (delay slot)
525
526 RESTORE_ALL
527
528 /* This routine handles unaligned data accesses. */
529 .align 4
530 .globl mna_handler
531mna_handler:
	/* PSR_PS tells us whether the fault came from kernel or user
	 * mode; each gets its own C handler.  %o1 carries the faulting
	 * instruction word. */
532 andcc %l0, PSR_PS, %g0
533 be mna_fromuser
534 nop
535
536 SAVE_ALL
537
538 wr %l0, PSR_ET, %psr
539 WRITE_PAUSE
540
541 ld [%l1], %o1
542 call kernel_unaligned_trap
543 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
544
545 RESTORE_ALL
546
547mna_fromuser:
548 SAVE_ALL
549
550 wr %l0, PSR_ET, %psr ! re-enable traps
551 WRITE_PAUSE
552
553 ld [%l1], %o1
554 call user_unaligned_trap
555 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
556
557 RESTORE_ALL
558
559 /* This routine handles floating point disabled traps. */
560 .align 4
561 .globl fpd_trap_handler
562fpd_trap_handler:
563 SAVE_ALL
564
565 wr %l0, PSR_ET, %psr ! re-enable traps
566 WRITE_PAUSE
567
568 add %sp, STACKFRAME_SZ, %o0
569 mov %l1, %o1
570 mov %l2, %o2
571 call do_fpd_trap
572 mov %l0, %o3 ! (delay slot)
573
574 RESTORE_ALL
575
576 /* This routine handles Floating Point Exceptions. */
577 .align 4
578 .globl fpe_trap_handler
579fpe_trap_handler:
	/* If the trap PC is inside fpsave itself (either at fpsave_magic
	 * or at fpsave's first store), re-vector to the matching catch
	 * stub instead of taking the normal C path — see the deadlock
	 * note above fpsave_catch at the end of this file. */
580 set fpsave_magic, %l5
581 cmp %l1, %l5
582 be 1f
583 sethi %hi(fpsave), %l5 ! (delay slot)
584 or %l5, %lo(fpsave), %l5
585 cmp %l1, %l5
586 bne 2f
587 sethi %hi(fpsave_catch2), %l5 ! (delay slot)
588 or %l5, %lo(fpsave_catch2), %l5
589 wr %l0, 0x0, %psr ! restore original psr
590 WRITE_PAUSE
591 jmp %l5
592 rett %l5 + 4
5931:
594 sethi %hi(fpsave_catch), %l5
595 or %l5, %lo(fpsave_catch), %l5
596 wr %l0, 0x0, %psr
597 WRITE_PAUSE
598 jmp %l5
599 rett %l5 + 4
600
	/* Normal case: faulting PC is outside fpsave. */
6012:
602 SAVE_ALL
603
604 wr %l0, PSR_ET, %psr ! re-enable traps
605 WRITE_PAUSE
606
607 add %sp, STACKFRAME_SZ, %o0
608 mov %l1, %o1
609 mov %l2, %o2
610 call do_fpe_trap
611 mov %l0, %o3 ! (delay slot)
612
613 RESTORE_ALL
614
615 /* This routine handles Tag Overflow Exceptions. */
	/* This and the five handlers below share one shape: save state,
	 * re-enable traps, then call the C handler with
	 * (pt_regs *, pc, npc, psr). */
616 .align 4
617 .globl do_tag_overflow
618do_tag_overflow:
619 SAVE_ALL
620
621 wr %l0, PSR_ET, %psr ! re-enable traps
622 WRITE_PAUSE
623
624 add %sp, STACKFRAME_SZ, %o0
625 mov %l1, %o1
626 mov %l2, %o2
627 call handle_tag_overflow
628 mov %l0, %o3 ! (delay slot)
629
630 RESTORE_ALL
631
632 /* This routine handles Watchpoint Exceptions. */
633 .align 4
634 .globl do_watchpoint
635do_watchpoint:
636 SAVE_ALL
637
638 wr %l0, PSR_ET, %psr ! re-enable traps
639 WRITE_PAUSE
640
641 add %sp, STACKFRAME_SZ, %o0
642 mov %l1, %o1
643 mov %l2, %o2
644 call handle_watchpoint
645 mov %l0, %o3 ! (delay slot)
646
647 RESTORE_ALL
648
649 /* This routine handles Register Access Exceptions. */
650 .align 4
651 .globl do_reg_access
652do_reg_access:
653 SAVE_ALL
654
655 wr %l0, PSR_ET, %psr ! re-enable traps
656 WRITE_PAUSE
657
658 add %sp, STACKFRAME_SZ, %o0
659 mov %l1, %o1
660 mov %l2, %o2
661 call handle_reg_access
662 mov %l0, %o3 ! (delay slot)
663
664 RESTORE_ALL
665
666 /* This routine handles Co-Processor Disabled Exceptions. */
667 .align 4
668 .globl do_cp_disabled
669do_cp_disabled:
670 SAVE_ALL
671
672 wr %l0, PSR_ET, %psr ! re-enable traps
673 WRITE_PAUSE
674
675 add %sp, STACKFRAME_SZ, %o0
676 mov %l1, %o1
677 mov %l2, %o2
678 call handle_cp_disabled
679 mov %l0, %o3 ! (delay slot)
680
681 RESTORE_ALL
682
683 /* This routine handles Co-Processor Exceptions. */
684 .align 4
685 .globl do_cp_exception
686do_cp_exception:
687 SAVE_ALL
688
689 wr %l0, PSR_ET, %psr ! re-enable traps
690 WRITE_PAUSE
691
692 add %sp, STACKFRAME_SZ, %o0
693 mov %l1, %o1
694 mov %l2, %o2
695 call handle_cp_exception
696 mov %l0, %o3 ! (delay slot)
697
698 RESTORE_ALL
699
700 /* This routine handles Hardware Divide By Zero Exceptions. */
701 .align 4
702 .globl do_hw_divzero
703do_hw_divzero:
704 SAVE_ALL
705
706 wr %l0, PSR_ET, %psr ! re-enable traps
707 WRITE_PAUSE
708
709 add %sp, STACKFRAME_SZ, %o0
710 mov %l1, %o1
711 mov %l2, %o2
712 call handle_hw_divzero
713 mov %l0, %o3 ! (delay slot)
714
715 RESTORE_ALL
716
717 .align 4
718 .globl do_flush_windows
	/* Flush-windows trap: spill register windows to the stack, then
	 * step pc/npc past the trap instruction before returning. */
719do_flush_windows:
720 SAVE_ALL
721
722 wr %l0, PSR_ET, %psr
723 WRITE_PAUSE
724
725 andcc %l0, PSR_PS, %g0
726 bne dfw_kernel
727 nop
728
729 call flush_user_windows
730 nop
731
732 /* Advance over the trap instruction. */
733 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
734 add %l1, 0x4, %l2
735 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
736 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
737
738 RESTORE_ALL
739
740 .globl flush_patch_one
741
742 /* We get these for debugging routines using __builtin_return_address() */
743dfw_kernel:
	/* flush_patch_one is exported so this flush sequence can be
	 * patched at boot for the running CPU type. */
744flush_patch_one:
745 FLUSH_ALL_KERNEL_WINDOWS
746
747 /* Advance over the trap instruction. */
748 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
749 add %l1, 0x4, %l2
750 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
751 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
752
753 RESTORE_ALL
754
755 /* The getcc software trap. The user wants the condition codes from
756 * the %psr in register %g1.
757 */
758
759 .align 4
760 .globl getcc_trap_handler
761getcc_trap_handler:
	/* Fast path: no SAVE_ALL; extract the ICC field (psr bits 23:20)
	 * into %g1 and return past the trap instruction. */
762 srl %l0, 20, %g1 ! give user
763 and %g1, 0xf, %g1 ! only ICC bits in %psr
764 jmp %l2 ! advance over trap instruction
765 rett %l2 + 0x4 ! like this...
766
767 /* The setcc software trap. The user has condition codes in %g1
768 * that it would like placed in the %psr. Be careful not to flip
769 * any unintentional bits!
770 */
771
772 .align 4
773 .globl setcc_trap_handler
774setcc_trap_handler:
	/* Merge only the ICC field of the user value into the saved psr,
	 * write it back, and return past the trap instruction. */
775 sll %g1, 0x14, %l4
776 set PSR_ICC, %l5
777 andn %l0, %l5, %l0 ! clear ICC bits in %psr
778 and %l4, %l5, %l4 ! clear non-ICC bits in user value
779 or %l4, %l0, %l4 ! or them in... mix mix mix
780
781 wr %l4, 0x0, %psr ! set new %psr
782 WRITE_PAUSE ! TI scumbags...
783
784 jmp %l2 ! advance over trap instruction
785 rett %l2 + 0x4 ! like this...
786
787 .align 4
788 .globl linux_trap_nmi_sun4c
789linux_trap_nmi_sun4c:
790 SAVE_ALL
791
792 /* Ugh, we need to clear the IRQ line. This is now
793 * a very sun4c specific trap handler...
794 */
795 sethi %hi(interrupt_enable), %l5
796 ld [%l5 + %lo(interrupt_enable)], %l5
797 ldub [%l5], %l6
798 andn %l6, INTS_ENAB, %l6
799 stb %l6, [%l5]
800
801 /* Now it is safe to re-enable traps without recursion. */
802 or %l0, PSR_PIL, %l0
803 wr %l0, PSR_ET, %psr
804 WRITE_PAUSE
805
806 /* Now call the c-code with the pt_regs frame ptr and the
807 * memory error registers as arguments. The ordering chosen
808 * here is due to unlatching semantics.
809 */
810 sethi %hi(AC_SYNC_ERR), %o0
811 add %o0, 0x4, %o0
812 lda [%o0] ASI_CONTROL, %o2 ! sync vaddr
813 sub %o0, 0x4, %o0
814 lda [%o0] ASI_CONTROL, %o1 ! sync error
815 add %o0, 0xc, %o0
816 lda [%o0] ASI_CONTROL, %o4 ! async vaddr
817 sub %o0, 0x4, %o0
818 lda [%o0] ASI_CONTROL, %o3 ! async error
819 call sparc_lvl15_nmi
820 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
821
822 RESTORE_ALL
823
	/* The labelled single instructions below are templates: at boot
	 * the kernel copies the variant matching the detected hardware
	 * over the generic patch sites inside sun4c_fault (see
	 * invalid_segment_patch1/2, num_context_patch1,
	 * vac_linesize_patch, vac_hwflush_patch1/2 further down). */
824 .align 4
825 .globl invalid_segment_patch1_ff
826 .globl invalid_segment_patch2_ff
827invalid_segment_patch1_ff: cmp %l4, 0xff
828invalid_segment_patch2_ff: mov 0xff, %l3
829
830 .align 4
831 .globl invalid_segment_patch1_1ff
832 .globl invalid_segment_patch2_1ff
833invalid_segment_patch1_1ff: cmp %l4, 0x1ff
834invalid_segment_patch2_1ff: mov 0x1ff, %l3
835
836 .align 4
837 .globl num_context_patch1_16, num_context_patch2_16
838num_context_patch1_16: mov 0x10, %l7
839num_context_patch2_16: mov 0x10, %l7
840
841 .align 4
842 .globl vac_linesize_patch_32
843vac_linesize_patch_32: subcc %l7, 32, %l7
844
845 .align 4
846 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
847
848/*
849 * Ugly, but we cant use hardware flushing on the sun4 and we'd require
850 * two instructions (Anton)
851 */
852#ifdef CONFIG_SUN4
853vac_hwflush_patch1_on: nop
854#else
855vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
856#endif
857
858vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
859
860 .globl invalid_segment_patch1, invalid_segment_patch2
861 .globl num_context_patch1
862 .globl vac_linesize_patch, vac_hwflush_patch1
863 .globl vac_hwflush_patch2
864
865 .align 4
866 .globl sun4c_fault
867
868! %l0 = %psr
869! %l1 = %pc
870! %l2 = %npc
871! %l3 = %wim
872! %l7 = 1 for textfault
873! We want error in %l5, vaddr in %l6
	! Kernel TLB-miss fast path for sun4c/sun4: reload the software
	! "TLB" (segment map + PTEs) by hand without building a trap
	! frame.  Anything that is not a plain kernel tlb miss falls
	! through to sun4c_fault_fromuser.  The instructions at the
	! *_patch1/2 labels are overwritten at boot with the hardware-
	! specific templates defined above.
874sun4c_fault:
875#ifdef CONFIG_SUN4
876 sethi %hi(sun4c_memerr_reg), %l4
877 ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr
878 ld [%l4], %l6 ! memerr ctrl reg
879 ld [%l4 + 4], %l5 ! memerr vaddr reg
880 andcc %l6, 0x80, %g0 ! check for error type
881 st %g0, [%l4 + 4] ! clear the error
882 be 0f ! normal error
883 sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr
884
885 call prom_halt ! something weird happened
886 ! what exactly did happen?
887 ! what should we do here?
888
8890: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr
890 lduba [%l4] ASI_CONTROL, %l6 ! bus err reg
891
892 cmp %l7, 1 ! text fault?
893 be 1f ! yes
894 nop
895
896 ld [%l1], %l4 ! load instruction that caused fault
897 srl %l4, 21, %l4
898 andcc %l4, 1, %g0 ! store instruction?
899
900 be 1f ! no
901 sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
902 ! %lo(SUN4C_SYNC_BADWRITE) = 0
903 or %l4, %l6, %l6 ! set write bit to emulate sun4c
9041:
905#else
906 sethi %hi(AC_SYNC_ERR), %l4
907 add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
908 lda [%l6] ASI_CONTROL, %l5 ! Address
909 lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit
910#endif
911
912 andn %l5, 0xfff, %l5 ! Encode all info into l7
913 srl %l6, 14, %l4
914
915 and %l4, 2, %l4
916 or %l5, %l4, %l4
917
918 or %l4, %l7, %l7 ! l7 = [addr,write,txtfault]
919
920 andcc %l0, PSR_PS, %g0
921 be sun4c_fault_fromuser ! user-mode fault: slow path
922 andcc %l7, 1, %g0 ! (delay slot) Text fault?
923
924 be 1f
925 sethi %hi(KERNBASE), %l4 ! (delay slot)
926
927 mov %l1, %l5 ! PC
928
	/* Faults below KERNBASE are user addresses: slow path. */
9291:
930 cmp %l5, %l4
931 blu sun4c_fault_fromuser
932 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 ! (delay slot) segment mask
933
934 /* If the kernel references a bum kernel pointer, or a pte which
935 * points to a non existant page in ram, we will run this code
936 * _forever_ and lock up the machine!!!!! So we must check for
937 * this condition, the AC_SYNC_ERR bits are what we must examine.
938 * Also a parity error would make this happen as well. So we just
939 * check that we are in fact servicing a tlb miss and not some
940 * other type of fault for the kernel.
941 */
942 andcc %l6, 0x80, %g0
943 be sun4c_fault_fromuser
944 and %l5, %l4, %l5 ! (delay slot) round vaddr down to segment

945 /* Test for NULL pte_t * in vmalloc area. */
946 sethi %hi(VMALLOC_START), %l4
947 cmp %l5, %l4
948 blu,a invalid_segment_patch1
949 lduXa [%l5] ASI_SEGMAP, %l4 ! (annulled delay slot) current pseg

950 sethi %hi(swapper_pg_dir), %l4
951 srl %l5, SUN4C_PGDIR_SHIFT, %l6
952 or %l4, %lo(swapper_pg_dir), %l4
953 sll %l6, 2, %l6
954 ld [%l4 + %l6], %l4
955#ifdef CONFIG_SUN4
956 sethi %hi(PAGE_MASK), %l6
957 andcc %l4, %l6, %g0
958#else
959 andcc %l4, PAGE_MASK, %g0
960#endif
961 be sun4c_fault_fromuser ! no pte table: slow path
962 lduXa [%l5] ASI_SEGMAP, %l4 ! (delay slot) current pseg

	/* Patched at boot: the 0x7f constant becomes the hardware's
	 * "invalid segment" value (0xff / 0x1ff variants above). */
963invalid_segment_patch1:
964 cmp %l4, 0x7f
965 bne 1f
966 sethi %hi(sun4c_kfree_ring), %l4 ! (delay slot)
967 or %l4, %lo(sun4c_kfree_ring), %l4
968 ld [%l4 + 0x18], %l3
969 deccc %l3 ! do we have a free entry?
970 bcs,a 2f ! no, unmap one.
971 sethi %hi(sun4c_kernel_ring), %l4

972 st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries--

973 ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next
974 st %l5, [%l6 + 0x08] ! entry->vaddr = address

975 ld [%l6 + 0x00], %l3 ! next = entry->next
976 ld [%l6 + 0x04], %l7 ! entry->prev

977 st %l7, [%l3 + 0x04] ! next->prev = entry->prev
978 st %l3, [%l7 + 0x00] ! entry->prev->next = next

979 sethi %hi(sun4c_kernel_ring), %l4
980 or %l4, %lo(sun4c_kernel_ring), %l4
981 ! head = &sun4c_kernel_ring.ringhd

982 ld [%l4 + 0x00], %l7 ! head->next

983 st %l4, [%l6 + 0x04] ! entry->prev = head
984 st %l7, [%l6 + 0x00] ! entry->next = head->next
985 st %l6, [%l7 + 0x04] ! head->next->prev = entry

986 st %l6, [%l4 + 0x00] ! head->next = entry

987 ld [%l4 + 0x18], %l3
988 inc %l3 ! sun4c_kernel_ring.num_entries++
989 st %l3, [%l4 + 0x18]
990 b 4f
991 ld [%l6 + 0x08], %l5 ! (delay slot)

	/* No free entries: steal the least-recently-used kernel-ring
	 * entry, flushing its old segment from the cache first. */
9922:
993 or %l4, %lo(sun4c_kernel_ring), %l4
994 ! head = &sun4c_kernel_ring.ringhd

995 ld [%l4 + 0x04], %l6 ! entry = head->prev

996 ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr

997 ! Flush segment from the cache.
998#ifdef CONFIG_SUN4
999 sethi %hi((128 * 1024)), %l7
1000#else
1001 sethi %hi((64 * 1024)), %l7
1002#endif
	/* Patched at boot with the detected cache line size / hwflush
	 * sequence (see the *_patch templates above). */
10039:
1004vac_hwflush_patch1:
1005vac_linesize_patch:
1006 subcc %l7, 16, %l7
1007 bne 9b
1008vac_hwflush_patch2:
1009 sta %g0, [%l3 + %l7] ASI_FLUSHSEG

1010 st %l5, [%l6 + 0x08] ! entry->vaddr = address

1011 ld [%l6 + 0x00], %l5 ! next = entry->next
1012 ld [%l6 + 0x04], %l7 ! entry->prev

1013 st %l7, [%l5 + 0x04] ! next->prev = entry->prev
1014 st %l5, [%l7 + 0x00] ! entry->prev->next = next
1015 st %l4, [%l6 + 0x04] ! entry->prev = head

1016 ld [%l4 + 0x00], %l7 ! head->next

1017 st %l7, [%l6 + 0x00] ! entry->next = head->next
1018 st %l6, [%l7 + 0x04] ! head->next->prev = entry
1019 st %l6, [%l4 + 0x00] ! head->next = entry

1020 mov %l3, %l5 ! address = tmp

	/* Install the new vaddr->pseg mapping in every context; the
	 * context count is patched at boot (num_context_patch*). */
10214:
1022num_context_patch1:
1023 mov 0x08, %l7

1024 ld [%l6 + 0x08], %l4
1025 ldub [%l6 + 0x0c], %l3
1026 or %l4, %l3, %l4 ! encode new vaddr/pseg into l4

1027 sethi %hi(AC_CONTEXT), %l3
1028 lduba [%l3] ASI_CONTROL, %l6 ! save current context

1029 /* Invalidate old mapping, instantiate new mapping,
1030 * for each context. Registers l6/l7 are live across
1031 * this loop.
1032 */
10333: deccc %l7
1034 sethi %hi(AC_CONTEXT), %l3
1035 stba %l7, [%l3] ASI_CONTROL
1036invalid_segment_patch2:
1037 mov 0x7f, %l3
1038 stXa %l3, [%l5] ASI_SEGMAP
1039 andn %l4, 0x1ff, %l3
1040 bne 3b
1041 stXa %l4, [%l3] ASI_SEGMAP ! (delay slot)

1042 sethi %hi(AC_CONTEXT), %l3
1043 stba %l6, [%l3] ASI_CONTROL ! restore saved context

1044 andn %l4, 0x1ff, %l5

	/* Segment mapped; now fill in its PTEs.  Addresses below
	 * VMALLOC_START get an identity-style mapping built from
	 * KERNBASE; vmalloc addresses copy PTEs from swapper_pg_dir. */
10451:
1046 sethi %hi(VMALLOC_START), %l4
1047 cmp %l5, %l4

1048 bgeu 1f
1049 mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7 ! (delay slot) ptes per segment

1050 sethi %hi(KERNBASE), %l6

1051 sub %l5, %l6, %l4
1052 srl %l4, PAGE_SHIFT, %l4
1053 sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1054 or %l3, %l4, %l3

1055 sethi %hi(PAGE_SIZE), %l4

10562:
1057 sta %l3, [%l5] ASI_PTE
1058 deccc %l7
1059 inc %l3
1060 bne 2b
1061 add %l5, %l4, %l5 ! (delay slot) next page

1062 b 7f
1063 sethi %hi(sun4c_kernel_faults), %l4 ! (delay slot)

10641:
1065 srl %l5, SUN4C_PGDIR_SHIFT, %l3
1066 sethi %hi(swapper_pg_dir), %l4
1067 or %l4, %lo(swapper_pg_dir), %l4
1068 sll %l3, 2, %l3
1069 ld [%l4 + %l3], %l4
1070#ifndef CONFIG_SUN4
1071 and %l4, PAGE_MASK, %l4
1072#else
1073 sethi %hi(PAGE_MASK), %l6
1074 and %l4, %l6, %l4
1075#endif

1076 srl %l5, (PAGE_SHIFT - 2), %l6
1077 and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1078 add %l6, %l4, %l6

1079 sethi %hi(PAGE_SIZE), %l4

10802:
1081 ld [%l6], %l3
1082 deccc %l7
1083 sta %l3, [%l5] ASI_PTE
1084 add %l6, 0x4, %l6
1085 bne 2b
1086 add %l5, %l4, %l5 ! (delay slot) next page

1087 sethi %hi(sun4c_kernel_faults), %l4
10887:
1089 ld [%l4 + %lo(sun4c_kernel_faults)], %l3
1090 inc %l3 ! sun4c_kernel_faults++
1091 st %l3, [%l4 + %lo(sun4c_kernel_faults)]

1092 /* Restore condition codes */
1093 wr %l0, 0x0, %psr
1094 WRITE_PAUSE
1095 jmp %l1 ! retry the faulting instruction
1096 rett %l2
1138
1139sun4c_fault_fromuser:
	/* Slow path: decode the packed [addr,write,txtfault] word built
	 * by sun4c_fault into separate C arguments and call the generic
	 * fault handler. */
1140 SAVE_ALL
1141 nop

1142 mov %l7, %o1 ! Decode the info from %l7
1143 mov %l7, %o2
1144 and %o1, 1, %o1 ! arg2 = text_faultp
1145 mov %l7, %o3
1146 and %o2, 2, %o2 ! arg3 = writep
1147 andn %o3, 0xfff, %o3 ! arg4 = faulting address

1148 wr %l0, PSR_ET, %psr
1149 WRITE_PAUSE

1150 call do_sun4c_fault
1151 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) arg1 = pt_regs ptr

1152 RESTORE_ALL
1157
1158 .align 4
1159 .globl srmmu_fault
1160srmmu_fault:
	/* SRMMU fault entry: read fault address (reg 0x400) before fault
	 * status (reg 0x300) — the comments note the required order —
	 * then pack [addr,write,txtfault] into %l7 like sun4c_fault and
	 * call the generic C fault handler. */
1161 mov 0x400, %l5
1162 mov 0x300, %l4

1163 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
1164 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last

1165 andn %l6, 0xfff, %l6
1166 srl %l5, 6, %l5 ! and encode all info into l7

1167 and %l5, 2, %l5
1168 or %l5, %l6, %l6

1169 or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]

1170 SAVE_ALL

1171 mov %l7, %o1
1172 mov %l7, %o2
1173 and %o1, 1, %o1 ! arg2 = text_faultp
1174 mov %l7, %o3
1175 and %o2, 2, %o2 ! arg3 = writep
1176 andn %o3, 0xfff, %o3 ! arg4 = faulting address

1177 wr %l0, PSR_ET, %psr
1178 WRITE_PAUSE

1179 call do_sparc_fault
1180 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) arg1 = pt_regs ptr

1181 RESTORE_ALL
1191
1192#ifdef CONFIG_SUNOS_EMUL
1193 /* SunOS uses syscall zero as the 'indirect syscall' it looks
1194 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
1195 * This is complete brain damage.
1196 */
1197 .globl sunos_indir
1198sunos_indir:
	/* Look up the real handler (or sunos_nosys for out-of-range
	 * numbers), shift the arguments down one slot, and tail-call it
	 * with the caller's return address preserved in %o7. */
1199 mov %o7, %l4
1200 cmp %o0, NR_SYSCALLS
1201 blu,a 1f
1202 sll %o0, 0x2, %o0 ! (annulled delay slot) table byte offset

1203 sethi %hi(sunos_nosys), %l6
1204 b 2f
1205 or %l6, %lo(sunos_nosys), %l6 ! (delay slot)

12061:
1207 set sunos_sys_table, %l7
1208 ld [%l7 + %o0], %l6

12092:
1210 mov %o1, %o0
1211 mov %o2, %o1
1212 mov %o3, %o2
1213 mov %o4, %o3
1214 mov %o5, %o4
1215 call %l6
1216 mov %l4, %o7 ! (delay slot) restore return address -> tail call
1217#endif
1221
	/* The wrappers below share one trick: save %o7, set up extra
	 * C arguments (usually the pt_regs pointer), and restore %o7 in
	 * the call's delay slot so the C function returns directly to
	 * the wrapper's caller (tail call). */
1222 .align 4
1223 .globl sys_nis_syscall
1224sys_nis_syscall:
1225 mov %o7, %l5
1226 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1227 call c_sys_nis_syscall
1228 mov %l5, %o7 ! (delay slot) tail call

1229 .align 4
1230 .globl sys_ptrace
1231sys_ptrace:
1232 call do_ptrace
1233 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr

	/* Report the syscall exit to the tracer if tracing is on. */
1234 ld [%curptr + TI_FLAGS], %l5
1235 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1236 be 1f
1237 nop

1238 call syscall_trace
1239 nop

12401:
1241 RESTORE_ALL

1242 .align 4
1243 .globl sys_execve
1244sys_execve:
1245 mov %o7, %l5
1246 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1247 call sparc_execve
1248 mov %l5, %o7 ! (delay slot) tail call

1249 .align 4
1250 .globl sys_pipe
1251sys_pipe:
1252 mov %o7, %l5
1253 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1254 call sparc_pipe
1255 mov %l5, %o7 ! (delay slot) tail call

1256 .align 4
1257 .globl sys_sigaltstack
1258sys_sigaltstack:
1259 mov %o7, %l5
1260 mov %fp, %o2 ! arg3 = user stack pointer
1261 call do_sigaltstack
1262 mov %l5, %o7 ! (delay slot) tail call

1263 .align 4
1264 .globl sys_sigstack
1265sys_sigstack:
1266 mov %o7, %l5
1267 mov %fp, %o2 ! arg3 = user stack pointer
1268 call do_sys_sigstack
1269 mov %l5, %o7 ! (delay slot) tail call
1278
	/* Signal-related syscalls below return straight through
	 * RESTORE_ALL (after an optional syscall_trace callout) instead
	 * of the normal ret_sys_call path, since they must not clobber
	 * the register state set up for the signal handler. */
1279 .align 4
1280 .globl sys_sigpause
1281sys_sigpause:
1282 /* Note: %o0 already has correct value... */
1283 call do_sigpause
1284 add %sp, STACKFRAME_SZ, %o1 ! (delay slot) pt_regs ptr

1285 ld [%curptr + TI_FLAGS], %l5
1286 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1287 be 1f
1288 nop

1289 call syscall_trace
1290 nop

12911:
1292 /* We are returning to a signal handler. */
1293 RESTORE_ALL

1294 .align 4
1295 .globl sys_sigsuspend
1296sys_sigsuspend:
1297 call do_sigsuspend
1298 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr

1299 ld [%curptr + TI_FLAGS], %l5
1300 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1301 be 1f
1302 nop

1303 call syscall_trace
1304 nop

13051:
1306 /* We are returning to a signal handler. */
1307 RESTORE_ALL

1308 .align 4
1309 .globl sys_rt_sigsuspend
1310sys_rt_sigsuspend:
1311 /* Note: %o0, %o1 already have correct value... */
1312 call do_rt_sigsuspend
1313 add %sp, STACKFRAME_SZ, %o2 ! (delay slot) pt_regs ptr

1314 ld [%curptr + TI_FLAGS], %l5
1315 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1316 be 1f
1317 nop

1318 call syscall_trace
1319 nop

13201:
1321 /* We are returning to a signal handler. */
1322 RESTORE_ALL

1323 .align 4
1324 .globl sys_sigreturn
1325sys_sigreturn:
1326 call do_sigreturn
1327 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr

1328 ld [%curptr + TI_FLAGS], %l5
1329 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1330 be 1f
1331 nop

1332 call syscall_trace
1333 nop

13341:
1335 /* We don't want to muck with user registers like a
1336 * normal syscall, just return.
1337 */
1338 RESTORE_ALL

1339 .align 4
1340 .globl sys_rt_sigreturn
1341sys_rt_sigreturn:
1342 call do_rt_sigreturn
1343 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr

1344 ld [%curptr + TI_FLAGS], %l5
1345 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1346 be 1f
1347 nop

1348 call syscall_trace
1349 nop

13501:
1351 /* We are returning to a signal handler. */
1352 RESTORE_ALL
1372
1373 /* Now that we have a real sys_clone, sys_fork() is
1374 * implemented in terms of it. Our _real_ implementation
1375 * of SunOS vfork() will use sys_vfork().
1376 *
1377 * XXX These three should be consolidated into mostly shared
1378 * XXX code just like on sparc64... -DaveM
1379 */
1380 .align 4
1381 .globl sys_fork, flush_patch_two
1382sys_fork:
1383 mov %o7, %l5
	/* flush_patch_two (and three/four below) are boot-time patch
	 * sites for the window-flush sequence.  All three variants save
	 * the current psr/wim pair into the thread's fork_kpsr slot
	 * before calling sparc_do_fork. */
1384flush_patch_two:
1385 FLUSH_ALL_KERNEL_WINDOWS;
1386 ld [%curptr + TI_TASK], %o4
1387 rd %psr, %g4
1388 WRITE_PAUSE
1389 mov SIGCHLD, %o0 ! arg0: clone flags
1390 rd %wim, %g5
1391 WRITE_PAUSE
1392 mov %fp, %o1 ! arg1: usp
1393 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1394 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1395 mov 0, %o3
1396 call sparc_do_fork
1397 mov %l5, %o7 ! (delay slot) tail call

1398 /* Whee, kernel threads! */
1399 .globl sys_clone, flush_patch_three
1400sys_clone:
1401 mov %o7, %l5
1402flush_patch_three:
1403 FLUSH_ALL_KERNEL_WINDOWS;
1404 ld [%curptr + TI_TASK], %o4
1405 rd %psr, %g4
1406 WRITE_PAUSE

1407 /* arg0,1: flags,usp -- loaded already */
1408 cmp %o1, 0x0 ! Is new_usp NULL?
1409 rd %wim, %g5
1410 WRITE_PAUSE
1411 be,a 1f
1412 mov %fp, %o1 ! yes, use callers usp
1413 andn %o1, 7, %o1 ! no, align to 8 bytes
14141:
1415 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1416 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1417 mov 0, %o3
1418 call sparc_do_fork
1419 mov %l5, %o7 ! (delay slot) tail call

1420 /* Whee, real vfork! */
1421 .globl sys_vfork, flush_patch_four
1422sys_vfork:
1423flush_patch_four:
1424 FLUSH_ALL_KERNEL_WINDOWS;
1425 ld [%curptr + TI_TASK], %o4
1426 rd %psr, %g4
1427 WRITE_PAUSE
1428 rd %wim, %g5
1429 WRITE_PAUSE
1430 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
	/* Flags 0x4000|0x0100|SIGCHLD built in two halves; presumably
	 * CLONE_VFORK|CLONE_VM — TODO confirm against <linux/sched.h>. */
1431 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
1432 mov %fp, %o1
1433 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1434 sethi %hi(sparc_do_fork), %l1
1435 mov 0, %o3
1436 jmpl %l1 + %lo(sparc_do_fork), %g0 ! tail jump, %o7 untouched
1437 add %sp, STACKFRAME_SZ, %o2 ! (delay slot) arg2: pt_regs ptr
1441
1442 .align 4
	/* Out-of-range syscall number: route to sys_ni_syscall via the
	 * normal slow path. */
1443linux_sparc_ni_syscall:
1444 sethi %hi(sys_ni_syscall), %l7
1445 b syscall_is_too_hard
1446 or %l7, %lo(sys_ni_syscall), %l7 ! (delay slot)

	/* "Fast" syscalls (table entry had bit 0 set): jump straight to
	 * the handler without building a trap frame; the low bits used
	 * for the flag are masked off first. */
1447linux_fast_syscall:
1448 andn %l7, 3, %l7
1449 mov %i0, %o0
1450 mov %i1, %o1
1451 mov %i2, %o2
1452 jmpl %l7 + %g0, %g0
1453 mov %i3, %o3 ! (delay slot)

	/* Syscall-entry tracing: notify the tracer, then reload the
	 * argument registers and rejoin the dispatch at 2:. */
1454linux_syscall_trace:
1455 call syscall_trace
1456 nop
1457 mov %i0, %o0
1458 mov %i1, %o1
1459 mov %i2, %o2
1460 mov %i3, %o3
1461 b 2f
1462 mov %i4, %o4 ! (delay slot)

1463 .globl ret_from_fork
	/* First code a new child runs: finish the scheduler handoff,
	 * then return through the normal syscall-exit path. */
1464ret_from_fork:
1465 call schedule_tail
1466 mov %g3, %o0 ! (delay slot) previous task
1467 b ret_sys_call
1468 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 ! (delay slot) child's return value
1472
1473 /* Linux native and SunOS system calls enter here... */
1474 .align 4
1475 .globl linux_sparc_syscall
1476linux_sparc_syscall:
1477 /* Direct access to user regs, must faster. */
	/* %g1 = syscall number, %l7 = syscall table base (set by the
	 * trap-table stub / linux_syscall_for_solaris). */
1478 cmp %g1, NR_SYSCALLS
1479 bgeu linux_sparc_ni_syscall
1480 sll %g1, 2, %l4 ! (delay slot) table byte offset
1481 ld [%l7 + %l4], %l7
1482 andcc %l7, 1, %g0 ! low bit flags a "fast" syscall
1483 bne linux_fast_syscall
1484 /* Just do first insn from SAVE_ALL in the delay slot */

1485 .globl syscall_is_too_hard
1486syscall_is_too_hard:
1487 SAVE_ALL_HEAD
1488 rd %wim, %l3

1489 wr %l0, PSR_ET, %psr
1490 mov %i0, %o0
1491 mov %i1, %o1
1492 mov %i2, %o2

1493 ld [%curptr + TI_FLAGS], %l5
1494 mov %i3, %o3
1495 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1496 mov %i4, %o4
1497 bne linux_syscall_trace
1498 mov %i0, %l5 ! (delay slot) save orig arg0 for restart handling
14992:
1500 call %l7
1501 mov %i5, %o5 ! (delay slot) arg5

1502 st %o0, [%sp + STACKFRAME_SZ + PT_I0] ! store return value for user

1503 .globl ret_sys_call
1504ret_sys_call:
	/* Return values in [-ERESTART_RESTARTBLOCK, -1] are errors:
	 * set the carry bit in the saved psr and store abs(errno);
	 * otherwise clear carry.  %l6 records error/no-error for the
	 * signal code; a traced task detours via linux_syscall_trace2. */
1505 ld [%curptr + TI_FLAGS], %l6
1506 cmp %o0, -ERESTART_RESTARTBLOCK
1507 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1508 set PSR_C, %g2
1509 bgeu 1f
1510 andcc %l6, _TIF_SYSCALL_TRACE, %g0 ! (delay slot)

1511 /* System call success, clear Carry condition code. */
1512 andn %g3, %g2, %g3
1513 clr %l6
1514 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1515 bne linux_syscall_trace2
1516 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* (delay slot) pc = npc */
1517 add %l1, 0x4, %l2 /* npc = npc+4 */
1518 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1519 b ret_trap_entry
1520 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)
15211:
1522 /* System call failure, set Carry condition code.
1523 * Also, get abs(errno) to return to the process.
1524 */
1525 sub %g0, %o0, %o0
1526 or %g3, %g2, %g3
1527 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1528 mov 1, %l6
1529 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1530 bne linux_syscall_trace2
1531 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* (delay slot) pc = npc */
1532 add %l1, 0x4, %l2 /* npc = npc+4 */
1533 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1534 b ret_trap_entry
1535 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)

	/* Syscall-exit tracing, then the same pc/npc advance as above. */
1536linux_syscall_trace2:
1537 call syscall_trace
1538 add %l1, 0x4, %l2 /* (delay slot) npc = npc+4 */
1539 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1540 b ret_trap_entry
1541 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)
1549
1550
1551 /*
1552 * Solaris system calls and indirect system calls enter here.
1553 *
1554 * I have named the solaris indirect syscalls like that because
1555 * it seems like Solaris has some fast path syscalls that can
1556 * be handled as indirect system calls. - mig
1557 */

	/* Redirect a Solaris syscall number that maps 1:1 onto a Linux
	 * syscall through the native dispatch path. */
1559linux_syscall_for_solaris:
1560 sethi %hi(sys_call_table), %l7
1561 b linux_sparc_syscall
1562 or %l7, %lo(sys_call_table), %l7 ! (delay slot)

1563 .align 4
1564 .globl solaris_syscall
1565solaris_syscall:
	/* Syscall numbers 59, 2, 42 and 119 are forwarded to the Linux
	 * table (119 is remapped to 2 in the annulled delay slot);
	 * everything else goes through do_solaris_syscall. */
1566 cmp %g1,59
1567 be linux_syscall_for_solaris
1568 cmp %g1,2
1569 be linux_syscall_for_solaris
1570 cmp %g1,42
1571 be linux_syscall_for_solaris
1572 cmp %g1,119
1573 be,a linux_syscall_for_solaris
1574 mov 2, %g1 ! (annulled delay slot) remap 119 -> 2
15751:
1576 SAVE_ALL_HEAD
1577 rd %wim, %l3

1578 wr %l0, PSR_ET, %psr
1579 nop
1580 nop
1581 mov %i0, %l5 ! save orig arg0 for restart handling

1582 call do_solaris_syscall
1583 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr

1584 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1585 set PSR_C, %g2
1586 cmp %o0, -ERESTART_RESTARTBLOCK
1587 bgeu 1f
1588 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 ! (delay slot)

1589 /* System call success, clear Carry condition code. */
1590 andn %g3, %g2, %g3
1591 clr %l6
1592 b 2f
1593 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] ! (delay slot)

15941:
1595 /* System call failure, set Carry condition code.
1596 * Also, get abs(errno) to return to the process.
1597 */
1598 sub %g0, %o0, %o0
1599 mov 1, %l6
1600 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1601 or %g3, %g2, %g3
1602 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]

1603 /* Advance the pc and npc over the trap instruction.
1604 * If the npc is unaligned (has a 1 in the lower byte), it means
1605 * the kernel does not want us to play magic (ie, skipping over
1606 * traps). Mainly when the Solaris code wants to set some PC and
1607 * nPC (setcontext).
1608 */
16092:
1610 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1611 andcc %l1, 1, %g0
1612 bne 1f
1613 add %l1, 0x4, %l2 /* (delay slot) npc = npc+4 */
1614 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1615 b ret_trap_entry
1616 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)

1617 /* kernel knows what it is doing, fixup npc and continue */
16181:
1619 sub %l1, 1, %l1 ! strip the "no magic" marker bit
1620 b ret_trap_entry
1621 st %l1, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)
1630
1631#ifndef CONFIG_SUNOS_EMUL
	/* Without SunOS emulation compiled in, all SunOS syscalls fall
	 * through to the generic C rejection/stub handler. */
1632 .align 4
1633 .globl sunos_syscall
1634sunos_syscall:
1635 SAVE_ALL_HEAD
1636 rd %wim, %l3
1637 wr %l0, PSR_ET, %psr
1638 nop
1639 nop
1640 mov %i0, %l5
1641 call do_sunos_syscall
1642 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) pt_regs ptr
1643#endif
1644
1645 /* {net, open}bsd system calls enter here... */
1646 .align 4
1647 .globl bsd_syscall
1648bsd_syscall:
1649 /* Direct access to user regs, must faster. */
	/* %g1 = syscall number, %l7 = BSD syscall table base. */
1650 cmp %g1, NR_SYSCALLS
1651 blu,a 1f
1652 sll %g1, 2, %l4 ! (annulled delay slot) table byte offset

1653 set sys_ni_syscall, %l7
1654 b bsd_is_too_hard
1655 nop

16561:
1657 ld [%l7 + %l4], %l7

1658 .globl bsd_is_too_hard
1659bsd_is_too_hard:
1660 rd %wim, %l3
1661 SAVE_ALL

1662 wr %l0, PSR_ET, %psr
1663 WRITE_PAUSE

16642:
1665 mov %i0, %o0
1666 mov %i1, %o1
1667 mov %i2, %o2
1668 mov %i0, %l5 ! save orig arg0 for restart handling
1669 mov %i3, %o3
1670 mov %i4, %o4
1671 call %l7
1672 mov %i5, %o5 ! (delay slot) arg5

	/* Same error convention as ret_sys_call: carry bit in the saved
	 * psr plus abs(errno) on failure. */
1673 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1674 set PSR_C, %g2
1675 cmp %o0, -ERESTART_RESTARTBLOCK
1676 bgeu 1f
1677 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 ! (delay slot)

1678 /* System call success, clear Carry condition code. */
1679 andn %g3, %g2, %g3
1680 clr %l6
1681 b 2f
1682 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] ! (delay slot)

16831:
1684 /* System call failure, set Carry condition code.
1685 * Also, get abs(errno) to return to the process.
1686 */
1687 sub %g0, %o0, %o0
1688#if 0 /* XXX todo XXX */
1689 sethi %hi(bsd_xlatb_rorl), %o3
1690 or %o3, %lo(bsd_xlatb_rorl), %o3
1691 sll %o0, 2, %o0
1692 ld [%o3 + %o0], %o0
1693#endif
1694 mov 1, %l6
1695 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1696 or %g3, %g2, %g3
1697 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]

1698 /* Advance the pc and npc over the trap instruction. */
16992:
1700 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1701 add %l1, 0x4, %l2 /* npc = npc+4 */
1702 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1703 b ret_trap_entry
1704 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] ! (delay slot)
1714
1715/* Saving and restoring the FPU state is best done from lowlevel code.
1716 *
1717 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1718 * void *fpqueue, unsigned long *fpqdepth)
1719 */
1720
1721 .globl fpsave
1722fpsave:
 /* Store %fsr first; 0x2000 is the FSR queue-not-empty (qne) bit.
  * If the FP queue is non-empty we must drain it entry by entry
  * before dumping the register file.  %g2 counts queue entries.
  */
1723 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
1724 ld [%o1], %g1
1725 set 0x2000, %g4
1726 andcc %g1, %g4, %g0
1727 be 2f
1728 mov 0, %g2
1729
1730 /* We have an fpqueue to save. */
17311:
1732 std %fq, [%o2]
1733fpsave_magic:
1734 st %fsr, [%o1]
1735 ld [%o1], %g3
1736 andcc %g3, %g4, %g0 ! queue still non-empty?
1737 add %g2, 1, %g2
1738 bne 1b
1739 add %o2, 8, %o2 ! delay slot: next 8-byte queue slot
1740
17412:
1742 st %g2, [%o3] ! *fpqdepth = number of queue entries saved
1743
 /* Dump all 32 single-precision registers as 16 doubles. */
1744 std %f0, [%o0 + 0x00]
1745 std %f2, [%o0 + 0x08]
1746 std %f4, [%o0 + 0x10]
1747 std %f6, [%o0 + 0x18]
1748 std %f8, [%o0 + 0x20]
1749 std %f10, [%o0 + 0x28]
1750 std %f12, [%o0 + 0x30]
1751 std %f14, [%o0 + 0x38]
1752 std %f16, [%o0 + 0x40]
1753 std %f18, [%o0 + 0x48]
1754 std %f20, [%o0 + 0x50]
1755 std %f22, [%o0 + 0x58]
1756 std %f24, [%o0 + 0x60]
1757 std %f26, [%o0 + 0x68]
1758 std %f28, [%o0 + 0x70]
1759 retl
1760 std %f30, [%o0 + 0x78]
1761
1762 /* Thanks to Theo de Raadt and the authors of the Sprite/netbsd/openbsd
1763 * code for pointing out this possible deadlock, while we save state
1764 * above we could trap on the fsr store so our low level fpu trap
1765 * code has to know how to deal with this.
1766 */
 /* Restart points used by the FPU trap handler when the %fsr store
  * itself traps: re-issue the store, then resume just past it.
  */
1767fpsave_catch:
1768 b fpsave_magic + 4
1769 st %fsr, [%o1]
1770
1771fpsave_catch2:
1772 b fpsave + 4
1773 st %fsr, [%o1]
1774
1775 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1776
 /* Reload the full FPU register file (16 doubles = %f0-%f31) and
  * then %fsr from the save area written by fpsave() above.
  */
1777 .globl fpload
1778fpload:
1779 ldd [%o0 + 0x00], %f0
1780 ldd [%o0 + 0x08], %f2
1781 ldd [%o0 + 0x10], %f4
1782 ldd [%o0 + 0x18], %f6
1783 ldd [%o0 + 0x20], %f8
1784 ldd [%o0 + 0x28], %f10
1785 ldd [%o0 + 0x30], %f12
1786 ldd [%o0 + 0x38], %f14
1787 ldd [%o0 + 0x40], %f16
1788 ldd [%o0 + 0x48], %f18
1789 ldd [%o0 + 0x50], %f20
1790 ldd [%o0 + 0x58], %f22
1791 ldd [%o0 + 0x60], %f24
1792 ldd [%o0 + 0x68], %f26
1793 ldd [%o0 + 0x70], %f28
1794 ldd [%o0 + 0x78], %f30
1795 ld [%o1], %fsr ! restore status register last
1796 retl
1797 nop
1798
1799 /* __ndelay and __udelay take two arguments:
1800 * 0 - nsecs or usecs to delay
1801 * 1 - per_cpu udelay_val (loops per jiffy)
1802 *
1803 * Note that ndelay gives HZ times higher resolution but has a 10ms
1804 * limit. udelay can handle up to 1s.
1805 */
 /* Both entries compute a loop count as a 32.32 fixed-point product
  * (via the .umul millicode routine, result split across %o0/%o1)
  * and spin it down in delay_continue.
  */
1806 .globl __ndelay
1807__ndelay:
1808 save %sp, -STACKFRAME_SZ, %sp
1809 mov %i0, %o0
1810 call .umul
1811 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ)
1812 call .umul
1813 mov %i1, %o1 ! udelay_val
1814 ba delay_continue
1815 mov %o1, %o0 ! >>32 later for better resolution
1816
1817 .globl __udelay
1818__udelay:
1819 save %sp, -STACKFRAME_SZ, %sp
1820 mov %i0, %o0
1821 sethi %hi(0x10c6), %o1
1822 call .umul
1823 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000
1824 call .umul
1825 mov %i1, %o1 ! udelay_val
1826 call .umul
1827 mov HZ, %o0 ! >>32 earlier for wider range
1828
 /* Busy-wait: decrement %o0 to zero.  The cmp seeds the condition
  * codes so a zero count falls straight through.
  */
1829delay_continue:
1830 cmp %o0, 0x0
18311:
1832 bne 1b
1833 subcc %o0, 1, %o0
1834
1835 ret
1836 restore
1837
1838 /* Handle a software breakpoint */
1839 /* We have to inform parent that child has stopped */
1840 .align 4
1841 .globl breakpoint_trap
1842breakpoint_trap:
 /* Build a trap frame, re-enable traps, and let the C handler
  * sparc_breakpoint(regs) deliver SIGTRAP to the tracee.
  */
1843 rd %wim,%l3
1844 SAVE_ALL
1845 wr %l0, PSR_ET, %psr
1846 WRITE_PAUSE
1847
1848 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1849 call sparc_breakpoint
1850 add %sp, STACKFRAME_SZ, %o0 ! delay slot: arg0 = struct pt_regs *
1851
1852 RESTORE_ALL
1853
1854 .align 4
 /* Exception fixup trampoline: flush all kernel windows, reload
  * %sp/%fp (%o6/%o7 pair) from *%o0, and jump back past the faulting
  * site with %g1 = 1 so the caller reports EFAULT.  The first
  * instruction is also used as a runtime patch site (flush_patch_exception).
  */
1855 .globl __handle_exception, flush_patch_exception
1856__handle_exception:
1857flush_patch_exception:
1858 FLUSH_ALL_KERNEL_WINDOWS;
1859 ldd [%o0], %o6
1860 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
1861 mov 1, %g1 ! signal EFAULT condition
1862
1863 .align 4
 /* kuw_patch1_7win is a template instruction: on CPUs with 7 register
  * windows (instead of 8) the boot code copies it over kuw_patch1
  * below so the %wim rotation uses the right window count.
  */
1864 .globl kill_user_windows, kuw_patch1_7win
1865 .globl kuw_patch1
1866kuw_patch1_7win: sll %o3, 6, %o3
1867
1868 /* No matter how much overhead this routine has in the worst
1869 * case scenario, it is several times better than taking the
1870 * traps with the old method of just doing flush_user_windows().
1871 */
 /* Invalidate (discard) all live user windows by rotating %wim over
  * each bit set in the thread's uwinmask, with interrupts masked so
  * the mask cannot change underneath us.
  */
1872kill_user_windows:
1873 ld [%g6 + TI_UWINMASK], %o0 ! get current umask
1874 orcc %g0, %o0, %g0 ! if no bits set, we are done
1875 be 3f ! nothing to do
1876 rd %psr, %o5 ! must clear interrupts
1877 or %o5, PSR_PIL, %o4 ! or else that could change
1878 wr %o4, 0x0, %psr ! the uwinmask state
1879 WRITE_PAUSE ! burn them cycles
18801:
1881 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state
1882 orcc %g0, %o0, %g0 ! did an interrupt come in?
1883 be 4f ! yep, we are done
1884 rd %wim, %o3 ! get current wim
1885 srl %o3, 1, %o4 ! simulate a save
1886kuw_patch1:
1887 sll %o3, 7, %o3 ! compute next wim
1888 or %o4, %o3, %o3 ! result
1889 andncc %o0, %o3, %o0 ! clean this bit in umask
1890 bne kuw_patch1 ! not done yet
1891 srl %o3, 1, %o4 ! begin another save simulation
1892 wr %o3, 0x0, %wim ! set the new wim
1893 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask
18944:
1895 wr %o5, 0x0, %psr ! re-enable interrupts
1896 WRITE_PAUSE ! burn baby burn
18973:
1898 retl ! return
1899 st %g0, [%g6 + TI_W_SAVED] ! no windows saved
1900
1901 .align 4
 /* Reload %g6 (the current thread_info pointer) using the
  * LOAD_CURRENT macro; %o0 is used as scratch.
  */
1902 .globl restore_current
1903restore_current:
1904 LOAD_CURRENT(g6, o0)
1905 retl
1906 nop
1907
1908#ifdef CONFIG_PCI
1909#include <asm/pcic.h>
1910
 /* Level-15 (NMI) trap handler for PCIC-based sun4m machines.
  * %o0 keeps the pending-status snapshot across the clearing loop
  * and becomes the first argument to pcic_nmi(pending, regs).
  */
1911 .align 4
1912 .globl linux_trap_ipi15_pcic
1913linux_trap_ipi15_pcic:
1914 rd %wim, %l3
1915 SAVE_ALL
1916
1917 /*
1918 * First deactivate NMI
1919 * or we cannot drop ET, cannot get window spill traps.
1920 * The busy loop is necessary because the PIO error
1921 * sometimes does not go away quickly and we trap again.
1922 */
1923 sethi %hi(pcic_regs), %o1
1924 ld [%o1 + %lo(pcic_regs)], %o2
1925
1926 ! Get pending status for printouts later.
1927 ld [%o2 + PCI_SYS_INT_PENDING], %o0
1928
1929 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1930 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
19311:
1932 ld [%o2 + PCI_SYS_INT_PENDING], %o1
1933 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1934 bne 1b
1935 nop
1936
 /* Mask interrupts (PIL), then re-enable traps before calling C. */
1937 or %l0, PSR_PIL, %l4
1938 wr %l4, 0x0, %psr
1939 WRITE_PAUSE
1940 wr %l4, PSR_ET, %psr
1941 WRITE_PAUSE
1942
1943 call pcic_nmi
1944 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1945 RESTORE_ALL
1946
 /* Four-instruction template copied over the level-15 trap table
  * entry at boot to route NMIs here (last slot is padding).
  */
1947 .globl pcic_nmi_trap_patch
1948pcic_nmi_trap_patch:
1949 sethi %hi(linux_trap_ipi15_pcic), %l3
1950 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
1951 rd %psr, %l0
1952 .word 0
1953
1954#endif /* CONFIG_PCI */
1955
1956/* End of entry.S */
diff --git a/arch/sparc/kernel/errtbls.c b/arch/sparc/kernel/errtbls.c
new file mode 100644
index 000000000000..bb36f6eadfee
--- /dev/null
+++ b/arch/sparc/kernel/errtbls.c
@@ -0,0 +1,276 @@
1/* $Id: errtbls.c,v 1.2 1995/11/25 00:57:55 davem Exp $
2 * errtbls.c: Error number conversion tables between various syscall
3 * OS semantics.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <asm/bsderrno.h> /* NetBSD (bsd4.4) errnos */
13#include <asm/solerrno.h> /* Solaris errnos */
14
15/* Here are tables which convert between Linux/SunOS error number
16 * values to the equivalent in other OSs. Note that since the Linux
17 * ones have been set up to match exactly those of SunOS, no
18 * translation table is needed for that OS.
19 */
20
/*
 * Indexed by the Linux/SunOS errno value: entry [i] is the Solaris
 * errno handed back to Solaris-personality processes.  Must stay in
 * lockstep with the SOL_* definitions in <asm/solerrno.h>.
 *
 * NOTE(review): several constant names look like misspellings of the
 * usual errno names (SOL_NOTBLK vs ENOTBLK, SOL_ENOTONN vs ENOTCONN,
 * SOL_IDRM vs EIDRM, SOL_REMCHG, SOL_NOSYS, SOL_NRNG, SOL_STRPIPE).
 * They are only correct if <asm/solerrno.h> defines exactly these
 * names — verify against that header before renaming anything.
 */
21int solaris_errno[] = {
22 0,
23 SOL_EPERM,
24 SOL_ENOENT,
25 SOL_ESRCH,
26 SOL_EINTR,
27 SOL_EIO,
28 SOL_ENXIO,
29 SOL_E2BIG,
30 SOL_ENOEXEC,
31 SOL_EBADF,
32 SOL_ECHILD,
33 SOL_EAGAIN,
34 SOL_ENOMEM,
35 SOL_EACCES,
36 SOL_EFAULT,
37 SOL_NOTBLK,
38 SOL_EBUSY,
39 SOL_EEXIST,
40 SOL_EXDEV,
41 SOL_ENODEV,
42 SOL_ENOTDIR,
43 SOL_EISDIR,
44 SOL_EINVAL,
45 SOL_ENFILE,
46 SOL_EMFILE,
47 SOL_ENOTTY,
48 SOL_ETXTBSY,
49 SOL_EFBIG,
50 SOL_ENOSPC,
51 SOL_ESPIPE,
52 SOL_EROFS,
53 SOL_EMLINK,
54 SOL_EPIPE,
55 SOL_EDOM,
56 SOL_ERANGE,
57 SOL_EWOULDBLOCK,
58 SOL_EINPROGRESS,
59 SOL_EALREADY,
60 SOL_ENOTSOCK,
61 SOL_EDESTADDRREQ,
62 SOL_EMSGSIZE,
63 SOL_EPROTOTYPE,
64 SOL_ENOPROTOOPT,
65 SOL_EPROTONOSUPPORT,
66 SOL_ESOCKTNOSUPPORT,
67 SOL_EOPNOTSUPP,
68 SOL_EPFNOSUPPORT,
69 SOL_EAFNOSUPPORT,
70 SOL_EADDRINUSE,
71 SOL_EADDRNOTAVAIL,
72 SOL_ENETDOWN,
73 SOL_ENETUNREACH,
74 SOL_ENETRESET,
75 SOL_ECONNABORTED,
76 SOL_ECONNRESET,
77 SOL_ENOBUFS,
78 SOL_EISCONN,
79 SOL_ENOTONN,
80 SOL_ESHUTDOWN,
81 SOL_ETOOMANYREFS,
82 SOL_ETIMEDOUT,
83 SOL_ECONNREFUSED,
84 SOL_ELOOP,
85 SOL_ENAMETOOLONG,
86 SOL_EHOSTDOWN,
87 SOL_EHOSTUNREACH,
88 SOL_ENOTEMPTY,
89 SOL_EPROCLIM,
90 SOL_EUSERS,
91 SOL_EDQUOT,
92 SOL_ESTALE,
93 SOL_EREMOTE,
94 SOL_ENOSTR,
95 SOL_ETIME,
96 SOL_ENOSR,
97 SOL_ENOMSG,
98 SOL_EBADMSG,
99 SOL_IDRM,
100 SOL_EDEADLK,
101 SOL_ENOLCK,
102 SOL_ENONET,
103 SOL_ERREMOTE,
104 SOL_ENOLINK,
105 SOL_EADV,
106 SOL_ESRMNT,
107 SOL_ECOMM,
108 SOL_EPROTO,
109 SOL_EMULTIHOP,
110 SOL_EINVAL, /* EDOTDOT XXX??? */
111 SOL_REMCHG,
112 SOL_NOSYS,
113 SOL_STRPIPE,
114 SOL_EOVERFLOW,
115 SOL_EBADFD,
116 SOL_ECHRNG,
117 SOL_EL2NSYNC,
118 SOL_EL3HLT,
119 SOL_EL3RST,
120 SOL_NRNG,
121 SOL_EUNATCH,
122 SOL_ENOCSI,
123 SOL_EL2HLT,
124 SOL_EBADE,
125 SOL_EBADR,
126 SOL_EXFULL,
127 SOL_ENOANO,
128 SOL_EBADRQC,
129 SOL_EBADSLT,
130 SOL_EDEADLOCK,
131 SOL_EBFONT,
132 SOL_ELIBEXEC,
133 SOL_ENODATA,
134 SOL_ELIBBAD,
135 SOL_ENOPKG,
136 SOL_ELIBACC,
137 SOL_ENOTUNIQ,
138 SOL_ERESTART,
139 SOL_EUCLEAN,
140 SOL_ENOTNAM,
141 SOL_ENAVAIL,
142 SOL_EISNAM,
143 SOL_EREMOTEIO,
144 SOL_EILSEQ,
145 SOL_ELIBMAX,
146 SOL_ELIBSCN,
147};
148
/*
 * Indexed by the Linux/SunOS errno value: entry [i] is the NetBSD
 * (BSD 4.4) errno for BSD-personality syscalls.  Must stay in
 * lockstep with the BSD_* definitions in <asm/bsderrno.h> and is
 * deliberately parallel, entry for entry, to solaris_errno[] above.
 *
 * NOTE(review): the same suspicious names appear here as in
 * solaris_errno[] (BSD_NOTBLK, BSD_ENOTONN, BSD_IDRM, ...); they must
 * match <asm/bsderrno.h> exactly — verify before changing.
 */
149int netbsd_errno[] = {
150 0,
151 BSD_EPERM,
152 BSD_ENOENT,
153 BSD_ESRCH,
154 BSD_EINTR,
155 BSD_EIO,
156 BSD_ENXIO,
157 BSD_E2BIG,
158 BSD_ENOEXEC,
159 BSD_EBADF,
160 BSD_ECHILD,
161 BSD_EAGAIN,
162 BSD_ENOMEM,
163 BSD_EACCES,
164 BSD_EFAULT,
165 BSD_NOTBLK,
166 BSD_EBUSY,
167 BSD_EEXIST,
168 BSD_EXDEV,
169 BSD_ENODEV,
170 BSD_ENOTDIR,
171 BSD_EISDIR,
172 BSD_EINVAL,
173 BSD_ENFILE,
174 BSD_EMFILE,
175 BSD_ENOTTY,
176 BSD_ETXTBSY,
177 BSD_EFBIG,
178 BSD_ENOSPC,
179 BSD_ESPIPE,
180 BSD_EROFS,
181 BSD_EMLINK,
182 BSD_EPIPE,
183 BSD_EDOM,
184 BSD_ERANGE,
185 BSD_EWOULDBLOCK,
186 BSD_EINPROGRESS,
187 BSD_EALREADY,
188 BSD_ENOTSOCK,
189 BSD_EDESTADDRREQ,
190 BSD_EMSGSIZE,
191 BSD_EPROTOTYPE,
192 BSD_ENOPROTOOPT,
193 BSD_EPROTONOSUPPORT,
194 BSD_ESOCKTNOSUPPORT,
195 BSD_EOPNOTSUPP,
196 BSD_EPFNOSUPPORT,
197 BSD_EAFNOSUPPORT,
198 BSD_EADDRINUSE,
199 BSD_EADDRNOTAVAIL,
200 BSD_ENETDOWN,
201 BSD_ENETUNREACH,
202 BSD_ENETRESET,
203 BSD_ECONNABORTED,
204 BSD_ECONNRESET,
205 BSD_ENOBUFS,
206 BSD_EISCONN,
207 BSD_ENOTONN,
208 BSD_ESHUTDOWN,
209 BSD_ETOOMANYREFS,
210 BSD_ETIMEDOUT,
211 BSD_ECONNREFUSED,
212 BSD_ELOOP,
213 BSD_ENAMETOOLONG,
214 BSD_EHOSTDOWN,
215 BSD_EHOSTUNREACH,
216 BSD_ENOTEMPTY,
217 BSD_EPROCLIM,
218 BSD_EUSERS,
219 BSD_EDQUOT,
220 BSD_ESTALE,
221 BSD_EREMOTE,
222 BSD_ENOSTR,
223 BSD_ETIME,
224 BSD_ENOSR,
225 BSD_ENOMSG,
226 BSD_EBADMSG,
227 BSD_IDRM,
228 BSD_EDEADLK,
229 BSD_ENOLCK,
230 BSD_ENONET,
231 BSD_ERREMOTE,
232 BSD_ENOLINK,
233 BSD_EADV,
234 BSD_ESRMNT,
235 BSD_ECOMM,
236 BSD_EPROTO,
237 BSD_EMULTIHOP,
238 BSD_EINVAL, /* EDOTDOT XXX??? */
239 BSD_REMCHG,
240 BSD_NOSYS,
241 BSD_STRPIPE,
242 BSD_EOVERFLOW,
243 BSD_EBADFD,
244 BSD_ECHRNG,
245 BSD_EL2NSYNC,
246 BSD_EL3HLT,
247 BSD_EL3RST,
248 BSD_NRNG,
249 BSD_EUNATCH,
250 BSD_ENOCSI,
251 BSD_EL2HLT,
252 BSD_EBADE,
253 BSD_EBADR,
254 BSD_EXFULL,
255 BSD_ENOANO,
256 BSD_EBADRQC,
257 BSD_EBADSLT,
258 BSD_EDEADLOCK,
259 BSD_EBFONT,
260 BSD_ELIBEXEC,
261 BSD_ENODATA,
262 BSD_ELIBBAD,
263 BSD_ENOPKG,
264 BSD_ELIBACC,
265 BSD_ENOTUNIQ,
266 BSD_ERESTART,
267 BSD_EUCLEAN,
268 BSD_ENOTNAM,
269 BSD_ENAVAIL,
270 BSD_EISNAM,
271 BSD_EREMOTEIO,
272 BSD_EILSEQ,
273 BSD_ELIBMAX,
274 BSD_ELIBSCN,
275};
276
diff --git a/arch/sparc/kernel/etrap.S b/arch/sparc/kernel/etrap.S
new file mode 100644
index 000000000000..a8b35bed12a2
--- /dev/null
+++ b/arch/sparc/kernel/etrap.S
@@ -0,0 +1,321 @@
1/* $Id: etrap.S,v 1.31 2000/01/08 16:38:18 anton Exp $
2 * etrap.S: Sparc trap window preparation for entry into the
3 * Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/head.h>
9#include <asm/asi.h>
10#include <asm/contregs.h>
11#include <asm/page.h>
12#include <asm/psr.h>
13#include <asm/ptrace.h>
14#include <asm/winmacro.h>
15#include <asm/asmmacro.h>
16#include <asm/thread_info.h>
17
18/* Registers to not touch at all. */
19#define t_psr l0 /* Set by caller */
20#define t_pc l1 /* Set by caller */
21#define t_npc l2 /* Set by caller */
22#define t_wim l3 /* Set by caller */
23#define t_twinmask l4 /* Set at beginning of this entry routine. */
24#define t_kstack l5 /* Set right before pt_regs frame is built */
25#define t_retpc l6 /* If you change this, change winmacro.h header file */
26#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
27#define curptr g6 /* Set after pt_regs frame is built */
28
29 .text
30 .align 4
31
32 /* SEVEN WINDOW PATCH INSTRUCTIONS */
 /* Template instructions copied at boot over tsetup_patch1..6 below
  * when the CPU has 7 register windows instead of the usual 8:
  * the wim rotation shifts by 6 (not 7) and masks with 0x7f (not 0xff).
  */
33 .globl tsetup_7win_patch1, tsetup_7win_patch2
34 .globl tsetup_7win_patch3, tsetup_7win_patch4
35 .globl tsetup_7win_patch5, tsetup_7win_patch6
36tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
37tsetup_7win_patch2: and %g2, 0x7f, %g2
38tsetup_7win_patch3: and %g2, 0x7f, %g2
39tsetup_7win_patch4: and %g1, 0x7f, %g1
40tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
41tsetup_7win_patch6: and %g2, 0x7f, %g2
42 /* END OF PATCH INSTRUCTIONS */
43
44 /* At trap time, interrupts and all generic traps do the
45 * following:
46 *
47 * rd %psr, %l0
48 * b some_handler
49 * rd %wim, %l3
50 * nop
51 *
52 * Then 'some_handler' if it needs a trap frame (ie. it has
53 * to call c-code and the trap cannot be handled in-window)
54 * then it does the SAVE_ALL macro in entry.S which does
55 *
56 * sethi %hi(trap_setup), %l4
57 * jmpl %l4 + %lo(trap_setup), %l6
58 * nop
59 */
60
61 /* 2 3 4 window number
62 * -----
63 * O T S mnemonic
64 *
65 * O == Current window before trap
66 * T == Window entered when trap occurred
67 * S == Window we will need to save if (1<<T) == %wim
68 *
69 * Before execution gets here, it must be guaranteed that
70 * %l0 contains trap time %psr, %l1 and %l2 contain the
71 * trap pc and npc, and %l3 contains the trap time %wim.
72 */
73
74 .globl trap_setup, tsetup_patch1, tsetup_patch2
75 .globl tsetup_patch3, tsetup_patch4
76 .globl tsetup_patch5, tsetup_patch6
77trap_setup:
78 /* Calculate mask of trap window. See if from user
79 * or kernel and branch conditionally.
80 */
81 mov 1, %t_twinmask
82 andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
83 be trap_setup_from_user ! nope, from user mode
84 sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
85
 /* Note: the shift above uses the low bits of %t_psr — the CWP
  * field — so t_twinmask ends up as (1 << current_window).
  */
86 /* From kernel, allocate more kernel stack and
87 * build a pt_regs trap frame.
88 */
89 sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
90 STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
91
92 /* See if we are in the trap window. */
93 andcc %t_twinmask, %t_wim, %g0
94 bne trap_setup_kernel_spill ! in trap window, clean up
95 nop
96
97 /* Trap from kernel with a window available.
98 * Just do it...
99 */
100 jmpl %t_retpc + 0x8, %g0 ! return to caller
101 mov %t_kstack, %sp ! jump onto new stack
102
102trap_setup_kernel_spill:
104 ld [%curptr + TI_UWINMASK], %g1
105 orcc %g0, %g1, %g0
106 bne trap_setup_user_spill ! there are some user windows, yuck
107 /* Spill from kernel, but only kernel windows, adjust
108 * %wim and go.
109 */
110 srl %t_wim, 0x1, %g2 ! begin computation of new %wim
111tsetup_patch1:
112 sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
113 or %t_wim, %g2, %g2
114tsetup_patch2:
115 and %g2, 0xff, %g2 ! patched on 7 window Sparcs
116
117 save %g0, %g0, %g0
118
119 /* Set new %wim value */
120 wr %g2, 0x0, %wim
121
122 /* Save the kernel window onto the corresponding stack. */
123 STORE_WINDOW(sp)
124
125 restore %g0, %g0, %g0
126
127 jmpl %t_retpc + 0x8, %g0 ! return to caller
128 mov %t_kstack, %sp ! and onto new kernel stack
129
130#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
131
132trap_setup_from_user:
133 /* We can't use %curptr yet. */
134 LOAD_CURRENT(t_kstack, t_twinmask)
135
 /* Kernel stack top = thread_info base + STACK_OFFSET, leaving room
  * for the pt_regs frame plus one stack frame.
  */
136 sethi %hi(STACK_OFFSET), %t_twinmask
137 or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
138 add %t_kstack, %t_twinmask, %t_kstack
139
140 mov 1, %t_twinmask
141 sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
142
143 /* Build pt_regs frame. */
144 STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
145
146#if 0
147 /* If we're sure every task_struct is THREAD_SIZE aligned,
148 we can speed this up. */
149 sethi %hi(STACK_OFFSET), %curptr
150 or %curptr, %lo(STACK_OFFSET), %curptr
151 sub %t_kstack, %curptr, %curptr
152#else
 /* Recover the thread_info pointer by masking the kernel stack
  * address down to its THREAD_SIZE boundary.
  */
153 sethi %hi(~(THREAD_SIZE - 1)), %curptr
154 and %t_kstack, %curptr, %curptr
155#endif
156
157 /* Clear current_thread_info->w_saved */
158 st %g0, [%curptr + TI_W_SAVED]
159
160 /* See if we are in the trap window. */
161 andcc %t_twinmask, %t_wim, %g0
162 bne trap_setup_user_spill ! yep we are
163 orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
164
165 /* Trap from user, but not into the invalid window.
166 * Calculate new umask. The way this works is,
167 * any window from the %wim at trap time until
168 * the window right before the one we are in now,
169 * is a user window. A diagram:
170 *
171 * 7 6 5 4 3 2 1 0 window number
172 * ---------------
173 * I L T mnemonic
174 *
175 * Window 'I' is the invalid window in our example,
176 * window 'L' is the window the user was in when
177 * the trap occurred, window T is the trap window
178 * we are in now. So therefore, windows 5, 4 and
179 * 3 are user windows. The following sequence
180 * computes the user winmask to represent this.
181 */
182 subcc %t_wim, %t_twinmask, %g2
183 bneg,a 1f
184 sub %g2, 0x1, %g2 ! annulled unless the subtraction wrapped
1851:
186 andn %g2, %t_twinmask, %g2
187tsetup_patch3:
188 and %g2, 0xff, %g2 ! patched on 7win Sparcs
189 st %g2, [%curptr + TI_UWINMASK] ! store new umask
190
191 jmpl %t_retpc + 0x8, %g0 ! return to caller
192 mov %t_kstack, %sp ! and onto kernel stack
193
194trap_setup_user_spill:
195 /* A spill occurred from either kernel or user mode
196 * and there exist some user windows to deal with.
197 * A mask of the currently valid user windows
198 * is in %g1 upon entry to here.
199 */
200
 /* Rotate %wim one window back, drop the spilled window from the
  * thread's uwinmask, then step into the spill window with `save`
  * so its registers can be dumped onto the user stack.
  */
201tsetup_patch4:
202 and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
203 srl %t_wim, 0x1, %g2 ! compute new %wim
204tsetup_patch5:
205 sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
206 or %t_wim, %g2, %g2 ! %g2 is new %wim
207tsetup_patch6:
208 and %g2, 0xff, %g2 ! patched on 7win Sparcs
209 andn %g1, %g2, %g1 ! clear this bit in %g1
210 st %g1, [%curptr + TI_UWINMASK]
211
212 save %g0, %g0, %g0
213
214 wr %g2, 0x0, %wim
215
216 /* Call MMU-architecture dependent stack checking
217 * routine.
218 */
 /* Branch target below is rewritten at boot to tsetup_srmmu_stackchk
  * on SRMMU (sun4m/sun4d) machines; the delay slot pre-computes the
  * %sp alignment test both checkers start from.
  */
219 .globl tsetup_mmu_patchme
220tsetup_mmu_patchme:
221 b tsetup_sun4c_stackchk
222 andcc %sp, 0x7, %g0
223
224 /* Architecture specific stack checking routines. When either
225 * of these routines are called, the globals are free to use
226 * as they have been safely stashed on the new kernel stack
227 * pointer. Thus the definition below for simplicity.
228 */
229#define glob_tmp g1
230
231 .globl tsetup_sun4c_stackchk
232tsetup_sun4c_stackchk:
 /* Validate the user %sp before dumping the window there:
  * must be 8-byte aligned and outside the sun4c VA hole (the
  * sra/add/andn sequence accepts only addresses whose top bits
  * sign-extend from bit 28 — presumably the sun4c hole check;
  * verify against sun4c MMU docs before changing).
  */
233 /* Done by caller: andcc %sp, 0x7, %g0 */
234 bne trap_setup_user_stack_is_bolixed
235 sra %sp, 29, %glob_tmp
236
237 add %glob_tmp, 0x1, %glob_tmp
238 andncc %glob_tmp, 0x1, %g0
239 bne trap_setup_user_stack_is_bolixed
240 and %sp, 0xfff, %glob_tmp ! delay slot
241
 /* 0x38 is the offset of the last 8-byte store of the 16-word
  * (0x40 byte) window dump; if sp+0x38 crosses a 4K page boundary
  * both pages must be checked.
  */
242 /* See if our dump area will be on more than one
243 * page.
244 */
245 add %glob_tmp, 0x38, %glob_tmp
246 andncc %glob_tmp, 0xff8, %g0
247 be tsetup_sun4c_onepage ! only one page to check
248 lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
249
250tsetup_sun4c_twopages:
251 /* Is first page ok permission wise? */
252 srl %glob_tmp, 29, %glob_tmp
253 cmp %glob_tmp, 0x6 ! PTE access field: user-writable?
254 bne trap_setup_user_stack_is_bolixed
255 add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
256
257 sra %glob_tmp, 29, %glob_tmp
258 add %glob_tmp, 0x1, %glob_tmp
259 andncc %glob_tmp, 0x1, %g0
260 bne trap_setup_user_stack_is_bolixed
261 add %sp, 0x38, %glob_tmp
262
263 lda [%glob_tmp] ASI_PTE, %glob_tmp
264
265tsetup_sun4c_onepage:
266 srl %glob_tmp, 29, %glob_tmp
267 cmp %glob_tmp, 0x6 ! can user write to it?
268 bne trap_setup_user_stack_is_bolixed ! failure
269 nop
270
271 STORE_WINDOW(sp)
272
273 restore %g0, %g0, %g0
274
275 jmpl %t_retpc + 0x8, %g0
276 mov %t_kstack, %sp
277
278 .globl tsetup_srmmu_stackchk
279tsetup_srmmu_stackchk:
 /* SRMMU variant: reject a misaligned %sp, range-check it against
  * PAGE_OFFSET, then attempt the window dump with the MMU's
  * no_fault bit set so a bad page cannot take a recursive fault.
  * NOTE(review): the direction of the PAGE_OFFSET comparison below
  * is subtle — verify against the mainline etrap.S before touching.
  */
280 /* Check results of callers andcc %sp, 0x7, %g0 */
281 bne trap_setup_user_stack_is_bolixed
282 sethi %hi(PAGE_OFFSET), %glob_tmp
283
284 cmp %glob_tmp, %sp
285 bleu,a 1f
286 lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
287
 /* Shared failure path (also used by the sun4c checker above):
  * stash the unsaved window in the thread struct via
  * SAVE_BOLIXED_USER_STACK and let higher-level code sort it out.
  */
288trap_setup_user_stack_is_bolixed:
289 /* From user/kernel into invalid window w/bad user
290 * stack. Save bad user stack, and return to caller.
291 */
292 SAVE_BOLIXED_USER_STACK(curptr, g3)
293 restore %g0, %g0, %g0
294
295 jmpl %t_retpc + 0x8, %g0
296 mov %t_kstack, %sp
297
2981:
299 /* Clear the fault status and turn on the no_fault bit. */
300 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
301 sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
302
303 /* Dump the registers and cross fingers. */
304 STORE_WINDOW(sp)
305
306 /* Clear the no_fault bit and check the status. */
307 andn %glob_tmp, 0x2, %glob_tmp
308 sta %glob_tmp, [%g0] ASI_M_MMUREGS
 /* Reading SFAR then SFSR clears the latched fault state; SFSR bit
  * 0x2 indicates the guarded window store faulted.
  */
309 mov AC_M_SFAR, %glob_tmp
310 lda [%glob_tmp] ASI_M_MMUREGS, %g0
311 mov AC_M_SFSR, %glob_tmp
312 lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
313 andcc %glob_tmp, 0x2, %g0 ! did we fault?
314 bne trap_setup_user_stack_is_bolixed ! failure
315 nop
316
317 restore %g0, %g0, %g0
318
319 jmpl %t_retpc + 0x8, %g0
320 mov %t_kstack, %sp
321
diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S
new file mode 100644
index 000000000000..42d3de59d19b
--- /dev/null
+++ b/arch/sparc/kernel/head.S
@@ -0,0 +1,1326 @@
1/* $Id: head.S,v 1.105 2001/08/12 09:08:56 davem Exp $
2 * head.S: The initial boot code for the Sparc port of Linux.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995,1999 Pete Zaitcev (zaitcev@yahoo.com)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
9 *
10 * CompactPCI platform by Eric Brower, 1999.
11 */
12
13#include <linux/version.h>
14#include <linux/config.h>
15#include <linux/init.h>
16
17#include <asm/head.h>
18#include <asm/asi.h>
19#include <asm/contregs.h>
20#include <asm/ptrace.h>
21#include <asm/psr.h>
22#include <asm/page.h>
23#include <asm/kdebug.h>
24#include <asm/winmacro.h>
25#include <asm/thread_info.h> /* TI_UWINMASK */
26#include <asm/errno.h>
27#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
28
29 .data
30/*
31 * The following are used with the prom_vector node-ops to figure out
32 * the cpu-type
33 */
34
35 .align 4
36 .globl cputyp
37cputyp:
38 .word 1
39
40 .align 4
41 .globl cputypval
42cputypval:
43 .asciz "sun4c"
44 .ascii " "
45
46cputypvalend:
47cputypvallen = cputypvar - cputypval
48
	.align 4
/*
 * Sun people can't spell worth damn. "compatability" indeed.
 * At least we *know* we can't spell, and use a spell-checker.
 */

/* Uh, actually Linus it is I who cannot spell. Too much murky
 * Sparc assembly will do this to ya.
 */
cputypvar:
	/* PROM property name queried on sun4c-era machines; the (sic)
	 * misspelling is the actual property name and must not be fixed.
	 */
	.asciz "compatability"

/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
	.align 4
cputypvar_sun4m:
	/* Correctly-spelled property name used on sun4m machines. */
	.asciz "compatible"

	.align 4

/* Fatal boot-time messages for machine types this kernel build
 * cannot run on (printed before giving up).
 */
#ifndef CONFIG_SUN4
sun4_notsup:
	.asciz "Sparc-Linux sun4 needs a specially compiled kernel, turn CONFIG_SUN4 on.\n\n"
	.align 4
#else
sun4cdm_notsup:
	.asciz "Kernel compiled with CONFIG_SUN4 cannot run on SUN4C/SUN4M/SUN4D\nTurn CONFIG_SUN4 off.\n\n"
	.align 4
#endif

sun4e_notsup:
	.asciz "Sparc-Linux sun4e support does not exist\n\n"
	.align 4

/* Without SunOS emulation compiled in, route the SunOS system call
 * trap slot (used by the trap tables below) to the "no such syscall"
 * handler instead.
 */
#ifndef CONFIG_SUNOS_EMUL
#undef SUNOS_SYSCALL_TRAP
#define SUNOS_SYSCALL_TRAP SUNOS_NO_SYSCALL_TRAP
#endif
86
	/* The Sparc trap table, bootloader gives us control at _start.
	 *
	 * Each trap-table slot is four instructions (16 bytes); the table
	 * has 256 slots, one per SPARC trap type 0x00-0xff.  %tbr is
	 * pointed at this table, so it presumably must be suitably
	 * aligned by the linker script -- NOTE(review): confirm there.
	 */
	.text
	.globl	start, _stext, _start, __stext
	.globl  trapbase
_start:   /* danger danger */
__stext:
_stext:
start:
trapbase:
#ifdef CONFIG_SMP
trapbase_cpu0:
#endif
/* We get control passed to us here at t_zero. */
t_zero:	b gokernel; nop; nop; nop;
t_tflt:	SPARC_TFAULT                        /* Inst. Access Exception */
t_bins:	TRAP_ENTRY(0x2, bad_instruction)    /* Illegal Instruction */
t_pins:	TRAP_ENTRY(0x3, priv_instruction)   /* Privileged Instruction */
t_fpd:	TRAP_ENTRY(0x4, fpd_trap_handler)   /* Floating Point Disabled */
t_wovf:	WINDOW_SPILL                        /* Window Overflow */
t_wunf:	WINDOW_FILL                         /* Window Underflow */
t_mna:	TRAP_ENTRY(0x7, mna_handler)        /* Memory Address Not Aligned */
t_fpe:	TRAP_ENTRY(0x8, fpe_trap_handler)   /* Floating Point Exception */
t_dflt:	SPARC_DFAULT                        /* Data Miss Exception */
t_tio:	TRAP_ENTRY(0xa, do_tag_overflow)    /* Tagged Instruction Ovrflw */
t_wpt:	TRAP_ENTRY(0xb, do_watchpoint)      /* Watchpoint Detected */
t_badc:	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
/* Interrupt levels 1-14 (trap types 0x11-0x1e).  NOTE(review): the
 * "Level N" numbers in the t_irq7..t_irq9 comments do not match the
 * trap numbers; they appear to describe SBUS interrupt levels rather
 * than CPU levels -- verify before relying on them.
 */
t_irq1:	TRAP_ENTRY_INTERRUPT(1)             /* IRQ Software/SBUS Level 1 */
t_irq2:	TRAP_ENTRY_INTERRUPT(2)             /* IRQ SBUS Level 2 */
t_irq3:	TRAP_ENTRY_INTERRUPT(3)             /* IRQ SCSI/DMA/SBUS Level 3 */
t_irq4:	TRAP_ENTRY_INTERRUPT(4)             /* IRQ Software Level 4 */
t_irq5:	TRAP_ENTRY_INTERRUPT(5)             /* IRQ SBUS/Ethernet Level 5 */
t_irq6:	TRAP_ENTRY_INTERRUPT(6)             /* IRQ Software Level 6 */
t_irq7:	TRAP_ENTRY_INTERRUPT(7)             /* IRQ Video/SBUS Level 5 */
t_irq8:	TRAP_ENTRY_INTERRUPT(8)             /* IRQ SBUS Level 6 */
t_irq9:	TRAP_ENTRY_INTERRUPT(9)             /* IRQ SBUS Level 7 */
t_irq10:TRAP_ENTRY_INTERRUPT(10)            /* IRQ Timer #1 (one we use) */
t_irq11:TRAP_ENTRY_INTERRUPT(11)            /* IRQ Floppy Intr. */
t_irq12:TRAP_ENTRY_INTERRUPT(12)            /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13)            /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14)            /* IRQ Timer #2 (slot copied over by copy_prom_lvl14) */
	.globl	t_nmi
#ifndef CONFIG_SMP
t_nmi:	NMI_TRAP                            /* Level 15 (NMI) */
#else
t_nmi:	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
#endif
t_racc:	TRAP_ENTRY(0x20, do_reg_access)     /* General Register Access Error */
t_iacce:BAD_TRAP(0x21)                      /* Instr Access Error */
t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled)    /* Co-Processor Disabled */
t_uflsh:SKIP_TRAP(0x25, unimp_flush)        /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception)   /* Co-Processor Exception */
t_dacce:SPARC_DFAULT                        /* Data Access Error */
t_hwdz:	TRAP_ENTRY(0x2a, do_hw_divzero)     /* Division by zero, you lose... */
t_dserr:BAD_TRAP(0x2b)                      /* Data Store Error */
t_daccm:BAD_TRAP(0x2c)                      /* Data Access MMU-Miss */
t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
t_iaccm:BAD_TRAP(0x3c)                      /* Instr Access MMU-Miss */
t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
/* Traps 0x80 and up are software traps ("ta" instructions): the
 * various OS system-call entry points and user helper traps.
 */
t_sunos:SUNOS_SYSCALL_TRAP                  /* SunOS System Call */
t_sbkpt:BREAKPOINT_TRAP                     /* Software Breakpoint/KGDB */
t_divz:	TRAP_ENTRY(0x82, do_hw_divzero)     /* Divide by zero trap */
t_flwin:TRAP_ENTRY(0x83, do_flush_windows)  /* Flush Windows Trap */
t_clwin:BAD_TRAP(0x84)                      /* Clean Windows Trap */
t_rchk:	BAD_TRAP(0x85)                      /* Range Check */
t_funal:BAD_TRAP(0x86)                      /* Fix Unaligned Access Trap */
t_iovf:	BAD_TRAP(0x87)                      /* Integer Overflow Trap */
t_slowl:SOLARIS_SYSCALL_TRAP                /* Slowaris System Call */
t_netbs:NETBSD_SYSCALL_TRAP                 /* Net-B.S. System Call */
t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
t_bad8f:BAD_TRAP(0x8f)
t_linux:LINUX_SYSCALL_TRAP                  /* Linux System Call */
t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
t_getcc:GETCC_TRAP                          /* Get Condition Codes */
t_setcc:SETCC_TRAP                          /* Set Condition Codes */
t_getpsr:GETPSR_TRAP                        /* Get PSR Register */
t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
t_slowi:INDIRECT_SOLARIS_SYSCALL(156)
t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
t_badfc:BAD_TRAP(0xfc) BAD_TRAP(0xfd)
dbtrap:	BAD_TRAP(0xfe)                      /* Debugger/PROM breakpoint #1 */
dbtrap2:BAD_TRAP(0xff)                      /* Debugger/PROM breakpoint #2 */

	.globl	end_traptable
end_traptable:
206
#ifdef CONFIG_SMP
	/* Trap tables for the other cpus.
	 *
	 * Each secondary cpu gets its own 256-entry trap table so its
	 * %tbr can point at a per-cpu copy.  The layout mirrors the boot
	 * cpu's table above, but with SRMMU fault entries (secondary cpus
	 * only exist on SRMMU/sun4m-class SMP hardware) and without the
	 * "b gokernel" entry at trap 0x0.  The three tables are identical;
	 * keep them in sync with each other and with trapbase_cpu0.
	 */
	.globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
trapbase_cpu1:
	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
	TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
	BAD_TRAP(0x50)
	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
	SUNOS_SYSCALL_TRAP
	BREAKPOINT_TRAP
	TRAP_ENTRY(0x82, do_hw_divzero)
	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
	BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
	NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
	INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
	BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)

	/* Identical copy for cpu2 -- see trapbase_cpu1. */
trapbase_cpu2:
	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
	TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
	BAD_TRAP(0x50)
	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
	SUNOS_SYSCALL_TRAP
	BREAKPOINT_TRAP
	TRAP_ENTRY(0x82, do_hw_divzero)
	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
	BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
	NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
	INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
	BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)

	/* Identical copy for cpu3 -- see trapbase_cpu1. */
trapbase_cpu3:
	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
	TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
	BAD_TRAP(0x50)
	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
	SUNOS_SYSCALL_TRAP
	BREAKPOINT_TRAP
	TRAP_ENTRY(0x82, do_hw_divzero)
	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
	BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
	NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
	INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
	BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)

#endif
	.align PAGE_SIZE

/* This was the only reasonable way I could think of to properly align
 * these page-table data structures.
 *
 * One page each, page-aligned by the .align above (each .skip keeps
 * the following symbol page-aligned too).  swapper_pg_dir is the
 * kernel's initial page directory; pg0-pg3, the empty_bad_* pages and
 * empty_zero_page are fixed pages handed out by early MM code.
 */
	.globl pg0, pg1, pg2, pg3
	.globl empty_bad_page
	.globl empty_bad_page_table
	.globl empty_zero_page
	.globl swapper_pg_dir
swapper_pg_dir:		.skip PAGE_SIZE
pg0:			.skip PAGE_SIZE
pg1:			.skip PAGE_SIZE
pg2:			.skip PAGE_SIZE
pg3:			.skip PAGE_SIZE
empty_bad_page:		.skip PAGE_SIZE
empty_bad_page_table:	.skip PAGE_SIZE
empty_zero_page:	.skip PAGE_SIZE
433
	.global root_flags
	.global ram_flags
	.global root_dev
	.global sparc_ramdisk_image
	.global sparc_ramdisk_size

/* This stuff has to be in sync with SILO and other potential boot loaders
 * Fields should be kept upward compatible and whenever any change is made,
 * HdrS version should be incremented.
 *
 * The "HdrS" magic lets the boot loader locate this header; the fields
 * below are read and/or patched by the loader (root device, ramdisk
 * location/size, etc.).  Do not reorder or resize existing fields.
 */
	.ascii "HdrS"
	.word LINUX_VERSION_CODE
	.half 0x0203		/* HdrS version */
root_flags:
	.half 1
root_dev:
	.half 0
ram_flags:
	.half 0
sparc_ramdisk_image:
	.word 0
sparc_ramdisk_size:
	.word 0
	.word reboot_command	/* address of the reboot-command buffer */
	.word 0, 0, 0		/* reserved for future HdrS fields */
	.word _end		/* end of kernel image */
460
/* Cool, here we go. Pick up the romvec pointer in %o0 and stash it in
 * %g7 and at prom_vector_p. And also quickly check whether we are on
 * a v0, v2, or v3 prom.
 */
gokernel:
	/* Ok, it's nice to know, as early as possible, if we
	 * are already mapped where we expect to be in virtual
	 * memory. The Solaris /boot elf format bootloader
	 * will peek into our elf header and load us where
	 * we want to be, otherwise we have to re-map.
	 *
	 * Some boot loaders don't place the jmp'rs address
	 * in %o7, so we do a pc-relative call to a local
	 * label, then see what %o7 has.
	 */

	mov	%o7, %g4		! Save %o7

	/* Jump to it, and pray... */
current_pc:
	call	1f
	 nop

1:
	mov	%o7, %g3		! %g3 = runtime address of current_pc

	tst	%o0
	be	no_sun4u_here		! %o0 == 0: no romvec was passed in
	 mov	%g4, %o7		/* Previous %o7. */

	mov	%o0, %l0		! stash away romvec
	mov	%o0, %g7		! put it here too
	mov	%o1, %l1		! stash away debug_vec too

	/* Ok, let's check out our run time program counter.
	 * If it matches the linked address of current_pc we are
	 * already running at our link-time (virtual) address.
	 */
	set	current_pc, %g5
	cmp	%g3, %g5
	be	already_mapped
	 nop

	/* %l6 will hold the offset we have to subtract
	 * from absolute symbols in order to access areas
	 * in our own image. If already mapped this is
	 * just plain zero, else it is KERNBASE.
	 */
	set	KERNBASE, %l6
	b	copy_prom_lvl14
	 nop
509
already_mapped:
	mov	0, %l6			! running at link address, no offset

	/* Copy over the Prom's level 14 clock handler.
	 * A trap-table slot is 16 bytes (four instructions), so each
	 * copy below is two ldd/std (8-byte) pairs.
	 */
copy_prom_lvl14:
#if 1
	/* DJHR
	 * preserve our linked/calculated instructions
	 * (save our own t_irq14 slot into lvl14_save before the PROM's
	 * handler overwrites it below)
	 */
	set	lvl14_save, %g1
	set	t_irq14, %g3
	sub	%g1, %l6, %g1		! translate to physical
	sub	%g3, %l6, %g3		! translate to physical
	ldd	[%g3], %g4
	std	%g4, [%g1]
	ldd	[%g3+8], %g4
	std	%g4, [%g1+8]
#endif
	rd	%tbr, %g1
	andn	%g1, 0xfff, %g1		! proms trap table base
	or	%g0, (0x1e<<4), %g2	! offset to lvl14 intr (trap 0x1e * 16 bytes)
	or	%g1, %g2, %g2
	set	t_irq14, %g3
	sub	%g3, %l6, %g3
	ldd	[%g2], %g4
	std	%g4, [%g3]
	ldd	[%g2 + 0x8], %g4
	std	%g4, [%g3 + 0x8]	! Copy proms handler
538
/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
 * MMU so we can remap ourselves properly. DON'T TOUCH %l0 thru %l5 in these
 * remapping routines, we need their values afterwards!
 */
	/* Now check whether we are already mapped, if we
	 * are we can skip all this garbage coming up.
	 */
copy_prom_done:
	cmp	%l6, 0			! %l6 == 0 means already at KERNBASE
	be	go_to_highmem		! this will be a nop then
	 nop

	/* NOTE(review): on old sun4 machines %g7 (copied from %o0 above)
	 * appears to hold the load address rather than a romvec -- see
	 * the sun4 note further down in this file.
	 */
	set	LOAD_ADDR, %g6
	cmp	%g7, %g6
	bne	remap_not_a_sun4	! This is not a Sun4
	 nop

	or	%g0, 0x1, %g1
	lduba	[%g1] ASI_CONTROL, %g1	! Only safe to try on Sun4.
	subcc	%g1, 0x24, %g0		! Is this a mutant Sun4/400???
	be	sun4_mutant_remap	! Ugh, it is...
	 nop

	b	sun4_normal_remap	! regular sun4, 2 level mmu
	 nop
564
	/* Distinguish sun4c from SRMMU, then duplicate the level-1 page
	 * table entry that maps physical 0x0 into the KERNBASE slot so
	 * the kernel becomes visible at its link address.
	 */
remap_not_a_sun4:
	lda	[%g0] ASI_M_MMUREGS, %g1	! same as ASI_PTE on sun4c
	and	%g1, 0x1, %g1			! Test SRMMU Enable bit ;-)
	cmp	%g1, 0x0
	be	sun4c_remap			! A sun4c MMU or normal Sun4
	 nop
srmmu_remap:
	/* First, check for a viking (TI) module. */
	set	0x40000000, %g2
	rd	%psr, %g3
	and	%g2, %g3, %g3			! isolate implementation bit
	subcc	%g3, 0x0, %g0
	bz	srmmu_nviking
	 nop

	/* Figure out what kind of viking we are on.
	 * We need to know if we have to play with the
	 * AC bit and disable traps or not.
	 */

	/* I've only seen MicroSparc's on SparcClassics with this
	 * bit set.
	 */
	set	0x800, %g2
	lda	[%g0] ASI_M_MMUREGS, %g3	! peek in the control reg
	and	%g2, %g3, %g3
	subcc	%g3, 0x0, %g0
	bnz	srmmu_nviking			! is in mbus mode
	 nop

	/* Viking not in MBus mode: we must disable traps and set the
	 * AC bit before touching page tables through ASI_M_BYPASS.
	 */
	rd	%psr, %g3			! DO NOT TOUCH %g3
	andn	%g3, PSR_ET, %g2
	wr	%g2, 0x0, %psr			! traps off
	WRITE_PAUSE

	/* Get context table pointer, then convert to
	 * a physical address, which is 36 bits.
	 */
	set	AC_M_CTPR, %g4
	lda	[%g4] ASI_M_MMUREGS, %g4
	sll	%g4, 0x4, %g4			! We use this below
						! DO NOT TOUCH %g4

	/* Set the AC bit in the Viking's MMU control reg. */
	lda	[%g0] ASI_M_MMUREGS, %g5	! DO NOT TOUCH %g5
	set	0x8000, %g6			! AC bit mask
	or	%g5, %g6, %g6			! Or it in...
	sta	%g6, [%g0] ASI_M_MMUREGS	! Close your eyes...

	/* Grrr, why does it seem like every other load/store
	 * on the sun4m is in some ASI space...
	 * Fine with me, let's get the pointer to the level 1
	 * page table directory and fetch its entry.
	 */
	lda	[%g4] ASI_M_BYPASS, %o1		! This is a level 1 ptr
	srl	%o1, 0x4, %o1			! Clear low 4 bits
	sll	%o1, 0x8, %o1			! Make physical

	/* Ok, pull in the PTD. */
	lda	[%o1] ASI_M_BYPASS, %o2		! This is the 0x0 16MB pgd

	/* Calculate to KERNBASE entry (4-byte entries, so the byte
	 * offset is (KERNBASE >> SRMMU_PGDIR_SHIFT) << 2).
	 */
	add	%o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3

	/* Poke the entry into the calculated address. */
	sta	%o2, [%o3] ASI_M_BYPASS

	/* I don't get it Sun, if you engineered all these
	 * boot loaders and the PROM (thank you for the debugging
	 * features btw) why did you not have them load kernel
	 * images up in high address space, since this is necessary
	 * for ABI compliance anyways? Does this low-mapping provide
	 * enhanced interoperability?
	 *
	 * "The PROM is the computer."
	 */

	/* Ok, restore the MMU control register we saved in %g5 */
	sta	%g5, [%g0] ASI_M_MMUREGS	! POW... ouch

	/* Turn traps back on. We saved it in %g3 earlier. */
	wr	%g3, 0x0, %psr			! tick tock, tick tock

	/* Now we burn precious CPU cycles due to bad engineering. */
	WRITE_PAUSE

	/* Wow, all that just to move a 32-bit value from one
	 * place to another... Jump to high memory.
	 */
	b	go_to_highmem
	 nop
656
	/* This works on viking's in Mbus mode and all
	 * other MBUS modules. It is virtually the same as
	 * the above madness sans turning traps off and flipping
	 * the AC bit.
	 */
srmmu_nviking:
	set	AC_M_CTPR, %g1
	lda	[%g1] ASI_M_MMUREGS, %g1	! get ctx table ptr
	sll	%g1, 0x4, %g1			! make physical addr
	lda	[%g1] ASI_M_BYPASS, %g1		! ptr to level 1 pg_table
	srl	%g1, 0x4, %g1
	sll	%g1, 0x8, %g1			! make phys addr for l1 tbl

	lda	[%g1] ASI_M_BYPASS, %g2		! get level1 entry for 0x0
	add	%g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
	sta	%g2, [%g3] ASI_M_BYPASS		! place at KERNBASE entry
	b	go_to_highmem
	 nop					! wheee....
675
	/* This remaps the kernel on Sun4/4xx machines
	 * that have the Sun Mutant Three Level MMU.
	 * It's like a platypus, Sun didn't have the
	 * SRMMU in conception so they kludged the three
	 * level logic in the regular Sun4 MMU probably.
	 *
	 * Basically, you take each entry in the top level
	 * directory that maps the low 3MB starting at
	 * address zero and put the mapping in the KERNBASE
	 * slots. These top level pgd's are called regmaps.
	 */
sun4_mutant_remap:
	or	%g0, %g0, %g3			! source base
	sethi	%hi(KERNBASE), %g4		! destination base
	or	%g4, %lo(KERNBASE), %g4
	sethi	%hi(0x300000), %g5
	or	%g5, %lo(0x300000), %g5		! upper bound 3MB
	or	%g0, 0x1, %l6
	sll	%l6, 24, %l6			! Regmap mapping size (16MB)
	add	%g3, 0x2, %g3			! Base magic
	add	%g4, 0x2, %g4			! Base magic

	/* Main remapping loop on Sun4-Mutant-MMU.
	 * "I am not an animal..." -Famous Mutant Person
	 */
sun4_mutant_loop:
	lduha	[%g3] ASI_REGMAP, %g2		! Get lower entry
	stha	%g2, [%g4] ASI_REGMAP		! Store in high entry
	add	%g4, %l6, %g4			! Move up high memory ptr
	subcc	%g3, %g5, %g0			! Reached our limit?
	blu	sun4_mutant_loop		! Nope, loop again
	 add	%g3, %l6, %g3			! delay, Move up low ptr
	b	go_to_highmem			! Jump to high memory.
	 nop
710
	/* The following is for non-4/4xx sun4 MMU's.
	 * Copy the segment map entries for the low 3MB up to KERNBASE.
	 */
sun4_normal_remap:
	mov	0, %g3				! source base
	set	KERNBASE, %g4			! destination base
	set	0x300000, %g5			! upper bound 3MB
	mov	1, %l6
	sll	%l6, 18, %l6			! sun4 mmu segmap size (256KB)
sun4_normal_loop:
	lduha	[%g3] ASI_SEGMAP, %g6		! load phys_seg
	stha	%g6, [%g4] ASI_SEGMAP		! store new virt mapping
	add	%g3, %l6, %g3			! increment source pointer
	subcc	%g3, %g5, %g0			! reached limit?
	blu	sun4_normal_loop		! nope, loop again
	 add	%g4, %l6, %g4			! delay, increment dest ptr
	b	go_to_highmem
	 nop
727
728 /* The following works for Sun4c MMU's */
729sun4c_remap:
730	mov 0, %g3 ! source base
731	set KERNBASE, %g4 ! destination base
732	set 0x300000, %g5 ! upper bound 3MB
733	mov 1, %l6
734	sll %l6, 18, %l6 ! sun4c mmu segmap size
735sun4c_remap_loop:
	/* Same segmap-copy idea as sun4_normal_loop, but word-sized
	 * accesses; falls straight through into go_to_highmem below. */
736	lda [%g3] ASI_SEGMAP, %g6 ! load phys_seg
737	sta %g6, [%g4] ASI_SEGMAP ! store new virt mapping
738	add %g3, %l6, %g3 ! Increment source ptr
739	subcc %g3, %g5, %g0 ! Reached limit?
740	bl sun4c_remap_loop ! Nope, loop again
741	add %g4, %l6, %g4 ! delay, Increment dest ptr
742
743/* Now do a non-relative jump so that PC is in high-memory */
744go_to_highmem:
	/* Absolute jump: the KERNBASE alias now exists, so land there. */
745	set execute_in_high_mem, %g1
746	jmpl %g1, %g0
747	nop
748
749/* The code above should be at beginning and we have to take care about
750 * short jumps, as branching to .text.init section from .text is usually
751 * impossible */
752	__INIT
753/* Acquire boot time privileged register values, this will help debugging.
754 * I figure out and store nwindows and nwindowsm1 later on.
755 */
756execute_in_high_mem:
	/* %l0/%l1 were stashed by earlier startup code — presumably the
	 * PROM romvec and debug vector; TODO confirm in the part of head.S
	 * above this chunk. */
757	mov %l0, %o0 ! put back romvec
758	mov %l1, %o1 ! and debug_vec
759
760	sethi %hi(prom_vector_p), %g1
761	st %o0, [%g1 + %lo(prom_vector_p)]
762
763	sethi %hi(linux_dbvec), %g1
764	st %o1, [%g1 + %lo(linux_dbvec)]
765
766	ld [%o0 + 0x4], %o3
767	and %o3, 0x3, %o5 ! get the version
768
769	cmp %o3, 0x2 ! a v2 prom?
770	be found_version
771	nop
772
773	/* paul@sfe.com.au */
774	cmp %o3, 0x3 ! a v3 prom?
775	be found_version
776	nop
777
778/* Old sun4's pass our load address into %o0 instead of the prom
779 * pointer. On sun4's you have to hard code the romvec pointer into
780 * your code. Sun probably still does that because they don't even
781 * trust their own "OpenBoot" specifications.
782 */
783	set LOAD_ADDR, %g6
784	cmp %o0, %g6 ! an old sun4?
785	be sun4_init
786	nop
787
788found_version:
789#ifdef CONFIG_SUN4
790/* For people who try sun4 kernels, even if Configure.help advises them. */
791	ld [%g7 + 0x68], %o1
792	set sun4cdm_notsup, %o0
793	call %o1
794	nop
795	b halt_me
796	nop
797#endif
798/* Get the machine type via the mysterious romvec node operations. */
799
800	add %g7, 0x1c, %l1
801	ld [%l1], %l0
802	ld [%l0], %l0
803	call %l0
804	or %g0, %g0, %o0 ! next_node(0) = first_node
805	or %o0, %g0, %g6
806
807	sethi %hi(cputypvar), %o1 ! First node has cpu-arch
808	or %o1, %lo(cputypvar), %o1
809	sethi %hi(cputypval), %o2 ! information, the string
810	or %o2, %lo(cputypval), %o2
811	ld [%l1], %l0 ! 'compatibility' tells
812	ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
813	call %l0 ! x is one of '', 'c', 'm',
814	nop ! 'd' or 'e'. %o2 holds pointer
815	! to a buf where above string
816	! will get stored by the prom.
817
818	subcc %o0, %g0, %g0
819	bpos got_prop ! Got the property
820	nop
821
	/* Fall back to the sun4m-style property name if the first
	 * getprop call failed. */
822	or %g6, %g0, %o0
823	sethi %hi(cputypvar_sun4m), %o1
824	or %o1, %lo(cputypvar_sun4m), %o1
825	sethi %hi(cputypval), %o2
826	or %o2, %lo(cputypval), %o2
827	ld [%l1], %l0
828	ld [%l0 + 0xc], %l0
829	call %l0
830	nop
831
832got_prop:
	/* Byte 4 of the "sun4x" string is the architecture letter. */
833	set cputypval, %o2
834	ldub [%o2 + 0x4], %l1
835
836	cmp %l1, ' '
837	be 1f
838	cmp %l1, 'c'
839	be 1f
840	cmp %l1, 'm'
841	be 1f
842	cmp %l1, 's'
843	be 1f
844	cmp %l1, 'd'
845	be 1f
846	cmp %l1, 'e'
847	be no_sun4e_here ! Could be a sun4e.
848	nop
849	b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
850	nop
851
8521:	set cputypval, %l1
853	ldub [%l1 + 0x4], %l1
854	cmp %l1, 'm' ! Test for sun4d, sun4e ?
855	be sun4m_init
856	cmp %l1, 's' ! Treat sun4s as sun4m
857	be sun4m_init
858	cmp %l1, 'd' ! Let us see how the beast will die
859	be sun4d_init
860	nop
861
862	/* Jump into mmu context zero. */
863	set AC_CONTEXT, %g1
864	stba %g0, [%g1] ASI_CONTROL
865
866	b sun4c_continue_boot
867	nop
868
869/* CPUID in bootbus can be found at PA 0xff0140000 */
870#define SUN4D_BOOTBUS_CPUID 0xf0140000
871
872sun4d_init:
873	/* Need to patch call to handler_irq */
	/* Build a SPARC "call" instruction (opcode 0x40000000 | disp30)
	 * targeting sun4d_handler_irq and store it over patch_handler_irq. */
874	set patch_handler_irq, %g4
875	set sun4d_handler_irq, %g5
876	sethi %hi(0x40000000), %g3 ! call
877	sub %g5, %g4, %g5
878	srl %g5, 2, %g5
879	or %g5, %g3, %g5
880	st %g5, [%g4]
881
882#ifdef CONFIG_SMP
883	/* Get our CPU id out of bootbus */
884	set SUN4D_BOOTBUS_CPUID, %g3
885	lduba [%g3] ASI_M_CTL, %g3
886	and %g3, 0xf8, %g3
887	srl %g3, 3, %g4
888	sta %g4, [%g0] ASI_M_VIKING_TMP1
889	sethi %hi(boot_cpu_id), %g5
890	stb %g4, [%g5 + %lo(boot_cpu_id)]
891	sll %g4, 2, %g4
892	sethi %hi(boot_cpu_id4), %g5
893	stb %g4, [%g5 + %lo(boot_cpu_id4)]
894#endif
895
896	/* Fall through to sun4m_init */
897
898sun4m_init:
899	/* XXX Fucking Cypress... */
	/* Read the MMU control register: impl (bits 31:28) and version
	 * (bits 27:24) decide whether the multiply/divide patch applies. */
900	lda [%g0] ASI_M_MMUREGS, %g5
901	srl %g5, 28, %g4
902
903	cmp %g4, 1
904	bne 1f
905	srl %g5, 24, %g4
906
907	and %g4, 0xf, %g4
908	cmp %g4, 7 /* This would be a HyperSparc. */
909
910	bne 2f
911	nop
912
9131:
914
915#define PATCH_IT(dst, src) \
916	set (dst), %g5; \
917	set (src), %g4; \
918	ld [%g4], %g3; \
919	st %g3, [%g5]; \
920	ld [%g4+0x4], %g3; \
921	st %g3, [%g5+0x4];
922
923	/* Signed multiply. */
924	PATCH_IT(.mul, .mul_patch)
925	PATCH_IT(.mul+0x08, .mul_patch+0x08)
926
927	/* Signed remainder. */
928	PATCH_IT(.rem, .rem_patch)
929	PATCH_IT(.rem+0x08, .rem_patch+0x08)
930	PATCH_IT(.rem+0x10, .rem_patch+0x10)
931	PATCH_IT(.rem+0x18, .rem_patch+0x18)
932	PATCH_IT(.rem+0x20, .rem_patch+0x20)
933	PATCH_IT(.rem+0x28, .rem_patch+0x28)
934
935	/* Signed division. */
936	PATCH_IT(.div, .div_patch)
937	PATCH_IT(.div+0x08, .div_patch+0x08)
938	PATCH_IT(.div+0x10, .div_patch+0x10)
939	PATCH_IT(.div+0x18, .div_patch+0x18)
940	PATCH_IT(.div+0x20, .div_patch+0x20)
941
942	/* Unsigned multiply. */
943	PATCH_IT(.umul, .umul_patch)
944	PATCH_IT(.umul+0x08, .umul_patch+0x08)
945
946	/* Unsigned remainder. */
947	PATCH_IT(.urem, .urem_patch)
948	PATCH_IT(.urem+0x08, .urem_patch+0x08)
949	PATCH_IT(.urem+0x10, .urem_patch+0x10)
950	PATCH_IT(.urem+0x18, .urem_patch+0x18)
951
952	/* Unsigned division. */
953	PATCH_IT(.udiv, .udiv_patch)
954	PATCH_IT(.udiv+0x08, .udiv_patch+0x08)
955	PATCH_IT(.udiv+0x10, .udiv_patch+0x10)
956
957#undef PATCH_IT
958
959/* Ok, the PROM could have done funny things and apple cider could still
960 * be sitting in the fault status/address registers. Read them all to
961 * clear them so we don't get magic faults later on.
962 */
963/* This sucks, apparently this makes Vikings call prom panic, will fix later */
9642:
965	rd %psr, %o1
966	srl %o1, 28, %o1 ! Get a type of the CPU
967
968	subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
969	be sun4c_continue_boot
970	nop
971
	/* Reads to %g0 are pure side-effect loads that clear the
	 * synchronous fault status/address registers. */
972	set AC_M_SFSR, %o0
973	lda [%o0] ASI_M_MMUREGS, %g0
974	set AC_M_SFAR, %o0
975	lda [%o0] ASI_M_MMUREGS, %g0
976
977	/* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
978	subcc %o1, 0, %g0
979	be sun4c_continue_boot
980	nop
981
982	set AC_M_AFSR, %o0
983	lda [%o0] ASI_M_MMUREGS, %g0
984	set AC_M_AFAR, %o0
985	lda [%o0] ASI_M_MMUREGS, %g0
986	nop
987
988
989sun4c_continue_boot:
990
991
992/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
993 * show-time!
994 */
995
996 sethi %hi(cputyp), %o0
997 st %g4, [%o0 + %lo(cputyp)]
998
999 /* Turn on Supervisor, EnableFloating, and all the PIL bits.
1000 * Also puts us in register window zero with traps off.
1001 */
1002 set (PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
1003 wr %g2, 0x0, %psr
1004 WRITE_PAUSE
1005
1006 /* I want a kernel stack NOW! */
1007 set init_thread_union, %g1
1008 set (THREAD_SIZE - STACKFRAME_SZ), %g2
1009 add %g1, %g2, %sp
1010 mov 0, %fp /* And for good luck */
1011
1012 /* Zero out our BSS section. */
1013 set __bss_start , %o0 ! First address of BSS
1014 set end , %o1 ! Last address of BSS
1015 add %o0, 0x1, %o0
10161:
1017 stb %g0, [%o0]
1018 subcc %o0, %o1, %g0
1019 bl 1b
1020 add %o0, 0x1, %o0
1021
1022 /* Initialize the uwinmask value for init task just in case.
1023 * But first make current_set[boot_cpu_id] point to something useful.
1024 */
1025 set init_thread_union, %g6
1026 set current_set, %g2
1027#ifdef CONFIG_SMP
1028 sethi %hi(boot_cpu_id4), %g3
1029 ldub [%g3 + %lo(boot_cpu_id4)], %g3
1030 st %g6, [%g2]
1031 add %g2, %g3, %g2
1032#endif
1033 st %g6, [%g2]
1034
1035 st %g0, [%g6 + TI_UWINMASK]
1036
1037/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
1038 * in the V8 manual. Ok, this method seems to work, Sparc is cool...
1039 * No, it doesn't work, have to play the save/readCWP/restore trick.
1040 */
1041
1042 wr %g0, 0x0, %wim ! so we do not get a trap
1043 WRITE_PAUSE
1044
1045 save
1046
1047 rd %psr, %g3
1048
1049 restore
1050
1051 and %g3, 0x1f, %g3
1052 add %g3, 0x1, %g3
1053
1054 mov 2, %g1
1055 wr %g1, 0x0, %wim ! make window 1 invalid
1056 WRITE_PAUSE
1057
1058 cmp %g3, 0x7
1059 bne 2f
1060 nop
1061
1062 /* Adjust our window handling routines to
1063 * do things correctly on 7 window Sparcs.
1064 */
1065
1066#define PATCH_INSN(src, dest) \
1067 set src, %g5; \
1068 set dest, %g2; \
1069 ld [%g5], %g4; \
1070 st %g4, [%g2];
1071
1072 /* Patch for window spills... */
1073 PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
1074 PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
1075 PATCH_INSN(spnwin_patch3_7win, spnwin_patch3)
1076
1077 /* Patch for window fills... */
1078 PATCH_INSN(fnwin_patch1_7win, fnwin_patch1)
1079 PATCH_INSN(fnwin_patch2_7win, fnwin_patch2)
1080
1081 /* Patch for trap entry setup... */
1082 PATCH_INSN(tsetup_7win_patch1, tsetup_patch1)
1083 PATCH_INSN(tsetup_7win_patch2, tsetup_patch2)
1084 PATCH_INSN(tsetup_7win_patch3, tsetup_patch3)
1085 PATCH_INSN(tsetup_7win_patch4, tsetup_patch4)
1086 PATCH_INSN(tsetup_7win_patch5, tsetup_patch5)
1087 PATCH_INSN(tsetup_7win_patch6, tsetup_patch6)
1088
1089 /* Patch for returning from traps... */
1090 PATCH_INSN(rtrap_7win_patch1, rtrap_patch1)
1091 PATCH_INSN(rtrap_7win_patch2, rtrap_patch2)
1092 PATCH_INSN(rtrap_7win_patch3, rtrap_patch3)
1093 PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
1094 PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
1095
1096 /* Patch for killing user windows from the register file. */
1097 PATCH_INSN(kuw_patch1_7win, kuw_patch1)
1098
1099 /* Now patch the kernel window flush sequences.
1100 * This saves 2 traps on every switch and fork.
1101 */
1102 set 0x01000000, %g4
1103 set flush_patch_one, %g5
1104 st %g4, [%g5 + 0x18]
1105 st %g4, [%g5 + 0x1c]
1106 set flush_patch_two, %g5
1107 st %g4, [%g5 + 0x18]
1108 st %g4, [%g5 + 0x1c]
1109 set flush_patch_three, %g5
1110 st %g4, [%g5 + 0x18]
1111 st %g4, [%g5 + 0x1c]
1112 set flush_patch_four, %g5
1113 st %g4, [%g5 + 0x18]
1114 st %g4, [%g5 + 0x1c]
1115 set flush_patch_exception, %g5
1116 st %g4, [%g5 + 0x18]
1117 st %g4, [%g5 + 0x1c]
1118 set flush_patch_switch, %g5
1119 st %g4, [%g5 + 0x18]
1120 st %g4, [%g5 + 0x1c]
1121
11222:
1123 sethi %hi(nwindows), %g4
1124 st %g3, [%g4 + %lo(nwindows)] ! store final value
1125 sub %g3, 0x1, %g3
1126 sethi %hi(nwindowsm1), %g4
1127 st %g3, [%g4 + %lo(nwindowsm1)]
1128
1129 /* Here we go, start using Linux's trap table... */
1130 set trapbase, %g3
1131 wr %g3, 0x0, %tbr
1132 WRITE_PAUSE
1133
1134 /* Finally, turn on traps so that we can call c-code. */
1135 rd %psr, %g3
1136 wr %g3, 0x0, %psr
1137 WRITE_PAUSE
1138
1139 wr %g3, PSR_ET, %psr
1140 WRITE_PAUSE
1141
1142 /* First we call prom_init() to set up PROMLIB, then
1143 * off to start_kernel().
1144 */
1145
1146 sethi %hi(prom_vector_p), %g5
1147 ld [%g5 + %lo(prom_vector_p)], %o0
1148 call prom_init
1149 nop
1150
1151 call start_kernel
1152 nop
1153
1154 /* We should not get here. */
1155 call halt_me
1156 nop
1157
1158sun4_init:
1159#ifdef CONFIG_SUN4
1160/* There, happy now Adrian? */
1161	set cputypval, %o2 ! Let everyone know we
1162	set ' ', %o0 ! are a "sun4 " architecture
1163	stb %o0, [%o2 + 0x4]
1164
1165	b got_prop
1166	nop
1167#else
	/* Sun4 support not configured: print a message via the hard-coded
	 * sun4 PROM vector, then drop back to the monitor. */
1168	sethi %hi(SUN4_PROM_VECTOR+0x84), %o1
1169	ld [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1
1170	set sun4_notsup, %o0
1171	call %o1 /* printf */
1172	nop
1173	sethi %hi(SUN4_PROM_VECTOR+0xc4), %o1
1174	ld [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1
1175	call %o1 /* exittomon */
1176	nop
11771: ba 1b ! Cannot exit into KMON
1178	nop
1179#endif
1180no_sun4e_here:
1181	ld [%g7 + 0x68], %o1
1182	set sun4e_notsup, %o0
1183	call %o1
1184	nop
1185	b halt_me
1186	nop
1187
1188	__INITDATA
1189
	/* Strings and argument frames for calling a sun4u (V9) PROM's
	 * client interface from 32-bit code, used by no_sun4u_here to
	 * print an error and exit.  Each aN block is an argument array,
	 * iN the input cells, rN the return cell — the 0 words are the
	 * high halves of 64-bit cells. */
1190sun4u_1:
1191	.asciz "finddevice"
1192	.align 4
1193sun4u_2:
1194	.asciz "/chosen"
1195	.align 4
1196sun4u_3:
1197	.asciz "getprop"
1198	.align 4
1199sun4u_4:
1200	.asciz "stdout"
1201	.align 4
1202sun4u_5:
1203	.asciz "write"
1204	.align 4
1205sun4u_6:
1206	.asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
1207sun4u_6e:
1208	.align 4
1209sun4u_7:
1210	.asciz "exit"
1211	.align 8
1212sun4u_a1:
1213	.word 0, sun4u_1, 0, 1, 0, 1, 0, sun4u_2, 0
1214sun4u_r1:
1215	.word 0
1216sun4u_a2:
1217	.word 0, sun4u_3, 0, 4, 0, 1, 0
1218sun4u_i2:
1219	.word 0, 0, sun4u_4, 0, sun4u_1, 0, 8, 0
1220sun4u_r2:
1221	.word 0
1222sun4u_a3:
1223	.word 0, sun4u_5, 0, 3, 0, 1, 0
1224sun4u_i3:
1225	.word 0, 0, sun4u_6, 0, sun4u_6e - sun4u_6 - 1, 0
1226sun4u_r3:
1227	.word 0
1228sun4u_a4:
1229	.word 0, sun4u_7, 0, 0, 0, 0
1230sun4u_r4:
1231
1232	__INIT
1233no_sun4u_here:
	/* If we were loaded somewhere other than our link address, relocate
	 * every pointer in the sun4u_a1..sun4u_r4 tables by the load delta
	 * (%g3 - current_pc) before handing them to the PROM. */
1234	set sun4u_a1, %o0
1235	set current_pc, %l2
1236	cmp %l2, %g3
1237	be 1f
1238	mov %o4, %l0
1239	sub %g3, %l2, %l6
1240	add %o0, %l6, %o0
1241	mov %o0, %l4
1242	mov sun4u_r4 - sun4u_a1, %l3
1243	ld [%l4], %l5
12442:
1245	add %l4, 4, %l4
1246	cmp %l5, %l2
1247	add %l5, %l6, %l5
1248	bgeu,a 3f
1249	st %l5, [%l4 - 4]
12503:
1251	subcc %l3, 4, %l3
1252	bne 2b
1253	ld [%l4], %l5
12541:
	/* %l0 holds the PROM client-interface entry point; call it once
	 * per prepared argument frame: finddevice, getprop, write, exit. */
1255	call %l0
1256	mov %o0, %l1
1257
1258	ld [%l1 + (sun4u_r1 - sun4u_a1)], %o1
1259	add %l1, (sun4u_a2 - sun4u_a1), %o0
1260	call %l0
1261	st %o1, [%o0 + (sun4u_i2 - sun4u_a2)]
1262
1263	ld [%l1 + (sun4u_1 - sun4u_a1)], %o1
1264	add %l1, (sun4u_a3 - sun4u_a1), %o0
1265	call %l0
1266	st %o1, [%o0 + (sun4u_i3 - sun4u_a3)]
1267
1268	call %l0
1269	add %l1, (sun4u_a4 - sun4u_a1), %o0
1270
1271	/* Not reached */
1272halt_me:
1273	ld [%g7 + 0x74], %o0
1274	call %o0 ! Get us out of here...
1275	nop ! Apparently Solaris is better.
1276
1277/* Ok, now we continue in the .data/.text sections */
1278
1279	.data
1280	.align 4
1281
1282/*
1283 * Fill up the prom vector, note in particular the kind first element,
1284 * no joke. I don't need all of them in here as the entire prom vector
1285 * gets initialized in c-code so all routines can use it.
1286 */
1287
1288	.globl prom_vector_p
1289prom_vector_p:
1290	.word 0
1291
1292/* We calculate the following at boot time, window fills/spills and trap entry
1293 * code uses these to keep track of the register windows.
1294 */
1295
1296	.align 4
1297	.globl nwindows
1298	.globl nwindowsm1
1299nwindows:
1300	.word 8
1301nwindowsm1:
1302	.word 7
1303
1304/* Boot time debugger vector value. We need this later on. */
1305
1306	.align 4
1307	.globl linux_dbvec
1308linux_dbvec:
1309	.word 0
1310	.word 0
1311
1312	.align 8
1313
	/* Scratch save area plus handler pointer for the level-14
	 * (profile timer) interrupt. */
1314	.globl lvl14_save
1315lvl14_save:
1316	.word 0
1317	.word 0
1318	.word 0
1319	.word 0
1320	.word t_irq14
1321
	/* Fixup stub: uaccess fault handlers branch here to return
	 * -EFAULT to the caller. */
1322	.section ".fixup",#alloc,#execinstr
1323	.globl __ret_efault
1324__ret_efault:
1325	ret
1326	restore %g0, -EFAULT, %o0
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
new file mode 100644
index 000000000000..2e1b0f6e99d4
--- /dev/null
+++ b/arch/sparc/kernel/idprom.c
@@ -0,0 +1,108 @@
1/* $Id: idprom.c,v 1.24 1999/08/31 06:54:20 davem Exp $
2 * idprom.c: Routines to load the idprom into kernel addresses and
3 * interpret the data contained within.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/init.h>
12
13#include <asm/oplib.h>
14#include <asm/idprom.h>
15#include <asm/machines.h> /* Fun with Sun released architectures. */
16#ifdef CONFIG_SUN4
17#include <asm/sun4paddr.h>
18extern void sun4setup(void);
19#endif
20
21struct idprom *idprom;
	/* Static backing store: the PROM copy is cached here once by
	 * idprom_init() and then published through the pointer above. */
22static struct idprom idprom_buffer;
23
24/* Here is the master table of Sun machines which use some implementation
25 * of the Sparc CPU and have a meaningful IDPROM machtype value that we
26 * know about. See asm-sparc/machines.h for empirical constants.
27 */
28struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
29/* First, Sun4's */
30{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
31{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },
32{ "Sun 4/300 Series", (SM_SUN4 | SM_4_330) },
33{ "Sun 4/400 Series", (SM_SUN4 | SM_4_470) },
34/* Now, Sun4c's */
35{ "Sun4c SparcStation 1", (SM_SUN4C | SM_4C_SS1) },
36{ "Sun4c SparcStation IPC", (SM_SUN4C | SM_4C_IPC) },
37{ "Sun4c SparcStation 1+", (SM_SUN4C | SM_4C_SS1PLUS) },
38{ "Sun4c SparcStation SLC", (SM_SUN4C | SM_4C_SLC) },
39{ "Sun4c SparcStation 2", (SM_SUN4C | SM_4C_SS2) },
40{ "Sun4c SparcStation ELC", (SM_SUN4C | SM_4C_ELC) },
41{ "Sun4c SparcStation IPX", (SM_SUN4C | SM_4C_IPX) },
42/* Finally, early Sun4m's */
43{ "Sun4m SparcSystem600", (SM_SUN4M | SM_4M_SS60) },
44{ "Sun4m SparcStation10/20", (SM_SUN4M | SM_4M_SS50) },
45{ "Sun4m SparcStation5", (SM_SUN4M | SM_4M_SS40) },
46/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
47{ "Sun4M OBP based system", (SM_SUN4M_OBP | 0x0) } };
48
49static void __init display_system_type(unsigned char machtype)
50{
51 char sysname[128];
52 register int i;
53
54 for (i = 0; i < NUM_SUN_MACHINES; i++) {
55 if(Sun_Machines[i].id_machtype == machtype) {
56 if (machtype != (SM_SUN4M_OBP | 0x00) ||
57 prom_getproperty(prom_root_node, "banner-name",
58 sysname, sizeof(sysname)) <= 0)
59 printk("TYPE: %s\n", Sun_Machines[i].name);
60 else
61 printk("TYPE: %s\n", sysname);
62 return;
63 }
64 }
65
66 prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
67 prom_halt();
68}
69
70/* Calculate the IDPROM checksum (xor of the data bytes). */
71static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
72{
73 unsigned char cksum, i, *ptr = (unsigned char *)idprom;
74
75 for (i = cksum = 0; i <= 0x0E; i++)
76 cksum ^= *ptr++;
77
78 return cksum;
79}
80
81/* Create a local IDPROM copy, verify integrity, and display information. */
82void __init idprom_init(void)
83{
84 prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
85
86 idprom = &idprom_buffer;
87
88 if (idprom->id_format != 0x01) {
89 prom_printf("IDPROM: Unknown format type!\n");
90 prom_halt();
91 }
92
93 if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
94 prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
95 idprom->id_cksum, calc_idprom_cksum(idprom));
96 prom_halt();
97 }
98
99 display_system_type(idprom->id_machtype);
100
101 printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
102 idprom->id_ethaddr[0], idprom->id_ethaddr[1],
103 idprom->id_ethaddr[2], idprom->id_ethaddr[3],
104 idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
105#ifdef CONFIG_SUN4
106 sun4setup();
107#endif
108}
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
new file mode 100644
index 000000000000..fc31de66b1c2
--- /dev/null
+++ b/arch/sparc/kernel/init_task.c
@@ -0,0 +1,28 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init_task.h>
5#include <linux/mqueue.h>
6
7#include <asm/pgtable.h>
8#include <asm/uaccess.h>
9
	/* Static bootstrap instances of the process-zero bookkeeping
	 * structures; everything else in the kernel forks from these. */
10static struct fs_struct init_fs = INIT_FS;
11static struct files_struct init_files = INIT_FILES;
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct mm_struct init_mm = INIT_MM(init_mm);
15struct task_struct init_task = INIT_TASK(init_task);
16
17EXPORT_SYMBOL(init_mm);
18EXPORT_SYMBOL(init_task);
19
20/* .text section in head.S is aligned at 8k boundary and this gets linked
21 * right after that so that the init_thread_union is aligned properly as well.
22 * If this is not aligned on a 8k boundry, then you should change code
23 * in etrap.S which assumes it.
24 */
	/* The section attribute deliberately smuggles the symbol into .text
	 * (the '"\n\t#' suffix comments out the assembler's trailing quote). */
25union thread_union init_thread_union
26 __attribute__((section (".text\"\n\t#")))
27 __attribute__((aligned (THREAD_SIZE)))
28 = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
new file mode 100644
index 000000000000..d0f2bd227c4c
--- /dev/null
+++ b/arch/sparc/kernel/ioport.c
@@ -0,0 +1,731 @@
1/* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $
2 * ioport.c: Simple io mapping allocator.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
8 *
9 * 2000/01/29
10 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
11 * things are ok.
12 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
13 * pointer into the big page mapping
14 * <rth> zait: so what?
15 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
16 * <zaitcev> Hmm
17 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
18 * So far so good.
19 * <zaitcev> Now, driver calls pci_free_consistent(with result of
20 * remap_it_my_way()).
21 * <zaitcev> How do you find the address to pass to free_pages()?
22 * <rth> zait: walk the page tables? It's only two or three level after all.
23 * <rth> zait: you have to walk them anyway to remove the mapping.
24 * <zaitcev> Hmm
25 * <zaitcev> Sounds reasonable
26 */
27
28#include <linux/config.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/types.h>
33#include <linux/ioport.h>
34#include <linux/mm.h>
35#include <linux/slab.h>
36#include <linux/pci.h> /* struct pci_dev */
37#include <linux/proc_fs.h>
38
39#include <asm/io.h>
40#include <asm/vaddrs.h>
41#include <asm/oplib.h>
42#include <asm/page.h>
43#include <asm/pgalloc.h>
44#include <asm/dma.h>
45
46#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
47
48struct resource *_sparc_find_resource(struct resource *r, unsigned long);
49
50static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
51static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
52 unsigned long size, char *name);
53static void _sparc_free_io(struct resource *res);
54
55/* This points to the next to use virtual memory for DVMA mappings */
56static struct resource _sparc_dvma = {
57 .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
58};
59/* This points to the start of I/O mappings, cluable from outside. */
60/*ext*/ struct resource sparc_iomap = {
61 .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
62};
63
64/*
65 * Our mini-allocator...
66 * Boy this is gross! We need it because we must map I/O for
67 * timers and interrupt controller before the kmalloc is available.
68 */
69
70#define XNMLN 15
71#define XNRES 10 /* SS-10 uses 8 */
72
73struct xresource {
74 struct resource xres; /* Must be first */
75 int xflag; /* 1 == used */
76 char xname[XNMLN+1];
77};
78
	/* Fixed pool handed out by xres_alloc()/xres_free(); iounmap()
	 * distinguishes pool entries from kmalloced ones by address range. */
79static struct xresource xresv[XNRES];
80
81static struct xresource *xres_alloc(void) {
82 struct xresource *xrp;
83 int n;
84
85 xrp = xresv;
86 for (n = 0; n < XNRES; n++) {
87 if (xrp->xflag == 0) {
88 xrp->xflag = 1;
89 return xrp;
90 }
91 xrp++;
92 }
93 return NULL;
94}
95
96static void xres_free(struct xresource *xrp) {
97 xrp->xflag = 0;
98}
99
100/*
101 * These are typically used in PCI drivers
102 * which are trying to be cross-platform.
103 *
104 * Bus type is always zero on IIep.
105 */
106void __iomem *ioremap(unsigned long offset, unsigned long size)
107{
108 char name[14];
109
110 sprintf(name, "phys_%08x", (u32)offset);
111 return _sparc_alloc_io(0, offset, size, name);
112}
113
114/*
115 * Comlimentary to ioremap().
116 */
117void iounmap(volatile void __iomem *virtual)
118{
119 unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
120 struct resource *res;
121
122 if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
123 printk("free_io/iounmap: cannot free %lx\n", vaddr);
124 return;
125 }
126 _sparc_free_io(res);
127
128 if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
129 xres_free((struct xresource *)res);
130 } else {
131 kfree(res);
132 }
133}
134
135/*
136 */
137void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
138 unsigned long size, char *name)
139{
140 return _sparc_alloc_io(phyres->flags & 0xF,
141 phyres->start + offset, size, name);
142}
143
144/*
145 */
146void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
147{
148 iounmap(addr);
149}
150
151/*
152 * Meat of mapping
153 */
154static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
155 unsigned long size, char *name)
156{
157 static int printed_full;
158 struct xresource *xres;
159 struct resource *res;
160 char *tack;
161 int tlen;
162 void __iomem *va; /* P3 diag */
163
164 if (name == NULL) name = "???";
165
166 if ((xres = xres_alloc()) != 0) {
167 tack = xres->xname;
168 res = &xres->xres;
169 } else {
170 if (!printed_full) {
171 printk("ioremap: done with statics, switching to malloc\n");
172 printed_full = 1;
173 }
174 tlen = strlen(name);
175 tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
176 if (tack == NULL) return NULL;
177 memset(tack, 0, sizeof(struct resource));
178 res = (struct resource *) tack;
179 tack += sizeof (struct resource);
180 }
181
182 strlcpy(tack, name, XNMLN+1);
183 res->name = tack;
184
185 va = _sparc_ioremap(res, busno, phys, size);
186 /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
187 return va;
188}
189
190/*
191 */
192static void __iomem *
193_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
194{
195 unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
196
197 if (allocate_resource(&sparc_iomap, res,
198 (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
199 sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
200 /* Usually we cannot see printks in this case. */
201 prom_printf("alloc_io_res(%s): cannot occupy\n",
202 (res->name != NULL)? res->name: "???");
203 prom_halt();
204 }
205
206 pa &= PAGE_MASK;
207 sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
208
209 return (void __iomem *) (res->start + offset);
210}
211
212/*
213 * Comlimentary to _sparc_ioremap().
214 */
215static void _sparc_free_io(struct resource *res)
216{
217 unsigned long plen;
218
219 plen = res->end - res->start + 1;
220 if ((plen & (PAGE_SIZE-1)) != 0) BUG();
221 sparc_unmapiorange(res->start, plen);
222 release_resource(res);
223}
224
225#ifdef CONFIG_SBUS
226
/* 64-bit SBUS transfers are a sparc64 feature; just log on sparc32. */
void sbus_set_sbus64(struct sbus_dev *sdev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
230
231/*
232 * Allocate a chunk of memory suitable for DMA.
233 * Typically devices use them for control blocks.
234 * CPU may access them without any explicit flushing.
235 *
236 * XXX Some clever people know that sdev is not used and supply NULL. Watch.
237 */
238void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
239{
240 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
241 unsigned long va;
242 struct resource *res;
243 int order;
244
245 /* XXX why are some lenghts signed, others unsigned? */
246 if (len <= 0) {
247 return NULL;
248 }
249 /* XXX So what is maxphys for us and how do drivers know it? */
250 if (len > 256*1024) { /* __get_free_pages() limit */
251 return NULL;
252 }
253
254 order = get_order(len_total);
255 if ((va = __get_free_pages(GFP_KERNEL, order)) == 0)
256 goto err_nopages;
257
258 if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
259 goto err_nomem;
260 memset((char*)res, 0, sizeof(struct resource));
261
262 if (allocate_resource(&_sparc_dvma, res, len_total,
263 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
264 printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
265 goto err_nova;
266 }
267 mmu_inval_dma_area(va, len_total);
268 // XXX The mmu_map_dma_area does this for us below, see comments.
269 // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
270 /*
271 * XXX That's where sdev would be used. Currently we load
272 * all iommu tables with the same translations.
273 */
274 if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
275 goto err_noiommu;
276
277 return (void *)res->start;
278
279err_noiommu:
280 release_resource(res);
281err_nova:
282 free_pages(va, order);
283err_nomem:
284 kfree(res);
285err_nopages:
286 return NULL;
287}
288
289void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
290{
291 struct resource *res;
292 struct page *pgv;
293
294 if ((res = _sparc_find_resource(&_sparc_dvma,
295 (unsigned long)p)) == NULL) {
296 printk("sbus_free_consistent: cannot free %p\n", p);
297 return;
298 }
299
300 if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
301 printk("sbus_free_consistent: unaligned va %p\n", p);
302 return;
303 }
304
305 n = (n + PAGE_SIZE-1) & PAGE_MASK;
306 if ((res->end-res->start)+1 != n) {
307 printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
308 (long)((res->end-res->start)+1), n);
309 return;
310 }
311
312 release_resource(res);
313 kfree(res);
314
315 /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
316 pgv = mmu_translate_dvma(ba);
317 mmu_unmap_dma_area(ba, n);
318
319 __free_pages(pgv, get_order(n));
320}
321
322/*
323 * Map a chunk of memory so that devices can see it.
324 * CPU view of this memory may be inconsistent with
325 * a device view and explicit flushing is necessary.
326 */
327dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
328{
329 /* XXX why are some lenghts signed, others unsigned? */
330 if (len <= 0) {
331 return 0;
332 }
333 /* XXX So what is maxphys for us and how do drivers know it? */
334 if (len > 256*1024) { /* __get_free_pages() limit */
335 return 0;
336 }
337 return mmu_get_scsi_one(va, len, sdev->bus);
338}
339
340void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
341{
342 mmu_release_scsi_one(ba, n, sdev->bus);
343}
344
345int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
346{
347 mmu_get_scsi_sgl(sg, n, sdev->bus);
348
349 /*
350 * XXX sparc64 can return a partial length here. sun4c should do this
351 * but it currently panics if it can't fulfill the request - Anton
352 */
353 return n;
354}
355
356void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
357{
358 mmu_release_scsi_sgl(sg, n, sdev->bus);
359}
360
361/*
362 */
363void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
364{
365#if 0
366 unsigned long va;
367 struct resource *res;
368
369 /* We do not need the resource, just print a message if invalid. */
370 res = _sparc_find_resource(&_sparc_dvma, ba);
371 if (res == NULL)
372 panic("sbus_dma_sync_single: 0x%x\n", ba);
373
374 va = page_address(mmu_translate_dvma(ba)); /* XXX higmem */
375 /*
376 * XXX This bogosity will be fixed with the iommu rewrite coming soon
377 * to a kernel near you. - Anton
378 */
379 /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
380#endif
381}
382
/* Sync a single streaming mapping for device access.  Currently a
 * no-op, mirroring sbus_dma_sync_single_for_cpu() above.
 */
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
{
#if 0
	unsigned long va;
	struct resource *res;

	/* We do not need the resource, just print a message if invalid. */
	res = _sparc_find_resource(&_sparc_dvma, ba);
	if (res == NULL)
		panic("sbus_dma_sync_single: 0x%x\n", ba);

	va = page_address(mmu_translate_dvma(ba)); /* XXX higmem */
	/*
	 * XXX This bogosity will be fixed with the iommu rewrite coming soon
	 * to a kernel near you. - Anton
	 */
	/* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}
402
/* Scatterlist CPU-sync: unimplemented, only logs a warning. */
void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
}
407
/* Scatterlist device-sync: unimplemented, only logs a warning. */
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
	printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
}
412#endif /* CONFIG_SBUS */
413
414#ifdef CONFIG_PCI
415
416/* Allocate and map kernel buffer using consistent mode DMA for a device.
417 * hwdev should be valid struct pci_dev pointer for PCI devices.
418 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;	/* round up to pages */
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	/* Physically contiguous backing pages for the buffer. */
	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	/* Carve a range out of the DVMA resource tree for the uncached
	 * kernel alias of the buffer; roll back the pages on failure.
	 */
	if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}
	memset((char*)res, 0, sizeof(struct resource));

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;	/* caller gets the uncached alias */
}
464
465/* Free and unmap a consistent DMA buffer.
466 * cpu_addr is what was returned from pci_alloc_consistent,
467 * size must be the same as what as passed into pci_alloc_consistent,
468 * and likewise dma_addr must be the same as what *dma_addrp was set to.
469 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
471 * past this call are illegal.
472 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	/* @p is the uncached alias handed out by pci_alloc_consistent;
	 * find its record in the DVMA resource tree.
	 */
	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	/* Round to whole pages and insist the caller passed the size
	 * the region was allocated with.
	 */
	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
505
506/* Map a single buffer of the indicated size for DMA in streaming mode.
507 * The 32-bit bus address to use is returned.
508 *
509 * Once the device is given the dma address, the device owns this memory
510 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
511 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* IIep is write-through, not flushing: the bus address is just
	 * the physical address of the buffer. */
	return virt_to_phys(ptr);
}
520
521/* Unmap a single streaming mode DMA translation. The dma_addr and size
522 * must match what was provided for in a previous pci_map_single call. All
523 * other usages are undefined.
524 *
525 * After this call, reads by the cpu to the buffer are guaranteed to see
526 * whatever the device wrote there.
527 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* For FROMDEVICE/BIDIRECTIONAL, invalidate the cached alias so
	 * the CPU sees what the device wrote. */
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
538
539/*
540 * Same as pci_map_single, but with pages.
541 */
/* Page-based variant of pci_map_single(). */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
    unsigned long offset, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
550
/* Page-based unmap: currently does nothing beyond sanity checking. */
void pci_unmap_page(struct pci_dev *hwdev,
    dma_addr_t dma_address, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* mmu_inval_dma_area XXX */
}
558
559/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
561 * above pci_map_single interface. Here the scatter gather list
562 * elements are each tagged with the appropriate dma address
563 * and length. They are obtained via sg_dma_{address,length}(SG).
564 *
565 * NOTE: An implementation may be able to use a smaller number of
566 * DMA address/length pairs than there are SG table elements.
567 * (for example via virtual mapping capabilities)
568 * The routine returns the number of addr/length pairs actually
569 * used, at most nents.
570 *
571 * Device ownership issues as mentioned above for pci_map_single are
572 * the same here.
573 */
590
591/* Unmap a set of streaming mode DMA translations.
592 * Again, cpu read rules concerning calls here are the same as for
593 * pci_unmap_single() above.
594 */
595void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
596 int direction)
597{
598 int n;
599
600 if (direction == PCI_DMA_NONE)
601 BUG();
602 if (direction != PCI_DMA_TODEVICE) {
603 for (n = 0; n < nents; n++) {
604 if (page_address(sg->page) == NULL) BUG();
605 mmu_inval_dma_area(
606 (unsigned long) page_address(sg->page),
607 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
608 sg++;
609 }
610 }
611}
612
613/* Make physical memory consistent for a single
614 * streaming mode DMA translation before or after a transfer.
615 *
616 * If you perform a pci_map_single() but wish to interrogate the
617 * buffer using the cpu, yet do not wish to teardown the PCI dma
618 * mapping, you must call this function before doing so. At the
619 * next point you give the PCI dma address back to the card, you
620 * must first perform a pci_dma_sync_for_device, and then the
621 * device again owns the buffer.
622 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	/* Invalidate so CPU reads see device writes; nothing to do for
	 * TODEVICE since IIep is write-through. */
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
632
/* Device-direction sync; identical action to the _for_cpu variant. */
void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
		BUG();
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
642
643/* Make physical memory consistent for a set of streaming
644 * mode DMA translations after a transfer.
645 *
646 * The same as pci_dma_sync_single_* but for a scatter-gather list,
647 * same rules and usage.
648 */
649void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
650{
651 int n;
652
653 if (direction == PCI_DMA_NONE)
654 BUG();
655 if (direction != PCI_DMA_TODEVICE) {
656 for (n = 0; n < nents; n++) {
657 if (page_address(sg->page) == NULL) BUG();
658 mmu_inval_dma_area(
659 (unsigned long) page_address(sg->page),
660 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
661 sg++;
662 }
663 }
664}
665
666void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
667{
668 int n;
669
670 if (direction == PCI_DMA_NONE)
671 BUG();
672 if (direction != PCI_DMA_TODEVICE) {
673 for (n = 0; n < nents; n++) {
674 if (page_address(sg->page) == NULL) BUG();
675 mmu_inval_dma_area(
676 (unsigned long) page_address(sg->page),
677 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
678 sg++;
679 }
680 }
681}
682#endif /* CONFIG_PCI */
683
684#ifdef CONFIG_PROC_FS
685
686static int
687_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
688 void *data)
689{
690 char *p = buf, *e = buf + length;
691 struct resource *r;
692 const char *nm;
693
694 for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
695 if (p + 32 >= e) /* Better than nothing */
696 break;
697 if ((nm = r->name) == 0) nm = "???";
698 p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
699 }
700
701 return p-buf;
702}
703
704#endif /* CONFIG_PROC_FS */
705
706/*
707 * This is a version of find_resource and it belongs to kernel/resource.c.
708 * Until we have agreement with Linus and Martin, it lingers here.
709 *
710 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
711 * This probably warrants some sort of hashing.
712 */
713struct resource *
714_sparc_find_resource(struct resource *root, unsigned long hit)
715{
716 struct resource *tmp;
717
718 for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
719 if (tmp->start <= hit && tmp->end >= hit)
720 return tmp;
721 }
722 return NULL;
723}
724
/* Register /proc/io_map and /proc/dvma_map, both rendered by
 * _sparc_io_get_info() over their respective resource trees.
 */
void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);
	create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma);
#endif
}
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
new file mode 100644
index 000000000000..410b9a72aba9
--- /dev/null
+++ b/arch/sparc/kernel/irq.c
@@ -0,0 +1,614 @@
1/* $Id: irq.c,v 1.114 2001/12/11 04:55:51 davem Exp $
2 * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
3 * Sparc the IRQ's are basically 'cast in stone'
4 * and you are supposed to probe the prom's device
5 * node trees to find out who's got which IRQ.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
9 * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
10 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
11 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/ptrace.h>
18#include <linux/errno.h>
19#include <linux/linkage.h>
20#include <linux/kernel_stat.h>
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <linux/random.h>
26#include <linux/init.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/delay.h>
30#include <linux/threads.h>
31#include <linux/spinlock.h>
32#include <linux/seq_file.h>
33
34#include <asm/ptrace.h>
35#include <asm/processor.h>
36#include <asm/system.h>
37#include <asm/psr.h>
38#include <asm/smp.h>
39#include <asm/vaddrs.h>
40#include <asm/timer.h>
41#include <asm/openprom.h>
42#include <asm/oplib.h>
43#include <asm/traps.h>
44#include <asm/irq.h>
45#include <asm/io.h>
46#include <asm/pgalloc.h>
47#include <asm/pgtable.h>
48#include <asm/pcic.h>
49#include <asm/cacheflush.h>
50
51#ifdef CONFIG_SMP
52#define SMP_NOP2 "nop; nop;\n\t"
53#define SMP_NOP3 "nop; nop; nop;\n\t"
54#else
55#define SMP_NOP2
56#define SMP_NOP3
57#endif /* SMP */
/* Disable local interrupts by setting the full PIL field in %psr and
 * return the previous %psr value for local_irq_restore().
 */
unsigned long __local_irq_save(void)
{
	unsigned long retval;
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		SMP_NOP3 /* Sun4m + Cypress + SMP bug */
		"or %0, %2, %1\n\t"
		"wr %1, 0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (retval), "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");

	return retval;
}
75
/* Enable local interrupts by clearing the PIL field of %psr. */
void local_irq_enable(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		SMP_NOP3 /* Sun4m + Cypress + SMP bug */
		"andn %0, %1, %0\n\t"
		"wr %0, 0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}
90
/* Restore only the PIL bits of %psr from @old_psr (as returned by
 * __local_irq_save()), leaving the other fields as they now are.
 */
void local_irq_restore(unsigned long old_psr)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"and %2, %1, %2\n\t"
		SMP_NOP2 /* Sun4m + Cypress + SMP bug */
		"andn %0, %1, %0\n\t"
		"wr %0, %2, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL), "r" (old_psr)
		: "memory");
}
106
107EXPORT_SYMBOL(__local_irq_save);
108EXPORT_SYMBOL(local_irq_enable);
109EXPORT_SYMBOL(local_irq_restore);
110
111/*
112 * Dave Redman (djhr@tadpole.co.uk)
113 *
114 * IRQ numbers.. These are no longer restricted to 15..
115 *
116 * this is done to enable SBUS cards and onboard IO to be masked
117 * correctly. using the interrupt level isn't good enough.
118 *
119 * For example:
120 * A device interrupting at sbus level6 and the Floppy both come in
121 * at IRQ11, but enabling and disabling them requires writing to
122 * different bits in the SLAVIO/SEC.
123 *
124 * As a result of these changes sun4m machines could now support
125 * directed CPU interrupts using the existing enable/disable irq code
126 * with tweaks.
127 *
128 */
129
/* Fatal fallback for machines whose setup never installed real IRQ
 * handlers: print the PROM machine type and halt.
 */
static void irq_panic(void)
{
	extern char *cputypval;
	prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
	prom_halt();
}

/* Timer-init hook; machine-specific code is expected to replace it.
 * Defaults to irq_panic so an unconfigured machine fails loudly.
 */
void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
    (void (*)(irqreturn_t (*)(int, void *,struct pt_regs *))) irq_panic;
139
140/*
141 * Dave Redman (djhr@tadpole.co.uk)
142 *
143 * There used to be extern calls and hard coded values here.. very sucky!
144 * instead, because some of the devices attach very early, I do something
145 * equally sucky but at least we'll never try to free statically allocated
146 * space or call kmalloc before kmalloc_init :(.
147 *
148 * In fact it's the timer10 that attaches first.. then timer14
149 * then kmalloc_init is called.. then the tty interrupts attach.
150 * hmmm....
151 *
152 */
153#define MAX_STATIC_ALLOC 4
154struct irqaction static_irqaction[MAX_STATIC_ALLOC];
155int static_irq_count;
156
157struct irqaction *irq_action[NR_IRQS] = {
158 [0 ... (NR_IRQS-1)] = NULL
159};
160
161/* Used to protect the IRQ action lists */
162DEFINE_SPINLOCK(irq_action_lock);
163
/* /proc/interrupts seq_file show routine: one line per installed IRQ
 * with per-CPU counts on SMP, a '+' marker for SA_INTERRUPT ("fast")
 * handlers, and every sharer of the line appended after a comma.
 * sun4d has its own implementation.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int j;
#endif

	if (sparc_cpu_model == sun4d) {
		extern int show_sun4d_interrupts(struct seq_file *, void *);

		return show_sun4d_interrupts(p, v);
	}
	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		action = *(i + irq_action);
		if (!action)
			goto out_unlock;	/* nothing installed at this level */
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "%10u ",
				    kstat_cpu(cpu_logical_map(j)).irqs[i]);
		}
#endif
		seq_printf(p, " %c %s",
			(action->flags & SA_INTERRUPT) ? '+' : ' ',
			action->name);
		for (action=action->next; action; action = action->next) {
			seq_printf(p, ",%s %s",
				(action->flags & SA_INTERRUPT) ? " +" : "",
				action->name);
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}
207
208void free_irq(unsigned int irq, void *dev_id)
209{
210 struct irqaction * action;
211 struct irqaction * tmp = NULL;
212 unsigned long flags;
213 unsigned int cpu_irq;
214
215 if (sparc_cpu_model == sun4d) {
216 extern void sun4d_free_irq(unsigned int, void *);
217
218 sun4d_free_irq(irq, dev_id);
219 return;
220 }
221 cpu_irq = irq & (NR_IRQS - 1);
222 if (cpu_irq > 14) { /* 14 irq levels on the sparc */
223 printk("Trying to free bogus IRQ %d\n", irq);
224 return;
225 }
226
227 spin_lock_irqsave(&irq_action_lock, flags);
228
229 action = *(cpu_irq + irq_action);
230
231 if (!action->handler) {
232 printk("Trying to free free IRQ%d\n",irq);
233 goto out_unlock;
234 }
235 if (dev_id) {
236 for (; action; action = action->next) {
237 if (action->dev_id == dev_id)
238 break;
239 tmp = action;
240 }
241 if (!action) {
242 printk("Trying to free free shared IRQ%d\n",irq);
243 goto out_unlock;
244 }
245 } else if (action->flags & SA_SHIRQ) {
246 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
247 goto out_unlock;
248 }
249 if (action->flags & SA_STATIC_ALLOC)
250 {
251 /* This interrupt is marked as specially allocated
252 * so it is a bad idea to free it.
253 */
254 printk("Attempt to free statically allocated IRQ%d (%s)\n",
255 irq, action->name);
256 goto out_unlock;
257 }
258
259 if (action && tmp)
260 tmp->next = action->next;
261 else
262 *(cpu_irq + irq_action) = action->next;
263
264 spin_unlock_irqrestore(&irq_action_lock, flags);
265
266 synchronize_irq(irq);
267
268 spin_lock_irqsave(&irq_action_lock, flags);
269
270 kfree(action);
271
272 if (!(*(cpu_irq + irq_action)))
273 disable_irq(irq);
274
275out_unlock:
276 spin_unlock_irqrestore(&irq_action_lock, flags);
277}
278
279EXPORT_SYMBOL(free_irq);
280
281/*
282 * This is called when we want to synchronize with
283 * interrupts. We may for example tell a device to
284 * stop sending interrupts: but to make sure there
285 * are no interrupts that are executing on another
286 * CPU we need to call this function.
287 */
#ifdef CONFIG_SMP
/* SMP irq synchronization is unimplemented on sparc32: any caller
 * reaching this triggers BUG().
 */
void synchronize_irq(unsigned int irq)
{
	printk("synchronize_irq says: implement me!\n");
	BUG();
}
#endif /* SMP */
295
/* Report an interrupt that arrived with no (usable) handler chain and
 * panic.  Dumps PC/NPC/FP from the trap frame plus the expected
 * handlers for this level.
 */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
	int i;
	struct irqaction * action;
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	action = *(cpu_irq + irq_action);

	printk("IO device interrupt, irq = %d\n", irq);
	printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
		    regs->npc, regs->u_regs[14]);
	if (action) {
		printk("Expecting: ");
		/* NOTE(review): this loop prints the head action 16
		 * times; it looks like it was meant to walk
		 * action->next -- confirm before trusting the output.
		 */
		for (i = 0; i < 16; i++)
			if (action->handler)
				printk("[%s:%d:0x%x] ", action->name,
				       (int) i, (unsigned int) action->handler);
	}
	printk("AIEEE\n");
	panic("bogus interrupt received");
}
318
/* Main slow-path interrupt dispatcher, entered from the trap table.
 * Masks the PIL level, bumps statistics and runs every handler chained
 * on the level; an empty chain ends up in unexpected_irq(), which
 * panics and does not return.
 */
void handler_irq(int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	extern void smp4m_irq_rotate(int cpu);
#endif

	irq_enter();
	disable_pil_irq(irq);
#ifdef CONFIG_SMP
	/* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
	if(irq < 10)
		smp4m_irq_rotate(cpu);
#endif
	action = *(irq + irq_action);
	kstat_cpu(cpu).irqs[irq]++;
	do {
		if (!action || !action->handler)
			unexpected_irq(irq, NULL, regs);	/* panics */
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	enable_pil_irq(irq);
	irq_exit();
}
345
346#ifdef CONFIG_BLK_DEV_FD
347extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
348
/* Dedicated floppy interrupt entry: like handler_irq() but calls
 * floppy_interrupt() directly instead of walking an action list.
 */
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	disable_pil_irq(irq);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	floppy_interrupt(irq, dev_id, regs);
	irq_exit();
	enable_pil_irq(irq);
	// XXX Eek, it's totally changed with preempt_count() and such
	// if (softirq_pending(cpu))
	// do_softirq();
}
363#endif
364
365/* Fast IRQ's on the Sparc can only have one routine attached to them,
366 * thus no sharing possible.
367 */
/* Install a "fast" handler: the trap table entry for the level is
 * patched to branch straight into @handler, bypassing handler_irq(),
 * so the level cannot be shared and at most one handler may exist.
 *
 * Returns 0 on success, -EINVAL for a bad level or NULL handler,
 * -EBUSY if the level already has an owner, -ENOMEM on allocation
 * failure.
 */
int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *devname)
{
	struct irqaction *action;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;
#ifdef CONFIG_SMP
	struct tt_entry *trap_table;
	extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif

	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* only 14 maskable levels on sparc */
		ret = -EINVAL;
		goto out;
	}
	if(!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(cpu_irq + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");

		/* Anyway, someone already owns it so cannot be made fast. */
		printk("request_fast_irq: Trying to register yet already owned.\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
			       irq, devname);
	}

	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Dork with trap table if we get this far. */
#define INSTANTIATE(table) \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
		SPARC_BRANCH((unsigned long) handler, \
			     (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

	INSTANTIATE(sparc_ttable)
#ifdef CONFIG_SMP
	trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
	/*
	 * XXX Correct thing would be to flush only I- and D-cache lines
	 * which contain the handler in question. But as of time of the
	 * writing we have no CPU-neutral interface to fine-grained flushes.
	 */
	flush_cache_all();

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = NULL;
	action->next = NULL;

	*(cpu_irq + irq_action) = action;

	enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
466
/* Register a (possibly shared) slow handler for an interrupt level.
 * sun4d is delegated to sun4d_request_irq().
 *
 * Returns 0 on success, -EINVAL for a bad level or NULL handler,
 * -EBUSY on a sharing or fast/slow mismatch, -ENOMEM on allocation
 * failure.
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction * action, *tmp = NULL;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;

	if (sparc_cpu_model == sun4d) {
		extern int sun4d_request_irq(unsigned int,
		    irqreturn_t (*)(int, void *, struct pt_regs *),
		    unsigned long, const char *, void *);
		return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
	}
	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* only 14 maskable levels on sparc */
		ret = -EINVAL;
		goto out;
	}
	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(cpu_irq + irq_action);
	if (action) {
		/* Sharing needs SA_SHIRQ on both sides; leave tmp at
		 * the tail of the existing chain for linking below.
		 */
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next);
		} else {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL; /* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*(cpu_irq + irq_action) = action;

	enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
549
550EXPORT_SYMBOL(request_irq);
551
552/* We really don't need these at all on the Sparc. We only have
553 * stubs here because they are exported to modules.
554 */
/* IRQ autoprobe stub: no probing on sparc, always returns 0. */
unsigned long probe_irq_on(void)
{
	return 0;
}
559
560EXPORT_SYMBOL(probe_irq_on);
561
/* IRQ autoprobe stub: always reports "no interrupt seen". */
int probe_irq_off(unsigned long mask)
{
	return 0;
}
566
567EXPORT_SYMBOL(probe_irq_off);
568
569/* djhr
570 * This could probably be made indirect too and assigned in the CPU
571 * bits of the code. That would be much nicer I think and would also
572 * fit in with the idea of being able to tune your kernel for your machine
573 * by removing unrequired machine and device support.
574 *
575 */
576
/* Boot-time interrupt setup: dispatch to the machine-specific init
 * routine based on sparc_cpu_model, then apply btfixup patching.
 */
void __init init_IRQ(void)
{
	extern void sun4c_init_IRQ( void );
	extern void sun4m_init_IRQ( void );
	extern void sun4d_init_IRQ( void );

	switch(sparc_cpu_model) {
	case sun4c:
	case sun4:
		sun4c_init_IRQ();
		break;

	case sun4m:
#ifdef CONFIG_PCI
		/* sun4m machines with a PCIC controller take the PCI
		 * interrupt path when the probe finds one. */
		pcic_probe();
		if (pcic_present()) {
			sun4m_pci_init_IRQ();
			break;
		}
#endif
		sun4m_init_IRQ();
		break;

	case sun4d:
		sun4d_init_IRQ();
		break;

	default:
		prom_printf("Cannot initialize IRQ's on this Sun machine...");
		break;
	}
	btfixup();
}
610
/* Per-IRQ /proc setup hook required by generic code; empty here. */
void init_irq_proc(void)
{
	/* For now, nothing... */
}
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
new file mode 100644
index 000000000000..7931d6f92819
--- /dev/null
+++ b/arch/sparc/kernel/module.c
@@ -0,0 +1,159 @@
1/* Kernel module help for sparc32.
2 *
3 * Copyright (C) 2001 Rusty Russell.
4 * Copyright (C) 2002 David S. Miller.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/kernel.h>
9#include <linux/elf.h>
10#include <linux/vmalloc.h>
11#include <linux/fs.h>
12#include <linux/string.h>
13
14void *module_alloc(unsigned long size)
15{
16 void *ret;
17
18 /* We handle the zero case fine, unlike vmalloc */
19 if (size == 0)
20 return NULL;
21
22 ret = vmalloc(size);
23 if (!ret)
24 ret = ERR_PTR(-ENOMEM);
25 else
26 memset(ret, 0, size);
27
28 return ret;
29}
30
31/* Free memory returned from module_core_alloc/module_init_alloc */
/* Release a region obtained from module_alloc().  @mod is unused. */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
	   table entries. */
}
38
39/* Make generic code ignore STT_REGISTER dummy undefined symbols,
40 * and replace references to .func with func as in ppc64's dedotify.
41 */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	unsigned int symidx;
	Elf32_Sym *sym;
	char *strtab;
	int i;

	/* Locate the symbol table section; error out if none exists. */
	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
		if (symidx == hdr->e_shnum-1) {
			printk("%s: no symtab found.\n", mod->name);
			return -ENOEXEC;
		}
	}
	sym = (Elf32_Sym *)sechdrs[symidx].sh_addr;
	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;

	/* Walk all symbols (index 0 is the reserved null symbol). */
	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
		if (sym[i].st_shndx == SHN_UNDEF) {
			/* STT_REGISTER dummies become absolute so the
			 * generic resolver ignores them. */
			if (ELF32_ST_TYPE(sym[i].st_info) == STT_REGISTER)
				sym[i].st_shndx = SHN_ABS;
			else {
				char *name = strtab + sym[i].st_name;
				/* Dedotify: strip a leading '.'; the
				 * move length covers the NUL too. */
				if (name[0] == '.')
					memmove(name, name+1, strlen(name));
			}
		}
	}
	return 0;
}
74
/* sparc32 modules use RELA relocations exclusively; reject plain REL
 * sections outright.
 */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
85
86int apply_relocate_add(Elf32_Shdr *sechdrs,
87 const char *strtab,
88 unsigned int symindex,
89 unsigned int relsec,
90 struct module *me)
91{
92 unsigned int i;
93 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
94 Elf32_Sym *sym;
95 u8 *location;
96 u32 *loc32;
97
98 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
99 Elf32_Addr v;
100
101 /* This is where to make the change */
102 location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
103 + rel[i].r_offset;
104 loc32 = (u32 *) location;
105 /* This is the symbol it is referring to. Note that all
106 undefined symbols have been resolved. */
107 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
108 + ELF32_R_SYM(rel[i].r_info);
109 v = sym->st_value + rel[i].r_addend;
110
111 switch (ELF32_R_TYPE(rel[i].r_info)) {
112 case R_SPARC_32:
113 location[0] = v >> 24;
114 location[1] = v >> 16;
115 location[2] = v >> 8;
116 location[3] = v >> 0;
117 break;
118
119 case R_SPARC_WDISP30:
120 v -= (Elf32_Addr) location;
121 *loc32 = (*loc32 & ~0x3fffffff) |
122 ((v >> 2) & 0x3fffffff);
123 break;
124
125 case R_SPARC_WDISP22:
126 v -= (Elf32_Addr) location;
127 *loc32 = (*loc32 & ~0x3fffff) |
128 ((v >> 2) & 0x3fffff);
129 break;
130
131 case R_SPARC_LO10:
132 *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
133 break;
134
135 case R_SPARC_HI22:
136 *loc32 = (*loc32 & ~0x3fffff) |
137 ((v >> 10) & 0x3fffff);
138 break;
139
140 default:
141 printk(KERN_ERR "module %s: Unknown relocation: %x\n",
142 me->name,
143 (int) (ELF32_R_TYPE(rel[i].r_info) & 0xff));
144 return -ENOEXEC;
145 };
146 }
147 return 0;
148}
149
/* Post-relocation hook required by the module loader; nothing to do
 * on sparc32.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return 0;
}
156
/* Arch-specific unload hook; nothing to clean up on sparc32. */
void module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/sparc/kernel/muldiv.c b/arch/sparc/kernel/muldiv.c
new file mode 100644
index 000000000000..37b9a4942232
--- /dev/null
+++ b/arch/sparc/kernel/muldiv.c
@@ -0,0 +1,240 @@
1/* $Id: muldiv.c,v 1.5 1997/12/15 20:07:20 ecd Exp $
2 * muldiv.c: Hardware multiply/division illegal instruction trap
3 * for sun4c/sun4 (which do not have those instructions)
4 *
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * 2004-12-25 Krzysztof Helt (krzysztof.h1@wp.pl)
9 * - fixed registers constrains in inline assembly declarations
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <asm/ptrace.h>
16#include <asm/processor.h>
17#include <asm/system.h>
18#include <asm/uaccess.h>
19
20/* #define DEBUG_MULDIV */
21
/* Non-zero iff the instruction uses the signed 13-bit immediate form,
 * i.e. the i bit (bit 13) of the opcode word is set. */
static inline int has_imm13(int insn)
{
	return insn & 0x2000;
}
26
/* Non-zero iff this is the "cc" variant of the op (bit 23 set),
 * i.e. the instruction also updates the integer condition codes. */
static inline int is_foocc(int insn)
{
	return insn & 0x800000;
}
31
/* Sign-extend the 13-bit immediate field (bits 12:0) of an instruction
 * word to a full int.
 *
 * The previous "imm << 19 >> 19" left-shifts into the sign bit of a
 * signed int, which is undefined behavior in C.  The mask/xor/subtract
 * form below is fully defined and produces the same value.
 */
static inline int sign_extend_imm13(int imm)
{
	return ((imm & 0x1fff) ^ 0x1000) - 0x1000;
}
36
37static inline void advance(struct pt_regs *regs)
38{
39 regs->pc = regs->npc;
40 regs->npc += 4;
41}
42
/* If any register operand is a window register (%l0-%l7/%i0-%i7, i.e.
 * register number >= 16), force the CPU's register windows out to the
 * stack: the seven nested saves overflow the window file so the spill
 * trap writes the user's windows to their stack frames, and the
 * restores unwind back to ours.  After this, fetch_reg()/store_reg()
 * can access those registers through the in-memory reg_window. */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd)
{
	if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		/* Wheee... */
		__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "restore; restore; restore; restore;\n\t"
				     "restore; restore; restore;\n\t");
	}
}
59
/* Evaluate to the value of integer register %r<reg> in the trapped
 * context.  %g0 always reads as zero; registers 1-15 come straight
 * from pt_regs; registers 16-31 live in the user's register window on
 * the stack and are read with get_user().
 *
 * NOTE: deliberately a macro, not a function -- when the get_user()
 * faults it executes "return -1" from the *enclosing* function
 * (do_user_muldiv), aborting the emulation. */
#define fetch_reg(reg, regs) ({ \
	struct reg_window __user *win; \
	register unsigned long ret; \
	\
	if (!(reg)) ret = 0; \
	else if ((reg) < 16) { \
		ret = regs->u_regs[(reg)]; \
	} else { \
		/* Ho hum, the slightly complicated case. */ \
		win = (struct reg_window __user *)regs->u_regs[UREG_FP];\
		if (get_user (ret, &win->locals[(reg) - 16])) return -1;\
	} \
	ret; \
})
74
75static inline int
76store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
77{
78 struct reg_window __user *win;
79
80 if (!reg)
81 return 0;
82 if (reg < 16) {
83 regs->u_regs[reg] = result;
84 return 0;
85 } else {
86 /* need to use put_user() in this case: */
87 win = (struct reg_window __user *) regs->u_regs[UREG_FP];
88 return (put_user(result, &win->locals[reg - 16]));
89 }
90}
91
92extern void handle_hw_divzero (struct pt_regs *regs, unsigned long pc,
93 unsigned long npc, unsigned long psr);
94
95/* Should return 0 if mul/div emulation succeeded and SIGILL should
96 * not be issued.
97 */
/* Emulate the hardware integer multiply/divide instructions (umul,
 * smul, udiv, sdiv) on CPUs that lack them (sun4/sun4c).  Called from
 * the illegal-instruction trap with `pc' addressing the faulting user
 * instruction.
 *
 * Returns 0 when the instruction was emulated and no SIGILL should be
 * issued, -1 when it is not one of ours or a user-memory access
 * faulted (note fetch_reg() can also return -1 from inside this
 * function; it is a macro).
 */
int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
{
	unsigned int insn;
	int inst;
	unsigned int rs1, rs2, rdv;

	if (!pc)
		return -1; /* This happens too often, I think */
	if (get_user (insn, (unsigned int __user *)pc))
		return -1;
	/* Must be a format-3 arithmetic op with the right op3 group. */
	if ((insn & 0xc1400000) != 0x80400000)
		return -1;
	inst = ((insn >> 19) & 0xf);
	/* Accept only op3 low nibble 10/11 (umul/smul) or 14/15
	 * (udiv/sdiv). */
	if ((inst & 0xe) != 10 && (inst & 0xe) != 14)
		return -1;

	/* Now we know we have to do something with umul, smul, udiv or sdiv */
	rs1 = (insn >> 14) & 0x1f;
	rs2 = insn & 0x1f;
	rdv = (insn >> 25) & 0x1f;
	if (has_imm13(insn)) {
		/* Immediate form: second operand is the signed 13-bit
		 * constant embedded in the instruction. */
		maybe_flush_windows(rs1, 0, rdv);
		rs2 = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(rs1, rs2, rdv);
		rs2 = fetch_reg(rs2, regs);
	}
	rs1 = fetch_reg(rs1, regs);
	switch (inst) {
	case 10: /* umul */
#ifdef DEBUG_MULDIV
		printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2);
#endif
		/* libgcc-style .umul: operands in %o0/%o1; afterwards
		 * %o0 (-> rs1) holds the low word of the 64-bit product
		 * and %o1 (-> rs2) the high word. */
		__asm__ __volatile__ ("\n\t"
			"mov %0, %%o0\n\t"
			"call .umul\n\t"
			" mov %1, %%o1\n\t"
			"mov %%o0, %0\n\t"
			"mov %%o1, %1\n\t"
			: "=r" (rs1), "=r" (rs2)
		        : "0" (rs1), "1" (rs2)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
		printk ("0x%x%08x\n", rs2, rs1);
#endif
		/* Low word goes to rd, high word to the %y register,
		 * matching the hardware umul semantics. */
		if (store_reg(rs1, rdv, regs))
			return -1;
		regs->y = rs2;
		break;
	case 11: /* smul */
#ifdef DEBUG_MULDIV
		printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2);
#endif
		__asm__ __volatile__ ("\n\t"
			"mov %0, %%o0\n\t"
			"call .mul\n\t"
			" mov %1, %%o1\n\t"
			"mov %%o0, %0\n\t"
			"mov %%o1, %1\n\t"
			: "=r" (rs1), "=r" (rs2)
		        : "0" (rs1), "1" (rs2)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
#ifdef DEBUG_MULDIV
		printk ("0x%x%08x\n", rs2, rs1);
#endif
		if (store_reg(rs1, rdv, regs))
			return -1;
		regs->y = rs2;
		break;
	case 14: /* udiv */
#ifdef DEBUG_MULDIV
		printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
#endif
		if (!rs2) {
#ifdef DEBUG_MULDIV
			printk ("DIVISION BY ZERO\n");
#endif
			/* Deliver the same trap a real divider would. */
			handle_hw_divzero (regs, pc, regs->npc, regs->psr);
			return 0;
		}
		/* 64/64 divide via __udivdi3: dividend %y:rs1 in
		 * %o0:%o1, divisor 0:rs2 in %o2:%o3.  Quotient comes
		 * back in %o0:%o1; low word (%o1) is the result,
		 * high word (%o0) is kept in rs2 for the overflow
		 * check below. */
		__asm__ __volatile__ ("\n\t"
			"mov %2, %%o0\n\t"
			"mov %0, %%o1\n\t"
			"mov %%g0, %%o2\n\t"
			"call __udivdi3\n\t"
			" mov %1, %%o3\n\t"
			"mov %%o1, %0\n\t"
			"mov %%o0, %1\n\t"
			: "=r" (rs1), "=r" (rs2)
			: "r" (regs->y), "0" (rs1), "1" (rs2)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
			  "g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
		printk ("0x%x\n", rs1);
#endif
		if (store_reg(rs1, rdv, regs))
			return -1;
		break;
	case 15: /* sdiv */
#ifdef DEBUG_MULDIV
		printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
#endif
		if (!rs2) {
#ifdef DEBUG_MULDIV
			printk ("DIVISION BY ZERO\n");
#endif
			handle_hw_divzero (regs, pc, regs->npc, regs->psr);
			return 0;
		}
		__asm__ __volatile__ ("\n\t"
			"mov %2, %%o0\n\t"
			"mov %0, %%o1\n\t"
			"mov %%g0, %%o2\n\t"
			"call __divdi3\n\t"
			" mov %1, %%o3\n\t"
			"mov %%o1, %0\n\t"
			"mov %%o0, %1\n\t"
			: "=r" (rs1), "=r" (rs2)
			: "r" (regs->y), "0" (rs1), "1" (rs2)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
			  "g1", "g2", "g3", "cc");
#ifdef DEBUG_MULDIV
		printk ("0x%x\n", rs1);
#endif
		if (store_reg(rs1, rdv, regs))
			return -1;
		break;
	}
	/* For the cc-setting variants, recompute the integer condition
	 * codes from the 32-bit result in rs1; for divides, a non-zero
	 * high quotient word (rs2) means the result overflowed 32 bits,
	 * so set V. */
	if (is_foocc (insn)) {
		regs->psr &= ~PSR_ICC;
		if ((inst & 0xe) == 14) {
			/* ?div */
			if (rs2) regs->psr |= PSR_V;
		}
		if (!rs1) regs->psr |= PSR_Z;
		if (((int)rs1) < 0) regs->psr |= PSR_N;
#ifdef DEBUG_MULDIV
		printk ("psr muldiv: %08x\n", regs->psr);
#endif
	}
	advance(regs);
	return 0;
}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
new file mode 100644
index 000000000000..597d3ff6ad68
--- /dev/null
+++ b/arch/sparc/kernel/pcic.c
@@ -0,0 +1,1041 @@
1/*
2 * pcic.c: MicroSPARC-IIep PCI controller support
3 *
4 * Copyright (C) 1998 V. Roganov and G. Raiko
5 *
6 * Code is derived from Ultra/PCI PSYCHO controller support, see that
7 * for author info.
8 *
9 * Support for diverse IIep based platforms by Pete Zaitcev.
10 * CP-1200 by Eric Brower.
11 */
12
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/jiffies.h>
20
21#include <asm/ebus.h>
22#include <asm/sbus.h> /* for sanity check... */
23#include <asm/swift.h> /* for cache flushing. */
24#include <asm/io.h>
25
26#include <linux/ctype.h>
27#include <linux/pci.h>
28#include <linux/time.h>
29#include <linux/timex.h>
30#include <linux/interrupt.h>
31
32#include <asm/irq.h>
33#include <asm/oplib.h>
34#include <asm/pcic.h>
35#include <asm/timer.h>
36#include <asm/uaccess.h>
37
38
39unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
40
41/*
42 * I studied different documents and many live PROMs both from 2.30
43 * family and 3.xx versions. I came to the amazing conclusion: there is
44 * absolutely no way to route interrupts in IIep systems relying on
45 * information which PROM presents. We must hardcode interrupt routing
46 * schematics. And this actually sucks. -- zaitcev 1999/05/12
47 *
48 * To find irq for a device we determine which routing map
49 * is in effect or, in other words, on which machine we are running.
50 * We use PROM name for this although other techniques may be used
51 * in special cases (Gleb reports a PROMless IIep based system).
52 * Once we know the map we take device configuration address and
53 * find PCIC pin number where INT line goes. Then we may either program
54 * preferred irq into the PCIC or supply the preexisting irq to the device.
55 */
/* One entry of a configuration-address -> PCIC-pin/IRQ routing map. */
struct pcic_ca2irq {
	unsigned char busno;		/* PCI bus number */
	unsigned char devfn;		/* Configuration address */
	unsigned char pin;		/* PCIC external interrupt pin */
	unsigned char irq;		/* Preferred IRQ (mappable in PCIC) */
	unsigned int force;		/* Enforce preferred IRQ */
};
63
/* Associates a PROM system name with its interrupt routing table. */
struct pcic_sn2list {
	char *sysname;			/* root-node "name" to match */
	struct pcic_ca2irq *intmap;	/* routing map for that machine */
	int mapdim;			/* number of entries in intmap */
};
69
70/*
71 * JavaEngine-1 apparently has different versions.
72 *
73 * According to communications with Sun folks, for P2 build 501-4628-03:
74 * pin 0 - parallel, audio;
75 * pin 1 - Ethernet;
76 * pin 2 - su;
77 * pin 3 - PS/2 kbd and mouse.
78 *
79 * OEM manual (805-1486):
80 * pin 0: Ethernet
81 * pin 1: All EBus
82 * pin 2: IGA (unused)
83 * pin 3: Not connected
84 * OEM manual says that 501-4628 & 501-4811 are the same thing,
85 * only the latter has NAND flash in place.
86 *
87 * So far unofficial Sun wins over the OEM manual. Poor OEMs...
88 */
/* JavaEngine-1 routing map (see the version discussion above). */
static struct pcic_ca2irq pcic_i_je1a[] = {	/* 501-4811-03 */
	{ 0, 0x00, 2, 12, 0 },		/* EBus: hogs all */
	{ 0, 0x01, 1, 6, 1 },		/* Happy Meal */
	{ 0, 0x80, 0, 7, 0 },		/* IGA (unused) */
};
94
/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */
/* JavaStation-E routing map. */
static struct pcic_ca2irq pcic_i_jse[] = {
	{ 0, 0x00, 0, 13, 0 },		/* Ebus - serial and keyboard */
	{ 0, 0x01, 1, 6, 0 },		/* hme */
	{ 0, 0x08, 2, 9, 0 },		/* VGA - we hope not used :) */
	{ 0, 0x10, 6, 8, 0 },		/* PCI INTA# in Slot 1 */
	{ 0, 0x18, 7, 12, 0 },		/* PCI INTA# in Slot 2, shared w. RTC */
	{ 0, 0x38, 4, 9, 0 },		/* All ISA devices. Read 8259. */
	{ 0, 0x80, 5, 11, 0 },		/* EIDE */
	/* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */
	{ 0, 0xA0, 4, 9, 0 },		/* USB */
	/*
	 * Some pins belong to non-PCI devices, we hardcode them in drivers.
	 * sun4m timers - irq 10, 14
	 * PC style RTC - pin 7, irq 4 ?
	 * Smart card, Parallel - pin 4 shared with USB, ISA
	 * audio - pin 3, irq 5 ?
	 */
};
114
/* SPARCengine-6 was the original release name of CP1200.
 * The documentation differs between the two versions
 */
static struct pcic_ca2irq pcic_i_se6[] = {
	{ 0, 0x08, 0, 2, 0 },		/* SCSI */
	{ 0, 0x01, 1, 6, 0 },		/* HME */
	{ 0, 0x00, 3, 13, 0 },		/* EBus */
};
123
/*
 * Krups (courtesy of Varol Kaptan)
 * No documentation available, but it was easy to guess
 * because it was very similar to Espresso.
 *
 * pin 0 - kbd, mouse, serial;
 * pin 1 - Ethernet;
 * pin 2 - igs (we do not use it);
 * pin 3 - audio;
 * pin 4,5,6 - unused;
 * pin 7 - RTC (from P2 onwards as David B. says).
 */
static struct pcic_ca2irq pcic_i_jk[] = {
	{ 0, 0x00, 0, 13, 0 },		/* Ebus - serial and keyboard */
	{ 0, 0x01, 1, 6, 0 },		/* hme */
};
140
/*
 * Several entries in this list may point to the same routing map
 * as several PROMs may be installed on the same physical board.
 *
 * The element count is derived from the array itself
 * (sizeof(map)/sizeof(map[0])) rather than from a hard-coded element
 * type, so the macro cannot silently go stale if the entry type
 * changes.
 */
#define SN2L_INIT(name, map)	\
  { name, map, sizeof(map)/sizeof(map[0]) }
147
/* Master table: PROM root-node name -> routing map.  Scanned linearly
 * by pcic_probe(); the all-NULL entry terminates the search. */
static struct pcic_sn2list pcic_known_sysnames[] = {
	SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a),	/* JE1, PROM 2.32 */
	SN2L_INIT("SUNW,JS-E", pcic_i_jse),	/* PROLL JavaStation-E */
	SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */
	SN2L_INIT("SUNW,JS-NC", pcic_i_jk),	/* PROLL JavaStation-NC */
	SN2L_INIT("SUNW,JSIIep", pcic_i_jk),	/* OBP JavaStation-NC */
	{ NULL, NULL, 0 }
};
156
/*
 * Only one PCIC per IIep,
 * and since we have no SMP IIep, only one per system.
 */
static int pcic0_up;		/* non-zero once pcic_probe() succeeded */
static struct linux_pcic pcic0;	/* the single controller instance */

void * __iomem pcic_regs;	/* mapped register bank, shared with trap glue */
volatile int pcic_speculative;	/* non-zero while a config read may trap */
volatile int pcic_trapped;	/* set by pcic_nmi() when such a read trapped */

static void pci_do_gettimeofday(struct timeval *tv);
static int pci_do_settimeofday(struct timespec *tv);
170
/*
 * Build a PCI type-1 configuration cycle address: enable bit 31, bus
 * in bits 23:16, devfn in bits 15:8, dword-aligned register offset in
 * bits 7:2.  All macro arguments are fully parenthesized; the old
 * expansion applied the cast and the "& ~3" mask with surprising
 * precedence when given expression arguments (e.g. "a|b" for where).
 */
#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)(bus)) << 16) | (((unsigned int)(device_fn)) << 8) | (((unsigned int)(where)) & ~3))
172
/* Read one aligned dword from PCI config space.
 *
 * A read of a non-existent device makes the PCIC raise an NMI.  The
 * pcic_speculative/pcic_trapped flags form a protocol with pcic_nmi():
 * while pcic_speculative is set, the NMI handler records the trap in
 * pcic_trapped and skips the faulting instruction instead of hanging,
 * and we then report ~0 (the conventional "no device" value). */
static int pcic_read_config_dword(unsigned int busno, unsigned int devfn,
    int where, u32 *value)
{
	struct linux_pcic *pcic;
	unsigned long flags;

	pcic = &pcic0;

	local_irq_save(flags);
#if 0 /* does not fail here */
	pcic_speculative = 1;
	pcic_trapped = 0;
#endif
	writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
#if 0 /* does not fail here */
	nop();
	if (pcic_trapped) {
		local_irq_restore(flags);
		*value = ~0;
		return 0;
	}
#endif
	pcic_speculative = 2;
	pcic_trapped = 0;
	/* Address and data ports must agree in their low bits, hence
	 * the (where & 4) offset into the doubled data mapping. */
	*value = readl(pcic->pcic_config_space_data + (where&4));
	nop();
	if (pcic_trapped) {
		pcic_speculative = 0;
		local_irq_restore(flags);
		*value = ~0;
		return 0;
	}
	pcic_speculative = 0;
	local_irq_restore(flags);
	return 0;
}
209
210static int pcic_read_config(struct pci_bus *bus, unsigned int devfn,
211 int where, int size, u32 *val)
212{
213 unsigned int v;
214
215 if (bus->number != 0) return -EINVAL;
216 switch (size) {
217 case 1:
218 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
219 *val = 0xff & (v >> (8*(where & 3)));
220 return 0;
221 case 2:
222 if (where&1) return -EINVAL;
223 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
224 *val = 0xffff & (v >> (8*(where & 3)));
225 return 0;
226 case 4:
227 if (where&3) return -EINVAL;
228 pcic_read_config_dword(bus->number, devfn, where&~3, val);
229 return 0;
230 }
231 return -EINVAL;
232}
233
/* Write one aligned dword to PCI config space: latch the type-1
 * address, then store through the matching data port.  Unlike reads,
 * writes to absent devices are simply posted, so no speculative-trap
 * dance is needed here. */
static int pcic_write_config_dword(unsigned int busno, unsigned int devfn,
    int where, u32 value)
{
	struct linux_pcic *pcic;
	unsigned long flags;

	pcic = &pcic0;

	local_irq_save(flags);
	writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
	writel(value, pcic->pcic_config_space_data + (where&4));
	local_irq_restore(flags);
	return 0;
}
248
249static int pcic_write_config(struct pci_bus *bus, unsigned int devfn,
250 int where, int size, u32 val)
251{
252 unsigned int v;
253
254 if (bus->number != 0) return -EINVAL;
255 switch (size) {
256 case 1:
257 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
258 v = (v & ~(0xff << (8*(where&3)))) |
259 ((0xff&val) << (8*(where&3)));
260 return pcic_write_config_dword(bus->number, devfn, where&~3, v);
261 case 2:
262 if (where&1) return -EINVAL;
263 pcic_read_config_dword(bus->number, devfn, where&~3, &v);
264 v = (v & ~(0xffff << (8*(where&3)))) |
265 ((0xffff&val) << (8*(where&3)));
266 return pcic_write_config_dword(bus->number, devfn, where&~3, v);
267 case 4:
268 if (where&3) return -EINVAL;
269 return pcic_write_config_dword(bus->number, devfn, where, val);
270 }
271 return -EINVAL;
272}
273
/* Config-space accessors handed to the generic PCI layer. */
static struct pci_ops pcic_ops = {
	.read =		pcic_read_config,
	.write =	pcic_write_config,
};
278
/*
 * On sparc64 pcibios_init() calls pci_controller_probe().
 * We want PCIC probed little ahead so that interrupt controller
 * would be operational.
 *
 * Finds the "pci" PROM node, maps the PCIC register set, the I/O
 * window and both config-space ports, patches the NMI trap-table
 * entry, and selects the interrupt routing map for this machine.
 * Returns 0 on success, -ENODEV when no PCIC node exists; fatal
 * mapping failures halt into the PROM.
 */
int __init pcic_probe(void)
{
	struct linux_pcic *pcic;
	struct linux_prom_registers regs[PROMREG_MAX];
	struct linux_pbm_info* pbm;
	char namebuf[64];
	int node;
	int err;

	if (pcic0_up) {
		prom_printf("PCIC: called twice!\n");
		prom_halt();
	}
	pcic = &pcic0;

	node = prom_getchild (prom_root_node);
	node = prom_searchsiblings (node, "pci");
	if (node == 0)
		return -ENODEV;
	/*
	 * Map in PCIC register set, config space, and IO base
	 */
	err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs));
	if (err == 0 || err == -1) {
		prom_printf("PCIC: Error, cannot get PCIC registers "
			    "from PROM.\n");
		prom_halt();
	}

	pcic0_up = 1;

	/* regs[0]: controller registers; regs[1]: 64K I/O window;
	 * regs[2]/regs[3]: config address/data ports (mapped doubled,
	 * see the comment below). */
	pcic->pcic_res_regs.name = "pcic_registers";
	pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size);
	if (!pcic->pcic_regs) {
		prom_printf("PCIC: Error, cannot map PCIC registers.\n");
		prom_halt();
	}

	pcic->pcic_res_io.name = "pcic_io";
	if ((pcic->pcic_io = (unsigned long)
	    ioremap(regs[1].phys_addr, 0x10000)) == 0) {
		prom_printf("PCIC: Error, cannot map PCIC IO Base.\n");
		prom_halt();
	}

	pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
	if ((pcic->pcic_config_space_addr =
	    ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
		prom_printf("PCIC: Error, cannot map"
			    "PCI Configuration Space Address.\n");
		prom_halt();
	}

	/*
	 * Docs say three least significant bits in address and data
	 * must be the same. Thus, we need adjust size of data.
	 */
	pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
	if ((pcic->pcic_config_space_data =
	    ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
		prom_printf("PCIC: Error, cannot map"
			    "PCI Configuration Space Data.\n");
		prom_halt();
	}

	pbm = &pcic->pbm;
	pbm->prom_node = node;
	prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
	strcpy(pbm->prom_name, namebuf);

	{
		/* Patch four instruction words over the NMI trap-table
		 * entry so PCIC NMIs reach pcic_nmi().  The [1] array
		 * bounds are a fiction: these externs are really labels
		 * into the trap table / trap code. */
		extern volatile int t_nmi[1];
		extern int pcic_nmi_trap_patch[1];

		t_nmi[0] = pcic_nmi_trap_patch[0];
		t_nmi[1] = pcic_nmi_trap_patch[1];
		t_nmi[2] = pcic_nmi_trap_patch[2];
		t_nmi[3] = pcic_nmi_trap_patch[3];
		swift_flush_dcache();
		pcic_regs = pcic->pcic_regs;
	}

	/* Pick the hardcoded interrupt routing map by PROM system name
	 * (see the big comment near the top of this file). */
	prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0;
	{
		struct pcic_sn2list *p;

		for (p = pcic_known_sysnames; p->sysname != NULL; p++) {
			if (strcmp(namebuf, p->sysname) == 0)
				break;
		}
		pcic->pcic_imap = p->intmap;
		pcic->pcic_imdim = p->mapdim;
	}
	if (pcic->pcic_imap == NULL) {
		/*
		 * We do not panic here for the sake of embedded systems.
		 */
		printk("PCIC: System %s is unknown, cannot route interrupts\n",
		    namebuf);
	}

	return 0;
}
387
/* Enumerate bus 0 through the generic PCI core using our config-space
 * accessors.  The #if 0 body is a leftover sparc64 fixup sequence kept
 * for reference only. */
static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
{
	struct linux_pbm_info *pbm = &pcic->pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
#if 0 /* deadwood transplanted from sparc64 */
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
#endif
}
400
/*
 * Main entry point from the PCI subsystem.
 *
 * Does nothing unless pcic_probe() already found a controller.
 * Disables the IOTLB, widens the DMA window, scans the bus and then
 * initializes EBus (which hangs off this PCI).
 */
static int __init pcic_init(void)
{
	struct linux_pcic *pcic;

	/*
	 * PCIC should be initialized at start of the timer.
	 * So, here we report the presence of PCIC and do some magic passes.
	 */
	if(!pcic0_up)
		return 0;
	pcic = &pcic0;

	/*
	 *	Switch off IOTLB translation.
	 */
	writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE,
	       pcic->pcic_regs+PCI_DVMA_CONTROL);

	/*
	 *	Increase mapped size for PCI memory space (DMA access).
	 *	Should be done in that order (size first, address second).
	 *	Why we couldn't set up 4GB and forget about it? XXX
	 */
	writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0);
	writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY,
	       pcic->pcic_regs+PCI_BASE_ADDRESS_0);

	pcic_pbm_scan_bus(pcic);

	ebus_init();
	return 0;
}
436
/* Non-zero iff pcic_probe() found and mapped a PCIC on this machine. */
int pcic_present(void)
{
	return pcic0_up;
}
441
442static int __init pdev_to_pnode(struct linux_pbm_info *pbm,
443 struct pci_dev *pdev)
444{
445 struct linux_prom_pci_registers regs[PROMREG_MAX];
446 int err;
447 int node = prom_getchild(pbm->prom_node);
448
449 while(node) {
450 err = prom_getproperty(node, "reg",
451 (char *)&regs[0], sizeof(regs));
452 if(err != 0 && err != -1) {
453 unsigned long devfn = (regs[0].which_io >> 8) & 0xff;
454 if(devfn == pdev->devfn)
455 return node;
456 }
457 node = prom_getsibling(node);
458 }
459 return 0;
460}
461
/* Allocate a per-device cookie linking a pci_dev to its PROM node.
 * GFP_ATOMIC because this runs from the bus-fixup path; may return
 * NULL -- callers must check. */
static inline struct pcidev_cookie *pci_devcookie_alloc(void)
{
	return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC);
}
466
467static void pcic_map_pci_device(struct linux_pcic *pcic,
468 struct pci_dev *dev, int node)
469{
470 char namebuf[64];
471 unsigned long address;
472 unsigned long flags;
473 int j;
474
475 if (node == 0 || node == -1) {
476 strcpy(namebuf, "???");
477 } else {
478 prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
479 }
480
481 for (j = 0; j < 6; j++) {
482 address = dev->resource[j].start;
483 if (address == 0) break; /* are sequential */
484 flags = dev->resource[j].flags;
485 if ((flags & IORESOURCE_IO) != 0) {
486 if (address < 0x10000) {
487 /*
488 * A device responds to I/O cycles on PCI.
489 * We generate these cycles with memory
490 * access into the fixed map (phys 0x30000000).
491 *
492 * Since a device driver does not want to
493 * do ioremap() before accessing PC-style I/O,
494 * we supply virtual, ready to access address.
495 *
496 * Ebus devices do not come here even if
497 * CheerIO makes a similar conversion.
498 * See ebus.c for details.
499 *
500 * Note that check_region()/request_region()
501 * work for these devices.
502 *
503 * XXX Neat trick, but it's a *bad* idea
504 * to shit into regions like that.
505 * What if we want to allocate one more
506 * PCI base address...
507 */
508 dev->resource[j].start =
509 pcic->pcic_io + address;
510 dev->resource[j].end = 1; /* XXX */
511 dev->resource[j].flags =
512 (flags & ~IORESOURCE_IO) | IORESOURCE_MEM;
513 } else {
514 /*
515 * OOPS... PCI Spec allows this. Sun does
516 * not have any devices getting above 64K
517 * so it must be user with a weird I/O
518 * board in a PCI slot. We must remap it
519 * under 64K but it is not done yet. XXX
520 */
521 printk("PCIC: Skipping I/O space at 0x%lx,"
522 "this will Oops if a driver attaches;"
523 "device '%s' at %02x:%02x)\n", address,
524 namebuf, dev->bus->number, dev->devfn);
525 }
526 }
527 }
528}
529
530static void
531pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
532{
533 struct pcic_ca2irq *p;
534 int i, ivec;
535 char namebuf[64];
536
537 if (node == 0 || node == -1) {
538 strcpy(namebuf, "???");
539 } else {
540 prom_getstring(node, "name", namebuf, sizeof(namebuf));
541 }
542
543 if ((p = pcic->pcic_imap) == 0) {
544 dev->irq = 0;
545 return;
546 }
547 for (i = 0; i < pcic->pcic_imdim; i++) {
548 if (p->busno == dev->bus->number && p->devfn == dev->devfn)
549 break;
550 p++;
551 }
552 if (i >= pcic->pcic_imdim) {
553 printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
554 namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
555 dev->irq = 0;
556 return;
557 }
558
559 i = p->pin;
560 if (i >= 0 && i < 4) {
561 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
562 dev->irq = ivec >> (i << 2) & 0xF;
563 } else if (i >= 4 && i < 8) {
564 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
565 dev->irq = ivec >> ((i-4) << 2) & 0xF;
566 } else { /* Corrupted map */
567 printk("PCIC: BAD PIN %d\n", i); for (;;) {}
568 }
569/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
570
571 /*
572 * dev->irq=0 means PROM did not bother to program the upper
573 * half of PCIC. This happens on JS-E with PROM 3.11, for instance.
574 */
575 if (dev->irq == 0 || p->force) {
576 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
577 printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
578 }
579 printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
580 p->irq, p->pin, dev->bus->number, dev->devfn);
581 dev->irq = p->irq;
582
583 i = p->pin;
584 if (i >= 4) {
585 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
586 ivec &= ~(0xF << ((i - 4) << 2));
587 ivec |= p->irq << ((i - 4) << 2);
588 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI);
589 } else {
590 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
591 ivec &= ~(0xF << (i << 2));
592 ivec |= p->irq << (i << 2);
593 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
594 }
595 }
596
597 return;
598}
599
600/*
601 * Normally called from {do_}pci_scan_bus...
602 */
603void __init pcibios_fixup_bus(struct pci_bus *bus)
604{
605 struct pci_dev *dev;
606 int i, has_io, has_mem;
607 unsigned int cmd;
608 struct linux_pcic *pcic;
609 /* struct linux_pbm_info* pbm = &pcic->pbm; */
610 int node;
611 struct pcidev_cookie *pcp;
612
613 if (!pcic0_up) {
614 printk("pcibios_fixup_bus: no PCIC\n");
615 return;
616 }
617 pcic = &pcic0;
618
619 /*
620 * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
621 */
622 if (bus->number != 0) {
623 printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
624 return;
625 }
626
627 list_for_each_entry(dev, &bus->devices, bus_list) {
628
629 /*
630 * Comment from i386 branch:
631 * There are buggy BIOSes that forget to enable I/O and memory
632 * access to PCI devices. We try to fix this, but we need to
633 * be sure that the BIOS didn't forget to assign an address
634 * to the device. [mj]
635 * OBP is a case of such BIOS :-)
636 */
637 has_io = has_mem = 0;
638 for(i=0; i<6; i++) {
639 unsigned long f = dev->resource[i].flags;
640 if (f & IORESOURCE_IO) {
641 has_io = 1;
642 } else if (f & IORESOURCE_MEM)
643 has_mem = 1;
644 }
645 pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
646 if (has_io && !(cmd & PCI_COMMAND_IO)) {
647 printk("PCIC: Enabling I/O for device %02x:%02x\n",
648 dev->bus->number, dev->devfn);
649 cmd |= PCI_COMMAND_IO;
650 pcic_write_config(dev->bus, dev->devfn,
651 PCI_COMMAND, 2, cmd);
652 }
653 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
654 printk("PCIC: Enabling memory for device %02x:%02x\n",
655 dev->bus->number, dev->devfn);
656 cmd |= PCI_COMMAND_MEMORY;
657 pcic_write_config(dev->bus, dev->devfn,
658 PCI_COMMAND, 2, cmd);
659 }
660
661 node = pdev_to_pnode(&pcic->pbm, dev);
662 if(node == 0)
663 node = -1;
664
665 /* cookies */
666 pcp = pci_devcookie_alloc();
667 pcp->pbm = &pcic->pbm;
668 pcp->prom_node = node;
669 dev->sysdata = pcp;
670
671 /* fixing I/O to look like memory */
672 if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE)
673 pcic_map_pci_device(pcic, dev, node);
674
675 pcic_fill_irq(pcic, dev, node);
676 }
677}
678
/*
 * pcic_pin_to_irq() is exported to ebus.c.
 *
 * Read back the IRQ currently programmed for PCIC external pin `pin':
 * pins 0-3 come from the LO select register, 4-7 from HI, four bits
 * per pin.  A pin >= 8 means a corrupted routing map; we hang rather
 * than continue with garbage (panic is unreliable under PROLL).
 */
unsigned int
pcic_pin_to_irq(unsigned int pin, char *name)
{
	struct linux_pcic *pcic = &pcic0;
	unsigned int irq;
	unsigned int ivec;

	if (pin < 4) {
		ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
		irq = ivec >> (pin << 2) & 0xF;
	} else if (pin < 8) {
		ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
		irq = ivec >> ((pin-4) << 2) & 0xF;
	} else {					/* Corrupted map */
		printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
		for (;;) {}	/* XXX Cannot panic properly in case of PROLL */
	}
/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
	return irq;
}
702
/* Makes compiler happy */
static volatile int pcic_timer_dummy;

/* Acknowledge the timer interrupt: reading the limit register clears
 * the pending state; the dummy sink keeps the read from being
 * optimized away. */
static void pcic_clear_clock_irq(void)
{
	pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT);
}
710
/* Timer tick handler: ack the PCIC counter interrupt and drive the
 * generic timekeeping under xtime_lock. */
static irqreturn_t pcic_timer_handler (int irq, void *h, struct pt_regs *regs)
{
	write_seqlock(&xtime_lock);	/* Dummy, to show that we remember */
	pcic_clear_clock_irq();
	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
	write_sequnlock(&xtime_lock);
	return IRQ_HANDLED;
}
722
#define USECS_PER_JIFFY  10000  /* We have 100HZ "standard" timer for sparc */
#define TICK_TIMER_LIMIT ((100*1000000/4)/100)

/* Set up the PCIC system counter as the tick source and register the
 * timer interrupt.  Also redirects do_gettimeofday() to our backend
 * by literally patching a branch over its first instructions (see
 * comment below). */
void __init pci_time_init(void)
{
	struct linux_pcic *pcic = &pcic0;
	unsigned long v;
	int timer_irq, irq;

	/* A hack until do_gettimeofday prototype is moved to arch specific headers
	   and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */
	/* 0x10800000 is "ba" with a 22-bit word displacement;
	 * 0x01000000 is "nop" filling the delay slot. */
	((unsigned int *)do_gettimeofday)[0] =
	    0x10800000 | ((((unsigned long)pci_do_gettimeofday -
	     (unsigned long)do_gettimeofday) >> 2) & 0x003fffff);
	((unsigned int *)do_gettimeofday)[1] = 0x01000000;
	BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM);
	btfixup();

	writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
	/* PROM should set appropriate irq */
	v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ);
	timer_irq = PCI_COUNTER_IRQ_SYS(v);
	writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
		pcic->pcic_regs+PCI_COUNTER_IRQ);
	/* irq holds the request_irq() error code, not an irq number */
	irq = request_irq(timer_irq, pcic_timer_handler,
			  (SA_INTERRUPT | SA_STATIC_ALLOC), "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
		prom_halt();
	}
	local_irq_enable();
}
755
/* Microseconds elapsed within the current tick, derived from the PCIC
 * system counter. */
static __inline__ unsigned long do_gettimeoffset(void)
{
	/*
	 * We divide everything by 100
	 * to have microsecond resolution and to avoid overflow
	 */
	unsigned long count =
	    readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
	count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
	return count;
}
767
768extern unsigned long wall_jiffies;
769
/* gettimeofday() backend for PCIC machines: combine xtime with the
 * sub-tick offset from the system counter, compensating for lost
 * ticks and NTP slew, under the xtime seqlock.  Installed over
 * do_gettimeofday() by the branch patch in pci_time_init(). */
static void pci_do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;
	unsigned long max_ntp_tick = tick_usec - tickadj;

	do {
		unsigned long lost;

		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = do_gettimeoffset();
		lost = jiffies - wall_jiffies;

		/*
		 * If time_adjust is negative then NTP is slowing the clock
		 * so make sure not to go into next possible interval.
		 * Better to lose some accuracy than have time go backwards..
		 */
		if (unlikely(time_adjust < 0)) {
			usec = min(usec, max_ntp_tick);

			if (lost)
				usec += lost * max_ntp_tick;
		}
		else if (unlikely(lost))
			usec += lost * tick_usec;

		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Normalize: carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
810
/* settimeofday() backend: validate the nanosecond field, undo the
 * sub-tick correction gettimeofday() would apply, keep the
 * monotonic-clock offset consistent, then install the new wall time
 * and reset NTP state.  Caller holds the appropriate time locks. */
static int pci_do_settimeofday(struct timespec *tv)
{
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	tv->tv_nsec -= 1000 * (do_gettimeoffset() +
				(jiffies - wall_jiffies) * (USEC_PER_SEC / HZ));
	while (tv->tv_nsec < 0) {
		tv->tv_nsec += NSEC_PER_SEC;
		tv->tv_sec--;
	}

	wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec;
	wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec;

	if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) {
		wall_to_monotonic.tv_nsec -= NSEC_PER_SEC;
		wall_to_monotonic.tv_sec++;
	}
	if (wall_to_monotonic.tv_nsec < 0) {
		wall_to_monotonic.tv_nsec += NSEC_PER_SEC;
		wall_to_monotonic.tv_sec--;
	}

	xtime.tv_sec = tv->tv_sec;
	xtime.tv_nsec = tv->tv_nsec;
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	return 0;
}
849
#if 0
/* Poke the watchdog by writing the system status register.
 * NOTE(review): compiled out; if ever re-enabled, `pcic' is not
 * declared in this scope (presumably should be &pcic0 / pcic0) and
 * the empty parameter list should be (void). */
static void watchdog_reset() {
	writeb(0, pcic->pcic_regs+PCI_SYS_STATUS);
}
#endif
855
/*
 * Other archs parse arguments here.
 * We accept no "pci=" options: return the string unconsumed so the
 * generic code treats it as unhandled.
 */
char * __init pcibios_setup(char *str)
{
	return str;
}
863
/* Resource-alignment hook required by the PCI core; PCIC needs no
 * extra alignment constraints, so this is a no-op. */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}
868
/* Device-enable hook; decoding was already turned on during bus fixup
 * (see pcibios_fixup_bus), so nothing to do here. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	return 0;
}
873
/*
 * NMI
 *
 * Counterpart of the speculative config-read protocol in
 * pcic_read_config_dword(): if the NMI arrived while a read was
 * flagged speculative, record the trap and skip the faulting
 * instruction; any other NMI is unexpected and we hang with a
 * diagnostic.
 */
void pcic_nmi(unsigned int pend, struct pt_regs *regs)
{

	pend = flip_dword(pend);	/* register is little-endian */

	if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
		/*
		 * XXX On CP-1200 PCI #SERR may happen, we do not know
		 * what to do about it yet.
		 */
		printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n",
		    pend, (int)regs->pc, pcic_speculative);
		for (;;) { }
	}
	pcic_speculative = 0;
	pcic_trapped = 1;
	/* Resume after the instruction that trapped. */
	regs->pc = regs->npc;
	regs->npc += 4;
}
896
/*
 * Map an irq/PIL number to its bit in the PCIC interrupt mask registers.
 * Shift an unsigned long constant so that irq_nr == 31 does not shift
 * into the sign bit of a signed int, which is undefined behavior in C
 * (and sign-extends on 64-bit hosts).
 */
static inline unsigned long get_irqmask(int irq_nr)
{
	return 1UL << irq_nr;
}
901
/*
 * Render an irq number as a decimal string for the __irq_itoa hook.
 * Returns a pointer to a static buffer, so the result is only valid
 * until the next call.
 */
static inline char *pcic_irq_itoa(unsigned int irq)
{
	static char buff[16];

	snprintf(buff, sizeof(buff), "%d", irq);
	return buff;
}
908
909static void pcic_disable_irq(unsigned int irq_nr)
910{
911 unsigned long mask, flags;
912
913 mask = get_irqmask(irq_nr);
914 local_irq_save(flags);
915 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
916 local_irq_restore(flags);
917}
918
919static void pcic_enable_irq(unsigned int irq_nr)
920{
921 unsigned long mask, flags;
922
923 mask = get_irqmask(irq_nr);
924 local_irq_save(flags);
925 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
926 local_irq_restore(flags);
927}
928
/* Profile-timer acknowledge hook: not implemented for PCIC, only logs. */
static void pcic_clear_profile_irq(int cpu)
{
	printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
933
/* Profile-timer (re)load hook: not implemented for PCIC, only logs. */
static void pcic_load_profile_irq(int cpu, unsigned int limit)
{
	printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
938
/* We assume the caller has disabled local interrupts when these are called,
 * or else very bizarre behavior will result.
 */
/* Mask one processor-interrupt-level source (lockless; see note above). */
static void pcic_disable_pil_irq(unsigned int pil)
{
	writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
}
946
/* Unmask one processor-interrupt-level source; caller must have IRQs off. */
static void pcic_enable_pil_irq(unsigned int pil)
{
	writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
}
951
/*
 * Wire the generic sparc32 IRQ entry points to the PCIC implementations
 * through the btfixup patching mechanism.  Called once at boot on sun4m
 * machines that use the PCI controller.
 */
void __init sun4m_pci_init_IRQ(void)
{
	BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, pcic_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__irq_itoa, pcic_irq_itoa, BTFIXUPCALL_NORM);
}
963
/* Dynamic resource assignment is not supported by this port. */
int pcibios_assign_resource(struct pci_dev *pdev, int resource)
{
	return -ENXIO;
}
968
969/*
970 * This probably belongs here rather than ioport.c because
971 * we do not want this crud linked into SBus kernels.
972 * Also, think for a moment about likes of floppy.c that
973 * include architecture specific parts. They may want to redefine ins/outs.
974 *
975 * We do not use horroble macroses here because we want to
976 * advance pointer by sizeof(size).
977 */
/*
 * Write @count bytes from @src to I/O port @addr, one outb() per byte.
 * The port address is fixed; only the source pointer advances.
 */
void outsb(unsigned long addr, const void *src, unsigned long count)
{
	const char *p = src;

	while (count--) {
		outb(*p++, addr);
		/* addr += 1; */
	}
}
987
/*
 * Write @count 16-bit words from @src to I/O port @addr.
 *
 * The portable ins/outs contract counts ELEMENTS, not bytes, so one
 * count is consumed per outw() while the source pointer advances by
 * the element size.  The previous code subtracted the element size
 * from count as well, transferring only half the data and looping
 * until wraparound for odd counts.
 */
void outsw(unsigned long addr, const void *src, unsigned long count)
{
	while (count) {
		count -= 1;
		outw(*(const short *)src, addr);
		src += 2;
		/* addr += 2; */
	}
}
997
/*
 * Write @count 32-bit longwords from @src to I/O port @addr.
 *
 * count is a number of elements (portable ins/outs contract), so it is
 * decremented by one per outl(); the old "count -= 4" transferred only
 * a quarter of the data and never terminated for counts not divisible
 * by four.
 */
void outsl(unsigned long addr, const void *src, unsigned long count)
{
	while (count) {
		count -= 1;
		outl(*(const long *)src, addr);
		src += 4;
		/* addr += 4; */
	}
}
1007
/*
 * Read @count bytes from I/O port @addr into @dst, one inb() per byte.
 * The port address is fixed; only the destination pointer advances.
 */
void insb(unsigned long addr, void *dst, unsigned long count)
{
	unsigned char *p = dst;

	while (count--) {
		*p++ = inb(addr);
		/* addr += 1; */
	}
}
1017
/*
 * Read @count 16-bit words from I/O port @addr into @dst.
 *
 * count is a number of elements (portable ins/outs contract); decrement
 * it by one per inw().  The old "count -= 2" read only half the
 * requested words and never terminated for odd counts.
 */
void insw(unsigned long addr, void *dst, unsigned long count)
{
	while (count) {
		count -= 1;
		*(unsigned short *)dst = inw(addr);
		dst += 2;
		/* addr += 2; */
	}
}
1027
/*
 * Read @count 32-bit longwords from I/O port @addr into @dst.
 *
 * count is a number of elements (portable ins/outs contract); decrement
 * it by one per inl().  The old "count -= 4" read only a quarter of the
 * requested data and never terminated for counts not divisible by four.
 */
void insl(unsigned long addr, void *dst, unsigned long count)
{
	while (count) {
		count -= 1;
		/*
		 * XXX I am sure we are in for an unaligned trap here.
		 */
		*(unsigned long *)dst = inl(addr);
		dst += 4;
		/* addr += 4; */
	}
}
1040
1041subsys_initcall(pcic_init);
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
new file mode 100644
index 000000000000..7eca8871ff47
--- /dev/null
+++ b/arch/sparc/kernel/pmc.c
@@ -0,0 +1,99 @@
1/* pmc - Driver implementation for power management functions
2 * of Power Management Controller (PMC) on SPARCstation-Voyager.
3 *
4 * Copyright (c) 2002 Eric Brower (ebrower@usa.net)
5 */
6
7#include <linux/kernel.h>
8#include <linux/fs.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11#include <linux/miscdevice.h>
12#include <linux/pm.h>
13
14#include <asm/io.h>
15#include <asm/sbus.h>
16#include <asm/oplib.h>
17#include <asm/uaccess.h>
18#include <asm/auxio.h>
19
20/* Debug
21 *
22 * #define PMC_DEBUG_LED
23 * #define PMC_NO_IDLE
24 */
25
26#define PMC_MINOR MISC_DYNAMIC_MINOR
27#define PMC_OBPNAME "SUNW,pmc"
28#define PMC_DEVNAME "pmc"
29
30#define PMC_IDLE_REG 0x00
31#define PMC_IDLE_ON 0x01
32
33volatile static u8 __iomem *regs;
34static int pmc_regsize;
35
36#define pmc_readb(offs) (sbus_readb(regs+offs))
37#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
38
/*
 * CPU idle callback, installed as pm_idle by pmc_probe()
 * (see .../arch/sparc/kernel/process.c for the caller).
 * Sets the IDLE bit in the PMC idle register — presumably this halts
 * the CPU clock until the next interrupt; confirm against PMC docs.
 * With PMC_DEBUG_LED defined, the auxio LED is toggled around the
 * idle period as a visual indicator.
 */
void pmc_swift_idle(void)
{
#ifdef PMC_DEBUG_LED
	set_auxio(0x00, AUXIO_LED);
#endif

	pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);

#ifdef PMC_DEBUG_LED
	set_auxio(AUXIO_LED, 0x00);
#endif
}
55
/* Tear down the PMC register mapping created by pmc_probe(). */
static inline void pmc_free(void)
{
	sbus_iounmap(regs, pmc_regsize);
}
60
/*
 * Locate the SUNW,pmc node on any SBus, map its register window and
 * install the power-management idle handler.
 * Returns 0 on success, -ENODEV when no PMC device exists or its
 * registers cannot be mapped.
 */
static int __init pmc_probe(void)
{
	struct sbus_bus *sbus = NULL;
	struct sbus_dev *sdev = NULL;
	for_each_sbus(sbus) {
		for_each_sbusdev(sdev, sbus) {
			if (!strcmp(sdev->prom_name, PMC_OBPNAME)) {
				goto sbus_done;
			}
		}
	}

sbus_done:
	/* sdev is NULL here iff the scan finished without a match. */
	if (!sdev) {
		return -ENODEV;
	}

	pmc_regsize = sdev->reg_addrs[0].reg_size;
	regs = sbus_ioremap(&sdev->resource[0], 0,
		pmc_regsize, PMC_OBPNAME);
	if (!regs) {
		printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME);
		return -ENODEV;
	}

#ifndef PMC_NO_IDLE
	/* Assign power management IDLE handler */
	pm_idle = pmc_swift_idle;
#endif

	printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
	return 0;
}
94
95/* This driver is not critical to the boot process
96 * and is easiest to ioremap when SBus is already
97 * initialized, so we install ourselves thusly:
98 */
99__initcall(pmc_probe);
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
new file mode 100644
index 000000000000..143fe2f3c1c4
--- /dev/null
+++ b/arch/sparc/kernel/process.c
@@ -0,0 +1,746 @@
1/* $Id: process.c,v 1.161 2002/01/23 11:27:32 davem Exp $
2 * linux/arch/sparc/kernel/process.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 */
7
8/*
9 * This file handles the architecture-dependent parts of process handling..
10 */
11
12#include <stdarg.h>
13
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/kallsyms.h>
19#include <linux/mm.h>
20#include <linux/stddef.h>
21#include <linux/ptrace.h>
22#include <linux/slab.h>
23#include <linux/user.h>
24#include <linux/a.out.h>
25#include <linux/config.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/reboot.h>
29#include <linux/delay.h>
30#include <linux/pm.h>
31#include <linux/init.h>
32
33#include <asm/auxio.h>
34#include <asm/oplib.h>
35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <asm/page.h>
38#include <asm/pgalloc.h>
39#include <asm/pgtable.h>
40#include <asm/delay.h>
41#include <asm/processor.h>
42#include <asm/psr.h>
43#include <asm/elf.h>
44#include <asm/unistd.h>
45
46/*
47 * Power management idle function
48 * Set in pm platform drivers (apc.c and pmc.c)
49 */
50void (*pm_idle)(void);
51
52/*
53 * Power-off handler instantiation for pm.h compliance
54 * This is done via auxio, but could be used as a fallback
55 * handler when auxio is not present-- unused for now...
56 */
57void (*pm_power_off)(void);
58
59/*
60 * sysctl - toggle power-off restriction for serial console
61 * systems in machine_power_off()
62 */
63int scons_pwroff = 1;
64
65extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
66
67struct task_struct *last_task_used_math = NULL;
68struct thread_info *current_set[NR_CPUS];
69
/*
 * default_idle is new in 2.5. XXX Review, currently stolen from sparc64.
 * Fallback idle body: does nothing; cpu_idle() supplies the actual loop.
 */
void default_idle(void)
{
}
76
77#ifndef CONFIG_SMP
78
79#define SUN4C_FAULT_HIGH 100
80
81/*
82 * the idle loop on a Sparc... ;)
83 */
/*
 * Uniprocessor idle loop, entered only by the swapper (pid 0).
 * On sun4c/sun4 hardware it also monitors the kernel fault rate about
 * once per second and grows the sun4c kernel MMU ring when faults
 * exceed SUN4C_FAULT_HIGH per second.  Between rebalances it spins in
 * the installed pm_idle hook (if any) until rescheduling is needed.
 */
void cpu_idle(void)
{
	/* Guard: only the idle task itself may run this loop. */
	if (current->pid != 0)
		goto out;

	/* endless idle loop with no priority at all */
	for (;;) {
		if (ARCH_SUN4C_SUN4) {
			/* Fault-rate bookkeeping persists across iterations. */
			static int count = HZ;
			static unsigned long last_jiffies;
			static unsigned long last_faults;
			static unsigned long fps;
			unsigned long now;
			unsigned long faults;
			unsigned long flags;

			extern unsigned long sun4c_kernel_faults;
			extern void sun4c_grow_kernel_ring(void);

			local_irq_save(flags);
			now = jiffies;
			count -= (now - last_jiffies);
			last_jiffies = now;
			if (count < 0) {
				/* Roughly one second elapsed: update the
				 * exponentially smoothed faults-per-second.
				 */
				count += HZ;
				faults = sun4c_kernel_faults;
				fps = (fps + (faults - last_faults)) >> 1;
				last_faults = faults;
#if 0
				printk("kernel faults / second = %ld\n", fps);
#endif
				if (fps >= SUN4C_FAULT_HIGH) {
					sun4c_grow_kernel_ring();
				}
			}
			local_irq_restore(flags);
		}

		/* Spin in the power-management idle hook until work arrives. */
		while((!need_resched()) && pm_idle) {
			(*pm_idle)();
		}

		schedule();
		check_pgt_cache();
	}
out:
	return;
}
132
133#else
134
/* This is being executed in task 0 'user space'. */
void cpu_idle(void)
{
	/* Spin forever at lowest priority, yielding to the scheduler
	 * whenever there is work and trimming the page-table cache on
	 * the way back.
	 */
	for (;;) {
		if (need_resched()) {
			schedule();
			check_pgt_cache();
		}
		barrier();	/* keep gcc from collapsing the loop */
	}
}
147
148#endif
149
150extern char reboot_command [];
151
152extern void (*prom_palette)(int);
153
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
/*
 * Halt: briefly re-enable interrupts (presumably to let pending work
 * such as console output drain — confirm), restore the PROM palette on
 * framebuffer consoles, then drop into the PROM.  Never returns.
 */
void machine_halt(void)
{
	local_irq_enable();
	mdelay(8);
	local_irq_disable();
	if (!serial_console && prom_palette)
		prom_palette (1);
	prom_halt();
	panic("Halt failed!");	/* unreachable unless prom_halt() returns */
}
165
166EXPORT_SYMBOL(machine_halt);
167
/*
 * Reboot the machine via the PROM.
 * @cmd: optional boot string; if absent, the saved reboot_command is
 * used (with any trailing newline stripped); as a last resort the PROM
 * "reset" word is evaluated.  Never returns.
 */
void machine_restart(char * cmd)
{
	char *p;

	local_irq_enable();
	mdelay(8);
	local_irq_disable();

	/* reboot_command comes from userspace; strip the newline. */
	p = strchr (reboot_command, '\n');
	if (p) *p = 0;
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (cmd)
		prom_reboot(cmd);
	if (*reboot_command)
		prom_reboot(reboot_command);
	prom_feval ("reset");
	panic("Reboot failed!");	/* unreachable in the normal case */
}
187
188EXPORT_SYMBOL(machine_restart);
189
/*
 * Power down via the auxio power register when present; refuses on a
 * serial console unless the scons_pwroff sysctl allows it.  Falls back
 * to machine_halt() (and thus the PROM) in every case.
 */
void machine_power_off(void)
{
#ifdef CONFIG_SUN_AUXIO
	if (auxio_power_register && (!serial_console || scons_pwroff))
		*auxio_power_register |= AUXIO_POWER_OFF;
#endif
	machine_halt();
}
198
199EXPORT_SYMBOL(machine_power_off);
200
201static DEFINE_SPINLOCK(sparc_backtrace_lock);
202
/*
 * Walk and print the chain of saved register windows starting at @fp.
 * Each frame's %i0-%i5 (arguments), %i6 (caller FP, used to follow the
 * chain) and %i7 (return address, symbolized) are printed.  The walk
 * stops at a NULL, unaligned, or non-kernel (< PAGE_OFFSET) frame
 * pointer.  A global lock keeps concurrent CPUs' output from
 * interleaving.
 */
void __show_backtrace(unsigned long fp)
{
	struct reg_window *rw;
	unsigned long flags;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&sparc_backtrace_lock, flags);

	rw = (struct reg_window *)fp;
	while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
	    !(((unsigned long) rw) & 0x7)) {
		printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
		       "FP[%08lx] CALLER[%08lx]: ", cpu,
		       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
		       rw->ins[4], rw->ins[5],
		       rw->ins[6],
		       rw->ins[7]);
		print_symbol("%s\n", rw->ins[7]);
		rw = (struct reg_window *) rw->ins[6];	/* follow caller FP */
	}
	spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
}
225
226#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
227#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
228#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))
229
/*
 * Print a backtrace of the current context.  The eight save/restore
 * pairs force the CPU's live register windows out to the stack so the
 * memory-based walk in __show_backtrace() sees up-to-date frames.
 */
void show_backtrace(void)
{
	unsigned long fp;

	__SAVE; __SAVE; __SAVE; __SAVE;
	__SAVE; __SAVE; __SAVE; __SAVE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;

	__GET_FP(fp);	/* current %i6 after the flush */

	__show_backtrace(fp);
}
243
244#ifdef CONFIG_SMP
/* Cross-call every other CPU to print its backtrace, then print ours. */
void smp_show_backtrace_all_cpus(void)
{
	xc0((smpfunc_t) show_backtrace);
	show_backtrace();
}
250#endif
251
252#if 0
/* Debug helper (compiled out): dump one stack frame — locals, ins,
 * outgoing args, and every word between this frame and its caller's.
 * NOTE(review): the do/while assumes sf->fp > sf + STACKFRAME_SZ;
 * a malformed frame would underflow `size`.
 */
void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
	       "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
	       "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
	       "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	do {
		printk("s%d: %08lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}
280#endif
281
/*
 * Print a full register dump for oops/debug output: PSR/PC/NPC/Y, the
 * global and out registers from the trap frame, and the local and in
 * registers fetched from the register window saved at %sp (u_regs[14]).
 */
void show_regs(struct pt_regs *r)
{
	struct reg_window *rw = (struct reg_window *) r->u_regs[14];

	printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
	       r->psr, r->pc, r->npc, r->y, print_tainted());
	print_symbol("PC: <%s>\n", r->pc);
	printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
	       r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
	printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
	       r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
	print_symbol("RPC: <%s>\n", r->u_regs[15]);

	printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
	       rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
	printk("%%I: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
	       rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}
304
/*
 * The show_stack is an external API which we do not use ourselves.
 * The oops is printed in die_if_kernel.
 *
 * Walk @tsk's kernel stack from @_ksp, printing (and symbolizing) the
 * return address of each saved register window.  The walk is bounded
 * by the task's kernel-stack pages and capped at 16 frames.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp;
	unsigned long task_base;
	struct reg_window *rw;
	int count = 0;

	if (tsk != NULL)
		task_base = (unsigned long) tsk->thread_info;
	else
		task_base = (unsigned long) current_thread_info();

	fp = (unsigned long) _ksp;
	do {
		/* Bogus frame pointer? */
		if (fp < (task_base + sizeof(struct thread_info)) ||
		    fp >= (task_base + (PAGE_SIZE << 1)))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];	/* saved return address */
		printk("[%08lx : ", pc);
		print_symbol("%s ] ", pc);
		fp = rw->ins[6];	/* follow saved frame pointer */
	} while (++count < 16);
	printk("\n");
}
335
/*
 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
 * Return the kernel PC recorded at the task's last context switch.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return tsk->thread_info->kpc;
}
343
/*
 * Free current thread data structures etc..
 *
 * The only per-thread state to reap here is FPU ownership: on UP the
 * owner is tracked in last_task_used_math, on SMP by the per-thread
 * _TIF_USEDFPU flag.  If we own the FPU, dump its state and drop
 * ownership so the next user starts clean.
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
		/* Keep process from leaving FPU in a bogon state. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
	}
}
365
/*
 * Reset per-thread state at exec time: drop saved register windows,
 * disable the old-style signal delivery flag, relinquish FPU ownership
 * (UP: last_task_used_math, SMP: _TIF_USEDFPU), and convert a kernel
 * thread into a normal user task (USER_DS, fixed-up kregs).
 */
void flush_thread(void)
{
	current_thread_info()->w_saved = 0;

	/* No new signal delivery by default */
	current->thread.new_signal = 0;
#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
		/* Clean the fpu. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
	}

	/* Now, this task is no longer a kernel thread. */
	current->thread.current_ds = USER_DS;
	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
		current->thread.flags &= ~SPARC_FLAG_KTHREAD;

		/* We must fixup kregs as well. */
		/* XXX This was not fixed for ti for a while, worked. Unused? */
		current->thread.kregs = (struct pt_regs *)
		    ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ));
	}
}
399
/*
 * Copy the parent's user stack frame(s) to the child's new user stack.
 * @dst: where the child's frame should end up; @src: parent's frame.
 *
 * The copy length is the distance from @src to its saved fp, i.e. one
 * full frame; the copied frame's fp is rewritten to point at @dst so
 * the child's chain stays self-consistent.
 * Returns the child-side frame pointer, or NULL on any user fault.
 */
static __inline__ struct sparc_stackf __user *
clone_stackframe(struct sparc_stackf __user *dst,
		 struct sparc_stackf __user *src)
{
	unsigned long size, fp;
	struct sparc_stackf *tmp;
	struct sparc_stackf __user *sp;

	if (get_user(tmp, &src->fp))
		return NULL;

	fp = (unsigned long) tmp;
	size = (fp - ((unsigned long) src));
	fp = (unsigned long) dst;
	sp = (struct sparc_stackf __user *)(fp - size);

	/* do_fork() grabs the parent semaphore, we must release it
	 * temporarily so we can build the child clone stack frame
	 * without deadlocking.
	 */
	if (__copy_user(sp, src, size))
		sp = NULL;
	else if (put_user(fp, &sp->fp))
		sp = NULL;

	return sp;
}
427
/*
 * Common back-end for the fork/vfork/clone trap entry points.
 * Extracts the parent/child TID pointers from the syscall argument
 * registers (%i2 and %i4) and hands everything to the generic do_fork().
 */
asmlinkage int sparc_do_fork(unsigned long clone_flags,
                             unsigned long stack_start,
                             struct pt_regs *regs,
                             unsigned long stack_size)
{
	unsigned long parent_tid_ptr, child_tid_ptr;

	parent_tid_ptr = regs->u_regs[UREG_I2];
	child_tid_ptr = regs->u_regs[UREG_I4];

	return do_fork(clone_flags, stack_start,
		       regs, stack_size,
		       (int __user *) parent_tid_ptr,
		       (int __user *) child_tid_ptr);
}
443
444/* Copy a Sparc thread. The fork() return value conventions
445 * under SunOS are nothing short of bletcherous:
446 * Parent --> %o0 == childs pid, %o1 == 0
447 * Child --> %o0 == parents pid, %o1 == 1
448 *
449 * NOTE: We have a separate fork kpsr/kwim because
450 * the parent could change these values between
451 * sys_fork invocation and when we reach here
452 * if the parent should sleep while trying to
453 * allocate the task_struct and kernel stack in
454 * do_fork().
455 * XXX See comment above sys_vfork in sparc64. todo.
456 */
457extern void ret_from_fork(void);
458
/*
 * Architecture part of fork/clone: build the child's kernel stack and
 * initial register state.
 * @nr, @unused: ignored here; @clone_flags: CLONE_* bits; @sp: child's
 * user stack pointer; @p: the new task; @regs: parent's trap frame.
 *
 * Returns 0 on success, -EFAULT when a clone()-supplied user stack
 * cannot be set up.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		 unsigned long unused,
		 struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = p->thread_info;
	struct pt_regs *childregs;
	char *new_stack;

#ifndef CONFIG_SMP
	if(last_task_used_math == current) {
#else
	if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
		/* Dump the parent's live FPU state straight into the child's
		 * thread struct so the copy below is consistent.
		 */
		put_psr(get_psr() | PSR_EF);
		fpsave(&p->thread.float_regs[0], &p->thread.fsr,
		       &p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
		current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
	}

	/*
	 *  p->thread_info         new_stack   childregs
	 *  !                      !           !             {if(PSR_PS) }
	 *  V                      V (stk.fr.) V  (pt_regs)  { (stk.fr.) }
	 *  +----- - - - - - ------+===========+============={+==========}+
	 */
	new_stack = (char*)ti + THREAD_SIZE;
	if (regs->psr & PSR_PS)
		new_stack -= STACKFRAME_SZ;	/* extra frame for kernel threads */
	new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

	/*
	 * A new process must start with interrupts closed in 2.5,
	 * because this is how Mingo's scheduler works (see schedule_tail
	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
	 * before we unlock, attempts to re-take the rq->lock, and then we die.
	 * Thus, kpsr|=PSR_PIL.
	 */
	ti->ksp = (unsigned long) new_stack;
	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
	ti->kwim = current->thread.fork_kwim;

	if(regs->psr & PSR_PS) {
		/* Forked from kernel mode: the child is a kernel thread. */
		extern struct pt_regs fake_swapper_regs;

		p->thread.kregs = &fake_swapper_regs;
		new_stack += STACKFRAME_SZ + TRACEREG_SZ;
		childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
		p->thread.flags |= SPARC_FLAG_KTHREAD;
		p->thread.current_ds = KERNEL_DS;
		memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ);
		childregs->u_regs[UREG_G6] = (unsigned long) ti;
	} else {
		/* User fork/clone. */
		p->thread.kregs = childregs;
		childregs->u_regs[UREG_FP] = sp;
		p->thread.flags &= ~SPARC_FLAG_KTHREAD;
		p->thread.current_ds = USER_DS;

		if (sp != regs->u_regs[UREG_FP]) {
			struct sparc_stackf __user *childstack;
			struct sparc_stackf __user *parentstack;

			/*
			 * This is a clone() call with supplied user stack.
			 * Set some valid stack frames to give to the child.
			 */
			childstack = (struct sparc_stackf __user *)
				(sp & ~0x7UL);
			parentstack = (struct sparc_stackf __user *)
				regs->u_regs[UREG_FP];

#if 0
			printk("clone: parent stack:\n");
			show_stackframe(parentstack);
#endif

			childstack = clone_stackframe(childstack, parentstack);
			if (!childstack)
				return -EFAULT;

#if 0
			printk("clone: child stack:\n");
			show_stackframe(childstack);
#endif

			childregs->u_regs[UREG_FP] = (unsigned long)childstack;
		}
	}

#ifdef CONFIG_SMP
	/* FPU must be disabled on SMP. */
	childregs->psr &= ~PSR_EF;
#endif

	/* Set the return value for the child (SunOS convention: %o1 == 1). */
	childregs->u_regs[UREG_I0] = current->pid;
	childregs->u_regs[UREG_I1] = 1;

	/* Set the return value for the parent (%o1 == 0). */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
569
/*
 * fill in the user structure for a core dump..
 * Captures trap-frame registers, text/data/stack segment sizes and the
 * full FPU state (SunOS a.out core format).
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	unsigned long first_stack_page;

	dump->magic = SUNOS_CORE_MAGIC;
	dump->len = sizeof(struct user);
	dump->regs.psr = regs->psr;
	dump->regs.pc = regs->pc;
	dump->regs.npc = regs->npc;
	dump->regs.y = regs->y;
	/* Copy %g1..%o7 (u_regs[1..15]); %g0 is hardwired to zero. */
	memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
	dump->uexec = current->thread.core_exec;
	dump->u_tsize = (((unsigned long) current->mm->end_code) -
		((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
	dump->u_dsize -= dump->u_tsize;
	dump->u_dsize &= ~(PAGE_SIZE - 1);
	first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
	dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
	memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32));
	dump->fpu.fpstatus.fsr = current->thread.fsr;
	dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
	dump->fpu.fpstatus.fpq_count = current->thread.fpqdepth;
	memcpy(&dump->fpu.fpstatus.fpq[0], &current->thread.fpqueue[0],
	       ((sizeof(unsigned long) * 2) * 16));
	dump->sigcode = 0;
}
601
/*
 * fill in the fpu structure for a core dump.
 *
 * If the task never used the FPU, a zeroed (but well-formed) record is
 * emitted.  If this task currently owns the FPU (UP: last_task_used_math,
 * SMP: _TIF_USEDFPU), the live state is flushed into the thread struct
 * first so the copied snapshot is current.  Always returns 1 (a record
 * was produced).
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	if (used_math()) {
		memset(fpregs, 0, sizeof(*fpregs));
		fpregs->pr_q_entrysize = 8;
		return 1;
	}
#ifdef CONFIG_SMP
	if (current_thread_info()->flags & _TIF_USEDFPU) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		if (regs != NULL) {
			regs->psr &= ~(PSR_EF);
			current_thread_info()->flags &= ~(_TIF_USEDFPU);
		}
	}
#else
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		if (regs != NULL) {
			regs->psr &= ~(PSR_EF);
			last_task_used_math = NULL;
		}
	}
#endif
	memcpy(&fpregs->pr_fr.pr_regs[0],
	       &current->thread.float_regs[0],
	       (sizeof(unsigned long) * 32));
	fpregs->pr_fsr = current->thread.fsr;
	fpregs->pr_qcnt = current->thread.fpqdepth;
	fpregs->pr_q_entrysize = 8;
	fpregs->pr_en = 1;
	if(fpregs->pr_qcnt != 0) {
		memcpy(&fpregs->pr_q[0],
		       &current->thread.fpqueue[0],
		       sizeof(struct fpq) * fpregs->pr_qcnt);
	}
	/* Zero out the rest. */
	memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
	       sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
	return 1;
}
650
/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 *
 * The syscall arguments live in %i0-%i2 of the trap frame; for an
 * "indirect" call (%g1 == 0) they are shifted up by one register.
 * On success the PT_DTRACE single-step flag is cleared for the fresh
 * image.  Returns 0 or a negative errno.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* Check for indirect call. */
	if(regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
	error = PTR_ERR(filename);
	if(IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (char __user * __user *)regs->u_regs[base + UREG_I1],
			  (char __user * __user *)regs->u_regs[base + UREG_I2],
			  regs);
	putname(filename);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
out:
	return error;
}
681
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be free'd until both the parent and the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	/* Hand-rolled clone() trap: %g1 = syscall number, %o0 = flags,
	 * %o1 = 0 (no separate user stack).  Parent and child are told
	 * apart by %o1 (0 = parent, 1 = child; SunOS convention set up
	 * in copy_thread).  The child calls fn(arg) and then issues the
	 * exit syscall with fn's return value in %o0.
	 */
	__asm__ __volatile__("mov %4, %%g2\n\t"    /* Set aside fn ptr... */
			     "mov %5, %%g3\n\t"    /* and arg. */
			     "mov %1, %%g1\n\t"
			     "mov %2, %%o0\n\t"    /* Clone flags. */
			     "mov 0, %%o1\n\t"     /* usp arg == 0 */
			     "t 0x10\n\t"          /* Linux/Sparc clone(). */
			     "cmp %%o1, 0\n\t"
			     "be 1f\n\t"           /* The parent, just return. */
			     " nop\n\t"            /* Delay slot. */
			     "jmpl %%g2, %%o7\n\t" /* Call the function. */
			     " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */
			     "mov %3, %%g1\n\t"
			     "t 0x10\n\t"          /* Linux/Sparc exit(). */
			     /* Notreached by child. */
			     "1: mov %%o0, %0\n\t" :
			     "=r" (retval) :
			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}
715
/*
 * Find where a sleeping task is blocked, for /proc wchan reporting:
 * walk the task's saved register windows from its kernel stack pointer
 * and return the first return address outside the scheduler.  Returns 0
 * for the current task, a running task, or when nothing suitable is
 * found within 16 frames.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	unsigned long task_base = (unsigned long) task;
	unsigned long ret = 0;
	struct reg_window *rw;
	int count = 0;

	if (!task || task == current ||
            task->state == TASK_RUNNING)
		goto out;

	fp = task->thread_info->ksp + bias;
	do {
		/* Bogus frame pointer? */
		if (fp < (task_base + sizeof(struct thread_info)) ||
		    fp >= (task_base + (2 * PAGE_SIZE)))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];	/* saved return address */
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;	/* follow saved frame pointer */
	} while (++count < 16);

out:
	return ret;
}
746
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
new file mode 100644
index 000000000000..fc4ad69357b8
--- /dev/null
+++ b/arch/sparc/kernel/ptrace.c
@@ -0,0 +1,632 @@
1/* ptrace.c: Sparc process tracing support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 *
5 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
6 * and David Mosberger.
7 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
9 * to emulate SunOS).
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/errno.h>
16#include <linux/ptrace.h>
17#include <linux/user.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/security.h>
21
22#include <asm/pgtable.h>
23#include <asm/system.h>
24#include <asm/uaccess.h>
25
26#define MAGIC_CONSTANT 0x80000000
27
28
29/* Returning from ptrace is a bit tricky because the syscall return
30 * low level code assumes any value returned which is negative and
31 * is a valid errno will mean setting the condition codes to indicate
32 * an error return. This doesn't work, so we have this hook.
33 */
/* Report an error to the tracer.  The generic syscall return path
 * would mangle a plain negative errno, so poke the error into %i0,
 * set the carry bit (Sparc's syscall-failed flag) and advance the
 * trap PC past the trap instruction by hand.
 */
static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
{
	regs->u_regs[UREG_I0] = error;
	regs->psr |= PSR_C;		/* carry set == syscall failure */
	regs->pc = regs->npc;		/* step past the trap insn */
	regs->npc += 4;
}
41
/* Report success to the tracer: return VALUE in %i0, clear the
 * carry bit (syscall-succeeded) and advance past the trap insn.
 */
static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
{
	regs->u_regs[UREG_I0] = value;
	regs->psr &= ~PSR_C;		/* carry clear == syscall success */
	regs->pc = regs->npc;
	regs->npc += 4;
}
49
50static void
51pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long __user *addr)
52{
53 if (put_user(value, addr)) {
54 pt_error_return(regs, EFAULT);
55 return;
56 }
57 regs->u_regs[UREG_I0] = 0;
58 regs->psr &= ~PSR_C;
59 regs->pc = regs->npc;
60 regs->npc += 4;
61}
62
63static void
64pt_os_succ_return (struct pt_regs *regs, unsigned long val, long __user *addr)
65{
66 if (current->personality == PER_SUNOS)
67 pt_succ_return (regs, val);
68 else
69 pt_succ_return_linux (regs, val, addr);
70}
71
/* The SunOS user-area offset map is decoded by hand below; the
 * layout is historical and unpleasant.
 */
/* Handle SunOS PTRACE_PEEKUSR: read one word of the emulated user
 * area.  Offsets 0-15 map to kernel thread state, 16-783 to the
 * buffered register windows, 784-831 to the saved window stack
 * pointers, and the 8xx/9xx slots to individual saved registers.
 * Anything else fails with EIO.
 */
static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
				   struct task_struct *tsk, long __user *addr)
{
	struct pt_regs *cregs = tsk->thread.kregs;
	struct thread_info *t = tsk->thread_info;
	int v;

	if(offset >= 1024)
		offset -= 1024; /* whee... */
	/* Word-aligned accesses only. */
	if(offset & ((sizeof(unsigned long) - 1))) {
		pt_error_return(regs, EIO);
		return;
	}
	/* Buffered register window area. */
	if(offset >= 16 && offset < 784) {
		offset -= 16; offset >>= 2;
		pt_os_succ_return(regs, *(((unsigned long *)(&t->reg_window[0]))+offset), addr);
		return;
	}
	/* Saved window stack pointers. */
	if(offset >= 784 && offset < 832) {
		offset -= 784; offset >>= 2;
		pt_os_succ_return(regs, *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset), addr);
		return;
	}
	switch(offset) {
	case 0:
		v = t->ksp;		/* kernel stack pointer */
		break;
	case 4:
		v = t->kpc;		/* kernel pc */
		break;
	case 8:
		v = t->kpsr;		/* kernel psr */
		break;
	case 12:
		v = t->uwinmask;	/* live user window mask */
		break;
	case 832:
		v = t->w_saved;		/* windows buffered in-kernel */
		break;
	case 896:
		v = cregs->u_regs[UREG_I0];
		break;
	case 900:
		v = cregs->u_regs[UREG_I1];
		break;
	case 904:
		v = cregs->u_regs[UREG_I2];
		break;
	case 908:
		v = cregs->u_regs[UREG_I3];
		break;
	case 912:
		v = cregs->u_regs[UREG_I4];
		break;
	case 916:
		v = cregs->u_regs[UREG_I5];
		break;
	case 920:
		v = cregs->u_regs[UREG_I6];
		break;
	case 924:
		/* %g1 is only meaningful while MAGIC_CONSTANT is set in
		 * thread.flags (toggled by syscall_trace()).
		 */
		if(tsk->thread.flags & MAGIC_CONSTANT)
			v = cregs->u_regs[UREG_G1];
		else
			v = 0;
		break;
	case 940:
		v = cregs->u_regs[UREG_I0];
		break;
	case 944:
		v = cregs->u_regs[UREG_I1];
		break;

	case 948:
		/* Isn't binary compatibility _fun_??? */
		if(cregs->psr & PSR_C)
			v = cregs->u_regs[UREG_I0] << 24;
		else
			v = 0;
		break;

	/* Rest of them are completely unsupported. */
	default:
		printk("%s [%d]: Wants to read user offset %ld\n",
		       current->comm, current->pid, offset);
		pt_error_return(regs, EIO);
		return;
	}
	if (current->personality == PER_SUNOS)
		pt_succ_return (regs, v);
	else
		pt_succ_return_linux (regs, v, addr);
	return;
}
167
/* Handle SunOS PTRACE_POKEUSR: write one word of the emulated user
 * area.  Same offset map as read_sunos_user(), but the low kernel
 * state slots (ksp/kpc/kpsr/...) are deliberately read-only here.
 * The value to store arrives in the tracer's %i3.
 */
static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
				    struct task_struct *tsk)
{
	struct pt_regs *cregs = tsk->thread.kregs;
	struct thread_info *t = tsk->thread_info;
	unsigned long value = regs->u_regs[UREG_I3];

	if(offset >= 1024)
		offset -= 1024; /* whee... */
	/* Word-aligned accesses only. */
	if(offset & ((sizeof(unsigned long) - 1)))
		goto failure;
	/* Buffered register window area. */
	if(offset >= 16 && offset < 784) {
		offset -= 16; offset >>= 2;
		*(((unsigned long *)(&t->reg_window[0]))+offset) = value;
		goto success;
	}
	/* Saved window stack pointers. */
	if(offset >= 784 && offset < 832) {
		offset -= 784; offset >>= 2;
		*(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset) = value;
		goto success;
	}
	switch(offset) {
	case 896:
		cregs->u_regs[UREG_I0] = value;
		break;
	case 900:
		cregs->u_regs[UREG_I1] = value;
		break;
	case 904:
		cregs->u_regs[UREG_I2] = value;
		break;
	case 908:
		cregs->u_regs[UREG_I3] = value;
		break;
	case 912:
		cregs->u_regs[UREG_I4] = value;
		break;
	case 916:
		cregs->u_regs[UREG_I5] = value;
		break;
	case 920:
		cregs->u_regs[UREG_I6] = value;
		break;
	case 924:
		cregs->u_regs[UREG_I7] = value;
		break;
	case 940:
		cregs->u_regs[UREG_I0] = value;
		break;
	case 944:
		cregs->u_regs[UREG_I1] = value;
		break;

	/* Rest of them are completely unsupported or "no-touch". */
	default:
		printk("%s [%d]: Wants to write user offset %ld\n",
		       current->comm, current->pid, offset);
		goto failure;
	}
success:
	pt_succ_return(regs, 0);
	return;
failure:
	pt_error_return(regs, EIO);
	return;
}
234
235/* #define ALLOW_INIT_TRACING */
236/* #define DEBUG_PTRACE */
237
238#ifdef DEBUG_PTRACE
/* Human-readable names for SunOS ptrace request codes 0-25,
 * indexed by request number; used only by the debug printouts.
 */
char *pt_rq [] = {
	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
	/* 24 */ "SYSCALL", ""
};
248#endif
249
250/*
251 * Called by kernel/ptrace.c when detaching..
252 *
253 * Make sure single step bits etc are not set.
254 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do: this port apparently keeps no per-task
	 * single-step state that would need clearing on detach.
	 */
}
259
/* The sparc32 ptrace entry point.  Unlike the generic sys_ptrace,
 * this is called with the full trap frame: arguments are pulled out
 * of the in-registers and results are delivered via the pt_*_return
 * helpers (value/carry-bit convention) rather than a return value.
 */
asmlinkage void do_ptrace(struct pt_regs *regs)
{
	/* Syscall arguments live in the trap-time in-registers. */
	unsigned long request = regs->u_regs[UREG_I0];
	unsigned long pid = regs->u_regs[UREG_I1];
	unsigned long addr = regs->u_regs[UREG_I2];
	unsigned long data = regs->u_regs[UREG_I3];
	unsigned long addr2 = regs->u_regs[UREG_I4];	/* SunOS read/write dest */
	struct task_struct *child;
	int ret;

	lock_kernel();
#ifdef DEBUG_PTRACE
	{
		char *s;

		if ((request >= 0) && (request <= 24))
			s = pt_rq [request];
		else
			s = "unknown";

		if (request == PTRACE_POKEDATA && data == 0x91d02001){
			printk ("do_ptrace: breakpoint pid=%d, addr=%08lx addr2=%08lx\n",
				pid, addr, addr2);
		} else
			printk("do_ptrace: rq=%s(%d) pid=%d addr=%08lx data=%08lx addr2=%08lx\n",
			       s, (int) request, (int) pid, addr, data, addr2);
	}
#endif
	if (request == PTRACE_TRACEME) {
		int my_ret;

		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED) {
			pt_error_return(regs, EPERM);
			goto out;
		}
		my_ret = security_ptrace(current->parent, current);
		if (my_ret) {
			/* security hook returns a negative errno; negate
			 * it for the positive-error convention here.
			 */
			pt_error_return(regs, -my_ret);
			goto out;
		}

		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		pt_succ_return(regs, 0);
		goto out;
	}
#ifndef ALLOW_INIT_TRACING
	if (pid == 1) {
		/* Can't dork with init. */
		pt_error_return(regs, EPERM);
		goto out;
	}
#endif
	/* Take a reference on the child so it cannot vanish under us;
	 * dropped at out_tsk.
	 */
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);

	if (!child) {
		pt_error_return(regs, ESRCH);
		goto out;
	}

	/* SunOS and Linux binaries use different attach request codes. */
	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
		if (ptrace_attach(child)) {
			pt_error_return(regs, EPERM);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* Everything below requires the child to be stopped and
	 * traced by us (except PTRACE_KILL).
	 */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0) {
		pt_error_return(regs, -ret);
		goto out_tsk;
	}

	switch(request) {
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;

		if (access_process_vm(child, addr,
				      &tmp, sizeof(tmp), 0) == sizeof(tmp))
			pt_os_succ_return(regs, tmp, (long __user *)data);
		else
			pt_error_return(regs, EIO);
		goto out_tsk;
	}

	case PTRACE_PEEKUSR:
		read_sunos_user(regs, addr, child, (long __user *) data);
		goto out_tsk;

	case PTRACE_POKEUSR:
		write_sunos_user(regs, addr, child);
		goto out_tsk;

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA: {
		if (access_process_vm(child, addr,
				      &data, sizeof(data), 1) == sizeof(data))
			pt_succ_return(regs, 0);
		else
			pt_error_return(regs, EIO);
		goto out_tsk;
	}

	case PTRACE_GETREGS: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread.kregs;
		int rval;

		if (!access_ok(VERIFY_WRITE, pregs, sizeof(struct pt_regs))) {
			rval = -EFAULT;
			pt_error_return(regs, -rval);
			goto out_tsk;
		}
		/* NOTE(review): individual __put_user results are
		 * ignored; access_ok above is the only check.
		 */
		__put_user(cregs->psr, (&pregs->psr));
		__put_user(cregs->pc, (&pregs->pc));
		__put_user(cregs->npc, (&pregs->npc));
		__put_user(cregs->y, (&pregs->y));
		/* u_regs[1..15] shift down one slot: %g0 is not stored. */
		for(rval = 1; rval < 16; rval++)
			__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]));
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%x nPC=%x o7=%x\n", cregs->pc, cregs->npc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_SETREGS: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread.kregs;
		unsigned long psr, pc, npc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (!access_ok(VERIFY_READ, pregs, sizeof(struct pt_regs))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		__get_user(psr, (&pregs->psr));
		__get_user(pc, (&pregs->pc));
		__get_user(npc, (&pregs->npc));
		__get_user(y, (&pregs->y));
		/* Only the integer condition codes may be modified. */
		psr &= PSR_ICC;
		cregs->psr &= ~PSR_ICC;
		cregs->psr |= psr;
		/* Silently refuse misaligned PC/nPC values. */
		if (!((pc | npc) & 3)) {
			cregs->pc = pc;
			cregs->npc =npc;
		}
		cregs->y = y;
		for(i = 1; i < 16; i++)
			__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]));
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_GETFPREGS: {
		/* User-visible FPU state layout for the copy-out below. */
		struct fps {
			unsigned long regs[32];
			unsigned long fsr;
			unsigned long flags;
			unsigned long extra;
			unsigned long fpqd;
			struct fq {
				unsigned long *insnaddr;
				unsigned long insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		int i;

		if (!access_ok(VERIFY_WRITE, fps, sizeof(struct fps))) {
			i = -EFAULT;
			pt_error_return(regs, -i);
			goto out_tsk;
		}
		for(i = 0; i < 32; i++)
			__put_user(child->thread.float_regs[i], (&fps->regs[i]));
		__put_user(child->thread.fsr, (&fps->fsr));
		__put_user(child->thread.fpqdepth, (&fps->fpqd));
		__put_user(0, (&fps->flags));
		__put_user(0, (&fps->extra));
		for(i = 0; i < 16; i++) {
			__put_user(child->thread.fpqueue[i].insn_addr,
				   (&fps->fpq[i].insnaddr));
			__put_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS: {
		/* Same layout as PTRACE_GETFPREGS above. */
		struct fps {
			unsigned long regs[32];
			unsigned long fsr;
			unsigned long flags;
			unsigned long extra;
			unsigned long fpqd;
			struct fq {
				unsigned long *insnaddr;
				unsigned long insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		int i;

		if (!access_ok(VERIFY_READ, fps, sizeof(struct fps))) {
			i = -EFAULT;
			pt_error_return(regs, -i);
			goto out_tsk;
		}
		/* NOTE(review): copy_from_user return value unchecked;
		 * access_ok above is the only guard.
		 */
		copy_from_user(&child->thread.float_regs[0], &fps->regs[0], (32 * sizeof(unsigned long)));
		__get_user(child->thread.fsr, (&fps->fsr));
		__get_user(child->thread.fpqdepth, (&fps->fpqd));
		for(i = 0; i < 16; i++) {
			__get_user(child->thread.fpqueue[i].insn_addr,
				   (&fps->fpq[i].insnaddr));
			__get_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_READTEXT:
	case PTRACE_READDATA: {
		int res = ptrace_readdata(child, addr,
					  (void __user *) addr2, data);

		if (res == data) {
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		/* Partial read is an IO failure */
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto out_tsk;
	}

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA: {
		int res = ptrace_writedata(child, (void __user *) addr2,
					   addr, data);

		if (res == data) {
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		/* Partial write is an IO failure */
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto out_tsk;
	}

	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
		addr = 1;
		/* fall through into PTRACE_CONT with addr == 1 */

	case PTRACE_CONT: { /* restart after signal. */
		if (data > _NSIG) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		/* addr == 1 means "continue from where you are";
		 * anything else sets a new (word-aligned) PC.
		 */
		if (addr != 1) {
			if (addr & 3) {
				pt_error_return(regs, EINVAL);
				goto out_tsk;
			}
#ifdef DEBUG_PTRACE
			printk ("Original: %08lx %08lx\n", child->thread.kregs->pc, child->thread.kregs->npc);
			printk ("Continuing with %08lx %08lx\n", addr, addr+4);
#endif
			child->thread.kregs->pc = addr;
			child->thread.kregs->npc = addr + 4;
		}

		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

		/* data carries the signal to deliver on resume. */
		child->exit_code = data;
#ifdef DEBUG_PTRACE
		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n",
			child->comm, child->pid, child->exit_code,
			child->thread.kregs->pc,
			child->thread.kregs->npc);
#endif
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

/*
 * make the child exit. Best I can do is send it a sigkill.
 * perhaps it should be put in the status that it wants to
 * exit.
 */
	case PTRACE_KILL: {
		if (child->exit_state == EXIT_ZOMBIE) {	/* already dead */
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		wake_up_process(child);
		child->exit_code = SIGKILL;
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
		int err = ptrace_detach(child, data);
		if (err) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* PTRACE_DUMPCORE unsupported... */

	default: {
		/* Anything else goes to the generic ptrace code. */
		int err = ptrace_request(child, request, addr, data);
		if (err)
			pt_error_return(regs, -err);
		else
			pt_succ_return(regs, 0);
		goto out_tsk;
	}
	}
out_tsk:
	if (child)
		put_task_struct(child);	/* drop the ref taken above */
out:
	unlock_kernel();
}
606
/* Called from the syscall entry/exit path when tracing is active.
 * Stops the task with SIGTRAP so the tracer can inspect it, then
 * forwards any signal the tracer asked us to deliver on resume.
 */
asmlinkage void syscall_trace(void)
{
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
#endif
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* Toggle MAGIC_CONSTANT: set on syscall entry, cleared on
	 * exit; read_sunos_user() uses it to gate the %g1 slot.
	 */
	current->thread.flags ^= MAGIC_CONSTANT;
	/* 0x80 distinguishes syscall stops when TRACESYSGOOD is set. */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
		current->pid, current->exit_code);
#endif
	if (current->exit_code) {
		send_sig (current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
new file mode 100644
index 000000000000..f7460d897e79
--- /dev/null
+++ b/arch/sparc/kernel/rtrap.S
@@ -0,0 +1,319 @@
1/* $Id: rtrap.S,v 1.58 2002/01/31 03:30:05 davem Exp $
2 * rtrap.S: Return from Sparc trap low-level code.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/page.h>
8#include <asm/ptrace.h>
9#include <asm/psr.h>
10#include <asm/asi.h>
11#include <asm/smp.h>
12#include <asm/contregs.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
/* Friendly names for the registers the return-from-trap path uses. */
#define t_psr     l0
#define t_pc      l1
#define t_npc     l2
#define t_wim     l3
#define twin_tmp1 l4
#define glob_tmp  g4
#define curptr    g6

	/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	/* These replace the 8-window forms at rtrap_patch1..5 below,
	 * presumably patched in at boot on 7-window CPUs - the shift
	 * amounts and masks differ by one window.
	 */
	.globl	rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
	.globl	rtrap_7win_patch4, rtrap_7win_patch5
rtrap_7win_patch1:	srl	%t_wim, 0x6, %glob_tmp
rtrap_7win_patch2:	and	%glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3:	srl	%g1, 7, %g2
rtrap_7win_patch4:	srl	%g2, 6, %g2
rtrap_7win_patch5:	and	%g1, 0x7f, %g1
	/* END OF PATCH INSTRUCTIONS */
34
35 /* We need to check for a few things which are:
36 * 1) The need to call schedule() because this
37 * processes quantum is up.
38 * 2) Pending signals for this process, if any
39 * exist we need to call do_signal() to do
40 * the needy.
41 *
42 * Else we just check if the rett would land us
43 * in an invalid window, if so we need to grab
44 * it off the user/kernel stack first.
45 */
46
	.globl	ret_trap_entry, rtrap_patch1, rtrap_patch2
	.globl	rtrap_patch3, rtrap_patch4, rtrap_patch5
	.globl	ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
	/* Returning to kernel mode (PSR_PS set)?  Skip all the
	 * reschedule/signal work and just restore the frame.
	 */
	andcc	%t_psr, PSR_PS, %g0
	be	1f
	 nop

	wr	%t_psr, 0x0, %psr
	b	ret_trap_kernel
	 nop

1:
	/* Returning to user space: reschedule first if needed. */
	ld	[%curptr + TI_FLAGS], %g2
	andcc	%g2, (_TIF_NEED_RESCHED), %g0
	be	signal_p
	 nop

	call	schedule
	 nop

	ld	[%curptr + TI_FLAGS], %g2
signal_p:
	/* Deliver pending signals before leaving the kernel. */
	andcc	%g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0
	bz,a	ret_trap_continue
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr

	clr	%o0
	mov	%l5, %o2
	mov	%l6, %o3
	call	do_signal
	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr

	/* Fall through. */
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr
	clr	%l6
ret_trap_continue:
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	/* If the kernel buffered any user windows (TI_W_SAVED != 0),
	 * flush them to the user stack before restoring state, then
	 * recheck for signals raised by the flush.
	 */
	ld	[%curptr + TI_W_SAVED], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	be	ret_trap_nobufwins
	 nop

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	mov	1, %o1
	call	try_to_clear_window_buffer
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2
102
ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
	 * a window from the stack, if necessary.
	 */
	LOAD_PT_INS(sp)

	/* If there are already live user windows in the
	 * set we can return from trap safely.
	 */
	ld	[%curptr + TI_UWINMASK], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	bne	ret_trap_userwins_ok
	 nop

	/* Calculate new %wim, we have to pull a register
	 * window from the users stack.
	 */
ret_trap_pull_one_window:
	rd	%wim, %t_wim
	sll	%t_wim, 0x1, %twin_tmp1
rtrap_patch1:	srl	%t_wim, 0x7, %glob_tmp
	or	%glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2:	and	%glob_tmp, 0xff, %glob_tmp

	wr	%glob_tmp, 0x0, %wim

	/* Here comes the architecture specific
	 * branch to the user stack checking routine
	 * for return from traps.
	 */
	/* NOTE(review): presumably patched at boot to branch to
	 * srmmu_rett_stackchk on SRMMU machines - confirm at the
	 * rtrap_mmu_patchme patch site.
	 */
	.globl	rtrap_mmu_patchme
rtrap_mmu_patchme:	b	sun4c_rett_stackchk
			 andcc	%fp, 0x7, %g0

ret_trap_userwins_ok:
	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	/* A user pc/npc that is not word aligned cannot be rett'd to. */
	or	%t_pc, %t_npc, %g2
	andcc	%g2, 0x3, %g0
	be	1f
	 nop

	b	ret_trap_unaligned_pc
	 add	%sp, STACKFRAME_SZ, %o0

1:
	LOAD_PT_YREG(sp, g1)
	LOAD_PT_GLOBALS(sp)

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc
156
ret_trap_unaligned_pc:
	/* Hand the bogus pc/npc/psr to the unaligned-access handler
	 * and then recheck for signals it may have queued.
	 */
	ld	[%sp + STACKFRAME_SZ + PT_PC], %o1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %o2
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %o3

	wr	%t_wim, 0x0, %wim		! or else...

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	do_memaccess_unaligned
	 nop

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2
172
ret_trap_kernel:
	/* Will the rett land us in the invalid window? */
	mov	2, %g1
	sll	%g1, %t_psr, %g1
rtrap_patch3:	srl	%g1, 8, %g2
	or	%g1, %g2, %g1
	rd	%wim, %g2
	andcc	%g2, %g1, %g0
	be	1f		! Nope, just return from the trap
	 sll	%g2, 0x1, %g1

	/* We have to grab a window before returning. */
rtrap_patch4:	srl	%g2, 7, %g2
	or	%g1, %g2, %g1
rtrap_patch5:	and	%g1, 0xff, %g1

	wr	%g1, 0x0, %wim

	/* Grrr, make sure we load from the right %sp... */
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

	restore	%g0, %g0, %g0
	LOAD_WINDOW(sp)
	b	2f
	 save	%g0, %g0, %g0

	/* Reload the entire frame in case this is from a
	 * kernel system call or whatever...
	 */
1:
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_user_stack_is_bolixed:
	/* The user stack cannot supply the window we need; take the
	 * slow path through window_ret_fault() and recheck signals.
	 */
	wr	%t_wim, 0x0, %wim

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	window_ret_fault
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2
222
223
	.globl	sun4c_rett_stackchk
sun4c_rett_stackchk:
	/* Entered with the condition codes of "andcc %fp, 0x7, %g0"
	 * still live: a misaligned user %fp loses immediately.
	 * "+ 0x4" on the bolixed branches skips its own wim write,
	 * which we already do in our delay slot.
	 */
	be	1f
	 and	%fp, 0xfff, %g1		! delay slot

	b	ret_trap_user_stack_is_bolixed + 0x4
	 wr	%t_wim, 0x0, %wim

	/* See if we have to check the sanity of one page or two */
1:
	add	%g1, 0x38, %g1
	sra	%fp, 29, %g2
	add	%g2, 0x1, %g2
	andncc	%g2, 0x1, %g0
	be	1f
	 andncc	%g1, 0xff8, %g0

	/* %sp is in vma hole, yuck */
	b	ret_trap_user_stack_is_bolixed + 0x4
	 wr	%t_wim, 0x0, %wim

1:
	be	sun4c_rett_onepage	/* Only one page to check */
	 lda	[%fp] ASI_PTE, %g2

sun4c_rett_twopages:
	add	%fp, 0x38, %g1
	sra	%g1, 29, %g2
	add	%g2, 0x1, %g2
	andncc	%g2, 0x1, %g0
	be	1f
	 lda	[%g1] ASI_PTE, %g2

	/* Second page is in vma hole */
	b	ret_trap_user_stack_is_bolixed + 0x4
	 wr	%t_wim, 0x0, %wim

1:
	srl	%g2, 29, %g2
	andcc	%g2, 0x4, %g0
	bne	sun4c_rett_onepage
	 lda	[%fp] ASI_PTE, %g2

	/* Second page has bad perms */
	b	ret_trap_user_stack_is_bolixed + 0x4
	 wr	%t_wim, 0x0, %wim

sun4c_rett_onepage:
	srl	%g2, 29, %g2
	andcc	%g2, 0x4, %g0
	bne,a	1f
	 restore %g0, %g0, %g0

	/* A page had bad page permissions, losing... */
	b	ret_trap_user_stack_is_bolixed + 0x4
	 wr	%t_wim, 0x0, %wim

	/* Whee, things are ok, load the window and continue. */
1:
	LOAD_WINDOW(sp)

	b	ret_trap_userwins_ok
	 save	%g0, %g0, %g0
287
	.globl	srmmu_rett_stackchk
srmmu_rett_stackchk:
	/* Condition codes of "andcc %fp, 0x7, %g0" are still live:
	 * a misaligned %fp, or one at/above PAGE_OFFSET (not a user
	 * address), is bolixed right away.
	 */
	bne	ret_trap_user_stack_is_bolixed
	 sethi	%hi(PAGE_OFFSET), %g1
	cmp	%g1, %fp
	bleu	ret_trap_user_stack_is_bolixed
	 mov	AC_M_SFSR, %g1
	lda	[%g1] ASI_M_MMUREGS, %g0	! read clears fault status

	/* Set bit 0x2 in the MMU control register (presumably the
	 * SRMMU no-fault bit - confirm against the SRMMU spec) so
	 * the window loads below cannot trap.
	 */
	lda	[%g0] ASI_M_MMUREGS, %g1
	or	%g1, 0x2, %g1
	sta	%g1, [%g0] ASI_M_MMUREGS

	restore	%g0, %g0, %g0

	LOAD_WINDOW(sp)

	save	%g0, %g0, %g0

	/* Clear the bit again... */
	andn	%g1, 0x2, %g1
	sta	%g1, [%g0] ASI_M_MMUREGS

	/* ...and check whether the loads actually faulted. */
	mov	AC_M_SFAR, %g2
	lda	[%g2] ASI_M_MMUREGS, %g2

	mov	AC_M_SFSR, %g1
	lda	[%g1] ASI_M_MMUREGS, %g1
	andcc	%g1, 0x2, %g0		! did a fault occur?
	be	ret_trap_userwins_ok
	 nop

	b,a	ret_trap_user_stack_is_bolixed
diff --git a/arch/sparc/kernel/sclow.S b/arch/sparc/kernel/sclow.S
new file mode 100644
index 000000000000..3a867fc19927
--- /dev/null
+++ b/arch/sparc/kernel/sclow.S
@@ -0,0 +1,86 @@
1/* sclow.S: Low level special syscall handling.
2 * Basically these are cases where we can completely
3 * handle the system call without saving any state
4 * because we know that the process will not sleep.
5 *
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 */
8
9#include <asm/ptrace.h>
10#include <asm/asm_offsets.h>
11#include <asm/errno.h>
12#include <asm/winmacro.h>
13#include <asm/thread_info.h>
14#include <asm/psr.h>
15#include <asm/page.h>
16
/* Return from the trap with the carry bit cleared in the restored
 * %psr: the Sparc syscall-success convention.  At trap entry %l0
 * holds the saved %psr and %l2 the saved %npc, so "jmp %l2; rett
 * %l2 + 4" resumes just past the trap instruction.
 */
#define CC_AND_RETT \
	set	PSR_C, %l4; \
	andn	%l0, %l4, %l4; \
	wr	%l4, 0x0, %psr; \
	nop; nop; nop; \
	jmp	%l2; \
	rett	%l2 + 4;

/* Same, but with carry set: the syscall-failure convention. */
#define SC_AND_RETT \
	set	PSR_C, %l4; \
	or	%l0, %l4, %l4; \
	wr	%l4, 0x0, %psr; \
	nop; nop; nop; \
	jmp	%l2; \
	rett	%l2 + 4;

#define LABEL(func) func##_low

	.globl	LABEL(sunosnop)
LABEL(sunosnop):
	CC_AND_RETT

	/* Fast getuid/getgid read the credentials straight out of the
	 * task struct; only compiled in while the fields are 16 bits
	 * wide (the ASIZ_* guards).
	 */
#if (ASIZ_task_uid == 2 && ASIZ_task_euid == 2)
	.globl	LABEL(sunosgetuid)
LABEL(sunosgetuid):
	LOAD_CURRENT(l4, l5)
	ld	[%l4 + TI_TASK], %l4
	lduh	[%l4 + AOFF_task_uid], %i0	! ruid in %i0
	lduh	[%l4 + AOFF_task_euid], %i1	! euid in %i1
	CC_AND_RETT
#endif

#if (ASIZ_task_gid == 2 && ASIZ_task_egid == 2)
	.globl	LABEL(sunosgetgid)
LABEL(sunosgetgid):
	LOAD_CURRENT(l4, l5)
	ld	[%l4 + TI_TASK], %l4
	lduh	[%l4 + AOFF_task_gid], %i0	! rgid in %i0
	lduh	[%l4 + AOFF_task_egid], %i1	! egid in %i1
	CC_AND_RETT
#endif

	.globl	LABEL(sunosmctl)
LABEL(sunosmctl):
	mov	0, %i0		! always succeeds with 0
	CC_AND_RETT

	.globl	LABEL(sunosgdtsize)
LABEL(sunosgdtsize):
	mov	256, %i0
	CC_AND_RETT

	.globl	LABEL(getpagesize)
LABEL(getpagesize):
	set	PAGE_SIZE, %i0
	CC_AND_RETT
73
74 /* XXX sys_nice() XXX */
75 /* XXX sys_setpriority() XXX */
76 /* XXX sys_getpriority() XXX */
77 /* XXX sys_setregid() XXX */
78 /* XXX sys_setgid() XXX */
79 /* XXX sys_setreuid() XXX */
80 /* XXX sys_setuid() XXX */
81 /* XXX sys_setfsuid() XXX */
82 /* XXX sys_setfsgid() XXX */
83 /* XXX sys_setpgid() XXX */
84 /* XXX sys_getpgid() XXX */
85 /* XXX sys_setsid() XXX */
86 /* XXX sys_getsid() XXX */
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
new file mode 100644
index 000000000000..0c37c1a7cd7e
--- /dev/null
+++ b/arch/sparc/kernel/semaphore.c
@@ -0,0 +1,155 @@
1/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
2
3/* sparc32 semaphore implementation, based on i386 version */
4
5#include <linux/sched.h>
6#include <linux/errno.h>
7#include <linux/init.h>
8
9#include <asm/semaphore.h>
10
11/*
12 * Semaphores are implemented using a two-way counter:
13 * The "count" variable is decremented for each process
14 * that tries to acquire the semaphore, while the "sleeping"
15 * variable is a count of such acquires.
16 *
17 * Notably, the inline "up()" and "down()" functions can
18 * efficiently test if they need to do any extra work (up
19 * needs to do something only if count was negative before
20 * the increment operation.
21 *
22 * "sleeping" and the contention routine ordering is
23 * protected by the semaphore spinlock.
24 *
25 * Note that these functions are only called when there is
26 * contention on the lock, and as such all this is the
27 * "non-critical" part of the whole semaphore business. The
28 * critical part is the inline stuff in <asm/semaphore.h>
29 * where we want to avoid any extra jumps and calls.
30 */
31
32/*
33 * Logic:
34 * - only on a boundary condition do we need to care. When we go
35 * from a negative count to a non-negative, we wake people up.
36 * - when we go from a non-negative count to a negative do we
37 * (a) synchronize with the "sleeper" count and (b) make sure
38 * that we're on the wakeup list before we synchronize so that
39 * we cannot lose wakeup events.
40 */
41
/* Slow path of up(): only reached when the count was negative, i.e.
 * somebody may be sleeping on the wait queue - wake one waiter.
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
46
47static DEFINE_SPINLOCK(semaphore_lock);
48
/* Slow path of down(): the inline fast path already decremented
 * sem->count and saw it go negative.  Sleep uninterruptibly until
 * the sleeper bookkeeping says the semaphore is ours.
 */
void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	/* Queue ourselves before touching the counts so a concurrent
	 * up() cannot find an empty wait queue (see "Logic:" above).
	 */
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm the sleep state before retaking the lock so a
		 * wakeup between schedule() and here is not lost.
		 */
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	/* Pass any surplus wakeup on to the next exclusive waiter. */
	wake_up(&sem->wait);
}
81
/* Interruptible variant of __down(): identical sleeper protocol,
 * but a pending signal aborts the wait.  Returns 0 once the
 * semaphore is acquired, -EINTR if interrupted.
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we* can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic24_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm before retaking the lock; see __down(). */
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	/* Pass any surplus wakeup on to the next exclusive waiter. */
	wake_up(&sem->wait);
	return retval;
}
132
133/*
134 * Trylock failed - make sure we correct for
135 * having decremented the count.
136 */
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;	/* +1 undoes our own decrement */
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic24_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	/* Slow path always reports failure (nonzero). */
	return 1;
}
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
new file mode 100644
index 000000000000..55352ed85e8a
--- /dev/null
+++ b/arch/sparc/kernel/setup.c
@@ -0,0 +1,476 @@
1/* $Id: setup.c,v 1.126 2001/11/13 00:49:27 davem Exp $
2 * linux/arch/sparc/kernel/setup.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/stddef.h>
13#include <linux/unistd.h>
14#include <linux/ptrace.h>
15#include <linux/slab.h>
16#include <linux/initrd.h>
17#include <asm/smp.h>
18#include <linux/user.h>
19#include <linux/a.out.h>
20#include <linux/tty.h>
21#include <linux/delay.h>
22#include <linux/config.h>
23#include <linux/fs.h>
24#include <linux/seq_file.h>
25#include <linux/syscalls.h>
26#include <linux/kdev_t.h>
27#include <linux/major.h>
28#include <linux/string.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/console.h>
32#include <linux/spinlock.h>
33#include <linux/root_dev.h>
34
35#include <asm/segment.h>
36#include <asm/system.h>
37#include <asm/io.h>
38#include <asm/processor.h>
39#include <asm/oplib.h>
40#include <asm/page.h>
41#include <asm/pgtable.h>
42#include <asm/traps.h>
43#include <asm/vaddrs.h>
44#include <asm/kdebug.h>
45#include <asm/mbus.h>
46#include <asm/idprom.h>
47#include <asm/machines.h>
48#include <asm/cpudata.h>
49#include <asm/setup.h>
50
/* Dummy screen_info for the generic console code; SPARC has no VGA
 * BIOS handoff, so only the text geometry (128 cols x 54 lines) and
 * font height (16) carry real information.
 */
struct screen_info screen_info = {
	0, 0,           /* orig-x, orig-y */
	0,              /* unused */
	0,              /* orig-video-page */
	0,              /* orig-video-mode */
	128,            /* orig-video-cols */
	0,0,0,          /* ega_ax, ega_bx, ega_cx */
	54,             /* orig-video-lines */
	0,              /* orig-video-isVGA */
	16              /* orig-video-points */
};
62
/* Typing sync at the prom prompt calls the function pointed to by
 * romvec->pv_synchook which I set to the following function.
 * This should sync all filesystems and return, for now it just
 * prints out pretty messages and returns.
 */

extern unsigned long trapbase;	/* kernel trap table base — defined in assembly elsewhere */
void (*prom_palette)(int);	/* optional hook to restore the PROM's palette */

/* Pretty sick eh? */
void prom_sync_me(void)
{
	unsigned long prom_tbr, flags;

	/* XXX Badly broken. FIX! - Anton */
	local_irq_save(flags);
	/* Save the PROM's trap base register and point %tbr at the
	 * kernel's trap table while we run; the nops cover the delay
	 * after the wr instruction.
	 */
	__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
	__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
			     "nop\n\t"
			     "nop\n\t"
			     "nop\n\t" : : "r" (&trapbase));

	if (prom_palette)
		prom_palette(1);
	prom_printf("PROM SYNC COMMAND...\n");
	show_free_areas();
	if(current->pid != 0) {
		/* Not the idle task, so it is safe to enable irqs and
		 * perform a real filesystem sync.
		 */
		local_irq_enable();
		sys_sync();
		local_irq_disable();
	}
	prom_printf("Returning to prom\n");

	/* Put the PROM's own trap table back before returning to it. */
	__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
			     "nop\n\t"
			     "nop\n\t"
			     "nop\n\t" : : "r" (prom_tbr));
	local_irq_restore(flags);

	return;
}
104
/* Flags accumulated from "-" switches on the boot command line. */
unsigned int boot_flags __initdata = 0;
#define BOOTME_DEBUG  0x1	/* -d: debug boot (KADB / PROM on OBP interrupt) */
#define BOOTME_SINGLE 0x2	/* -s: single-user boot */

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size __initdata = 0;
111
/* Early debug console that writes straight through the PROM;
 * registered only when the user boots with the -p switch.
 */
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
	prom_write(s, n);
}

static struct console prom_debug_console = {
	.name =		"debug",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};
124
125int obp_system_intr(void)
126{
127 if (boot_flags & BOOTME_DEBUG) {
128 printk("OBP: system interrupted\n");
129 prom_halt();
130 return 1;
131 }
132 return 0;
133}
134
135/*
136 * Process kernel command line switches that are specific to the
137 * SPARC or that require special low-level processing.
138 */
139static void __init process_switch(char c)
140{
141 switch (c) {
142 case 'd':
143 boot_flags |= BOOTME_DEBUG;
144 break;
145 case 's':
146 boot_flags |= BOOTME_SINGLE;
147 break;
148 case 'h':
149 prom_printf("boot_flags_init: Halt!\n");
150 prom_halt();
151 break;
152 case 'p':
153 /* Use PROM debug console. */
154 register_console(&prom_debug_console);
155 break;
156 default:
157 printk("Unknown boot switch (-%c)\n", c);
158 break;
159 }
160}
161
/* Parse a "console=..." boot argument.  Sets serial_console to
 * 0 (framebuffer), 1 (ttya/ttyS0) or 2 (ttyb/ttyS1); "console=prom"
 * is blanked out of the command line and switches to the PROM console.
 */
static void __init process_console(char *commands)
{
	serial_console = 0;
	commands += 8;		/* skip the "console=" prefix */
	/* Linux-style serial */
	if (!strncmp(commands, "ttyS", 4))
		serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
	else if (!strncmp(commands, "tty", 3)) {
		char c = *(commands + 3);
		/* Solaris-style serial */
		if (c == 'a' || c == 'b')
			serial_console = c - 'a' + 1;
		/* else Linux-style fbcon, not serial */
	}
#if defined(CONFIG_PROM_CONSOLE)
	if (!strncmp(commands, "prom", 4)) {
		char *p;

		/* Overwrite the whole "console=prom" token with spaces
		 * so later command-line consumers never see it.
		 */
		for (p = commands - 8; *p && *p != ' '; p++)
			*p = ' ';
		conswitchp = &prom_con;
	}
#endif
}
186
/* Scan the boot command line: "-xyz" switch groups go through
 * process_switch(), "console=" and "mem=" are handled here, and any
 * other token is skipped.  The scan mutates nothing except via
 * process_console()'s blanking of "console=prom".
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands && *commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "console=", 8)) {
			process_console(commands);
		} else if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmM] overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
							     &commands, 0);
			if (*commands == 'K' || *commands == 'k') {
				cmdline_memory_size <<= 10;
				commands++;
			} else if (*commands=='M' || *commands=='m') {
				cmdline_memory_size <<= 20;
				commands++;
			}
		}
		/* Advance past the remainder of this token. */
		while (*commands && *commands != ' ')
			commands++;
	}
}
224
/* This routine will in the future do all the nasty prom stuff
 * to probe for the mmu type and its parameters, etc. This will
 * also be where SMP things happen plus the Sparc specific memory
 * physical memory probe as on the alpha.
 */

extern int prom_probe_memory(void);
extern void sun4c_probe_vac(void);
extern char cputypval;			/* machine type string from the PROM */
extern unsigned long start, end;	/* kernel image bounds (linker/asm) */
extern void panic_setup(char *, int *);

/* Root/ramdisk words; presumably patched into the image by the boot
 * code — TODO confirm against head.S.
 */
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];
enum sparc_cpu sparc_cpu_model;		/* sun4/sun4c/... — set in setup_arch() */

struct tt_entry *sparc_ttable;		/* kernel trap table — set in setup_arch() */

struct pt_regs fake_swapper_regs;	/* dummy kregs installed for init_task */

extern void paging_init(void);
254
/* Early architecture setup, called once from init/main.c: fetch the
 * PROM boot arguments, identify the machine type, size physical
 * memory, configure root/initrd parameters, install the PROM sync
 * hook and finally start paging.
 */
void __init setup_arch(char **cmdline_p)
{
	int i;
	unsigned long highest_paddr;

	/* The trap table sits at the very start of the kernel image. */
	sparc_ttable = (struct tt_entry *) &start;

	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(saved_command_line, *cmdline_p);

	/* Set sparc_cpu_model */
	sparc_cpu_model = sun_unknown;
	if(!strcmp(&cputypval,"sun4 ")) { sparc_cpu_model=sun4; }
	if(!strcmp(&cputypval,"sun4c")) { sparc_cpu_model=sun4c; }
	if(!strcmp(&cputypval,"sun4m")) { sparc_cpu_model=sun4m; }
	if(!strcmp(&cputypval,"sun4s")) { sparc_cpu_model=sun4m; } /* CP-1200 with PROM 2.30 -E */
	if(!strcmp(&cputypval,"sun4d")) { sparc_cpu_model=sun4d; }
	if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
	if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }

#ifdef CONFIG_SUN4
	if (sparc_cpu_model != sun4) {
		prom_printf("This kernel is for Sun4 architecture only.\n");
		prom_halt();
	}
#endif
	printk("ARCH: ");
	switch(sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		break;
	case sun4c:
		printk("SUN4C\n");
		break;
	case sun4m:
		printk("SUN4M\n");
		break;
	case sun4d:
		printk("SUN4D\n");
		break;
	case sun4e:
		printk("SUN4E\n");
		break;
	case sun4u:
		printk("SUN4U\n");
		break;
	default:
		printk("UNKNOWN!\n");
		break;
	};

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#elif defined(CONFIG_PROM_CONSOLE)
	conswitchp = &prom_con;
#endif
	boot_flags_init(*cmdline_p);

	idprom_init();
	if (ARCH_SUN4C_SUN4)
		sun4c_probe_vac();
	load_mmu();
	(void) prom_probe_memory();

	/* Find the lowest and highest physical addresses among the
	 * memory banks the PROM reported.
	 */
	phys_base = 0xffffffffUL;
	highest_paddr = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long top;

		if (sp_banks[i].base_addr < phys_base)
			phys_base = sp_banks[i].base_addr;
		top = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		if (highest_paddr < top)
			highest_paddr = top;
	}
	pfn_base = phys_base >> PAGE_SHIFT;

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_INITRD
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	/* Hook "sync" at the PROM prompt — see prom_sync_me() above. */
	prom_setsync(prom_sync_me);

	if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
	   ((*(short *)linux_dbvec) != -1)) {
		printk("Booted under KADB. Syncing trap table.\n");
		(*(linux_dbvec->teach_debugger))();
	}

	init_mm.context = (unsigned long) NO_CONTEXT;
	init_task.thread.kregs = &fake_swapper_regs;

	paging_init();
}
356
357static int __init set_preferred_console(void)
358{
359 int idev, odev;
360
361 /* The user has requested a console so this is already set up. */
362 if (serial_console >= 0)
363 return -EBUSY;
364
365 idev = prom_query_input_device();
366 odev = prom_query_output_device();
367 if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
368 serial_console = 0;
369 } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
370 serial_console = 1;
371 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
372 serial_console = 2;
373 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OTTYA) {
374 prom_printf("MrCoffee ttya\n");
375 serial_console = 1;
376 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OSCREEN) {
377 serial_console = 0;
378 prom_printf("MrCoffee keyboard\n");
379 } else {
380 prom_printf("Confusing console (idev %d, odev %d)\n",
381 idev, odev);
382 serial_console = 1;
383 }
384
385 if (serial_console)
386 return add_preferred_console("ttyS", serial_console - 1, NULL);
387
388 return -ENODEV;
389}
390console_initcall(set_preferred_console);
391
extern char *sparc_cpu_type;
extern char *sparc_fpu_type;

/* /proc/cpuinfo body: CPU/FPU names, PROM revision, machine type and
 * CPU counts.  On UP builds the bogomips and clock figures for CPU 0
 * are printed inline; on SMP they come from smp_bogo()/smp_info().
 * Note the format string and argument list below are assembled under
 * matching #ifndef CONFIG_SMP guards — keep them in sync.
 */
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
	seq_printf(m,
		   "cpu\t\t: %s\n"
		   "fpu\t\t: %s\n"
		   "promlib\t\t: Version %d Revision %d\n"
		   "prom\t\t: %d.%d\n"
		   "type\t\t: %s\n"
		   "ncpus probed\t: %d\n"
		   "ncpus active\t: %d\n"
#ifndef CONFIG_SMP
		   "CPU0Bogo\t: %lu.%02lu\n"
		   "CPU0ClkTck\t: %ld\n"
#endif
		   ,
		   sparc_cpu_type ? sparc_cpu_type : "undetermined",
		   sparc_fpu_type ? sparc_fpu_type : "undetermined",
		   romvec->pv_romvers,
		   prom_rev,
		   romvec->pv_printrev >> 16,
		   romvec->pv_printrev & 0xffff,
		   &cputypval,
		   num_possible_cpus(),
		   num_online_cpus()
#ifndef CONFIG_SMP
		   , cpu_data(0).udelay_val/(500000/HZ),
		   (cpu_data(0).udelay_val/(5000/HZ)) % 100,
		   cpu_data(0).clock_tick
#endif
		);

#ifdef CONFIG_SMP
	smp_bogo(m);
#endif
	mmu_info(m);
#ifdef CONFIG_SMP
	smp_info(m);
#endif
	return 0;
}
435
436static void *c_start(struct seq_file *m, loff_t *pos)
437{
438 /* The pointer we are returning is arbitrary,
439 * it just has to be non-NULL and not IS_ERR
440 * in the success case.
441 */
442 return *pos == 0 ? &c_start : NULL;
443}
444
445static void *c_next(struct seq_file *m, void *v, loff_t *pos)
446{
447 ++*pos;
448 return c_start(m, pos);
449}
450
451static void c_stop(struct seq_file *m, void *v)
452{
453}
454
/* seq_file operations backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
461
462extern int stop_a_enabled;
463
464void sun_do_break(void)
465{
466 if (!stop_a_enabled)
467 return;
468
469 printk("\n");
470 flush_user_windows();
471
472 prom_cmdline();
473}
474
int serial_console = -1;	/* -1: unset; 0: fb/keyboard; N: ttyS(N-1) */
int stop_a_enabled = 1;		/* Stop-A drops to the PROM unless cleared */
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
new file mode 100644
index 000000000000..011ff35057a5
--- /dev/null
+++ b/arch/sparc/kernel/signal.c
@@ -0,0 +1,1181 @@
1/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
2 * linux/arch/sparc/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 */
9
10#include <linux/config.h>
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h>
15#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/unistd.h>
18#include <linux/mm.h>
19#include <linux/tty.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
#include <linux/binfmts.h>	/* do_coredump */
23#include <linux/bitops.h>
24
25#include <asm/uaccess.h>
26#include <asm/ptrace.h>
27#include <asm/svr4.h>
28#include <asm/pgalloc.h>
29#include <asm/pgtable.h>
30#include <asm/cacheflush.h> /* flush_sig_insns */
31
/* Mask of signals a process may block: everything but KILL and STOP. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* FPU save/restore primitives — implemented in assembly elsewhere. */
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_o0, int restart_syscall);
40
/* Signal frames: the original one (compatible with SunOS):
 *
 * Set up a signal frame... Make the stack look the way SunOS
 * expects it to look which is basically:
 *
 * ---------------------------------- <-- %sp at signal time
 * Struct sigcontext
 * Signal address
 * Ptr to sigcontext area above
 * Signal code
 * The signal number itself
 * One register window
 * ---------------------------------- <-- New %sp
 */
struct signal_sframe {
	struct reg_window sig_window;
	int sig_num;
	int sig_code;
	struct sigcontext __user *sig_scptr;
	int sig_address;
	struct sigcontext sig_context;
	unsigned int extramask[_NSIG_WORDS - 1];
};

/*
 * And the new one, intended to be used for Linux applications only
 * (we have enough in there to work with clone).
 * All the interesting bits are in the info field.
 */

struct new_signal_frame {
	struct sparc_stackf ss;
	__siginfo_t info;
	__siginfo_fpu_t __user *fpu_save;
	/* sigreturn trampoline: "mov __NR_sigreturn, %g1; t 0x10" —
	 * filled in by new_setup_frame().
	 */
	unsigned long insns[2] __attribute__ ((aligned (8)));
	unsigned int extramask[_NSIG_WORDS - 1];
	unsigned int extra_size; /* Should be 0 */
	__siginfo_fpu_t fpu_state;
};

struct rt_signal_frame {
	struct sparc_stackf ss;
	siginfo_t info;
	struct pt_regs regs;
	sigset_t mask;
	__siginfo_fpu_t __user *fpu_save;
	unsigned int insns[2];
	stack_t stack;
	unsigned int extra_size; /* Should be 0 */
	__siginfo_fpu_t fpu_state;
};

/* Align macros: round each frame size up to an 8-byte boundary. */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
97
/*
 * atomically swap in the new signal mask, and wait for a signal.
 * This is really tricky on the Sparc, watch out...
 */
asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction so the syscall is not
	 * re-executed when a handler returns.
	 */
	regs->pc = regs->npc;
	regs->npc += 4;

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->psr |= PSR_C;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&saveset, regs, 0, 0))
			return;
	}
}

/* sigpause(2): mask comes in as the explicit argument. */
asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}

/* sigsuspend(2): mask comes in via register %i0. */
asmlinkage void do_sigsuspend (struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}
144
/* rt_sigsuspend(2): like _sigpause_common() but takes a full sigset_t
 * from user space.  Errors are reported SPARC-style: carry bit set in
 * %psr with the positive errno in %i0.
 */
asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
				 struct pt_regs *regs)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t)) {
		regs->psr |= PSR_C;
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}

	if (copy_from_user(&set, uset, sizeof(set))) {
		regs->psr |= PSR_C;
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction. */
	regs->pc = regs->npc;
	regs->npc += 4;

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->psr |= PSR_C;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&oldset, regs, 0, 0))
			return;
	}
}
191
/* Reload the task's FPU state from a signal frame.  First drops any
 * lazy-FPU ownership (clearing PSR_EF so the next FPU use re-faults
 * and loads from the thread struct), then copies regs/fsr/queue in.
 * Returns 0 on success or a nonzero fault indication.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
#else
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->psr &= ~PSR_EF;
	}
#endif
	set_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);

	if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;

	err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
			       (sizeof(unsigned long) * 32));
	err |= __get_user(current->thread.fsr, &fpu->si_fsr);
	err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	/* Only copy the FP queue when there are pending entries. */
	if (current->thread.fpqdepth != 0)
		err |= __copy_from_user(&current->thread.fpqueue[0],
					&fpu->si_fpqueue[0],
					((sizeof(unsigned long) +
					(sizeof(unsigned long *)))*16));
	return err;
}
222
/* Return path for the Linux-native (new_signal_frame) layout: validate
 * the frame under %fp, restore pt_regs, optional FPU state and the
 * blocked-signal mask.  Any fault or bad alignment kills the task with
 * SIGSEGV.
 */
static inline void do_new_sigreturn (struct pt_regs *regs)
{
	struct new_signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	int err;

	sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc, &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	/* Instruction addresses must be word aligned. */
	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	/* This is pretty much atomic, no amount locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
				(_NSIG_WORDS-1) * sizeof(unsigned int));

	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
279
/* sigreturn(2): dispatch to the new-style frame restorer when the task
 * uses the Linux layout, otherwise unwind the SunOS-compatible
 * sigcontext passed in %i0.  Faulting or misaligned frames raise
 * SIGSEGV.
 */
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct sigcontext __user *scptr;
	unsigned long pc, npc, psr;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	if (current->thread.new_signal) {
		do_new_sigreturn(regs);
		return;
	}

	scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];

	/* Check sanity of the user arg. */
	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
	    (((unsigned long) scptr) & 3))
		goto segv_and_exit;

	err = __get_user(pc, &scptr->sigc_pc);
	err |= __get_user(npc, &scptr->sigc_npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* This is pretty much atomic, no amount locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &scptr->sigc_mask);
	/* Note that scptr + 1 points to extramask */
	err |= __copy_from_user(&set.sig[1], scptr + 1,
				(_NSIG_WORDS - 1) * sizeof(unsigned int));

	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->pc = pc;
	regs->npc = npc;

	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

	/* User can only change condition codes in %psr. */
	err |= __get_user(psr, &scptr->sigc_psr);
	if (err)
		goto segv_and_exit;

	regs->psr &= ~(PSR_ICC);
	regs->psr |= (psr & PSR_ICC);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
346
/* rt_sigreturn(2): unwind an rt_signal_frame — full pt_regs, optional
 * FPU state, saved sigaltstack and full sigset.  Faults raise SIGSEGV.
 */
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned int psr, pc, npc;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 0x03))
		goto segv;

	err = __get_user(pc, &sf->regs.pc);
	err |= __get_user(npc, &sf->regs.npc);
	/* Treat misaligned pc/npc like a fault. */
	err |= ((pc | npc) & 0x03);

	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	err |= __copy_from_user(&regs->u_regs[UREG_G1],
				&sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

	/* User may only change the condition codes. */
	regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));

	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->pc = pc;
	regs->npc = npc;

	/* It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
406
407/* Checks if the fp is valid */
408static inline int invalid_frame_pointer(void __user *fp, int fplen)
409{
410 if ((((unsigned long) fp) & 7) ||
411 !__access_ok((unsigned long)fp, fplen) ||
412 ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
413 ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
414 return 1;
415
416 return 0;
417}
418
/* Choose where the new signal frame goes: normally just below the
 * current user frame pointer; with SA_ONSTACK, switch to the alternate
 * stack — but only when not already on it AND the altstack top is
 * 8-byte aligned (a misaligned altstack is silently ignored).
 */
static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];

	/* This is the X/Open sanctioned signal stack switching. */
	if (sa->sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}
432
433static inline void
434setup_frame(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
435{
436 struct signal_sframe __user *sframep;
437 struct sigcontext __user *sc;
438 int window = 0, err;
439 unsigned long pc = regs->pc;
440 unsigned long npc = regs->npc;
441 struct thread_info *tp = current_thread_info();
442 void __user *sig_address;
443 int sig_code;
444
445 synchronize_user_stack();
446 sframep = (struct signal_sframe __user *)
447 get_sigframe(sa, regs, SF_ALIGNEDSZ);
448 if (invalid_frame_pointer(sframep, sizeof(*sframep))){
449 /* Don't change signal code and address, so that
450 * post mortem debuggers can have a look.
451 */
452 goto sigill_and_return;
453 }
454
455 sc = &sframep->sig_context;
456
457 /* We've already made sure frame pointer isn't in kernel space... */
458 err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
459 &sc->sigc_onstack);
460 err |= __put_user(oldset->sig[0], &sc->sigc_mask);
461 err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
462 (_NSIG_WORDS - 1) * sizeof(unsigned int));
463 err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
464 err |= __put_user(pc, &sc->sigc_pc);
465 err |= __put_user(npc, &sc->sigc_npc);
466 err |= __put_user(regs->psr, &sc->sigc_psr);
467 err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
468 err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
469 err |= __put_user(tp->w_saved, &sc->sigc_oswins);
470 if (tp->w_saved)
471 for (window = 0; window < tp->w_saved; window++) {
472 put_user((char *)tp->rwbuf_stkptrs[window],
473 &sc->sigc_spbuf[window]);
474 err |= __copy_to_user(&sc->sigc_wbuf[window],
475 &tp->reg_window[window],
476 sizeof(struct reg_window));
477 }
478 else
479 err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
480 sizeof(struct reg_window));
481
482 tp->w_saved = 0; /* So process is allowed to execute. */
483
484 err |= __put_user(signr, &sframep->sig_num);
485 sig_address = NULL;
486 sig_code = 0;
487 if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
488 sig_address = info->si_addr;
489 switch (signr) {
490 case SIGSEGV:
491 switch (info->si_code) {
492 case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
493 default: sig_code = SUBSIG_PROTECTION; break;
494 }
495 break;
496 case SIGILL:
497 switch (info->si_code) {
498 case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
499 case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
500 case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
501 default: sig_code = SUBSIG_STACK; break;
502 }
503 break;
504 case SIGFPE:
505 switch (info->si_code) {
506 case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
507 case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
508 case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
509 case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
510 case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
511 case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
512 case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
513 default: sig_code = SUBSIG_FPERROR; break;
514 }
515 break;
516 case SIGBUS:
517 switch (info->si_code) {
518 case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
519 case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
520 default: sig_code = SUBSIG_BUSTIMEOUT; break;
521 }
522 break;
523 case SIGEMT:
524 switch (info->si_code) {
525 case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
526 }
527 break;
528 case SIGSYS:
529 if (info->si_code == (__SI_FAULT|0x100)) {
530 /* See sys_sunos.c */
531 sig_code = info->si_trapno;
532 break;
533 }
534 default:
535 sig_address = NULL;
536 }
537 }
538 err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
539 err |= __put_user(sig_code, &sframep->sig_code);
540 err |= __put_user(sc, &sframep->sig_scptr);
541 if (err)
542 goto sigsegv;
543
544 regs->u_regs[UREG_FP] = (unsigned long) sframep;
545 regs->pc = (unsigned long) sa->sa_handler;
546 regs->npc = (regs->pc + 4);
547 return;
548
549sigill_and_return:
550 do_exit(SIGILL);
551sigsegv:
552 force_sigsegv(signr, current);
553}
554
555
556static inline int
557save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
558{
559 int err = 0;
560#ifdef CONFIG_SMP
561 if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
562 put_psr(get_psr() | PSR_EF);
563 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
564 &current->thread.fpqueue[0], &current->thread.fpqdepth);
565 regs->psr &= ~(PSR_EF);
566 clear_tsk_thread_flag(current, TIF_USEDFPU);
567 }
568#else
569 if (current == last_task_used_math) {
570 put_psr(get_psr() | PSR_EF);
571 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
572 &current->thread.fpqueue[0], &current->thread.fpqdepth);
573 last_task_used_math = NULL;
574 regs->psr &= ~(PSR_EF);
575 }
576#endif
577 err |= __copy_to_user(&fpu->si_float_regs[0],
578 &current->thread.float_regs[0],
579 (sizeof(unsigned long) * 32));
580 err |= __put_user(current->thread.fsr, &fpu->si_fsr);
581 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
582 if (current->thread.fpqdepth != 0)
583 err |= __copy_to_user(&fpu->si_fpqueue[0],
584 &current->thread.fpqueue[0],
585 ((sizeof(unsigned long) +
586 (sizeof(unsigned long *)))*16));
587 clear_used_math();
588 return err;
589}
590
/* Build a Linux-native signal frame (struct new_signal_frame) on the
 * user stack and aim the task at its handler.  The FPU area is only
 * included when the task has used the FPU.  Invalid frame pointers or
 * pending saved windows exit with SIGILL; user-copy faults force
 * SIGSEGV.
 */
static inline void
new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
		int signo, sigset_t *oldset)
{
	struct new_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();

	sigframe_size = NF_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct new_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill_and_return;

	if (current_thread_info()->w_saved != 0)
		goto sigill_and_return;

	/* 2. Save the current process state */
	err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));

	err |= __put_user(0, &sf->extra_size);

	if (used_math()) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	err |= __put_user(oldset->sig[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	/* Copy the caller's register window to the frame base. */
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* %i7 + 8 (the usual return offset) must land on insns[0]. */
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
668
/* Build an RT (SA_SIGINFO) signal frame on the user stack and redirect
 * the task to its handler.
 *
 * Unlike new_setup_frame(), the CPU state is stored piecewise into
 * sf->regs, full siginfo is copied out, and sigaltstack state is
 * recorded.  All user-space stores accumulate into 'err'.
 */
static inline void
new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
		   int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size;
	unsigned int psr;
	int err;

	synchronize_user_stack();
	/* FPU save area is only present when the task used the FPU. */
	sigframe_size = RT_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);
	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;
	if (current_thread_info()->w_saved != 0)
		goto sigill;

	err = __put_user(regs->pc, &sf->regs.pc);
	err |= __put_user(regs->npc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = regs->psr;
	/* Record FPU-enable in the saved PSR when FPU state is saved. */
	if (used_math())
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
	err |= __put_user(0, &sf->extra_size);

	if (psr & PSR_EF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	/* RT frames store the whole sigset, not split like old frames. */
	err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	/* Copy the caller's register window so the frame links into the
	 * user stack correctly. */
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));

	err |= copy_siginfo_to_user(&sf->info, info);

	if (err)
		goto sigsegv;

	/* Handler arguments: signo, siginfo pointer, saved-regs pointer. */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	/* Return path: the caller-supplied restorer, or an inline
	 * sigreturn trampoline written onto the frame itself. */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* %i7 two insns before the trampoline: ret lands on insns[0]. */
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
751
/* Setup a Solaris stack frame */
/* Build an SVR4/Solaris-style signal frame: siginfo + ucontext +
 * saved register windows, then point the task at its handler.
 * All user-space stores accumulate into 'err' and are checked in bulk.
 */
static inline void
setup_svr4_frame(struct sigaction *sa, unsigned long pc, unsigned long npc,
		 struct pt_regs *regs, int signr, sigset_t *oldset)
{
	svr4_signal_frame_t __user *sfp;
	svr4_gregset_t __user *gr;
	svr4_siginfo_t __user *si;
	svr4_mcontext_t __user *mc;
	svr4_gwindows_t __user *gw;
	svr4_ucontext_t __user *uc;
	svr4_sigset_t setv;
	struct thread_info *tp = current_thread_info();
	int window = 0, err;

	synchronize_user_stack();
	sfp = (svr4_signal_frame_t __user *)
		get_sigframe(sa, regs, SVR4_SF_ALIGNED + sizeof(struct reg_window));

	if (invalid_frame_pointer(sfp, sizeof(*sfp)))
		goto sigill_and_return;

	/* Start with a clean frame pointer and fill it */
	err = __clear_user(sfp, sizeof(*sfp));

	/* Setup convenience variables */
	si = &sfp->si;
	uc = &sfp->uc;
	gw = &sfp->gw;
	mc = &uc->mcontext;
	gr = &mc->greg;

	/* FIXME: where am I supposed to put this?
	 * sc->sigc_onstack = old_status;
	 * anyways, it does not look like it is used for anything at all.
	 */
	/* Translate the kernel sigset into the SVR4 4-word layout. */
	setv.sigbits[0] = oldset->sig[0];
	setv.sigbits[1] = oldset->sig[1];
	if (_NSIG_WORDS >= 4) {
		setv.sigbits[2] = oldset->sig[2];
		setv.sigbits[3] = oldset->sig[3];
		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
	} else
		err |= __copy_to_user(&uc->sigmask, &setv,
				      2 * sizeof(unsigned int));

	/* Store registers */
	err |= __put_user(regs->pc, &((*gr)[SVR4_PC]));
	err |= __put_user(regs->npc, &((*gr)[SVR4_NPC]));
	err |= __put_user(regs->psr, &((*gr)[SVR4_PSR]));
	err |= __put_user(regs->y, &((*gr)[SVR4_Y]));

	/* Copy g[1..7] and o[0..7] registers */
	err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1],
			      sizeof(long) * 7);
	err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0],
			      sizeof(long) * 8);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
	err |= __put_user(current->sas_ss_size, &uc->stack.size);

	/* Save the currently window file: */

	/* 1. Link sfp->uc->gwins to our windows */
	err |= __put_user(gw, &mc->gwin);

	/* 2. Number of windows to restore at setcontext(): */
	err |= __put_user(tp->w_saved, &gw->count);

	/* 3. Save each valid window
	 *    Currently, it makes a copy of the windows from the kernel copy.
	 *    David's code for SunOS, makes the copy but keeps the pointer to
	 *    the kernel.  My version makes the pointer point to a userland
	 *    copy of those.  Mhm, I wonder if I shouldn't just ignore those
	 *    on setcontext and use those that are on the kernel, the signal
	 *    handler should not be modyfing those, mhm.
	 *
	 *    These windows are just used in case synchronize_user_stack failed
	 *    to flush the user windows.
	 */
	for (window = 0; window < tp->w_saved; window++) {
		err |= __put_user((int __user *) &(gw->win[window]), &gw->winptr[window]);
		err |= __copy_to_user(&gw->win[window],
				      &tp->reg_window[window],
				      sizeof(svr4_rwindow_t));
		err |= __put_user(0, gw->winptr[window]);
	}

	/* 4. We just pay attention to the gw->count field on setcontext */
	tp->w_saved = 0; /* So process is allowed to execute. */

	/* Setup the signal information.  Solaris expects a bunch of
	 * information to be passed to the signal handler, we don't provide
	 * that much currently, should use siginfo.
	 */
	err |= __put_user(signr, &si->siginfo.signo);
	err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sfp;
	regs->pc = (unsigned long) sa->sa_handler;
	regs->npc = (regs->pc + 4);

	/* Arguments passed to signal handler */
	if (regs->u_regs[14]){
		/* %o6/%sp (reg 14): also mirror handler args into the
		 * caller's register window on the user stack. */
		struct reg_window __user *rw = (struct reg_window __user *)
			regs->u_regs[14];

		err |= __put_user(signr, &rw->ins[0]);
		err |= __put_user(si, &rw->ins[1]);
		err |= __put_user(uc, &rw->ins[2]);
		err |= __put_user(sfp, &rw->ins[6]);	/* frame pointer */
		if (err)
			goto sigsegv;

		regs->u_regs[UREG_I0] = signr;
		regs->u_regs[UREG_I1] = (unsigned long) si;
		regs->u_regs[UREG_I2] = (unsigned long) uc;
	}
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}
881
/* Fill *uc with the caller's current context (blocked-signal mask,
 * registers, sigaltstack) in SVR4 layout.  Register windows are not
 * copied; they were already pushed to the user stack by
 * synchronize_user_stack().  Returns 0 or -EFAULT.
 */
asmlinkage int svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	svr4_mcontext_t __user *mc;
	svr4_sigset_t setv;
	int err = 0;

	synchronize_user_stack();

	/* Unsaved windows remaining means the flush failed: give up. */
	if (current_thread_info()->w_saved)
		return -EFAULT;

	err = clear_user(uc, sizeof(*uc));
	if (err)
		return -EFAULT;

	/* Setup convenience variables */
	mc = &uc->mcontext;
	gr = &mc->greg;

	/* Translate the blocked mask into the SVR4 4-word layout. */
	setv.sigbits[0] = current->blocked.sig[0];
	setv.sigbits[1] = current->blocked.sig[1];
	if (_NSIG_WORDS >= 4) {
		setv.sigbits[2] = current->blocked.sig[2];
		setv.sigbits[3] = current->blocked.sig[3];
		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
	} else
		err |= __copy_to_user(&uc->sigmask, &setv,
				      2 * sizeof(unsigned int));

	/* Store registers */
	err |= __put_user(regs->pc, &uc->mcontext.greg[SVR4_PC]);
	err |= __put_user(regs->npc, &uc->mcontext.greg[SVR4_NPC]);
	err |= __put_user(regs->psr, &uc->mcontext.greg[SVR4_PSR]);
	err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);

	/* Copy g[1..7] and o[0..7] registers */
	err |= __copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs[UREG_G1],
			      sizeof(uint) * 7);
	err |= __copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs[UREG_I0],
			      sizeof(uint) * 8);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
	err |= __put_user(current->sas_ss_size, &uc->stack.size);

	/* The register file is not saved
	 * we have already stuffed all of it with sync_user_stack
	 */
	return (err ? -EFAULT : 0);
}
934
/* Set the context for a svr4 application, this is Solaris way to sigreturn */
/* Validates the user-supplied ucontext, restores the signal mask,
 * sigaltstack, PC/nPC, condition codes and the g/o registers from it.
 * Returns 0, or -EFAULT after raising SIGSEGV on a bad context.
 */
asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	unsigned long pc, npc, psr;
	mm_segment_t old_fs;
	sigset_t set;
	svr4_sigset_t setv;
	int err;
	stack_t st;

	/* Fixme: restore windows, or is this already taken care of in
	 * svr4_setup_frame when sync_user_windows is done?
	 */
	flush_user_windows();

	if (current_thread_info()->w_saved)
		goto sigsegv_and_return;

	/* The ucontext pointer must be word-aligned and fully mapped. */
	if (((unsigned long) c) & 3)
		goto sigsegv_and_return;

	if (!__access_ok((unsigned long)c, sizeof(*c)))
		goto sigsegv_and_return;

	/* Check for valid PC and nPC */
	gr = &c->mcontext.greg;
	err = __get_user(pc, &((*gr)[SVR4_PC]));
	err |= __get_user(npc, &((*gr)[SVR4_NPC]));

	if ((pc | npc) & 3)
		goto sigsegv_and_return;

	/* Retrieve information from passed ucontext */
	/* note that nPC is ored a 1, this is used to inform entry.S */
	/* that we don't want it to mess with our PC and nPC */

	/* This is pretty much atomic, no amount locking would prevent
	 * the races which exist anyways.
	 */
	err |= __copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));

	err |= __get_user(st.ss_sp, &c->stack.sp);
	err |= __get_user(st.ss_flags, &c->stack.flags);
	err |= __get_user(st.ss_size, &c->stack.size);

	if (err)
		goto sigsegv_and_return;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	/* 'st' lives in kernel space, so widen the address-limit check
	 * around do_sigaltstack() and restore it immediately after. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL,
		       regs->u_regs[UREG_I6]);
	set_fs(old_fs);

	/* Rebuild the kernel sigset from the SVR4 layout and install it. */
	set.sig[0] = setv.sigbits[0];
	set.sig[1] = setv.sigbits[1];
	if (_NSIG_WORDS >= 4) {
		set.sig[2] = setv.sigbits[2];
		set.sig[3] = setv.sigbits[3];
	}
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->pc = pc;
	regs->npc = npc | 1;
	err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
	err |= __get_user(psr, &((*gr)[SVR4_PSR]));
	/* Only the condition codes are taken from the user PSR. */
	regs->psr &= ~(PSR_ICC);
	regs->psr |= (psr & PSR_ICC);

	/* Restore g[1..7] and o[0..7] registers */
	err |= __copy_from_user(&regs->u_regs[UREG_G1], &(*gr)[SVR4_G1],
				sizeof(long) * 7);
	err |= __copy_from_user(&regs->u_regs[UREG_I0], &(*gr)[SVR4_O0],
				sizeof(long) * 8);
	return (err ? -EFAULT : 0);

sigsegv_and_return:
	force_sig(SIGSEGV, current);
	return -EFAULT;
}
1021
1022static inline void
1023handle_signal(unsigned long signr, struct k_sigaction *ka,
1024 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
1025 int svr4_signal)
1026{
1027 if (svr4_signal)
1028 setup_svr4_frame(&ka->sa, regs->pc, regs->npc, regs, signr, oldset);
1029 else {
1030 if (ka->sa.sa_flags & SA_SIGINFO)
1031 new_setup_rt_frame(ka, regs, signr, oldset, info);
1032 else if (current->thread.new_signal)
1033 new_setup_frame(ka, regs, signr, oldset);
1034 else
1035 setup_frame(&ka->sa, regs, signr, oldset, info);
1036 }
1037 if (!(ka->sa.sa_flags & SA_NOMASK)) {
1038 spin_lock_irq(&current->sighand->siglock);
1039 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
1040 sigaddset(&current->blocked, signr);
1041 recalc_sigpending();
1042 spin_unlock_irq(&current->sighand->siglock);
1043 }
1044}
1045
1046static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
1047 struct sigaction *sa)
1048{
1049 switch(regs->u_regs[UREG_I0]) {
1050 case ERESTART_RESTARTBLOCK:
1051 case ERESTARTNOHAND:
1052 no_system_call_restart:
1053 regs->u_regs[UREG_I0] = EINTR;
1054 regs->psr |= PSR_C;
1055 break;
1056 case ERESTARTSYS:
1057 if (!(sa->sa_flags & SA_RESTART))
1058 goto no_system_call_restart;
1059 /* fallthrough */
1060 case ERESTARTNOINTR:
1061 regs->u_regs[UREG_I0] = orig_i0;
1062 regs->pc -= 4;
1063 regs->npc -= 4;
1064 }
1065}
1066
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
/* Dequeue one pending signal and deliver it (returns 1), or — when no
 * handler runs — arrange for an interrupted system call to be restarted
 * and return 0.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct sparc_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;

	/*
	 * XXX Disable svr4 signal handling until solaris emulation works.
	 * It is buggy - Anton
	 */
#define SVR4_SIGNAL_BROKEN 1
#ifdef SVR4_SIGNAL_BROKEN
	int svr4_signal = 0;
#else
	int svr4_signal = current->personality == PER_SVR4;
#endif

	/* The cookie lets ptrace_signal_deliver() redo the restart
	 * bookkeeping if a debugger intervenes. */
	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		if (cookie.restart_syscall)
			syscall_restart(cookie.orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset,
			      regs, svr4_signal);
		return 1;
	}
	/* No handler ran: restart interrupted syscalls transparently. */
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->pc -= 4;
		regs->npc -= 4;
	}
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		/* Restart via restart_syscall(2): syscall number in %g1. */
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->pc -= 4;
		regs->npc -= 4;
	}
	return 0;
}
1121
/* Implement the old SunOS-style sigstack(2): optionally report the
 * current alternate signal stack into *ossptr, then optionally install
 * the one described by *ssptr.  Returns 0, -EFAULT on a faulting
 * pointer, or -EPERM when asked to switch stacks while running on the
 * current alternate stack.
 */
asmlinkage int
do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
		unsigned long sp)
{
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		char *ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;
		/* If the current stack was set with sigaltstack, don't
		   swap stacks while we are on it. */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		   track onstack-ness, but rather calculate it, we must
		   presume a size.  Ho hum this interface is lossy. */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}
	ret = 0;
out:
	return ret;
}
1158
1159void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
1160{
1161 struct sparc_deliver_cookie *cp = cookie;
1162
1163 if (cp->restart_syscall &&
1164 (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
1165 regs->u_regs[UREG_I0] == ERESTARTSYS ||
1166 regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
1167 /* replay the system call when we are done */
1168 regs->u_regs[UREG_I0] = cp->orig_i0;
1169 regs->pc -= 4;
1170 regs->npc -= 4;
1171 cp->restart_syscall = 0;
1172 }
1173
1174 if (cp->restart_syscall &&
1175 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
1176 regs->u_regs[UREG_G1] = __NR_restart_syscall;
1177 regs->pc -= 4;
1178 regs->npc -= 4;
1179 cp->restart_syscall = 0;
1180 }
1181}
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
new file mode 100644
index 000000000000..c6e721d8f477
--- /dev/null
+++ b/arch/sparc/kernel/smp.c
@@ -0,0 +1,295 @@
1/* smp.c: Sparc SMP support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
6 */
7
8#include <asm/head.h>
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/threads.h>
13#include <linux/smp.h>
14#include <linux/smp_lock.h>
15#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
22#include <linux/cache.h>
23#include <linux/delay.h>
24
25#include <asm/ptrace.h>
26#include <asm/atomic.h>
27
28#include <asm/irq.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/pgtable.h>
32#include <asm/oplib.h>
33#include <asm/cacheflush.h>
34#include <asm/tlbflush.h>
35#include <asm/cpudata.h>
36
/* Set once all processors have been booted and may accept cross-calls. */
volatile int smp_processors_ready = 0;
int smp_num_cpus = 1;
/* Each secondary marks its slot here once it has called in at boot. */
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
/* NOTE(review): presumably hw mid <-> logical cpu translation tables —
 * confirm against the sun4m/sun4d bring-up code. */
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */

/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;

/* Count of inter-processor interrupts received. */
volatile unsigned long ipi_count;

volatile int smp_process_available=0;
volatile int smp_commenced = 0;
64
65void __init smp_store_cpu_info(int id)
66{
67 int cpu_node;
68
69 cpu_data(id).udelay_val = loops_per_jiffy;
70
71 cpu_find_by_mid(id, &cpu_node);
72 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
73 "clock-frequency", 0);
74 cpu_data(id).prom_node = cpu_node;
75 cpu_data(id).mid = cpu_get_hwmid(cpu_node);
76 if (cpu_data(id).mid < 0)
77 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
78}
79
/* Generic-SMP hook called after all CPUs are booted; nothing
 * platform-specific left to do here. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
83
/* Fatal path taken if a secondary CPU's idle loop ever returns. */
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
89
90struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
91
92void __init smp_boot_cpus(void)
93{
94 extern void smp4m_boot_cpus(void);
95 extern void smp4d_boot_cpus(void);
96
97 if (sparc_cpu_model == sun4m)
98 smp4m_boot_cpus();
99 else
100 smp4d_boot_cpus();
101}
102
/* Ask 'cpu' to reschedule.  Intentionally empty on sparc32. */
void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}
107
/* Stop all other CPUs (e.g. for panic/reboot).  Not implemented. */
void smp_send_stop(void)
{
}
111
/* Flush the entire cache: cross-call every other CPU, then do the
 * local flush. */
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}
117
/* Flush the entire TLB: cross-call every other CPU, then do the
 * local flush. */
void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
123
/* Flush the whole cache for 'mm' on every CPU that has run it, then
 * locally.  No-op for an mm that has no MMU context yet. */
void smp_flush_cache_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		/* Cross-call only the *other* CPUs in mm's CPU mask. */
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}
134
/* Flush all TLB entries for 'mm' on every CPU that has run it, then
 * locally.  If this task is now the mm's only user, shrink cpu_vm_mask
 * to just this CPU so future flushes can skip the cross-call. */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
		}
		local_flush_tlb_mm(mm);
	}
}
148
149void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
150 unsigned long end)
151{
152 struct mm_struct *mm = vma->vm_mm;
153
154 if (mm->context != NO_CONTEXT) {
155 cpumask_t cpu_mask = mm->cpu_vm_mask;
156 cpu_clear(smp_processor_id(), cpu_mask);
157 if (!cpus_empty(cpu_mask))
158 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
159 local_flush_cache_range(vma, start, end);
160 }
161}
162
163void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
164 unsigned long end)
165{
166 struct mm_struct *mm = vma->vm_mm;
167
168 if (mm->context != NO_CONTEXT) {
169 cpumask_t cpu_mask = mm->cpu_vm_mask;
170 cpu_clear(smp_processor_id(), cpu_mask);
171 if (!cpus_empty(cpu_mask))
172 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
173 local_flush_tlb_range(vma, start, end);
174 }
175}
176
177void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
178{
179 struct mm_struct *mm = vma->vm_mm;
180
181 if(mm->context != NO_CONTEXT) {
182 cpumask_t cpu_mask = mm->cpu_vm_mask;
183 cpu_clear(smp_processor_id(), cpu_mask);
184 if (!cpus_empty(cpu_mask))
185 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
186 local_flush_cache_page(vma, page);
187 }
188}
189
190void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
191{
192 struct mm_struct *mm = vma->vm_mm;
193
194 if(mm->context != NO_CONTEXT) {
195 cpumask_t cpu_mask = mm->cpu_vm_mask;
196 cpu_clear(smp_processor_id(), cpu_mask);
197 if (!cpus_empty(cpu_mask))
198 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
199 local_flush_tlb_page(vma, page);
200 }
201}
202
/* Handler for the reschedule IPI: just flag the current task to
 * reschedule on its way back out. */
void smp_reschedule_irq(void)
{
	set_need_resched();
}
207
/* Write a page's cache lines back to RAM on all CPUs (the #if 1 block
 * keeps the cross-call despite the note below) and then locally. */
void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the one's
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}
221
222void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
223{
224 cpumask_t cpu_mask = mm->cpu_vm_mask;
225 cpu_clear(smp_processor_id(), cpu_mask);
226 if (!cpus_empty(cpu_mask))
227 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
228 local_flush_sig_insns(mm, insn_addr);
229}
230
231extern unsigned int lvl14_resolution;
232
233/* /proc/profile writes can call this, don't __init it please. */
234static DEFINE_SPINLOCK(prof_setup_lock);
235
236int setup_profiling_timer(unsigned int multiplier)
237{
238 int i;
239 unsigned long flags;
240
241 /* Prevent level14 ticker IRQ flooding. */
242 if((!multiplier) || (lvl14_resolution / multiplier) < 500)
243 return -EINVAL;
244
245 spin_lock_irqsave(&prof_setup_lock, flags);
246 for(i = 0; i < NR_CPUS; i++) {
247 if (cpu_possible(i))
248 load_profile_irq(i, lvl14_resolution / multiplier);
249 prof_multiplier(i) = multiplier;
250 }
251 spin_unlock_irqrestore(&prof_setup_lock, flags);
252
253 return 0;
254}
255
/* Generic-SMP hook; actual CPU discovery and boot happen in
 * smp_boot_cpus() on this architecture. */
void __init smp_prepare_cpus(unsigned int maxcpus)
{
}
259
/* Record the boot CPU's hardware id and mark it online and present. */
void __devinit smp_prepare_boot_cpu(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
266
/* Hotplug-style CPU bring-up is unsupported here; panic() does not
 * return, which satisfies the int return type. */
int __devinit __cpu_up(unsigned int cpu)
{
	panic("smp doesn't work\n");
}
271
272void smp_bogo(struct seq_file *m)
273{
274 int i;
275
276 for (i = 0; i < NR_CPUS; i++) {
277 if (cpu_online(i))
278 seq_printf(m,
279 "Cpu%dBogo\t: %lu.%02lu\n",
280 i,
281 cpu_data(i).udelay_val/(500000/HZ),
282 (cpu_data(i).udelay_val/(5000/HZ))%100);
283 }
284}
285
286void smp_info(struct seq_file *m)
287{
288 int i;
289
290 seq_printf(m, "State:\n");
291 for (i = 0; i < NR_CPUS; i++) {
292 if (cpu_online(i))
293 seq_printf(m, "CPU%d\t\t: online\n", i);
294 }
295}
diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c
new file mode 100644
index 000000000000..e84f815e6903
--- /dev/null
+++ b/arch/sparc/kernel/sparc-stub.c
@@ -0,0 +1,724 @@
1/* $Id: sparc-stub.c,v 1.28 2001/10/30 04:54:21 davem Exp $
2 * sparc-stub.c: KGDB support for the Linux kernel.
3 *
4 * Modifications to run under Linux
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * This file originally came from the gdb sources, and the
8 * copyright notices have been retained below.
9 */
10
11/****************************************************************************
12
13 THIS SOFTWARE IS NOT COPYRIGHTED
14
15 HP offers the following for use in the public domain. HP makes no
16 warranty with regard to the software or its performance and the
17 user accepts the software "AS IS" with all faults.
18
19 HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
20 TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22
23****************************************************************************/
24
25/****************************************************************************
26 * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
27 *
28 * Module name: remcom.c $
29 * Revision: 1.34 $
30 * Date: 91/03/09 12:29:49 $
31 * Contributor: Lake Stevens Instrument Division$
32 *
33 * Description: low level support for gdb debugger. $
34 *
35 * Considerations: only works on target hardware $
36 *
37 * Written by: Glenn Engel $
38 * ModuleState: Experimental $
39 *
40 * NOTES: See Below $
41 *
42 * Modified for SPARC by Stu Grossman, Cygnus Support.
43 *
44 * This code has been extensively tested on the Fujitsu SPARClite demo board.
45 *
46 * To enable debugger support, two things need to happen. One, a
47 * call to set_debug_traps() is necessary in order to allow any breakpoints
48 * or error conditions to be properly intercepted and reported to gdb.
49 * Two, a breakpoint needs to be generated to begin communication. This
50 * is most easily accomplished by a call to breakpoint(). Breakpoint()
51 * simulates a breakpoint by executing a trap #1.
52 *
53 *************
54 *
55 * The following gdb commands are supported:
56 *
57 * command function Return value
58 *
59 * g return the value of the CPU registers hex data or ENN
60 * G set the value of the CPU registers OK or ENN
61 *
62 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
63 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
64 *
65 * c Resume at current address SNN ( signal NN)
66 * cAA..AA Continue at address AA..AA SNN
67 *
68 * s Step one instruction SNN
69 * sAA..AA Step one instruction from AA..AA SNN
70 *
71 * k kill
72 *
73 * ? What was the last sigval ? SNN (signal NN)
74 *
75 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
76 * baud rate
77 *
78 * All commands and responses are sent with a packet which includes a
79 * checksum. A packet consists of
80 *
81 * $<packet info>#<checksum>.
82 *
83 * where
84 * <packet info> :: <characters representing the command or response>
85 * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
86 *
87 * When a packet is received, it is first acknowledged with either '+' or '-'.
88 * '+' indicates a successful transfer. '-' indicates a failed transfer.
89 *
90 * Example:
91 *
92 * Host: Reply:
93 * $m0,10#2a +$00010203040506070809101112131415#42
94 *
95 ****************************************************************************/
96
97#include <linux/kernel.h>
98#include <linux/string.h>
99#include <linux/mm.h>
100#include <linux/smp.h>
101#include <linux/smp_lock.h>
102
103#include <asm/system.h>
104#include <asm/signal.h>
105#include <asm/oplib.h>
106#include <asm/head.h>
107#include <asm/traps.h>
108#include <asm/vac-ops.h>
109#include <asm/kgdb.h>
110#include <asm/pgalloc.h>
111#include <asm/pgtable.h>
112#include <asm/cacheflush.h>
113
114/*
115 *
116 * external low-level support routines
117 */
118
119extern void putDebugChar(char); /* write a single character */
120extern char getDebugChar(void); /* read and return a single char */
121
122/*
123 * BUFMAX defines the maximum number of characters in inbound/outbound buffers
124 * at least NUMREGBYTES*2 are needed for register packets
125 */
126#define BUFMAX 2048
127
128static int initialized; /* !0 means we've been initialized */
129
130static const char hexchars[]="0123456789abcdef";
131
132#define NUMREGS 72
133
134/* Number of bytes of registers. */
135#define NUMREGBYTES (NUMREGS * 4)
136enum regnames {G0, G1, G2, G3, G4, G5, G6, G7,
137 O0, O1, O2, O3, O4, O5, SP, O7,
138 L0, L1, L2, L3, L4, L5, L6, L7,
139 I0, I1, I2, I3, I4, I5, FP, I7,
140
141 F0, F1, F2, F3, F4, F5, F6, F7,
142 F8, F9, F10, F11, F12, F13, F14, F15,
143 F16, F17, F18, F19, F20, F21, F22, F23,
144 F24, F25, F26, F27, F28, F29, F30, F31,
145 Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR };
146
147
148extern void trap_low(void); /* In arch/sparc/kernel/entry.S */
149
/* Read the sun4c page table entry mapping 'addr' by loading a word
 * from the ASI_PTE alternate address space ("lda").
 */
unsigned long get_sun4cpte(unsigned long addr)
{
	unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
				"=r" (entry) :
				"r" (addr), "i" (ASI_PTE));
	return entry;
}
159
/* Read the sun4c segment map byte for 'addr' by loading an unsigned
 * byte ("lduba") from the ASI_SEGMAP alternate address space.
 */
unsigned long get_sun4csegmap(unsigned long addr)
{
	unsigned long entry;

	__asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
				"=r" (entry) :
				"r" (addr), "i" (ASI_SEGMAP));
	return entry;
}
169
170#if 0
171/* Have to sort this out. This cannot be done after initialization. */
172static void flush_cache_all_nop(void) {}
173#endif
174
175/* Place where we save old trap entries for restoration */
176struct tt_entry kgdb_savettable[256];
177typedef void (*trapfunc_t)(void);
178
/* Helper routine for manipulation of kgdb_savettable.
 * Copies one trap-table slot field by field; each slot holds four
 * instruction words (see exceptionHandler below).
 */
static inline void copy_ttentry(struct tt_entry *src, struct tt_entry *dest)
{
	dest->inst_one = src->inst_one;
	dest->inst_two = src->inst_two;
	dest->inst_three = src->inst_three;
	dest->inst_four = src->inst_four;
}
187
188/* Initialize the kgdb_savettable so that debugging can commence */
189static void eh_init(void)
190{
191 int i;
192
193 for(i=0; i < 256; i++)
194 copy_ttentry(&sparc_ttable[i], &kgdb_savettable[i]);
195}
196
/* Install an exception handler for kgdb.
 * Each trap-table slot is four instruction words; we patch in:
 * branch to the handler, read PSR into %l0, then two nops.
 * inst_one is written first -- NOTE(review): the remaining writes
 * assume no trap of this type fires mid-patch (caller runs with
 * IRQs disabled, see set_debug_traps).
 */
static void exceptionHandler(int tnum, trapfunc_t trap_entry)
{
	unsigned long te_addr = (unsigned long) trap_entry;

	/* Make new vector */
	sparc_ttable[tnum].inst_one =
		SPARC_BRANCH((unsigned long) te_addr,
			     (unsigned long) &sparc_ttable[tnum].inst_one);
	sparc_ttable[tnum].inst_two = SPARC_RD_PSR_L0;
	sparc_ttable[tnum].inst_three = SPARC_NOP;
	sparc_ttable[tnum].inst_four = SPARC_NOP;
}
210
/* Convert a hex digit character (0-9, a-f, A-F) to its numeric value;
 * return -1 for anything that is not a hex digit.
 */
static int
hex(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
223
/* scan for the sequence $<data>#<checksum>
 * Blocks on getDebugChar() until a well-formed packet arrives; NAKs
 * ('-') bad checksums so gdb retransmits, ACKs ('+') good ones.
 * 'buffer' must be at least BUFMAX bytes; the payload is returned
 * NUL-terminated.
 */
static void
getpacket(char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	int i;
	int count;
	unsigned char ch;

	do {
		/* wait around for the start character, ignore all other characters */
		while ((ch = (getDebugChar() & 0x7f)) != '$') ;

		checksum = 0;
		xmitcsum = -1;	/* guarantees a retry unless a '#' + valid csum arrives */

		count = 0;

		/* now, read until a # or end of buffer is found */
		while (count < BUFMAX) {
			ch = getDebugChar() & 0x7f;
			if (ch == '#')
				break;
			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}

		/* Oversized packet: discard and wait for a retransmission. */
		if (count >= BUFMAX)
			continue;

		buffer[count] = 0;

		if (ch == '#') {
			/* Checksum is the two hex digits after '#'. */
			xmitcsum = hex(getDebugChar() & 0x7f) << 4;
			xmitcsum |= hex(getDebugChar() & 0x7f);
			if (checksum != xmitcsum)
				putDebugChar('-');	/* failed checksum */
			else {
				putDebugChar('+'); /* successful transfer */
				/* if a sequence char is present, reply the ID */
				if (buffer[2] == ':') {
					putDebugChar(buffer[0]);
					putDebugChar(buffer[1]);
					/* remove sequence chars from buffer */
					count = strlen(buffer);
					for (i=3; i <= count; i++)
						buffer[i-3] = buffer[i];
				}
			}
		}
	} while (checksum != xmitcsum);
}
278
279/* send the packet in buffer. */
280
281static void
282putpacket(unsigned char *buffer)
283{
284 unsigned char checksum;
285 int count;
286 unsigned char ch, recv;
287
288 /* $<packet info>#<checksum>. */
289 do {
290 putDebugChar('$');
291 checksum = 0;
292 count = 0;
293
294 while ((ch = buffer[count])) {
295 putDebugChar(ch);
296 checksum += ch;
297 count += 1;
298 }
299
300 putDebugChar('#');
301 putDebugChar(hexchars[checksum >> 4]);
302 putDebugChar(hexchars[checksum & 0xf]);
303 recv = getDebugChar();
304 } while ((recv & 0x7f) != '+');
305}
306
307static char remcomInBuffer[BUFMAX];
308static char remcomOutBuffer[BUFMAX];
309
/* Convert the memory pointed to by mem into hex, placing result in buf.
 * Return a pointer to the last char put in buf (null), in case of mem fault,
 * return 0.
 */
static unsigned char *
mem2hex(char *mem, char *buf, int count)
{
	unsigned char ch;

	while (count-- > 0) {
		/* This assembler code is basically: ch = *mem++;
		 * except that we use the SPARC/Linux exception table
		 * mechanism (see how "fixup" works in kernel_mna_trap_fault)
		 * to arrange for a "return 0" upon a memory fault
		 */
		__asm__(
			"\n1:\n\t"
			"ldub [%0], %1\n\t"
			"inc %0\n\t"
			".section .fixup,#alloc,#execinstr\n\t"
			".align 4\n"
			"2:\n\t"
			"retl\n\t"
			" mov 0, %%o0\n\t"
			".section __ex_table, #alloc\n\t"
			".align 4\n\t"
			".word 1b, 2b\n\t"
			".text\n"
			: "=r" (mem), "=r" (ch) : "0" (mem));
		/* Emit the byte as two lowercase hex digits. */
		*buf++ = hexchars[ch >> 4];
		*buf++ = hexchars[ch & 0xf];
	}

	*buf = 0;	/* NUL-terminate so callers can treat buf as a string */
	return buf;
}
347
/* convert the hex array pointed to by buf into binary to be placed in mem
 * return a pointer to the character AFTER the last byte written.
 * On a memory fault the fixup path returns 0 (see mem2hex above).
*/
static char *
hex2mem(char *buf, char *mem, int count)
{
	int i;
	unsigned char ch;

	for (i=0; i<count; i++) {

		/* Each output byte is two input hex digits, high nibble first. */
		ch = hex(*buf++) << 4;
		ch |= hex(*buf++);
		/* Assembler code is *mem++ = ch; with return 0 on fault */
		__asm__(
			"\n1:\n\t"
			"stb %1, [%0]\n\t"
			"inc %0\n\t"
			".section .fixup,#alloc,#execinstr\n\t"
			".align 4\n"
			"2:\n\t"
			"retl\n\t"
			" mov 0, %%o0\n\t"
			".section __ex_table, #alloc\n\t"
			".align 4\n\t"
			".word 1b, 2b\n\t"
			".text\n"
			: "=r" (mem) : "r" (ch) , "0" (mem));
	}
	return mem;
}
379
380/* This table contains the mapping between SPARC hardware trap types, and
381 signals, which are primarily what GDB understands. It also indicates
382 which hardware traps we need to commandeer when initializing the stub. */
383
/* Scanned linearly by computeSignal() and set_debug_traps(); both stop
 * at the all-zero sentinel entry.
 */
static struct hard_trap_info
{
	unsigned char tt;		/* Trap type code for SPARC */
	unsigned char signo;		/* Signal that we map this trap into */
} hard_trap_info[] = {
	{SP_TRAP_SBPT, SIGTRAP},	/* ta 1 - Linux/KGDB software breakpoint */
	{0, 0}			/* Must be last */
};
392
393/* Set up exception handlers for tracing and breakpoints */
394
void
set_debug_traps(void)
{
	struct hard_trap_info *ht;
	unsigned long flags;

	/* Patch the live trap table with IRQs off so no trap can land on
	 * a half-written vector.
	 */
	local_irq_save(flags);
#if 0
/* Have to sort this out. This cannot be done after initialization. */
	BTFIXUPSET_CALL(flush_cache_all, flush_cache_all_nop, BTFIXUPCALL_NOP);
#endif

	/* Initialize our copy of the Linux Sparc trap table */
	eh_init();

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++) {
		/* Only if it doesn't destroy our fault handlers */
		if((ht->tt != SP_TRAP_TFLT) &&
		   (ht->tt != SP_TRAP_DFLT))
			exceptionHandler(ht->tt, trap_low);
	}

	/* In case GDB is started before us, ack any packets (presumably
	 * "$?#xx") sitting there.
	 *
	 * I've found this code causes more problems than it solves,
	 * so that's why it's commented out. GDB seems to work fine
	 * now starting either before or after the kernel -bwb
	 */
#if 0
	while((c = getDebugChar()) != '$');
	while((c = getDebugChar()) != '#');
	c = getDebugChar(); /* eat first csum byte */
	c = getDebugChar(); /* eat second csum byte */
	putDebugChar('+'); /* ack it */
#endif

	initialized = 1; /* connect! */
	local_irq_restore(flags);
}
435
436/* Convert the SPARC hardware trap type code to a unix signal number. */
437
438static int
439computeSignal(int tt)
440{
441 struct hard_trap_info *ht;
442
443 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
444 if (ht->tt == tt)
445 return ht->signo;
446
447 return SIGHUP; /* default for things we don't know about */
448}
449
450/*
451 * While we find nice hex chars, build an int.
452 * Return number of chars processed.
453 */
454
/* Parse a run of hex digits at *ptr into *intValue, advancing *ptr past
 * them.  Returns the number of digits consumed (0 if none).
 */
static int
hexToInt(char **ptr, int *intValue)
{
	int digits = 0;
	int nibble;

	*intValue = 0;

	while (**ptr && (nibble = hex(**ptr)) >= 0) {
		*intValue = (*intValue << 4) | nibble;
		digits++;
		(*ptr)++;
	}

	return digits;
}
476
477/*
478 * This function does all command processing for interfacing to gdb. It
479 * returns 1 if you should skip the instruction at the trap address, 0
480 * otherwise.
481 */
482
483extern void breakinst(void);
484
/* Main KGDB trap handler: entered from trap_low with a snapshot of the
 * CPU registers.  Reports the stop to the remote gdb, then services
 * protocol packets in a loop until a 'c' (continue) command resumes
 * the kernel.
 */
void
handle_exception (unsigned long *registers)
{
	int tt;			/* Trap type */
	int sigval;
	int addr;
	int length;
	char *ptr;
	unsigned long *sp;

	/* First, we must force all of the windows to be spilled out */

	asm("save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t");

	lock_kernel();
	if (registers[PC] == (unsigned long)breakinst) {
		/* Skip over breakpoint trap insn */
		registers[PC] = registers[NPC];
		registers[NPC] += 4;
	}

	sp = (unsigned long *)registers[SP];

	/* Trap type field is bits 11:4 of the TBR. */
	tt = (registers[TBR] >> 4) & 0xff;

	/* reply to host that an exception has occurred */
	sigval = computeSignal(tt);
	ptr = remcomOutBuffer;

	/* Build a 'T' stop-reply packet: signal number followed by
	 * regno:value pairs for PC, FP, SP, NPC and O7.
	 */
	*ptr++ = 'T';
	*ptr++ = hexchars[sigval >> 4];
	*ptr++ = hexchars[sigval & 0xf];

	*ptr++ = hexchars[PC >> 4];
	*ptr++ = hexchars[PC & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[PC], ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[FP >> 4];
	*ptr++ = hexchars[FP & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *) (sp + 8 + 6), ptr, 4); /* FP */
	*ptr++ = ';';

	*ptr++ = hexchars[SP >> 4];
	*ptr++ = hexchars[SP & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&sp, ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[NPC >> 4];
	*ptr++ = hexchars[NPC & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[NPC], ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[O7 >> 4];
	*ptr++ = hexchars[O7 & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[O7], ptr, 4);
	*ptr++ = ';';

	*ptr++ = 0;

	putpacket(remcomOutBuffer);

	/* XXX We may want to add some features dealing with poking the
	 * XXX page tables, the real ones on the srmmu, and what is currently
	 * XXX loaded in the sun4/sun4c tlb at this point in time. But this
	 * XXX also required hacking to the gdb sources directly...
	 */

	/* Packet service loop: one gdb command per iteration. */
	while (1) {
		remcomOutBuffer[0] = 0;

		getpacket(remcomInBuffer);
		switch (remcomInBuffer[0]) {
		case '?':	/* report last signal */
			remcomOutBuffer[0] = 'S';
			remcomOutBuffer[1] = hexchars[sigval >> 4];
			remcomOutBuffer[2] = hexchars[sigval & 0xf];
			remcomOutBuffer[3] = 0;
			break;

		case 'd':
			/* toggle debug flag */
			break;

		case 'g': /* return the value of the CPU registers */
		{
			ptr = remcomOutBuffer;
			/* G & O regs */
			ptr = mem2hex((char *)registers, ptr, 16 * 4);
			/* L & I regs */
			ptr = mem2hex((char *) (sp + 0), ptr, 16 * 4);
			/* Floating point */
			memset(ptr, '0', 32 * 8);
			/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
			mem2hex((char *)&registers[Y], (ptr + 32 * 4 * 2), (8 * 4));
		}
			break;

		case 'G': /* set the value of the CPU registers - return OK */
		{
			unsigned long *newsp, psr;

			psr = registers[PSR];

			ptr = &remcomInBuffer[1];
			/* G & O regs */
			hex2mem(ptr, (char *)registers, 16 * 4);
			/* L & I regs */
			hex2mem(ptr + 16 * 4 * 2, (char *) (sp + 0), 16 * 4);
			/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
			hex2mem(ptr + 64 * 4 * 2, (char *)&registers[Y], 8 * 4);

			/* See if the stack pointer has moved. If so,
			 * then copy the saved locals and ins to the
			 * new location. This keeps the window
			 * overflow and underflow routines happy.
			 */

			newsp = (unsigned long *)registers[SP];
			if (sp != newsp)
				sp = memcpy(newsp, sp, 16 * 4);

			/* Don't allow CWP to be modified. */

			if (psr != registers[PSR])
				registers[PSR] = (psr & 0x1f) | (registers[PSR] & ~0x1f);

			strcpy(remcomOutBuffer,"OK");
		}
			break;

		case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
			/* Try to read %x,%x. */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr)
			    && *ptr++ == ','
			    && hexToInt(&ptr, &length)) {
				/* mem2hex returns 0 on a memory fault. */
				if (mem2hex((char *)addr, remcomOutBuffer, length))
					break;

				strcpy (remcomOutBuffer, "E03");
			} else {
				strcpy(remcomOutBuffer,"E01");
			}
			break;

		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
			/* Try to read '%x,%x:'. */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr)
			    && *ptr++ == ','
			    && hexToInt(&ptr, &length)
			    && *ptr++ == ':') {
				if (hex2mem(ptr, (char *)addr, length)) {
					strcpy(remcomOutBuffer, "OK");
				} else {
					strcpy(remcomOutBuffer, "E03");
				}
			} else {
				strcpy(remcomOutBuffer, "E02");
			}
			break;

		case 'c': /* cAA..AA Continue at address AA..AA(optional) */
			/* try to read optional parameter, pc unchanged if no parm */

			ptr = &remcomInBuffer[1];
			if (hexToInt(&ptr, &addr)) {
				registers[PC] = addr;
				registers[NPC] = addr + 4;
			}

/* Need to flush the instruction cache here, as we may have deposited a
 * breakpoint, and the icache probably has no way of knowing that a data ref to
 * some location may have changed something that is in the instruction cache.
 */
			flush_cache_all();
			unlock_kernel();
			return;

		/* kill the program */
		case 'k' :		/* do nothing */
			break;
		case 'r':		/* Reset */
			asm ("call 0\n\t"
			     "nop\n\t");
			break;
		}			/* switch */

		/* reply to the request */
		putpacket(remcomOutBuffer);
	} /* while(1) */
}
702
703/* This function will generate a breakpoint exception. It is used at the
704 beginning of a program to sync up with a debugger and can be used
705 otherwise as a quick means to stop program execution and "break" into
706 the debugger. */
707
/* Trigger a software breakpoint ("ta 1") so control transfers to the
 * kgdb stub.  No-op until set_debug_traps() has installed our trap
 * vectors, since before that the trap would hit the stock table.
 */
void
breakpoint(void)
{
	if (!initialized)
		return;

	/* Again, watch those c-prefixes for ELF kernels */
#if defined(__svr4__) || defined(__ELF__)
	asm(".globl breakinst\n"
	    "breakinst:\n\t"
	    "ta 1\n");
#else
	asm(".globl _breakinst\n"
	    "_breakinst:\n\t"
	    "ta 1\n");
#endif
}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 000000000000..f91b0e8d0dc8
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,334 @@
1/* $Id: sparc_ksyms.c,v 1.107 2001/07/17 16:17:33 anton Exp $
2 * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 */
7
8/* Tell string.h we don't want memcpy etc. as cpp defines */
9#define EXPORT_SYMTAB_STROPS
10#define PROMLIB_INTERNAL
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/smp.h>
16#include <linux/types.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/interrupt.h>
20#include <linux/in6.h>
21#include <linux/spinlock.h>
22#include <linux/mm.h>
23#ifdef CONFIG_PCI
24#include <linux/pci.h>
25#endif
26#include <linux/pm.h>
27#ifdef CONFIG_HIGHMEM
28#include <linux/highmem.h>
29#endif
30
31#include <asm/oplib.h>
32#include <asm/delay.h>
33#include <asm/system.h>
34#include <asm/auxio.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/idprom.h>
39#include <asm/svr4.h>
40#include <asm/head.h>
41#include <asm/smp.h>
42#include <asm/mostek.h>
43#include <asm/ptrace.h>
44#include <asm/user.h>
45#include <asm/uaccess.h>
46#include <asm/checksum.h>
47#ifdef CONFIG_SBUS
48#include <asm/sbus.h>
49#include <asm/dma.h>
50#endif
51#ifdef CONFIG_PCI
52#include <asm/ebus.h>
53#endif
54#include <asm/a.out.h>
55#include <asm/io-unit.h>
56#include <asm/bug.h>
57
58extern spinlock_t rtc_lock;
59
/* NOTE(review): layout mirrors the userland struct pollfd; presumably
 * used by the SunOS/Solaris compat poll emulation -- confirm against
 * the actual users before relying on this.
 */
struct poll {
	int fd;
	short events;
	short revents;
};
65
66extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
67extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
68void _sigpause_common (unsigned int set, struct pt_regs *);
69extern void (*__copy_1page)(void *, const void *);
70extern void __memmove(void *, const void *, __kernel_size_t);
71extern void (*bzero_1page)(void *);
72extern void *__bzero(void *, size_t);
73extern void *__memscan_zero(void *, size_t);
74extern void *__memscan_generic(void *, int, size_t);
75extern int __memcmp(const void *, const void *, __kernel_size_t);
76extern int __strncmp(const char *, const char *, __kernel_size_t);
77
78extern int __ashrdi3(int, int);
79extern int __ashldi3(int, int);
80extern int __lshrdi3(int, int);
81extern int __muldi3(int, int);
82extern int __divdi3(int, int);
83
84extern void dump_thread(struct pt_regs *, struct user *);
85
86/* Private functions with odd calling conventions. */
87extern void ___atomic24_add(void);
88extern void ___atomic24_sub(void);
89extern void ___set_bit(void);
90extern void ___clear_bit(void);
91extern void ___change_bit(void);
92
/* Alias functions whose names begin with "." and export the aliases.
 * The module references will be fixed up by module_frob_arch_sections.
 */
#define DOT_ALIAS2(__ret, __x, __arg1, __arg2) \
	extern __ret __x(__arg1, __arg2) \
	__attribute__((weak, alias("." # __x)));

/* These weak aliases give C-visible names (div, mul, ...) to the
 * dot-prefixed symbols so they can be EXPORT_SYMBOL'd further below.
 * NOTE(review): presumably the ".div"/".mul" etc. targets are the
 * SPARC software integer-arithmetic helpers -- confirm in lib/.
 */
DOT_ALIAS2(int, div, int, int)
DOT_ALIAS2(int, mul, int, int)
DOT_ALIAS2(int, rem, int, int)
DOT_ALIAS2(unsigned, udiv, unsigned, unsigned)
DOT_ALIAS2(unsigned, umul, unsigned, unsigned)
DOT_ALIAS2(unsigned, urem, unsigned, unsigned)

#undef DOT_ALIAS2
108
109/* used by various drivers */
110EXPORT_SYMBOL(sparc_cpu_model);
111EXPORT_SYMBOL(kernel_thread);
112#ifdef CONFIG_DEBUG_SPINLOCK
113#ifdef CONFIG_SMP
114EXPORT_SYMBOL(_do_spin_lock);
115EXPORT_SYMBOL(_do_spin_unlock);
116EXPORT_SYMBOL(_spin_trylock);
117EXPORT_SYMBOL(_do_read_lock);
118EXPORT_SYMBOL(_do_read_unlock);
119EXPORT_SYMBOL(_do_write_lock);
120EXPORT_SYMBOL(_do_write_unlock);
121#endif
122#else
123// XXX find what uses (or used) these.
124// EXPORT_SYMBOL_PRIVATE(_rw_read_enter);
125// EXPORT_SYMBOL_PRIVATE(_rw_read_exit);
126// EXPORT_SYMBOL_PRIVATE(_rw_write_enter);
127#endif
128/* semaphores */
129EXPORT_SYMBOL(__up);
130EXPORT_SYMBOL(__down);
131EXPORT_SYMBOL(__down_trylock);
132EXPORT_SYMBOL(__down_interruptible);
133
134EXPORT_SYMBOL(sparc_valid_addr_bitmap);
135EXPORT_SYMBOL(phys_base);
136EXPORT_SYMBOL(pfn_base);
137
138/* Atomic operations. */
139EXPORT_SYMBOL(___atomic24_add);
140EXPORT_SYMBOL(___atomic24_sub);
141
142/* Bit operations. */
143EXPORT_SYMBOL(___set_bit);
144EXPORT_SYMBOL(___clear_bit);
145EXPORT_SYMBOL(___change_bit);
146
147#ifdef CONFIG_SMP
148/* IRQ implementation. */
149EXPORT_SYMBOL(synchronize_irq);
150
151/* Misc SMP information */
152EXPORT_SYMBOL(__cpu_number_map);
153EXPORT_SYMBOL(__cpu_logical_map);
154#endif
155
156EXPORT_SYMBOL(__udelay);
157EXPORT_SYMBOL(__ndelay);
158EXPORT_SYMBOL(rtc_lock);
159EXPORT_SYMBOL(mostek_lock);
160EXPORT_SYMBOL(mstk48t02_regs);
161#ifdef CONFIG_SUN_AUXIO
162EXPORT_SYMBOL(set_auxio);
163EXPORT_SYMBOL(get_auxio);
164#endif
165EXPORT_SYMBOL(request_fast_irq);
166EXPORT_SYMBOL(io_remap_page_range);
167EXPORT_SYMBOL(io_remap_pfn_range);
168 /* P3: iounit_xxx may be needed, sun4d users */
169/* EXPORT_SYMBOL(iounit_map_dma_init); */
170/* EXPORT_SYMBOL(iounit_map_dma_page); */
171
172#ifndef CONFIG_SMP
173EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
174#else
175EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
176#endif
177EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
178EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
179EXPORT_SYMBOL(BTFIXUP_CALL(__irq_itoa));
180EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
181EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
182EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
183EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
184EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
185EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
186
187#ifdef CONFIG_SBUS
188EXPORT_SYMBOL(sbus_root);
189EXPORT_SYMBOL(dma_chain);
190EXPORT_SYMBOL(sbus_set_sbus64);
191EXPORT_SYMBOL(sbus_alloc_consistent);
192EXPORT_SYMBOL(sbus_free_consistent);
193EXPORT_SYMBOL(sbus_map_single);
194EXPORT_SYMBOL(sbus_unmap_single);
195EXPORT_SYMBOL(sbus_map_sg);
196EXPORT_SYMBOL(sbus_unmap_sg);
197EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
198EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
199EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
200EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
201EXPORT_SYMBOL(sbus_iounmap);
202EXPORT_SYMBOL(sbus_ioremap);
203#endif
204#ifdef CONFIG_PCI
205EXPORT_SYMBOL(ebus_chain);
206EXPORT_SYMBOL(insb);
207EXPORT_SYMBOL(outsb);
208EXPORT_SYMBOL(insw);
209EXPORT_SYMBOL(outsw);
210EXPORT_SYMBOL(insl);
211EXPORT_SYMBOL(outsl);
212EXPORT_SYMBOL(pci_alloc_consistent);
213EXPORT_SYMBOL(pci_free_consistent);
214EXPORT_SYMBOL(pci_map_single);
215EXPORT_SYMBOL(pci_unmap_single);
216EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
217EXPORT_SYMBOL(pci_dma_sync_single_for_device);
218EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
219EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
220EXPORT_SYMBOL(pci_map_sg);
221EXPORT_SYMBOL(pci_unmap_sg);
222EXPORT_SYMBOL(pci_map_page);
223EXPORT_SYMBOL(pci_unmap_page);
224/* Actually, ioremap/iounmap are not PCI specific. But it is ok for drivers. */
225EXPORT_SYMBOL(ioremap);
226EXPORT_SYMBOL(iounmap);
227#endif
228
229/* in arch/sparc/mm/highmem.c */
230#ifdef CONFIG_HIGHMEM
231EXPORT_SYMBOL(kmap_atomic);
232EXPORT_SYMBOL(kunmap_atomic);
233#endif
234
235/* Solaris/SunOS binary compatibility */
236EXPORT_SYMBOL(svr4_setcontext);
237EXPORT_SYMBOL(svr4_getcontext);
238EXPORT_SYMBOL(_sigpause_common);
239
240EXPORT_SYMBOL(dump_thread);
241
242/* prom symbols */
243EXPORT_SYMBOL(idprom);
244EXPORT_SYMBOL(prom_root_node);
245EXPORT_SYMBOL(prom_getchild);
246EXPORT_SYMBOL(prom_getsibling);
247EXPORT_SYMBOL(prom_searchsiblings);
248EXPORT_SYMBOL(prom_firstprop);
249EXPORT_SYMBOL(prom_nextprop);
250EXPORT_SYMBOL(prom_getproplen);
251EXPORT_SYMBOL(prom_getproperty);
252EXPORT_SYMBOL(prom_node_has_property);
253EXPORT_SYMBOL(prom_setprop);
254EXPORT_SYMBOL(saved_command_line);
255EXPORT_SYMBOL(prom_apply_obio_ranges);
256EXPORT_SYMBOL(prom_getname);
257EXPORT_SYMBOL(prom_feval);
258EXPORT_SYMBOL(prom_getbool);
259EXPORT_SYMBOL(prom_getstring);
260EXPORT_SYMBOL(prom_getint);
261EXPORT_SYMBOL(prom_getintdefault);
262EXPORT_SYMBOL(prom_finddevice);
263EXPORT_SYMBOL(romvec);
264EXPORT_SYMBOL(__prom_getchild);
265EXPORT_SYMBOL(__prom_getsibling);
266
267/* sparc library symbols */
268EXPORT_SYMBOL(memchr);
269EXPORT_SYMBOL(memscan);
270EXPORT_SYMBOL(strlen);
271EXPORT_SYMBOL(strnlen);
272EXPORT_SYMBOL(strcpy);
273EXPORT_SYMBOL(strncpy);
274EXPORT_SYMBOL(strcat);
275EXPORT_SYMBOL(strncat);
276EXPORT_SYMBOL(strcmp);
277EXPORT_SYMBOL(strncmp);
278EXPORT_SYMBOL(strchr);
279EXPORT_SYMBOL(strrchr);
280EXPORT_SYMBOL(strpbrk);
281EXPORT_SYMBOL(strstr);
282EXPORT_SYMBOL(page_kernel);
283
284/* Special internal versions of library functions. */
285EXPORT_SYMBOL(__copy_1page);
286EXPORT_SYMBOL(__memcpy);
287EXPORT_SYMBOL(__memset);
288EXPORT_SYMBOL(bzero_1page);
289EXPORT_SYMBOL(__bzero);
290EXPORT_SYMBOL(__memscan_zero);
291EXPORT_SYMBOL(__memscan_generic);
292EXPORT_SYMBOL(__memcmp);
293EXPORT_SYMBOL(__strncmp);
294EXPORT_SYMBOL(__memmove);
295
296/* Moving data to/from userspace. */
297EXPORT_SYMBOL(__copy_user);
298EXPORT_SYMBOL(__strncpy_from_user);
299
300/* Networking helper routines. */
301EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
302EXPORT_SYMBOL(csum_partial);
303
304/* Cache flushing. */
305EXPORT_SYMBOL(sparc_flush_page_to_ram);
306
307/* For when serial stuff is built as modules. */
308EXPORT_SYMBOL(sun_do_break);
309
310EXPORT_SYMBOL(__ret_efault);
311
312EXPORT_SYMBOL(memcmp);
313EXPORT_SYMBOL(memcpy);
314EXPORT_SYMBOL(memset);
315EXPORT_SYMBOL(memmove);
316EXPORT_SYMBOL(__ashrdi3);
317EXPORT_SYMBOL(__ashldi3);
318EXPORT_SYMBOL(__lshrdi3);
319EXPORT_SYMBOL(__muldi3);
320EXPORT_SYMBOL(__divdi3);
321
322EXPORT_SYMBOL(rem);
323EXPORT_SYMBOL(urem);
324EXPORT_SYMBOL(mul);
325EXPORT_SYMBOL(umul);
326EXPORT_SYMBOL(div);
327EXPORT_SYMBOL(udiv);
328
329#ifdef CONFIG_DEBUG_BUGVERBOSE
330EXPORT_SYMBOL(do_BUG);
331#endif
332
333/* Sun Power Management Idle Handler */
334EXPORT_SYMBOL(pm_idle);
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
new file mode 100644
index 000000000000..3d6a99073c42
--- /dev/null
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -0,0 +1,250 @@
1/* sun4c_irq.c
2 * arch/sparc/kernel/sun4c_irq.c:
3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/linkage.h>
15#include <linux/kernel_stat.h>
16#include <linux/signal.h>
17#include <linux/sched.h>
18#include <linux/ptrace.h>
19#include <linux/interrupt.h>
20#include <linux/slab.h>
21#include <linux/init.h>
22
23#include <asm/ptrace.h>
24#include <asm/processor.h>
25#include <asm/system.h>
26#include <asm/psr.h>
27#include <asm/vaddrs.h>
28#include <asm/timer.h>
29#include <asm/openprom.h>
30#include <asm/oplib.h>
31#include <asm/traps.h>
32#include <asm/irq.h>
33#include <asm/io.h>
34#include <asm/sun4paddr.h>
35#include <asm/idprom.h>
36#include <asm/machines.h>
37#include <asm/sbus.h>
38
39#if 0
40static struct resource sun4c_timer_eb = { "sun4c_timer" };
41static struct resource sun4c_intr_eb = { "sun4c_intr" };
42#endif
43
/* Pointer to the interrupt enable byte
 *
 * Dave Redman (djhr@tadpole.co.uk)
 * What you may not be aware of is that entry.S requires this variable.
 *
 * --- linux_trap_nmi_sun4c --
 *
 * so don't go making it static, like I tried. sigh.
 */
unsigned char *interrupt_enable = NULL;

/* Maps an SBUS interrupt level (index 0-7) to the sun4c processor PIL. */
static int sun4c_pil_map[] = { 0, 1, 2, 3, 5, 7, 8, 9 };
56
57unsigned int sun4c_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
58{
59 if (sbint >= sizeof(sun4c_pil_map)) {
60 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
61 BUG();
62 }
63 return sun4c_pil_map[sbint];
64}
65
66static void sun4c_disable_irq(unsigned int irq_nr)
67{
68 unsigned long flags;
69 unsigned char current_mask, new_mask;
70
71 local_irq_save(flags);
72 irq_nr &= (NR_IRQS - 1);
73 current_mask = *interrupt_enable;
74 switch(irq_nr) {
75 case 1:
76 new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
77 break;
78 case 8:
79 new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
80 break;
81 case 10:
82 new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
83 break;
84 case 14:
85 new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
86 break;
87 default:
88 local_irq_restore(flags);
89 return;
90 }
91 *interrupt_enable = new_mask;
92 local_irq_restore(flags);
93}
94
95static void sun4c_enable_irq(unsigned int irq_nr)
96{
97 unsigned long flags;
98 unsigned char current_mask, new_mask;
99
100 local_irq_save(flags);
101 irq_nr &= (NR_IRQS - 1);
102 current_mask = *interrupt_enable;
103 switch(irq_nr) {
104 case 1:
105 new_mask = ((current_mask) | SUN4C_INT_E1);
106 break;
107 case 8:
108 new_mask = ((current_mask) | SUN4C_INT_E8);
109 break;
110 case 10:
111 new_mask = ((current_mask) | SUN4C_INT_E10);
112 break;
113 case 14:
114 new_mask = ((current_mask) | SUN4C_INT_E14);
115 break;
116 default:
117 local_irq_restore(flags);
118 return;
119 }
120 *interrupt_enable = new_mask;
121 local_irq_restore(flags);
122}
123
#define TIMER_IRQ  	10    /* Also at level 14, but we ignore that one. */
#define PROFILE_IRQ	14    /* Level14 ticker.. used by OBP for polling */

/* Mapped level-10/level-14 counter-timer registers; set up in
 * sun4c_init_timers().
 */
volatile struct sun4c_timer_info *sun4c_timers;

#ifdef CONFIG_SUN4
/* This is an ugly hack to work around the
   current timer code, and make it work with
   the sun4/260 intersil
 */
volatile struct sun4c_timer_info sun4_timer;
#endif
136
/* Acknowledge the level-10 clock interrupt.  Reading the limit register
 * clears the pending condition; the value itself is discarded.  The
 * volatile local keeps the compiler from eliding the load.
 */
static void sun4c_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;
#ifdef CONFIG_SUN4
	/* sun4/260 uses the intersil overlay instead of the mapped timers. */
	if (idprom->id_machtype == (SM_SUN4 | SM_4_260))
		clear_intr = sun4_timer.timer_limit10;
	else
#endif
	clear_intr = sun4c_timers->timer_limit10;
}
147
/* Profile-tick acknowledge: intentionally a stub on sun4c (wired as a
 * BTFIXUPCALL_NOP in sun4c_init_IRQ()).
 */
static void sun4c_clear_profile_irq(int cpu)
{
	/* Errm.. not sure how to do this.. */
}
152
/* Profile-timer reload: intentionally a stub on sun4c (wired as a
 * BTFIXUPCALL_NOP in sun4c_init_IRQ()).
 */
static void sun4c_load_profile_irq(int cpu, unsigned int limit)
{
	/* Errm.. not sure how to do this.. */
}
157
/* Map the counter-timer hardware, program the level-10 tick for HZ, and
 * install @counter_fn as the timer interrupt handler.  Any failure here
 * is fatal at boot (prom_halt).
 */
static void __init sun4c_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
{
	int irq;	/* actually holds request_irq()'s error code, not an irq */

	/* Map the Timer chip, this is implemented in hardware inside
	 * the cache chip on the sun4c.
	 */
#ifdef CONFIG_SUN4
	if (idprom->id_machtype == (SM_SUN4 | SM_4_260))
		sun4c_timers = &sun4_timer;
	else
#endif
	sun4c_timers = ioremap(SUN_TIMER_PHYSADDR,
	    sizeof(struct sun4c_timer_info));

	/* Have the level 10 timer tick at 100HZ.  We don't touch the
	 * level 14 timer limit since we are letting the prom handle
	 * them until we have a real console driver so L1-A works.
	 */
	sun4c_timers->timer_limit10 = (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4c_timers->cur_count10;
	master_l10_limit = &sun4c_timers->timer_limit10;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}

#if 0
	/* This does not work on 4/330 */
	sun4c_enable_irq(10);
#endif
	/* Leave the level-14 ticker with the PROM (NULL handler). */
	claim_ticker14(NULL, PROFILE_IRQ, 0);
}
196
197#ifdef CONFIG_SMP
198static void sun4c_nop(void) {}
199#endif
200
201extern char *sun4m_irq_itoa(unsigned int irq);
202
/* Locate and map the interrupt-enable register, wire up all sun4c
 * interrupt operations via BTFIXUP, and write the master-enable bit.
 * Called once during boot; panics if the register cannot be mapped.
 */
void __init sun4c_init_IRQ(void)
{
	struct linux_prom_registers int_regs[2];
	int ie_node;

	if (ARCH_SUN4) {
		/* sun4 has a fixed physical address for the enable register. */
		interrupt_enable = (char *)
		    ioremap(sun4_ie_physaddr, PAGE_SIZE);
	} else {
		struct resource phyres;

		ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
				               "interrupt-enable");
		if(ie_node == 0)
			panic("Cannot find /interrupt-enable node");

		/* Depending on the "address" property is bad news... */
		interrupt_enable = NULL;
		if (prom_getproperty(ie_node, "reg", (char *) int_regs,
				     sizeof(int_regs)) != -1) {
			/* Build a throwaway resource from the first reg cell
			 * just to feed sbus_ioremap().
			 */
			memset(&phyres, 0, sizeof(struct resource));
			phyres.flags = int_regs[0].which_io;
			phyres.start = int_regs[0].phys_addr;
			interrupt_enable = (char *) sbus_ioremap(&phyres, 0,
			    int_regs[0].reg_size, "sun4c_intr");
		}
	}
	if (!interrupt_enable)
		panic("Cannot map interrupt_enable");

	BTFIXUPSET_CALL(sbint_to_irq, sun4c_sbint_to_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4c_clear_profile_irq, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(__irq_itoa, sun4m_irq_itoa, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4c_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
#endif
	*interrupt_enable = (SUN4C_INT_ENABLE);
	/* Cannot enable interrupts until OBP ticker is disabled. */
}
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
new file mode 100644
index 000000000000..52621348a56c
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -0,0 +1,594 @@
1/* $Id: sun4d_irq.c,v 1.29 2001/12/11 04:55:51 davem Exp $
2 * arch/sparc/kernel/sun4d_irq.c:
3 * SS1000/SC2000 interrupt handling.
4 *
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Heavily based on arch/sparc/kernel/irq.c.
7 */
8
9#include <linux/config.h>
10#include <linux/errno.h>
11#include <linux/linkage.h>
12#include <linux/kernel_stat.h>
13#include <linux/signal.h>
14#include <linux/sched.h>
15#include <linux/ptrace.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/random.h>
19#include <linux/init.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/spinlock.h>
23#include <linux/seq_file.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/smp.h>
30#include <asm/vaddrs.h>
31#include <asm/timer.h>
32#include <asm/openprom.h>
33#include <asm/oplib.h>
34#include <asm/traps.h>
35#include <asm/irq.h>
36#include <asm/io.h>
37#include <asm/pgalloc.h>
38#include <asm/pgtable.h>
39#include <asm/sbus.h>
40#include <asm/sbi.h>
41#include <asm/cacheflush.h>
42
/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */

/* Level-10 user timer register block; mapped in sun4d_init_timers(). */
struct sun4d_timer_regs *sun4d_timers;
#define TIMER_IRQ        10

#define MAX_STATIC_ALLOC	4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
/* Per-CPU front-panel LED shadow (written from the SMP callin path). */
unsigned char cpu_leds[32];
#ifdef CONFIG_SMP
/* CPU id (TID) each SBUS board's interrupts are currently routed to. */
unsigned char sbus_tid[32];
#endif

extern struct irqaction *irq_action[];
extern spinlock_t irq_action_lock;

/* One action list per SBI interrupt slot, indexed as
 * (board << 5) + (sbus_level << 2) + slot.
 */
struct sbus_action {
	struct irqaction *action;
	/* For SMP this needs to be extended */
} *sbus_actions;

/* CPU PIL -> SBUS interrupt level; 0 means "no SBUS equivalent". */
static int pil_to_sbus[] = {
	0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};

/* SBUS interrupt level -> CPU PIL. */
static int sbus_to_pil[] = {
	0, 2, 3, 5, 7, 9, 11, 13,
};

/* Number of SBI units found at boot (set in sun4d_init_sbi_irq()). */
static int nsbi;
#ifdef CONFIG_SMP
/* Serializes cross-CPU interrupt-mask (imsk) updates. */
DEFINE_SPINLOCK(sun4d_imsk_lock);
#endif
77
/* seq_file show routine for /proc/interrupts on sun4d.  @v points at the
 * irq (PIL) index.  CPU-level irqs come from irq_action[]; SBUS-level
 * irqs are gathered from every SBI's 4 slots at that level.  Holds
 * irq_action_lock for the whole walk.
 */
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j = 0, k = 0, sbusl;
	struct irqaction * action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int x;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		sbusl = pil_to_sbus[i];
		if (!sbusl) {
			action = *(i + irq_action);
			if (!action)
		        	goto out_unlock;
		} else {
			/* Find the first SBI/slot with a handler at this level. */
			for (j = 0; j < nsbi; j++) {
				for (k = 0; k < 4; k++)
					if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
						goto found_it;
			}
			goto out_unlock;
		}
found_it:	seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (x = 0; x < NR_CPUS; x++) {
			if (cpu_online(x))
				seq_printf(p, "%10u ",
				       kstat_cpu(cpu_logical_map(x)).irqs[i]);
		}
#endif
		seq_printf(p, "%c %s",
			(action->flags & SA_INTERRUPT) ? '+' : ' ',
			action->name);
		action = action->next;
		/* Print the remaining shared handlers; for SBUS levels also
		 * continue through the rest of the (j, k) slot grid.
		 */
		for (;;) {
			for (; action; action = action->next) {
				seq_printf(p, ",%s %s",
					(action->flags & SA_INTERRUPT) ? " +" : "",
					action->name);
			}
			if (!sbusl) break;
			k++;
			if (k < 4)
				action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
			else {
				j++;
				if (j == nsbi) break;
				k = 0;
				action = sbus_actions [(j << 5) + (sbusl << 2)].action;
			}
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}
139
/* Remove the handler matching @dev_id (or the sole handler) from @irq's
 * action list.  irqs < 15 live in irq_action[]; larger cookies index
 * the SBI slot table.  The lock is dropped around synchronize_irq() so
 * an in-flight handler can finish before the action is kfree'd.
 */
void sun4d_free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **actionp;
	struct irqaction *tmp = NULL;	/* predecessor of the match, if shared */
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);
	if (irq < 15)
		actionp = irq + irq_action;
	else
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	action = *actionp;
	if (!action) {
		printk("Trying to free free IRQ%d\n",irq);
		goto out_unlock;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n",irq);
			goto out_unlock;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
		goto out_unlock;
	}
	if (action->flags & SA_STATIC_ALLOC)
	{
		/* This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk("Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		goto out_unlock;
	}

	/* Unlink: splice around the match if it has a predecessor,
	 * otherwise advance the list head.
	 */
	if (action && tmp)
		tmp->next = action->next;
	else
		*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	/* Wait for any running handler on another CPU before freeing. */
	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	kfree(action);

	/* Last handler gone: mask the line. */
	if (!(*actionp))
		disable_irq(irq);

out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
199
extern void unexpected_irq(int, void *, struct pt_regs *);

/* Low-level dispatch for a taken interrupt at PIL @irq.  CPU-level irqs
 * run their irq_action[] chain directly; SBUS-level irqs poll every SBI
 * with a pending bit, acquire its per-slot pending mask and run the
 * matching sbus_actions handlers, releasing each slot afterwards.
 */
void sun4d_handler_irq(int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	int cpu = smp_processor_id();
	/* SBUS IRQ level (1 - 7) */
	int sbusl = pil_to_sbus[irq];

	/* FIXME: Is this necessary?? */
	cc_get_ipen();

	/* Ack this level in the interrupt-clear register. */
	cc_set_iclr(1 << irq);

	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	if (!sbusl) {
		action = *(irq + irq_action);
		if (!action)
			unexpected_irq(irq, NULL, regs);
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
	} else {
		int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
		int sbino;
		struct sbus_action *actionp;
		unsigned mask, slot;
		int sbil = (sbusl << 2);

		bw_clear_intr_mask(sbusl, bus_mask);

		/* Loop for each pending SBI */
		for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
			if (bus_mask & 1) {
				mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
				mask &= (0xf << sbil);
				actionp = sbus_actions + (sbino << 5) + (sbil);
				/* Loop for each pending SBI slot */
				for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
					if (mask & slot) {
						mask &= ~slot;
						action = actionp->action;

						if (!action)
							unexpected_irq(irq, NULL, regs);
						do {
							action->handler(irq, action->dev_id, regs);
							action = action->next;
						} while (action);
						release_sbi(SBI2DEVID(sbino), slot);
					}
			}
	}
	irq_exit();
}
257
258unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
259{
260 int sbusl = pil_to_sbus[irq];
261
262 if (sbusl)
263 return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot;
264 else
265 return irq;
266}
267
268unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
269{
270 if (sbint >= sizeof(sbus_to_pil)) {
271 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
272 BUG();
273 }
274 return sun4d_build_irq(sdev, sbus_to_pil[sbint]);
275}
276
/* Install @handler on @irq (either a plain PIL < 15 or an SBI cookie
 * >= 1<<5).  Supports SA_SHIRQ sharing, SA_STATIC_ALLOC early-boot
 * allocation from a fixed pool, and rejects mixing fast (SA_INTERRUPT)
 * and slow handlers on one line.  Returns 0 or a -errno.
 */
int sun4d_request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *action, *tmp = NULL, **actionp;
	unsigned long flags;
	int ret;

	/* Cookies between the PIL range and the SBI range are invalid. */
	if(irq > 14 && irq < (1 << 5)) {
		ret = -EINVAL;
		goto out;
	}

	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (irq >= (1 << 5))
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	else
		actionp = irq + irq_action;
	action = *actionp;

	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			/* Both sides agree to share: find the list tail. */
			for (tmp = action; tmp->next; tmp = tmp->next);
		} else {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	/* Append to the shared chain or install as the list head. */
	if (tmp)
		tmp->next = action;
	else
		*actionp = action;

	enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
357
/* Mask an SBUS interrupt by setting its PIL bit in the counter/cache
 * controller interrupt mask.  Plain CPU irqs (< NR_IRQS) have no per-
 * line mask here and are ignored — note the early return fires for the
 * LOW range, i.e. only SBI cookies proceed.  On SMP the mask of the CPU
 * serving this board (sbus_tid) is updated under sun4d_imsk_lock.
 */
static void sun4d_disable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS) return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}
374
/* Unmask an SBUS interrupt by clearing its PIL bit in the interrupt
 * mask register; mirror of sun4d_disable_irq().  Plain CPU irqs
 * (< NR_IRQS) are ignored.
 */
static void sun4d_enable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS) return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}
391
392#ifdef CONFIG_SMP
/* Raise a software interrupt at @level on @cpu (BTFIXUP set_cpu_int hook). */
static void sun4d_set_cpu_int(int cpu, int level)
{
	sun4d_send_ipi(cpu, level);
}
397
/* clear_cpu_int hook: nothing to do on sun4d (wired as BTFIXUPCALL_NOP). */
static void sun4d_clear_ipi(int cpu, int level)
{
}
401
/* set_irq_udt hook: nothing to do on sun4d (wired as BTFIXUPCALL_NOP). */
static void sun4d_set_udt(int cpu)
{
}
405
/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
#ifdef DISTRIBUTE_IRQS
	/* Spread boards across present CPUs: prefer a board's own CPU
	 * pair, fall back to any CPU still unclaimed this round.
	 */
	struct sbus_bus *sbus;
	unsigned long sbus_serving_map;

	sbus_serving_map = cpu_present_map;
	for_each_sbus(sbus) {
		if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1))))
			sbus_tid[sbus->board] = (sbus->board * 2 + 1);
		else if (cpu_present_map & (1 << (sbus->board * 2)))
			sbus_tid[sbus->board] = (sbus->board * 2);
		else if (cpu_present_map & (1 << (sbus->board * 2 + 1)))
			sbus_tid[sbus->board] = (sbus->board * 2 + 1);
		else
			sbus_tid[sbus->board] = 0xff;
		if (sbus_tid[sbus->board] != 0xff)
			sbus_serving_map &= ~(1 << sbus_tid[sbus->board]);
	}
	/* Second pass: boards left unassigned take the highest free CPU. */
	for_each_sbus(sbus)
		if (sbus_tid[sbus->board] == 0xff) {
			int i = 31;

			if (!sbus_serving_map)
				sbus_serving_map = cpu_present_map;
			while (!(sbus_serving_map & (1 << i)))
				i--;
			sbus_tid[sbus->board] = i;
			sbus_serving_map &= ~(1 << i);
		}
	for_each_sbus(sbus) {
		printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]);
		set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3);
	}
#else
	/* Default scheme: direct every SBUS board at one CPU — the
	 * second logical CPU if present, else the first.
	 */
	struct sbus_bus *sbus;
	int cpuid = cpu_logical_map(1);

	if (cpuid == -1)
		cpuid = cpu_logical_map(0);
	for_each_sbus(sbus) {
		sbus_tid[sbus->board] = cpuid;
		set_sbi_tid(sbus->devid, cpuid << 3);
	}
	printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
454#endif
455
/* Acknowledge the level-10 clock interrupt: reading the limit register
 * clears the pending condition; the volatile local forces the load.
 */
static void sun4d_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;
	clear_intr = sun4d_timers->l10_timer_limit;
}
461
/* Acknowledge @cpu's level-14 profile tick by reading its BW limit
 * register (the value is discarded).
 */
static void sun4d_clear_profile_irq(int cpu)
{
	bw_get_prof_limit(cpu);
}
466
/* Program @cpu's level-14 profile timer limit register. */
static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
	bw_set_prof_limit(cpu, limit);
}
471
/* Map the boot CPU's BW user-timer registers, program the level-10 tick
 * for HZ, install @counter_fn, zero every CPU's profile limit, and (SMP)
 * patch the boot CPU's level-14 trap slot to jump into smp4d_ticker.
 */
static void __init sun4d_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
{
	int irq;	/* actually holds request_irq()'s error code */
	int cpu;
	struct resource r;
	int mid;

	/* Map the User Timer registers. */
	memset(&r, 0, sizeof(r));
#ifdef CONFIG_SMP
	r.start = CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT;
#else
	r.start = CSR_BASE(0)+BW_TIMER_LIMIT;
#endif
	r.flags = 0xf;
	sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE, "user timer");

	/* Tick at HZ; the counter counts in the high bits (<< 10). */
	sun4d_timers->l10_timer_limit =  (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4d_timers->l10_cur_count;
	master_l10_limit = &sun4d_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}
	
	/* Enable user timer free run for CPU 0 in BW */
	/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */

	/* Zero the profile limit on every CPU found by the PROM. */
	cpu = 0;
	while (!cpu_find_by_instance(cpu, NULL, &mid)) {
		sun4d_load_profile_irq(mid >> 3, 0);
		cpu++;
	}
		
#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
		extern unsigned int real_irq_entry[], smp4d_ticker[];
		extern unsigned int patchme_maybe_smp_msg[];

		/* Adjust so that we jump directly to smp4d_ticker */
		lvl14_save[2] += smp4d_ticker - real_irq_entry;

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmwares level 14 vector into boot cpu's
		 * trap table, we must fix this now or we get squashed.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		local_irq_restore(flags);
	}
#endif
}
538
539void __init sun4d_init_sbi_irq(void)
540{
541 struct sbus_bus *sbus;
542 unsigned mask;
543
544 nsbi = 0;
545 for_each_sbus(sbus)
546 nsbi++;
547 sbus_actions = (struct sbus_action *)kmalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
548 memset (sbus_actions, 0, (nsbi * 8 * 4 * sizeof(struct sbus_action)));
549 for_each_sbus(sbus) {
550#ifdef CONFIG_SMP
551 extern unsigned char boot_cpu_id;
552
553 set_sbi_tid(sbus->devid, boot_cpu_id << 3);
554 sbus_tid[sbus->board] = boot_cpu_id;
555#endif
556 /* Get rid of pending irqs from PROM */
557 mask = acquire_sbi(sbus->devid, 0xffffffff);
558 if (mask) {
559 printk ("Clearing pending IRQs %08x on SBI %d\n", mask, sbus->board);
560 release_sbi(sbus->devid, mask);
561 }
562 }
563}
564
565static char *sun4d_irq_itoa(unsigned int irq)
566{
567 static char buff[16];
568
569 if (irq < (1 << 5))
570 sprintf(buff, "%d", irq);
571 else
572 sprintf(buff, "%d,%x", sbus_to_pil[(irq >> 2) & 7], irq);
573 return buff;
574}
575
/* Wire up all sun4d interrupt operations via BTFIXUP and install the
 * timer-init hook.  Interrupts stay disabled; enabling waits until the
 * OBP ticker is taken over.
 */
void __init sun4d_init_IRQ(void)
{
	local_irq_disable();

	BTFIXUPSET_CALL(sbint_to_irq, sun4d_sbint_to_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__irq_itoa, sun4d_irq_itoa, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
new file mode 100644
index 000000000000..cc1fc898495c
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -0,0 +1,486 @@
1/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
2 *
3 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 *
5 * Based on sun4m's smp.c, which is:
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 */
8
9#include <asm/head.h>
10
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/threads.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/profile.h>
23
24#include <asm/ptrace.h>
25#include <asm/atomic.h>
26
27#include <asm/delay.h>
28#include <asm/irq.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/pgtable.h>
32#include <asm/oplib.h>
33#include <asm/sbus.h>
34#include <asm/sbi.h>
35#include <asm/tlbflush.h>
36#include <asm/cacheflush.h>
37#include <asm/cpudata.h>
38
/* PIL used for cross-CPU function-call IPIs. */
#define IRQ_CROSS_CALL		15

extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile int smp_processors_ready;
extern int smp_num_cpus;
/* Highest CPU id present; bounds the wait loops in smp4d_cross_call(). */
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
extern volatile int __cpu_number_map[NR_CPUS];
extern volatile int __cpu_logical_map[NR_CPUS];
extern volatile unsigned long ipi_count;
extern volatile int smp_process_available;
extern volatile int smp_commenced;
extern int __smp4d_processor_id(void);

/* #define SMP_DEBUG */

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)	printk x
#else
#define SMP_PRINTK(x)
#endif
66
/* Atomically exchange *ptr with val using the sparc "swap" instruction;
 * returns the previous value.  NOTE(review): the "=&r"(ptr) output
 * constraint marks ptr as clobbered by the asm — looks intentional but
 * unusual; do not reorder the constraints without checking generated code.
 */
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}
74
75static void smp_setup_percpu_timer(void);
76extern void cpu_probe(void);
77extern void sun4d_distribute_irqs(void);
78
/* Per-CPU bringup path for a secondary processor.  Runs on the new CPU:
 * lights the LEDs, starts the local ticker, calibrates, signals the
 * master via cpu_callin_map, attaches to init_mm, then spins until the
 * master sets smp_commenced before finally allowing its PIL-14 ticker.
 */
void __init smp4d_callin(void)
{
	int cpuid = hard_smp4d_processor_id();
	extern spinlock_t sun4d_imsk_lock;
	unsigned long flags;
	
	/* Show we are alive */
	cpu_leds[cpuid] = 0x6;
	show_leds(cpuid);

	/* Enable level15 interrupt, disable level14 interrupt for now */
	cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 */
	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Allow master to continue. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
	local_flush_cache_all();
	local_flush_tlb_all();
	
	cpu_probe();

	/* Wait until the master has published our idle task. */
	while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
		barrier();
		
	while(current_set[cpuid]->cpu != cpuid)
		barrier();
		
	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	cpu_leds[cpuid] = 0x9;
	show_leds(cpuid);
	
	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	local_flush_cache_all();
	local_flush_tlb_all();
	
	local_irq_enable();	/* We don't allow PIL 14 yet */
	
	while(!smp_commenced)
		barrier();

	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}
146
147extern void init_IRQ(void);
148extern void cpu_panic(void);
149
150/*
151 * Cycle through the processors asking the PROM to start each one.
152 */
153
154extern struct linux_prom_registers smp_penguin_ctable;
155extern unsigned long trapbase_cpu1[];
156extern unsigned long trapbase_cpu2[];
157extern unsigned long trapbase_cpu3[];
158
/* Master-side SMP bringup: enumerate CPUs from the PROM, fork an idle
 * task per secondary, start each via prom_startcpu() and wait for its
 * callin, then compute BogoMIPS totals, free the unused per-CPU trap
 * tables and hand IRQ routing to sun4d_distribute_irqs().
 */
void __init smp4d_boot_cpus(void)
{
	int cpucount = 0;
	int i, mid;

	printk("Entering SMP Mode...\n");
	
	if (boot_cpu_id)
		current_set[0] = NULL;

	local_irq_enable();
	cpus_clear(cpu_present_map);

	/* XXX This whole thing has to go.  See sparc64. */
	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
		cpu_set(mid, cpu_present_map);
	SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
	for(i=0; i < NR_CPUS; i++)
		__cpu_number_map[i] = -1;
	for(i=0; i < NR_CPUS; i++)
		__cpu_logical_map[i] = -1;
	__cpu_number_map[boot_cpu_id] = 0;
	__cpu_logical_map[0] = boot_cpu_id;
	current_thread_info()->cpu = boot_cpu_id;
	smp_store_cpu_info(boot_cpu_id);
	smp_setup_percpu_timer();
	local_flush_cache_all();
	if (cpu_find_by_instance(1, NULL, NULL))
		return;  /* Not an MP box. */
	SMP_PRINTK(("Iterating over CPUs\n"));
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id)
			continue;

		if (cpu_isset(i, cpu_present_map)) {
			extern unsigned long sun4d_cpu_startup;
			unsigned long *entry = &sun4d_cpu_startup;
			struct task_struct *p;
			int timeout;
			int no;

			/* Cook up an idler for this guy. */
			p = fork_idle(i);
			cpucount++;
			current_set[i] = p->thread_info;
			/* Map CPU id to its PROM instance number. */
			for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
				     && mid != i; no++) ;

			/*
			 * Initialize the contexts table
			 * Since the call to prom_startcpu() trashes the structure,
			 * we need to re-initialize it for each cpu
			 */
			smp_penguin_ctable.which_io = 0;
			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
			smp_penguin_ctable.reg_size = 0;

			/* whirrr, whirrr, whirrrrrrrrr... */
			SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node));
			local_flush_cache_all();
			prom_startcpu(cpu_data(no).prom_node,
				      &smp_penguin_ctable, 0, (char *)entry);
				      
			SMP_PRINTK(("prom_startcpu returned :)\n"));

			/* wheee... it's going... */
			for(timeout = 0; timeout < 10000; timeout++) {
				if(cpu_callin_map[i])
					break;
				udelay(200);
			}
			
			if(cpu_callin_map[i]) {
				/* Another "Red Snapper". */
				__cpu_number_map[i] = cpucount;
				__cpu_logical_map[cpucount] = i;
			} else {
				cpucount--;
				printk("Processor %d is stuck.\n", i);
			}
		}
		if(!(cpu_callin_map[i])) {
			cpu_clear(i, cpu_present_map);
			__cpu_number_map[i] = -1;
		}
	}
	local_flush_cache_all();
	if(cpucount == 0) {
		printk("Error: only one Processor found.\n");
		cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
	} else {
		unsigned long bogosum = 0;
		
		for(i = 0; i < NR_CPUS; i++) {
			if (cpu_isset(i, cpu_present_map)) {
				bogosum += cpu_data(i).udelay_val;
				smp_highest_cpu = i;
			}
		}
		SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
		       cpucount + 1,
		       bogosum/(500000/HZ),
		       (bogosum/(5000/HZ))%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}

	/* Free unneeded trap tables */
	ClearPageReserved(virt_to_page(trapbase_cpu1));
	set_page_count(virt_to_page(trapbase_cpu1), 1);
	free_page((unsigned long)trapbase_cpu1);
	totalram_pages++;
	num_physpages++;

	ClearPageReserved(virt_to_page(trapbase_cpu2));
	set_page_count(virt_to_page(trapbase_cpu2), 1);
	free_page((unsigned long)trapbase_cpu2);
	totalram_pages++;
	num_physpages++;

	ClearPageReserved(virt_to_page(trapbase_cpu3));
	set_page_count(virt_to_page(trapbase_cpu3), 1);
	free_page((unsigned long)trapbase_cpu3);
	totalram_pages++;
	num_physpages++;

	/* Ok, they are spinning and ready to go. */
	smp_processors_ready = 1;
	sun4d_distribute_irqs();
}
290
/* Shared argument block for a cross call.  The initiating CPU stores
 * func/args (via paired std instructions in smp4d_cross_call(), hence
 * the 8-byte alignment), each target sets processors_in on entry to the
 * IPI handler and processors_out when it has finished running func.
 */
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned char processors_in[NR_CPUS];  /* Set when ipi entered. */
	unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);
303
/* Cross calls must be serialized, at least currently.
 *
 * Run func(arg1..arg5) on every other present CPU (ids 0..smp_highest_cpu)
 * and busy-wait until all of them have both entered and left the handler.
 */
void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		      unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	if(smp_processors_ready) {
		register int high = smp_highest_cpu;
		unsigned long flags;

		/* Only one cross call may be in flight at a time. */
		spin_lock_irqsave(&cross_call_lock, flags);

		{
			/* If you make changes here, make sure gcc generates proper code... */
			register smpfunc_t f asm("i0") = func;
			register unsigned long a1 asm("i1") = arg1;
			register unsigned long a2 asm("i2") = arg2;
			register unsigned long a3 asm("i3") = arg3;
			register unsigned long a4 asm("i4") = arg4;
			register unsigned long a5 asm("i5") = arg5;

			/* Copy func + args into ccall_info with three
			 * doubleword "std" stores (register pairs i0/i1,
			 * i2/i3, i4/i5); relies on ccall_info being
			 * 8-byte aligned.
			 */
			__asm__ __volatile__(
				"std %0, [%6]\n\t"
				"std %2, [%6 + 8]\n\t"
				"std %4, [%6 + 16]\n\t" : :
				"r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
				"r" (&ccall_info.func));
		}

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			cpumask_t mask;
			register int i;

			/* Everyone present except ourselves. */
			mask = cpumask_of_cpu(hard_smp4d_processor_id());
			cpus_andnot(mask, cpu_present_map, mask);
			for(i = 0; i <= high; i++) {
				if (cpu_isset(i, mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					sun4d_send_ipi(i, IRQ_CROSS_CALL);
				}
			}
		}

		{
			register int i;

			/* Wait for every slot 0..high to report entry, then
			 * exit.  Slots that were not sent an IPI (including
			 * our own) were pre-set to 1 in sun4d_init_smp and
			 * therefore pass immediately.
			 */
			i = 0;
			do {
				while(!ccall_info.processors_in[i])
					barrier();
			} while(++i <= high);

			i = 0;
			do {
				while(!ccall_info.processors_out[i])
					barrier();
			} while(++i <= high);
		}

		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}
366
367/* Running cross calls. */
368void smp4d_cross_call_irq(void)
369{
370 int i = hard_smp4d_processor_id();
371
372 ccall_info.processors_in[i] = 1;
373 ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
374 ccall_info.arg4, ccall_info.arg5);
375 ccall_info.processors_out[i] = 1;
376}
377
378static int smp4d_stop_cpu_sender;
379
380static void smp4d_stop_cpu(void)
381{
382 int me = hard_smp4d_processor_id();
383
384 if (me != smp4d_stop_cpu_sender)
385 while(1) barrier();
386}
387
/* Cross calls, in order to work efficiently and atomically do all
 * the message passing work themselves, only stopcpu and reschedule
 * messages come through here.
 */
void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
{
	int me = hard_smp4d_processor_id();

	SMP_PRINTK(("smp4d_message_pass %d %d %08lx %d\n", target, msg, data, wait));
	if (msg == MSG_STOP_CPU && target == MSG_ALL_BUT_SELF) {
		unsigned long flags;
		static DEFINE_SPINLOCK(stop_cpu_lock);
		/* Record the initiator so smp4d_stop_cpu() spares it,
		 * then spin all other CPUs forever via a cross call.
		 */
		spin_lock_irqsave(&stop_cpu_lock, flags);
		smp4d_stop_cpu_sender = me;
		smp4d_cross_call((smpfunc_t)smp4d_stop_cpu, 0, 0, 0, 0, 0);
		spin_unlock_irqrestore(&stop_cpu_lock, flags);
	}
	/* Reached for every message, including a completed stop: no other
	 * message types are supported on sun4d, so complain and panic.
	 */
	printk("Yeeee, trying to send SMP msg(%d) to %d on cpu %d\n", msg, target, me);
	panic("Bogon SMP message pass.");
}
408
/* Per-CPU level-14 profile timer interrupt for sun4d: acknowledge the
 * BW profile IRQ, step the front-panel LED pattern for this CPU, and
 * drive profiling / process-time accounting.
 */
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = hard_smp4d_processor_id();
	static int cpu_tick[NR_CPUS];
	static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };

	/* Ack the timer: read the profile limit and clear the intr bit. */
	bw_get_prof_limit(cpu);
	bw_clear_intr_mask(0, 1);	/* INTR_TABLE[0] & 1 is Profile IRQ */

	/* Advance the tick counter (wraps at 0x60); every 16 ticks select
	 * the next step of the six-entry LED chase pattern.
	 */
	cpu_tick[cpu]++;
	if (!(cpu_tick[cpu] & 15)) {
		if (cpu_tick[cpu] == 0x60)
			cpu_tick[cpu] = 0;
		cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
		show_leds(cpu);
	}

	profile_tick(CPU_PROFILING, regs);

	/* Charge process time only every prof_multiplier-th tick. */
	if(!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
}
438
439extern unsigned int lvl14_resolution;
440
/* Arm this CPU's level-14 profile timer; called on each CPU during
 * sun4d SMP bringup.
 */
static void __init smp_setup_percpu_timer(void)
{
	int cpu = hard_smp4d_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);
}
448
/* Btfixup blackbox: patch the hard_smp_processor_id() site so that the
 * first instruction loads this CPU's id from ASI_M_VIKING_TMP1.  "rd"
 * is the destination-register field extracted from the template
 * instruction at addr[0].
 */
void __init smp4d_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;

	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
	addr[1] = 0x01000000;			/* nop */
	addr[2] = 0x01000000;			/* nop */
}

/* Btfixup blackbox for load_current: load the CPU id from
 * ASI_M_VIKING_TMP1 and shift it left by 2 (a word offset).
 * (rd >> 11) re-encodes the destination register as rs1 of the shift.
 */
void __init smp4d_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;

	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
	addr[2] = 0x81282002 | rd | (rd >> 11);	/* sll reg, 2, reg */
	addr[4] = 0x01000000;			/* nop */
}
466
/* Wire up the sun4d SMP entry points via btfixup, and pre-mark every
 * cross-call slot as completed so smp4d_cross_call's wait loops do not
 * stall on CPUs that are never sent an IPI.
 */
void __init sun4d_init_smp(void)
{
	int i;
	extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];

	/* Patch ipi15 trap table: offset the second word of the t_nmi
	 * entry by the distance between the sun4d and sun4m IPI15
	 * handlers, retargeting it at the sun4d version.
	 */
	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);

	/* And set btfixup... */
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);

	for (i = 0; i < NR_CPUS; i++) {
		ccall_info.processors_in[i] = 1;
		ccall_info.processors_out[i] = 1;
	}
}
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
new file mode 100644
index 000000000000..39d712c3c809
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -0,0 +1,399 @@
1/* sun4m_irq.c
2 * arch/sparc/kernel/sun4m_irq.c:
3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/linkage.h>
15#include <linux/kernel_stat.h>
16#include <linux/signal.h>
17#include <linux/sched.h>
18#include <linux/ptrace.h>
19#include <linux/smp.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/vaddrs.h>
30#include <asm/timer.h>
31#include <asm/openprom.h>
32#include <asm/oplib.h>
33#include <asm/traps.h>
34#include <asm/pgalloc.h>
35#include <asm/pgtable.h>
36#include <asm/smp.h>
37#include <asm/irq.h>
38#include <asm/io.h>
39#include <asm/sbus.h>
40#include <asm/cacheflush.h>
41
42static unsigned long dummy;
43
44struct sun4m_intregs *sun4m_interrupts;
45unsigned long *irq_rcvreg = &dummy;
46
47/* These tables only apply for interrupts greater than 15..
48 *
49 * any intr value below 0x10 is considered to be a soft-int
50 * this may be useful or it may not.. but that's how I've done it.
51 * and it won't clash with what OBP is telling us about devices.
52 *
53 * take an encoded intr value and lookup if it's valid
54 * then get the mask bits that match from irq_mask
55 *
56 * P3: Translation from irq 0x0d to mask 0x2000 is for MrCoffee.
57 */
/* Interrupt level (low 5 bits) -> index into irq_mask[] below.
 * 0 means "no valid mapping" (irq_mask[0] is the catch-all zero).
 */
static unsigned char irq_xlate[32] = {
	/* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f */
	0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 5, 6, 14, 0, 7,
	0, 0, 8, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 0
};

/* Indexed by irq_xlate values; gives the interrupt-register mask bits
 * for each mapped source.
 */
static unsigned long irq_mask[] = {
	0,					  /* illegal index */
	SUN4M_INT_SCSI,				  /* 1 irq 4 */
	SUN4M_INT_ETHERNET,			  /* 2 irq 6 */
	SUN4M_INT_VIDEO,			  /* 3 irq 8 */
	SUN4M_INT_REALTIME,			  /* 4 irq 10 */
	SUN4M_INT_FLOPPY,			  /* 5 irq 11 */
	(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),	  /* 6 irq 12 */
	SUN4M_INT_MODULE_ERR,			  /* 7 irq 15 */
	SUN4M_INT_SBUS(0),			  /* 8 irq 2 */
	SUN4M_INT_SBUS(1),			  /* 9 irq 3 */
	SUN4M_INT_SBUS(2),			  /* 10 irq 5 */
	SUN4M_INT_SBUS(3),			  /* 11 irq 7 */
	SUN4M_INT_SBUS(4),			  /* 12 irq 9 */
	SUN4M_INT_SBUS(5),			  /* 13 irq 11 */
	SUN4M_INT_SBUS(6)			  /* 14 irq 13 */
};

/* SBUS board interrupt level (0-7) -> processor interrupt level. */
static int sun4m_pil_map[] = { 0, 2, 3, 5, 7, 9, 11, 13 };
83
84unsigned int sun4m_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
85{
86 if (sbint >= sizeof(sun4m_pil_map)) {
87 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
88 BUG();
89 }
90 return sun4m_pil_map[sbint] | 0x30;
91}
92
/* Translate an interrupt number into a sun4m interrupt-register mask.
 * Values above 0x20 are OBIO/SBUS sources resolved through
 * irq_xlate/irq_mask (yielding 0, after a warning, for unmapped
 * levels); everything else is encoded as a soft-interrupt bit.
 */
inline unsigned long sun4m_get_irqmask(unsigned int irq)
{
	unsigned long mask;

	if (irq > 0x20) {
		/* OBIO/SBUS interrupts */
		irq &= 0x1f;
		mask = irq_mask[irq_xlate[irq]];
		if (!mask)
			printk("sun4m_get_irqmask: IRQ%d has no valid mask!\n",irq);
	} else {
		/* Soft Interrupts will come here.
		 * Currently there is no way to trigger them but I'm sure
		 * something could be cooked up.
		 */
		irq &= 0xf;
		mask = SUN4M_SOFT_INT(irq);
	}
	return mask;
}
113
/* Mask an interrupt source: write its bit to the system "set" (mask)
 * register for hardware irqs (>15), or to this CPU's private mask
 * register for soft irqs.
 */
static void sun4m_disable_irq(unsigned int irq_nr)
{
	unsigned long mask, flags;
	int cpu = smp_processor_id();

	mask = sun4m_get_irqmask(irq_nr);
	local_irq_save(flags);
	if (irq_nr > 15)
		sun4m_interrupts->set = mask;
	else
		sun4m_interrupts->cpu_intregs[cpu].set = mask;
	local_irq_restore(flags);
}
127
/* Unmask an interrupt source: write its bit to the system "clear"
 * register for hardware irqs (>15), or to this CPU's private register
 * for soft irqs.  IRQ 0x0b (floppy) is special-cased below.
 */
static void sun4m_enable_irq(unsigned int irq_nr)
{
	unsigned long mask, flags;
	int cpu = smp_processor_id();

	/* Dreadful floppy hack. When we use 0x2b instead of
	 * 0x0b the system blows (it starts to whistle!).
	 * So we continue to use 0x0b. Fixme ASAP. --P3
	 */
	if (irq_nr != 0x0b) {
		mask = sun4m_get_irqmask(irq_nr);
		local_irq_save(flags);
		if (irq_nr > 15)
			sun4m_interrupts->clear = mask;
		else
			sun4m_interrupts->cpu_intregs[cpu].clear = mask;
		local_irq_restore(flags);
	} else {
		/* For 0x0b unmask the floppy bit in the system register
		 * directly rather than taking the soft-int path.
		 */
		local_irq_save(flags);
		sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
		local_irq_restore(flags);
	}
}
151
152static unsigned long cpu_pil_to_imask[16] = {
153/*0*/ 0x00000000,
154/*1*/ 0x00000000,
155/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0),
156/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1),
157/*4*/ SUN4M_INT_SCSI,
158/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
159/*6*/ SUN4M_INT_ETHERNET,
160/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
161/*8*/ SUN4M_INT_VIDEO,
162/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
163/*10*/ SUN4M_INT_REALTIME,
164/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
165/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
166/*13*/ SUN4M_INT_AUDIO,
167/*14*/ SUN4M_INT_E14,
168/*15*/ 0x00000000
169};
170
171/* We assume the caller has disabled local interrupts when these are called,
172 * or else very bizarre behavior will result.
173 */
/* Mask every interrupt source belonging to the given processor
 * interrupt level, system-wide.  Caller must have local interrupts
 * disabled.
 */
static void sun4m_disable_pil_irq(unsigned int pil)
{
	sun4m_interrupts->set = cpu_pil_to_imask[pil];
}

/* Unmask every interrupt source belonging to the given processor
 * interrupt level, system-wide.  Caller must have local interrupts
 * disabled.
 */
static void sun4m_enable_pil_irq(unsigned int pil)
{
	sun4m_interrupts->clear = cpu_pil_to_imask[pil];
}
183
184#ifdef CONFIG_SMP
/* Raise interrupt "level" on the given CPU by setting its mask bit in
 * that CPU's private interrupt "set" register.
 */
static void sun4m_send_ipi(int cpu, int level)
{
	unsigned long mask;

	mask = sun4m_get_irqmask(level);
	sun4m_interrupts->cpu_intregs[cpu].set = mask;
}

/* Withdraw a pending interrupt "level" on the given CPU via that CPU's
 * private "clear" register.
 */
static void sun4m_clear_ipi(int cpu, int level)
{
	unsigned long mask;

	mask = sun4m_get_irqmask(level);
	sun4m_interrupts->cpu_intregs[cpu].clear = mask;
}

/* Select which CPU receives undirected (system-wide) interrupts. */
static void sun4m_set_udt(int cpu)
{
	sun4m_interrupts->undirected_target = cpu;
}
205#endif
206
207#define OBIO_INTR 0x20
208#define TIMER_IRQ (OBIO_INTR | 10)
209#define PROFILE_IRQ (OBIO_INTR | 14)
210
211struct sun4m_timer_regs *sun4m_timers;
212unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
213
/* Acknowledge the level-10 clock interrupt: the (volatile, discarded)
 * read of the limit register is the side effect -- read-to-clear.
 */
static void sun4m_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;
	clear_intr = sun4m_timers->l10_timer_limit;
}

/* Acknowledge the per-cpu level-14 profile interrupt; again the read
 * itself clears the pending condition.
 */
static void sun4m_clear_profile_irq(int cpu)
{
	volatile unsigned int clear;

	clear = sun4m_timers->cpu_timers[cpu].l14_timer_limit;
}

/* Program the per-cpu level-14 timer limit (profiling period). */
static void sun4m_load_profile_irq(int cpu, unsigned int limit)
{
	sun4m_timers->cpu_timers[cpu].l14_timer_limit = limit;
}
231
/* Render an IRQ number as a decimal string.
 *
 * Returns a pointer to static storage -- not reentrant; the result is
 * overwritten by the next call.
 *
 * Fixes: use snprintf so the write is bounded by the buffer, and "%u"
 * to match the unsigned argument ("%d" misprints values > INT_MAX).
 */
char *sun4m_irq_itoa(unsigned int irq)
{
	static char buff[16];
	snprintf(buff, sizeof(buff), "%u", irq);
	return buff;
}
238
/* Locate the /obio/counter PROM node, map the per-cpu and system
 * counter registers, start the level-10 system timer with counter_fn
 * attached, and quiesce the per-cpu level-14 timers.
 */
static void __init sun4m_init_timers(irqreturn_t (*counter_fn)(int, void *, struct pt_regs *))
{
	int reg_count, irq, cpu;
	struct linux_prom_registers cnt_regs[PROMREG_MAX];
	int obio_node, cnt_node;
	struct resource r;

	cnt_node = 0;
	if((obio_node =
	    prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
	   (obio_node = prom_getchild (obio_node)) == 0 ||
	   (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
		prom_printf("Cannot find /obio/counter node\n");
		prom_halt();
	}
	reg_count = prom_getproperty(cnt_node, "reg",
				     (void *) cnt_regs, sizeof(cnt_regs));
	reg_count = (reg_count/sizeof(struct linux_prom_registers));

	/* Apply the obio ranges to the timer registers. */
	prom_apply_obio_ranges(cnt_regs, reg_count);

	/* Entry 4 becomes the system counter (the last "reg" property
	 * entry); entries 1-3 are synthesized at page-sized offsets from
	 * entry 0 so every possible CPU gets a per-cpu counter mapping.
	 * (obio_node is reused here as a scratch loop index.)
	 */
	cnt_regs[4].phys_addr = cnt_regs[reg_count-1].phys_addr;
	cnt_regs[4].reg_size = cnt_regs[reg_count-1].reg_size;
	cnt_regs[4].which_io = cnt_regs[reg_count-1].which_io;
	for(obio_node = 1; obio_node < 4; obio_node++) {
		cnt_regs[obio_node].phys_addr =
			cnt_regs[obio_node-1].phys_addr + PAGE_SIZE;
		cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size;
		cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io;
	}

	memset((char*)&r, 0, sizeof(struct resource));
	/* Map the per-cpu Counter registers. */
	r.flags = cnt_regs[0].which_io;
	r.start = cnt_regs[0].phys_addr;
	sun4m_timers = (struct sun4m_timer_regs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE*SUN4M_NCPUS, "sun4m_cpu_cnt");
	/* Map the system Counter register. */
	/* XXX Here we expect consequent calls to yeld adjusent maps. */
	r.flags = cnt_regs[4].which_io;
	r.start = cnt_regs[4].phys_addr;
	sbus_ioremap(&r, 0, cnt_regs[4].reg_size, "sun4m_sys_cnt");

	/* Program the level-10 limit for HZ ticks and export the counter
	 * registers for the timekeeping code.
	 */
	sun4m_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4m_timers->l10_cur_count;
	master_l10_limit = &sun4m_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}

	/* MP box (a second CPU instance exists): zero all four per-cpu
	 * level-14 limits and mask the E14 source; UP: just CPU 0's.
	 */
	if (!cpu_find_by_instance(1, NULL, NULL)) {
		for(cpu = 0; cpu < 4; cpu++)
			sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
		sun4m_interrupts->set = SUN4M_INT_E14;
	} else {
		sun4m_timers->cpu_timers[0].l14_timer_limit = 0;
	}
#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmwares level 14 vector into boot cpu's
		 * trap table, we must fix this now or we get squashed.
		 */
		local_irq_save(flags);
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		local_irq_restore(flags);
	}
#endif
}
323
/* Locate the /obio/interrupt PROM node, map the per-cpu and system
 * interrupt registers, mask everything, and install the sun4m IRQ
 * operations via btfixup.  Leaves local interrupts disabled.
 */
void __init sun4m_init_IRQ(void)
{
	int ie_node,i;
	struct linux_prom_registers int_regs[PROMREG_MAX];
	int num_regs;
	struct resource r;
	int mid;

	local_irq_disable();
	if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
	   (ie_node = prom_getchild (ie_node)) == 0 ||
	   (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
		prom_printf("Cannot find /obio/interrupt node\n");
		prom_halt();
	}
	num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
				    sizeof(int_regs));
	num_regs = (num_regs/sizeof(struct linux_prom_registers));

	/* Apply the obio ranges to these registers. */
	prom_apply_obio_ranges(int_regs, num_regs);

	/* Entry 4 becomes the system interrupt registers (the last "reg"
	 * entry); entries 1-3 are synthesized page-by-page from entry 0
	 * for the four possible CPUs.  (ie_node is reused as an index.)
	 */
	int_regs[4].phys_addr = int_regs[num_regs-1].phys_addr;
	int_regs[4].reg_size = int_regs[num_regs-1].reg_size;
	int_regs[4].which_io = int_regs[num_regs-1].which_io;
	for(ie_node = 1; ie_node < 4; ie_node++) {
		int_regs[ie_node].phys_addr = int_regs[ie_node-1].phys_addr + PAGE_SIZE;
		int_regs[ie_node].reg_size = int_regs[ie_node-1].reg_size;
		int_regs[ie_node].which_io = int_regs[ie_node-1].which_io;
	}

	memset((char *)&r, 0, sizeof(struct resource));
	/* Map the interrupt registers for all possible cpus. */
	r.flags = int_regs[0].which_io;
	r.start = int_regs[0].phys_addr;
	sun4m_interrupts = (struct sun4m_intregs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE*SUN4M_NCPUS, "interrupts_percpu");

	/* Map the system interrupt control registers. */
	r.flags = int_regs[4].which_io;
	r.start = int_regs[4].phys_addr;
	sbus_ioremap(&r, 0, int_regs[4].reg_size, "interrupts_system");

	/* Mask every system interrupt, then write ~0x17fff to each
	 * PROM-reported CPU's private clear register.
	 */
	sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
		sun4m_interrupts->cpu_intregs[mid].clear = ~0x17fff;

	if (!cpu_find_by_instance(1, NULL, NULL)) {
		/* system wide interrupts go to cpu 0, this should always
		 * be safe because it is guaranteed to be fitted or OBP doesn't
		 * come up
		 *
		 * Not sure, but writing here on SLAVIO systems may puke
		 * so I don't do it unless there is more than 1 cpu.
		 */
		irq_rcvreg = (unsigned long *)
				&sun4m_interrupts->undirected_target;
		sun4m_interrupts->undirected_target = 0;
	}
	BTFIXUPSET_CALL(sbint_to_irq, sun4m_sbint_to_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4m_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__irq_itoa, sun4m_irq_itoa, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4m_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
new file mode 100644
index 000000000000..f113422a3727
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -0,0 +1,451 @@
1/* sun4m_smp.c: Sparc SUN4M SMP support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <asm/head.h>
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/threads.h>
11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
15#include <linux/init.h>
16#include <linux/spinlock.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/profile.h>
20#include <asm/cacheflush.h>
21#include <asm/tlbflush.h>
22
23#include <asm/ptrace.h>
24#include <asm/atomic.h>
25
26#include <asm/delay.h>
27#include <asm/irq.h>
28#include <asm/page.h>
29#include <asm/pgalloc.h>
30#include <asm/pgtable.h>
31#include <asm/oplib.h>
32#include <asm/cpudata.h>
33
34#define IRQ_RESCHEDULE 13
35#define IRQ_STOP_CPU 14
36#define IRQ_CROSS_CALL 15
37
38extern ctxd_t *srmmu_ctx_table_phys;
39
40extern void calibrate_delay(void);
41
42extern volatile int smp_processors_ready;
43extern int smp_num_cpus;
44extern volatile unsigned long cpu_callin_map[NR_CPUS];
45extern unsigned char boot_cpu_id;
46extern int smp_activated;
47extern volatile int __cpu_number_map[NR_CPUS];
48extern volatile int __cpu_logical_map[NR_CPUS];
49extern volatile unsigned long ipi_count;
50extern volatile int smp_process_available;
51extern volatile int smp_commenced;
52extern int __smp4m_processor_id(void);
53
54/*#define SMP_DEBUG*/
55
56#ifdef SMP_DEBUG
57#define SMP_PRINTK(x) printk x
58#else
59#define SMP_PRINTK(x)
60#endif
61
/* Atomically exchange *ptr with val using the sparc "swap" instruction;
 * returns the previous contents of *ptr.
 * NOTE(review): ptr is also declared as an asm output ("=&r"), so the
 * compiler treats the pointer register as clobbered -- confirm this is
 * intentional before touching the constraints.
 */
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}
69
70static void smp_setup_percpu_timer(void);
71extern void cpu_probe(void);
72
/* Entry point for a secondary CPU after the trampoline: start the local
 * ticker, record CPU data, signal the master through cpu_callin_map,
 * then spin until smp_commenced before enabling interrupts.
 */
void __init smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_flush_cache_all();
	local_flush_tlb_all();

	/* Route undirected interrupts to the boot CPU. */
	set_irq_udt(boot_cpu_id);

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 */
	/* Allow master to continue (atomic store via swap()). */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);

	local_flush_cache_all();
	local_flush_tlb_all();

	cpu_probe();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Wait for the master to open the gate before going idle. */
	while(!smp_commenced)
		barrier();

	local_flush_cache_all();
	local_flush_tlb_all();

	local_irq_enable();
}
122
123extern void init_IRQ(void);
124extern void cpu_panic(void);
125
126/*
127 * Cycle through the processors asking the PROM to start each one.
128 */
129
130extern struct linux_prom_registers smp_penguin_ctable;
131extern unsigned long trapbase_cpu1[];
132extern unsigned long trapbase_cpu2[];
133extern unsigned long trapbase_cpu3[];
134
135void __init smp4m_boot_cpus(void)
136{
137 int cpucount = 0;
138 int i, mid;
139
140 printk("Entering SMP Mode...\n");
141
142 local_irq_enable();
143 cpus_clear(cpu_present_map);
144
145 for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
146 cpu_set(mid, cpu_present_map);
147
148 for(i=0; i < NR_CPUS; i++) {
149 __cpu_number_map[i] = -1;
150 __cpu_logical_map[i] = -1;
151 }
152
153 __cpu_number_map[boot_cpu_id] = 0;
154 __cpu_logical_map[0] = boot_cpu_id;
155 current_thread_info()->cpu = boot_cpu_id;
156
157 smp_store_cpu_info(boot_cpu_id);
158 set_irq_udt(boot_cpu_id);
159 smp_setup_percpu_timer();
160 local_flush_cache_all();
161 if(cpu_find_by_instance(1, NULL, NULL))
162 return; /* Not an MP box. */
163 for(i = 0; i < NR_CPUS; i++) {
164 if(i == boot_cpu_id)
165 continue;
166
167 if (cpu_isset(i, cpu_present_map)) {
168 extern unsigned long sun4m_cpu_startup;
169 unsigned long *entry = &sun4m_cpu_startup;
170 struct task_struct *p;
171 int timeout;
172
173 /* Cook up an idler for this guy. */
174 p = fork_idle(i);
175 cpucount++;
176 current_set[i] = p->thread_info;
177 /* See trampoline.S for details... */
178 entry += ((i-1) * 3);
179
180 /*
181 * Initialize the contexts table
182 * Since the call to prom_startcpu() trashes the structure,
183 * we need to re-initialize it for each cpu
184 */
185 smp_penguin_ctable.which_io = 0;
186 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
187 smp_penguin_ctable.reg_size = 0;
188
189 /* whirrr, whirrr, whirrrrrrrrr... */
190 printk("Starting CPU %d at %p\n", i, entry);
191 local_flush_cache_all();
192 prom_startcpu(cpu_data(i).prom_node,
193 &smp_penguin_ctable, 0, (char *)entry);
194
195 /* wheee... it's going... */
196 for(timeout = 0; timeout < 10000; timeout++) {
197 if(cpu_callin_map[i])
198 break;
199 udelay(200);
200 }
201 if(cpu_callin_map[i]) {
202 /* Another "Red Snapper". */
203 __cpu_number_map[i] = i;
204 __cpu_logical_map[i] = i;
205 } else {
206 cpucount--;
207 printk("Processor %d is stuck.\n", i);
208 }
209 }
210 if(!(cpu_callin_map[i])) {
211 cpu_clear(i, cpu_present_map);
212 __cpu_number_map[i] = -1;
213 }
214 }
215 local_flush_cache_all();
216 if(cpucount == 0) {
217 printk("Error: only one Processor found.\n");
218 cpu_present_map = cpumask_of_cpu(smp_processor_id());
219 } else {
220 unsigned long bogosum = 0;
221 for(i = 0; i < NR_CPUS; i++) {
222 if (cpu_isset(i, cpu_present_map))
223 bogosum += cpu_data(i).udelay_val;
224 }
225 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
226 cpucount + 1,
227 bogosum/(500000/HZ),
228 (bogosum/(5000/HZ))%100);
229 smp_activated = 1;
230 smp_num_cpus = cpucount + 1;
231 }
232
233 /* Free unneeded trap tables */
234 if (!cpu_isset(i, cpu_present_map)) {
235 ClearPageReserved(virt_to_page(trapbase_cpu1));
236 set_page_count(virt_to_page(trapbase_cpu1), 1);
237 free_page((unsigned long)trapbase_cpu1);
238 totalram_pages++;
239 num_physpages++;
240 }
241 if (!cpu_isset(2, cpu_present_map)) {
242 ClearPageReserved(virt_to_page(trapbase_cpu2));
243 set_page_count(virt_to_page(trapbase_cpu2), 1);
244 free_page((unsigned long)trapbase_cpu2);
245 totalram_pages++;
246 num_physpages++;
247 }
248 if (!cpu_isset(3, cpu_present_map)) {
249 ClearPageReserved(virt_to_page(trapbase_cpu3));
250 set_page_count(virt_to_page(trapbase_cpu3), 1);
251 free_page((unsigned long)trapbase_cpu3);
252 totalram_pages++;
253 num_physpages++;
254 }
255
256 /* Ok, they are spinning and ready to go. */
257 smp_processors_ready = 1;
258}
259
260/* At each hardware IRQ, we get this called to forward IRQ reception
261 * to the next processor. The caller must disable the IRQ level being
262 * serviced globally so that there are no double interrupts received.
263 *
264 * XXX See sparc64 irq.c.
265 */
void smp4m_irq_rotate(int cpu)
{
	/* Intentionally empty: IRQ rotation is not implemented on sun4m
	 * (see the comment above and the sparc64 irq.c reference).
	 */
}
269
270/* Cross calls, in order to work efficiently and atomically do all
271 * the message passing work themselves, only stopcpu and reschedule
272 * messages come through here.
273 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
	static unsigned long smp_cpu_in_msg[NR_CPUS];
	cpumask_t mask;
	int me = smp_processor_id();
	int irq, i;

	/* Map the message type to its IPI level; a reschedule request is
	 * dropped if this CPU is already inside message_pass.  Any other
	 * message type is a bug.
	 */
	if(msg == MSG_RESCHEDULE) {
		irq = IRQ_RESCHEDULE;

		if(smp_cpu_in_msg[me])
			return;
	} else if(msg == MSG_STOP_CPU) {
		irq = IRQ_STOP_CPU;
	} else {
		goto barf;
	}

	smp_cpu_in_msg[me]++;
	if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
		mask = cpu_present_map;
		if(target == MSG_ALL_BUT_SELF)
			cpu_clear(me, mask);
		/* Poke each selected CPU (at most 4 on sun4m). */
		for(i = 0; i < 4; i++) {
			if (cpu_isset(i, mask))
				set_cpu_int(i, irq);
		}
	} else {
		set_cpu_int(target, irq);
	}
	smp_cpu_in_msg[me]--;

	return;
barf:
	printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
	panic("Bogon SMP message pass.");
}
311
/* Argument block for a sun4m cross-CPU function call: the initiator
 * fills in func/arg1..arg5, then each CPU flags entry to and exit from
 * the IPI handler in the per-CPU arrays.
 */
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;
322
323static DEFINE_SPINLOCK(cross_call_lock);
324
/* Cross calls must be serialized, at least currently.
 *
 * Run func(arg1..arg5) on every other present CPU and busy-wait until
 * all of them have both entered and left the handler.
 */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		      unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	if(smp_processors_ready) {
		register int ncpus = smp_num_cpus;
		unsigned long flags;

		/* Only one cross call may be in flight at a time. */
		spin_lock_irqsave(&cross_call_lock, flags);

		/* Init function glue. */
		ccall_info.func = func;
		ccall_info.arg1 = arg1;
		ccall_info.arg2 = arg2;
		ccall_info.arg3 = arg3;
		ccall_info.arg4 = arg4;
		ccall_info.arg5 = arg5;

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			cpumask_t mask = cpu_present_map;
			register int i;

			cpu_clear(smp_processor_id(), mask);
			for(i = 0; i < ncpus; i++) {
				if (cpu_isset(i, mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					set_cpu_int(i, IRQ_CROSS_CALL);
				} else {
					/* Not a target (or ourselves): mark
					 * done so the wait loops skip it.
					 */
					ccall_info.processors_in[i] = 1;
					ccall_info.processors_out[i] = 1;
				}
			}
		}

		{
			register int i;

			/* First wait for every target to enter the handler,
			 * then for every target to finish it.
			 */
			i = 0;
			do {
				while(!ccall_info.processors_in[i])
					barrier();
			} while(++i < ncpus);

			i = 0;
			do {
				while(!ccall_info.processors_out[i])
					barrier();
			} while(++i < ncpus);
		}

		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}
380
381/* Running cross calls. */
382void smp4m_cross_call_irq(void)
383{
384 int i = smp_processor_id();
385
386 ccall_info.processors_in[i] = 1;
387 ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
388 ccall_info.arg4, ccall_info.arg5);
389 ccall_info.processors_out[i] = 1;
390}
391
/* Per-CPU level-14 profile timer interrupt for sun4m: acknowledge the
 * timer and drive profiling / process-time accounting.
 */
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	/* Ack the per-cpu level-14 timer. */
	clear_profile_irq(cpu);

	profile_tick(CPU_PROFILING, regs);

	/* Charge process time only every prof_multiplier-th tick. */
	if(!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
}
410
411extern unsigned int lvl14_resolution;
412
/* Arm this CPU's level-14 profile timer; the boot CPU additionally
 * unmasks PIL 14.
 */
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);

	if(cpu == boot_cpu_id)
		enable_pil_irq(14);
}
423
/* Btfixup blackbox: patch hard_smp_processor_id() so the CPU id is
 * derived from %tbr -- shift right 0xc and mask with 3 extracts bits
 * 13:12.  "rd" is the destination-register field taken from the
 * template instruction; "rs1" is the same register encoded as source.
 */
void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}

/* Btfixup blackbox for load_current: same %tbr source, but srl 0xa /
 * and 0xc leaves the CPU id already multiplied by 4 (a word offset).
 */
void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}
443
/* Wire up the sun4m SMP entry points via btfixup. */
void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}
diff --git a/arch/sparc/kernel/sun4setup.c b/arch/sparc/kernel/sun4setup.c
new file mode 100644
index 000000000000..229a52f55f16
--- /dev/null
+++ b/arch/sparc/kernel/sun4setup.c
@@ -0,0 +1,75 @@
1/* sun4setup.c: Setup the hardware address of various items in the sun4
2 * architecture. Called from idprom_init
3 *
4 * Copyright (C) 1998 Chris G. Davis (cdavis@cois.on.ca)
5 */
6
7#include <asm/page.h>
8#include <asm/oplib.h>
9#include <asm/idprom.h>
10#include <asm/sun4paddr.h>
11#include <asm/machines.h>
12
/*
 * Physical addresses of the core sun4 devices, filled in by
 * sun4setup() according to the machine type and consumed by the
 * respective drivers.
 */
13int sun4_memreg_physaddr;
14int sun4_ie_physaddr;
15int sun4_clock_physaddr;
16int sun4_timer_physaddr;
17int sun4_eth_physaddr;
18int sun4_si_physaddr;
19int sun4_bwtwo_physaddr;
20int sun4_zs0_physaddr;
21int sun4_zs1_physaddr;
22int sun4_dma_physaddr;
23int sun4_esp_physaddr;
24int sun4_ie_physaddr;	/* NOTE(review): duplicate of line 14 above; a legal C tentative re-definition, but it should be removed */
25
/*
 * Record the device physical addresses for the detected sun4 model
 * (4/260, 4/330 or 4/470).  Unknown machine types leave the
 * model-specific addresses at their zero-initialised values.
 */
26void __init sun4setup(void)
27{
	/* No trailing newline: the next printk continues this line. */
28	printk("Sun4 Hardware Setup v1.0 18/May/98 Chris Davis (cdavis@cois.on.ca). ");
29	/*
30	  setup standard sun4 info
31	 */
32	sun4_ie_physaddr=SUN4_IE_PHYSADDR;
33
34	/*
35	  setup model specific info
36	 */
37	switch(idprom->id_machtype) {
38		case (SM_SUN4 | SM_4_260 ):
39			printk("Setup for a SUN4/260\n");
40			sun4_memreg_physaddr=SUN4_200_MEMREG_PHYSADDR;
41			sun4_clock_physaddr=SUN4_200_CLOCK_PHYSADDR;
42			sun4_timer_physaddr=SUN4_UNUSED_PHYSADDR;	/* 200-series has no separate timer */
43			sun4_eth_physaddr=SUN4_200_ETH_PHYSADDR;
44			sun4_si_physaddr=SUN4_200_SI_PHYSADDR;
45			sun4_bwtwo_physaddr=SUN4_200_BWTWO_PHYSADDR;
46			sun4_dma_physaddr=SUN4_UNUSED_PHYSADDR;
47			sun4_esp_physaddr=SUN4_UNUSED_PHYSADDR;
48			break;
49		case (SM_SUN4 | SM_4_330 ):
50			printk("Setup for a SUN4/330\n");
51			sun4_memreg_physaddr=SUN4_300_MEMREG_PHYSADDR;
52			sun4_clock_physaddr=SUN4_300_CLOCK_PHYSADDR;
53			sun4_timer_physaddr=SUN4_300_TIMER_PHYSADDR;
54			sun4_eth_physaddr=SUN4_300_ETH_PHYSADDR;
55			sun4_si_physaddr=SUN4_UNUSED_PHYSADDR;	/* 300/400 use ESP, not the SCSI interface chip */
56			sun4_bwtwo_physaddr=SUN4_300_BWTWO_PHYSADDR;
57			sun4_dma_physaddr=SUN4_300_DMA_PHYSADDR;
58			sun4_esp_physaddr=SUN4_300_ESP_PHYSADDR;
59			break;
60		case (SM_SUN4 | SM_4_470 ):
61			printk("Setup for a SUN4/470\n");
62			sun4_memreg_physaddr=SUN4_400_MEMREG_PHYSADDR;
63			sun4_clock_physaddr=SUN4_400_CLOCK_PHYSADDR;
64			sun4_timer_physaddr=SUN4_400_TIMER_PHYSADDR;
65			sun4_eth_physaddr=SUN4_400_ETH_PHYSADDR;
66			sun4_si_physaddr=SUN4_UNUSED_PHYSADDR;
67			sun4_bwtwo_physaddr=SUN4_400_BWTWO_PHYSADDR;
68			sun4_dma_physaddr=SUN4_400_DMA_PHYSADDR;
69			sun4_esp_physaddr=SUN4_400_ESP_PHYSADDR;
70			break;
71		default:
72			;
73	}
74}
75
diff --git a/arch/sparc/kernel/sunos_asm.S b/arch/sparc/kernel/sunos_asm.S
new file mode 100644
index 000000000000..07fe86014fb5
--- /dev/null
+++ b/arch/sparc/kernel/sunos_asm.S
@@ -0,0 +1,67 @@
1/* $Id: sunos_asm.S,v 1.15 2000/01/11 17:33:21 jj Exp $
2 * sunos_asm.S: SunOS system calls which must have a low-level
3 * entry point to operate correctly.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <asm/ptrace.h>
13
14	.text
15	.align 4
16
17	/* When calling ret_sys_call, %o0 should contain the same
18	 * value as in [%sp + STACKFRAME_SZ + PT_I0] */
19
	/* Each two-value stub below relies on SPARC delay slots: the
	 * store in the delay slot of the second call executes BEFORE
	 * that call's target runs, so it saves the FIRST call's result
	 * (still in %o0) into the pt_regs %i1 slot.  The delay slot of
	 * the branch to ret_sys_call then stores the second call's
	 * result into the %i0 slot, which becomes the syscall return. */

20	/* SunOS getpid() returns pid in %o0 and ppid in %o1 */
21	.globl	sunos_getpid
22sunos_getpid:
23	call	sys_getppid
24	 nop
25
26	call	sys_getpid
27	 st	%o0, [%sp + STACKFRAME_SZ + PT_I1]	/* ppid -> %i1 (runs before sys_getpid) */
28
29	b	ret_sys_call
30	 st	%o0, [%sp + STACKFRAME_SZ + PT_I0]	/* pid -> %i0 */
31
32	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
33	.globl	sunos_getuid
34sunos_getuid:
35	call	sys_geteuid16
36	 nop
37
38	call	sys_getuid16
39	 st	%o0, [%sp + STACKFRAME_SZ + PT_I1]	/* euid -> %i1 */
40
41	b	ret_sys_call
42	 st	%o0, [%sp + STACKFRAME_SZ + PT_I0]	/* uid -> %i0 */
43
44	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
45	.globl	sunos_getgid
46sunos_getgid:
47	call	sys_getegid16
48	 nop
49
50	call	sys_getgid16
51	 st	%o0, [%sp + STACKFRAME_SZ + PT_I1]	/* egid -> %i1 */
52
53	b	ret_sys_call
54	 st	%o0, [%sp + STACKFRAME_SZ + PT_I0]	/* gid -> %i0 */
55
56	/* SunOS's execv() call only specifies the argv argument, the
57	 * environment settings are the same as the calling processes.
58	 */
59	.globl	sunos_execv
60sunos_execv:
	/* Zero the envp syscall argument in pt_regs, then hand the
	 * whole pt_regs to sparc_execve. */
61	st	%g0, [%sp + STACKFRAME_SZ + PT_I2]
62
63	call	sparc_execve
64	 add	%sp, STACKFRAME_SZ, %o0
65
66	b	ret_sys_call
67	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
new file mode 100644
index 000000000000..df1c0b31a930
--- /dev/null
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -0,0 +1,231 @@
1/* $Id: sunos_ioctl.c,v 1.34 2000/09/03 14:10:56 anton Exp $
2 * sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
3 *
4 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/uaccess.h>
9
10#include <linux/sched.h>
11#include <linux/errno.h>
12#include <linux/string.h>
13#include <linux/termios.h>
14#include <linux/ioctl.h>
15#include <linux/route.h>
16#include <linux/sockios.h>
17#include <linux/if.h>
18#include <linux/netdevice.h>
19#include <linux/if_arp.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/syscalls.h>
25#include <linux/file.h>
26#include <asm/kbio.h>
27
28#if 0
29extern char sunkbd_type;
30extern char sunkbd_layout;
31#endif
32
33/* NR_OPEN is now larger and dynamic in recent kernels. */
34#define SUNOS_NR_OPEN 256
35
/*
 * sunos_ioctl(): SunOS -> Linux ioctl translation.  Known SunOS
 * command numbers are rewritten to their Linux equivalents and
 * forwarded to sys_ioctl(); anything unrecognised falls through to
 * sys_ioctl() unchanged.  SunOS binaries expect -EOPNOTSUPP rather
 * than -EINVAL for unsupported commands, hence the final mapping.
 */
36asmlinkage int sunos_ioctl (int fd, unsigned long cmd, unsigned long arg)
37{
38	int ret = -EBADF;
39
	/* SunOS fd-table limit; also rejects fds that are not open. */
40	if (fd >= SUNOS_NR_OPEN || !fcheck(fd))
41		goto out;
42
43	/* First handle an easy compat. case for tty ldisc. */
44	if (cmd == TIOCSETD) {
45		int __user *p;
46		int ntty = N_TTY, tmp;
47		mm_segment_t oldfs;
48
49		p = (int __user *) arg;
50		ret = -EFAULT;
51		if (get_user(tmp, p))
52			goto out;
		/* SunOS line discipline 2 maps to Linux N_TTY; &ntty is a
		 * kernel pointer, so widen the fs segment around the call. */
53		if (tmp == 2) {
54			oldfs = get_fs();
55			set_fs(KERNEL_DS);
56			ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
57			set_fs(oldfs);
58			ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
59			goto out;
60		}
61	}
62
63	/* Binary compatibility is good American knowhow fuckin' up. */
64	if (cmd == TIOCNOTTY) {
65		ret = sys_setsid();
66		goto out;
67	}
68
69	/* SunOS networking ioctls. */
70	switch (cmd) {
71	case _IOW('r', 10, struct rtentry):
72		ret = sys_ioctl(fd, SIOCADDRT, arg);
73		goto out;
74	case _IOW('r', 11, struct rtentry):
75		ret = sys_ioctl(fd, SIOCDELRT, arg);
76		goto out;
77	case _IOW('i', 12, struct ifreq):
78		ret = sys_ioctl(fd, SIOCSIFADDR, arg);
79		goto out;
80	case _IOWR('i', 13, struct ifreq):
81		ret = sys_ioctl(fd, SIOCGIFADDR, arg);
82		goto out;
83	case _IOW('i', 14, struct ifreq):
84		ret = sys_ioctl(fd, SIOCSIFDSTADDR, arg);
85		goto out;
86	case _IOWR('i', 15, struct ifreq):
87		ret = sys_ioctl(fd, SIOCGIFDSTADDR, arg);
88		goto out;
89	case _IOW('i', 16, struct ifreq):
90		ret = sys_ioctl(fd, SIOCSIFFLAGS, arg);
91		goto out;
92	case _IOWR('i', 17, struct ifreq):
93		ret = sys_ioctl(fd, SIOCGIFFLAGS, arg);
94		goto out;
95	case _IOW('i', 18, struct ifreq):
96		ret = sys_ioctl(fd, SIOCSIFMEM, arg);
97		goto out;
98	case _IOWR('i', 19, struct ifreq):
99		ret = sys_ioctl(fd, SIOCGIFMEM, arg);
100		goto out;
101	case _IOWR('i', 20, struct ifconf):
102		ret = sys_ioctl(fd, SIOCGIFCONF, arg);
103		goto out;
104	case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
105		ret = sys_ioctl(fd, SIOCSIFMTU, arg);
106		goto out;
107	case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
108		ret = sys_ioctl(fd, SIOCGIFMTU, arg);
109		goto out;
110
111	case _IOWR('i', 23, struct ifreq):
112		ret = sys_ioctl(fd, SIOCGIFBRDADDR, arg);
113		goto out;
114	case _IOW('i', 24, struct ifreq):
115		ret = sys_ioctl(fd, SIOCSIFBRDADDR, arg);
116		goto out;
117	case _IOWR('i', 25, struct ifreq):
118		ret = sys_ioctl(fd, SIOCGIFNETMASK, arg);
119		goto out;
120	case _IOW('i', 26, struct ifreq):
121		ret = sys_ioctl(fd, SIOCSIFNETMASK, arg);
122		goto out;
123	case _IOWR('i', 27, struct ifreq):
124		ret = sys_ioctl(fd, SIOCGIFMETRIC, arg);
125		goto out;
126	case _IOW('i', 28, struct ifreq):
127		ret = sys_ioctl(fd, SIOCSIFMETRIC, arg);
128		goto out;
129
130	case _IOW('i', 30, struct arpreq):
131		ret = sys_ioctl(fd, SIOCSARP, arg);
132		goto out;
133	case _IOWR('i', 31, struct arpreq):
134		ret = sys_ioctl(fd, SIOCGARP, arg);
135		goto out;
136	case _IOW('i', 32, struct arpreq):
137		ret = sys_ioctl(fd, SIOCDARP, arg);
138		goto out;
139
	/* SunOS link-layer / sync-serial ioctls with no Linux analogue.
	 * NOTE(review): 45 (SIOCGETSYNC) is encoded _IOW here -- this
	 * mirrors the SunOS numbering, not the usual GET=_IOWR rule. */
140	case _IOW('i', 40, struct ifreq): /* SIOCUPPER */
141	case _IOW('i', 41, struct ifreq): /* SIOCLOWER */
142	case _IOW('i', 44, struct ifreq): /* SIOCSETSYNC */
143	case _IOW('i', 45, struct ifreq): /* SIOCGETSYNC */
144	case _IOW('i', 46, struct ifreq): /* SIOCSSDSTATS */
145	case _IOW('i', 47, struct ifreq): /* SIOCSSESTATS */
146	case _IOW('i', 48, struct ifreq): /* SIOCSPROMISC */
147		ret = -EOPNOTSUPP;
148		goto out;
149
150	case _IOW('i', 49, struct ifreq):
151		ret = sys_ioctl(fd, SIOCADDMULTI, arg);
152		goto out;
153	case _IOW('i', 50, struct ifreq):
154		ret = sys_ioctl(fd, SIOCDELMULTI, arg);
155		goto out;
156
157	/* FDDI interface ioctls, unsupported. */
158
159	case _IOW('i', 51, struct ifreq): /* SIOCFDRESET */
160	case _IOW('i', 52, struct ifreq): /* SIOCFDSLEEP */
161	case _IOW('i', 53, struct ifreq): /* SIOCSTRTFMWAR */
162	case _IOW('i', 54, struct ifreq): /* SIOCLDNSTRTFW */
163	case _IOW('i', 55, struct ifreq): /* SIOCGETFDSTAT */
164	case _IOW('i', 56, struct ifreq): /* SIOCFDNMIINT */
165	case _IOW('i', 57, struct ifreq): /* SIOCFDEXUSER */
166	case _IOW('i', 58, struct ifreq): /* SIOCFDGNETMAP */
167	case _IOW('i', 59, struct ifreq): /* SIOCFDGIOCTL */
168		printk("FDDI ioctl, returning EOPNOTSUPP\n");
169		ret = -EOPNOTSUPP;
170		goto out;
171
172	case _IOW('t', 125, int):
173		/* More stupid tty sunos ioctls, just
174		 * say it worked.
175		 */
176		ret = 0;
177		goto out;
	/* SunOS TIOCSPGRP/TIOCGPGRP: SunOS reports failure by writing -1
	 * through the user pointer, so restore the caller's value and
	 * return -EIO in that case; -ENOTTY also becomes -EIO. */
178	/* Non posix grp */
179	case _IOW('t', 118, int): {
180		int oldval, newval, __user *ptr;
181
182		cmd = TIOCSPGRP;
183		ptr = (int __user *) arg;
184		ret = -EFAULT;
185		if (get_user(oldval, ptr))
186			goto out;
187		ret = sys_ioctl(fd, cmd, arg);
188		__get_user(newval, ptr);
189		if (newval == -1) {
190			__put_user(oldval, ptr);
191			ret = -EIO;
192		}
193		if (ret == -ENOTTY)
194			ret = -EIO;
195		goto out;
196	}
197
198	case _IOR('t', 119, int): {
199		int oldval, newval, __user *ptr;
200
201		cmd = TIOCGPGRP;
202		ptr = (int __user *) arg;
203		ret = -EFAULT;
204		if (get_user(oldval, ptr))
205			goto out;
206		ret = sys_ioctl(fd, cmd, arg);
207		__get_user(newval, ptr);
208		if (newval == -1) {
209			__put_user(oldval, ptr);
210			ret = -EIO;
211		}
212		if (ret == -ENOTTY)
213			ret = -EIO;
214		goto out;
215	}
216	}
217
218#if 0
219	if ((cmd & 0xff00) == ('k' << 8)) {
220		printk ("[[KBIO: %8.8x\n", (unsigned int) cmd);
221	}
222#endif
223
	/* Not a known SunOS number -- try it as a native Linux ioctl. */
224	ret = sys_ioctl(fd, cmd, arg);
225	/* so stupid... */
226	ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
227out:
228	return ret;
229}
230
231
diff --git a/arch/sparc/kernel/sys_solaris.c b/arch/sparc/kernel/sys_solaris.c
new file mode 100644
index 000000000000..fb7578554c78
--- /dev/null
+++ b/arch/sparc/kernel/sys_solaris.c
@@ -0,0 +1,37 @@
1/*
2 * linux/arch/sparc/sys_solaris.c
3 *
4 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 */
6
7#include <linux/config.h>
8#include <linux/sched.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/personality.h>
13#include <linux/ptrace.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/smp_lock.h>
17#include <linux/module.h>
18
/*
 * Stub entry for the Solaris syscall trap: log the first few attempts
 * and kill the task with SIGSEGV, since no Solaris emulation exists.
 */
19asmlinkage int
20do_solaris_syscall (struct pt_regs *regs)
21{
22	static int cnt = 0;	/* crude printk rate limit */
23	if (++cnt < 10) printk ("No solaris handler\n");
24	force_sig(SIGSEGV, current);
25	return 0;
26}
27
/*
 * Fallback SunOS trap handler, compiled only when CONFIG_SUNOS_EMUL is
 * off (otherwise the real emulation provides this entry point).
 */
28#ifndef CONFIG_SUNOS_EMUL
29asmlinkage int
30do_sunos_syscall (struct pt_regs *regs)
31{
32	static int cnt = 0;	/* crude printk rate limit */
33	if (++cnt < 10) printk ("SunOS binary emulation not compiled in\n");
34	force_sig (SIGSEGV, current);
35	return 0;
36}
37#endif
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
new file mode 100644
index 000000000000..0cdfc9d294b4
--- /dev/null
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -0,0 +1,485 @@
1/* $Id: sys_sparc.c,v 1.70 2001/04/14 01:12:02 davem Exp $
2 * linux/arch/sparc/kernel/sys_sparc.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
6 * platform.
7 */
8
9#include <linux/errno.h>
10#include <linux/types.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/fs.h>
14#include <linux/file.h>
15#include <linux/sem.h>
16#include <linux/msg.h>
17#include <linux/shm.h>
18#include <linux/stat.h>
19#include <linux/syscalls.h>
20#include <linux/mman.h>
21#include <linux/utsname.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24
25#include <asm/uaccess.h>
26#include <asm/ipc.h>
27
28/* #define DEBUG_UNIMP_SYSCALL */
29
30/* XXX Make this per-binary type, this way we can detect the type of
31 * XXX a binary.  Every Sparc executable calls this very early on.
32 */
/* Report the MMU page size to userspace (getpagesize(2)). */
33asmlinkage unsigned long sys_getpagesize(void)
34{
35	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
36}
37
/* Round up to a VAC colour boundary so shared mappings alias cleanly. */
38#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))

/*
 * Pick an unmapped virtual range for mmap().  Shared mappings must be
 * SHMLBA-aligned (cache colouring); on sun4c/sun4 the hole between
 * 0x20000000 and 0xe0000000 is not usable VA and is skipped over.
 */
40unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
41{
42	struct vm_area_struct * vmm;
43
44	if (flags & MAP_FIXED) {
45		/* We do not accept a shared mapping if it would violate
46		 * cache aliasing constraints.
47		 */
48		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
49			return -EINVAL;
50		return addr;
51	}
52
53	/* See asm-sparc/uaccess.h */
54	if (len > TASK_SIZE - PAGE_SIZE)
55		return -ENOMEM;
56	if (ARCH_SUN4C_SUN4 && len > 0x20000000)
57		return -ENOMEM;
58	if (!addr)
59		addr = TASK_UNMAPPED_BASE;
60
61	if (flags & MAP_SHARED)
62		addr = COLOUR_ALIGN(addr);
63	else
64		addr = PAGE_ALIGN(addr);
65
	/* Linear first-fit walk over the VMA list. */
66	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
67		/* At this point:  (!vmm || addr < vmm->vm_end). */
		/* Skip the sun4c VA hole in one jump. */
68		if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 && 0x20000000 - len < addr) {
69			addr = PAGE_OFFSET;
70			vmm = find_vma(current->mm, PAGE_OFFSET);
71		}
72		if (TASK_SIZE - PAGE_SIZE - len < addr)
73			return -ENOMEM;
74		if (!vmm || addr + len <= vmm->vm_start)
75			return addr;
76		addr = vmm->vm_end;
77		if (flags & MAP_SHARED)
78			addr = COLOUR_ALIGN(addr);
79	}
80}
81
/*
 * brk(2) wrapper: on sun4c/sun4, refuse to move the break across the
 * 0x20000000/0xe0000000 VA-hole boundary (returns the old brk, as
 * sys_brk does on failure); otherwise defer to the generic sys_brk().
 */
82asmlinkage unsigned long sparc_brk(unsigned long brk)
83{
84	if(ARCH_SUN4C_SUN4) {
85		if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
86			return current->mm->brk;
87	}
88	return sys_brk(brk);
89}
90
91/*
92 * sys_pipe() is the normal C calling standard for creating
93 * a pipe. It's not the way unix traditionally does this, though.
94 */
/* SPARC pipe(2): fd[0] is the syscall return, fd[1] goes back in %o1. */
95asmlinkage int sparc_pipe(struct pt_regs *regs)
96{
97	int fd[2];
98	int error;
99
100	error = do_pipe(fd);
101	if (error)
102		goto out;
103	regs->u_regs[UREG_I1] = fd[1];	/* second fd returned in %o1 */
104	error = fd[0];
105out:
106	return error;
107}
108
109/*
110 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
111 *
112 * This is really horribly ugly.
113 */
114
/*
 * Dispatch on the low 16 bits of 'call' (SEM*, MSG*, SHM* groups);
 * the high 16 bits carry an ABI version used by MSGRCV and SHMAT to
 * select between the old ipc_kludge layout and the newer calling form.
 */
115asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
116{
117	int version, err;
118
119	version = call >> 16; /* hack for backward compatibility */
120	call &= 0xffff;
121
122	if (call <= SEMCTL)
123		switch (call) {
124		case SEMOP:
125			err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
126			goto out;
127		case SEMTIMEDOP:
128			err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
129			goto out;
130		case SEMGET:
131			err = sys_semget (first, second, third);
132			goto out;
133		case SEMCTL: {
			/* The semun argument arrives as a userspace pointer
			 * to the union, not the union itself. */
134			union semun fourth;
135			err = -EINVAL;
136			if (!ptr)
137				goto out;
138			err = -EFAULT;
139			if (get_user(fourth.__pad,
140				     (void __user * __user *)ptr))
141				goto out;
142			err = sys_semctl (first, second, third, fourth);
143			goto out;
144			}
145		default:
146			err = -ENOSYS;
147			goto out;
148		}
149	if (call <= MSGCTL) 
150		switch (call) {
151		case MSGSND:
152			err = sys_msgsnd (first, (struct msgbuf __user *) ptr, 
153					  second, third);
154			goto out;
155		case MSGRCV:
156			switch (version) {
157			case 0: {
				/* Old ABI: msgp and msgtyp packed in a
				 * struct ipc_kludge at 'ptr'. */
158				struct ipc_kludge tmp;
159				err = -EINVAL;
160				if (!ptr)
161					goto out;
162				err = -EFAULT;
163				if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr, sizeof (tmp)))
164					goto out;
165				err = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
166				goto out;
167				}
168			case 1: default:
169				err = sys_msgrcv (first,
170						  (struct msgbuf __user *) ptr,
171						  second, fifth, third);
172				goto out;
173			}
174		case MSGGET:
175			err = sys_msgget ((key_t) first, second);
176			goto out;
177		case MSGCTL:
178			err = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
179			goto out;
180		default:
181			err = -ENOSYS;
182			goto out;
183		}
184	if (call <= SHMCTL) 
185		switch (call) {
186		case SHMAT:
187			switch (version) {
188			case 0: default: {
				/* Attach address is written back through the
				 * user pointer passed in 'third'. */
189				ulong raddr;
190				err = do_shmat (first, (char __user *) ptr, second, &raddr);
191				if (err)
192					goto out;
193				err = -EFAULT;
194				if (put_user (raddr, (ulong __user *) third))
195					goto out;
196				err = 0;
197				goto out;
198				}
199			case 1:	/* iBCS2 emulator entry point */
200				err = -EINVAL;
201				goto out;
202			}
203		case SHMDT: 
204			err = sys_shmdt ((char __user *)ptr);
205			goto out;
206		case SHMGET:
207			err = sys_shmget (first, second, third);
208			goto out;
209		case SHMCTL:
210			err = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
211			goto out;
212		default:
213			err = -ENOSYS;
214			goto out;
215		}
216	else
217		err = -ENOSYS;
218out:
219	return err;
220}
221
222/* Linux version of mmap */
/*
 * Common mmap worker for sys_mmap and sys_mmap2: resolves the fd,
 * enforces the sun4c VA-hole and TASK_SIZE limits, and calls
 * do_mmap_pgoff() under the mmap semaphore.  'pgoff' is in pages.
 */
223static unsigned long do_mmap2(unsigned long addr, unsigned long len,
224	unsigned long prot, unsigned long flags, unsigned long fd,
225	unsigned long pgoff)
226{
227	struct file * file = NULL;
228	unsigned long retval = -EBADF;
229
230	if (!(flags & MAP_ANONYMOUS)) {
231		file = fget(fd);
232		if (!file)
233			goto out;
234	}
235
236	retval = -EINVAL;
237	len = PAGE_ALIGN(len);
	/* Reject mappings that would fall into the sun4c VA hole. */
238	if (ARCH_SUN4C_SUN4 &&
239	    (len > 0x20000000 ||
240	     ((flags & MAP_FIXED) &&
241	      addr < 0xe0000000 && addr + len > 0x20000000)))
242		goto out_putf;
243
244	/* See asm-sparc/uaccess.h */
245	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
246		goto out_putf;
247
248	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
249
250	down_write(&current->mm->mmap_sem);
251	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
252	up_write(&current->mm->mmap_sem);
253
254out_putf:
255	if (file)
256		fput(file);
257out:
258	return retval;
259}
260
/* mmap2(2): offset argument is in fixed 4096-byte units by ABI. */
261asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
262	unsigned long prot, unsigned long flags, unsigned long fd,
263	unsigned long pgoff)
264{
265	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
266	   we have. */
267	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
268}
269
/* Classic mmap(2): offset is in bytes, converted here to pages. */
270asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
271	unsigned long prot, unsigned long flags, unsigned long fd,
272	unsigned long off)
273{
274	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
275}
276
/*
 * remap_file_pages(2) wrapper: convert the ABI's 4096-byte-unit pgoff
 * to kernel pages before calling the generic implementation.
 */
277long sparc_remap_file_pages(unsigned long start, unsigned long size,
278			   unsigned long prot, unsigned long pgoff,
279			   unsigned long flags)
280{
281	/* This works on an existing mmap so we don't need to validate
282	 * the range as that was done at the original mmap call.
283	 */
284	return sys_remap_file_pages(start, size, prot,
285				    (pgoff >> (PAGE_SHIFT - 12)), flags);
286}
287
288extern unsigned long do_mremap(unsigned long addr,
289 unsigned long old_len, unsigned long new_len,
290 unsigned long flags, unsigned long new_addr);
291
/*
 * mremap(2) wrapper enforcing the sun4c VA hole and TASK_SIZE limits.
 * If a non-fixed grow would cross a forbidden region, a fresh legal
 * target is picked with get_unmapped_area() and the request is turned
 * into a MREMAP_FIXED move (requires MREMAP_MAYMOVE).
 */
292asmlinkage unsigned long sparc_mremap(unsigned long addr,
293	unsigned long old_len, unsigned long new_len,
294	unsigned long flags, unsigned long new_addr)
295{
296	struct vm_area_struct *vma;
297	unsigned long ret = -EINVAL;
298	if (ARCH_SUN4C_SUN4) {
299		if (old_len > 0x20000000 || new_len > 0x20000000)
300			goto out;
301		if (addr < 0xe0000000 && addr + old_len > 0x20000000)
302			goto out;
303	}
304	if (old_len > TASK_SIZE - PAGE_SIZE ||
305	    new_len > TASK_SIZE - PAGE_SIZE)
306		goto out;
307	down_write(&current->mm->mmap_sem);
308	if (flags & MREMAP_FIXED) {
309		if (ARCH_SUN4C_SUN4 &&
310		    new_addr < 0xe0000000 &&
311		    new_addr + new_len > 0x20000000)
312			goto out_sem;
313		if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
314			goto out_sem;
315	} else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
316		    addr + new_len > 0x20000000) ||
317		   addr + new_len > TASK_SIZE - PAGE_SIZE) {
		/* In-place growth would cross a forbidden region:
		 * relocate instead, preserving sharedness and backing
		 * file of the existing mapping. */
318		unsigned long map_flags = 0;
319		struct file *file = NULL;
320
321		ret = -ENOMEM;
322		if (!(flags & MREMAP_MAYMOVE))
323			goto out_sem;
324
325		vma = find_vma(current->mm, addr);
326		if (vma) {
327			if (vma->vm_flags & VM_SHARED)
328				map_flags |= MAP_SHARED;
329			file = vma->vm_file;
330		}
331
332		new_addr = get_unmapped_area(file, addr, new_len,
333				     vma ? vma->vm_pgoff : 0,
334				     map_flags);
335		ret = new_addr;
336		if (new_addr & ~PAGE_MASK)	/* error code from get_unmapped_area */
337			goto out_sem;
338		flags |= MREMAP_FIXED;
339	}
340	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
341out_sem:
342	up_write(&current->mm->mmap_sem);
343out:
344	return ret;
345}
346
347/* we come to here via sys_nis_syscall so it can setup the regs argument */
/*
 * Catch-all for unimplemented SPARC syscalls: log the first few
 * occurrences (syscall number is in %g1 == u_regs[1]), then -ENOSYS.
 */
348asmlinkage unsigned long
349c_sys_nis_syscall (struct pt_regs *regs)
350{
351	static int count = 0;	/* crude printk rate limit, never resets */
352	
353	if (count++ > 5)
354		return -ENOSYS;
355	printk ("%s[%d]: Unimplemented SPARC system call %d\n",
356		current->comm, current->pid, (int)regs->u_regs[1]);
357#ifdef DEBUG_UNIMP_SYSCALL	
358	show_regs (regs);
359#endif
360	return -ENOSYS;
361}
362
363/* #define DEBUG_SPARC_BREAKPOINT */
364
/*
 * Software breakpoint trap handler: deliver SIGTRAP/TRAP_BRKPT to the
 * current task with the faulting PC as the signal address.
 */
365asmlinkage void
366sparc_breakpoint (struct pt_regs *regs)
367{
368	siginfo_t info;
369
370	lock_kernel();
371#ifdef DEBUG_SPARC_BREAKPOINT
372        printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
373#endif
374	info.si_signo = SIGTRAP;
375	info.si_errno = 0;
376	info.si_code = TRAP_BRKPT;
377	info.si_addr = (void __user *)regs->pc;
378	info.si_trapno = 0;
379	force_sig_info(SIGTRAP, &info, current);
380
381#ifdef DEBUG_SPARC_BREAKPOINT
382	printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
383#endif
384	unlock_kernel();
385}
386
/*
 * Old-style sigaction(2).  A negative signal number is the SPARC ABI's
 * way of flagging "new style" signal semantics for this task; the sign
 * is stripped before use.  Copies the old_sigaction layout to/from
 * userspace around the generic do_sigaction().
 */
387asmlinkage int
388sparc_sigaction (int sig, const struct old_sigaction __user *act,
389		 struct old_sigaction __user *oact)
390{
391	struct k_sigaction new_ka, old_ka;
392	int ret;
393
394	if (sig < 0) {
395		current->thread.new_signal = 1;
396		sig = -sig;
397	}
398
399	if (act) {
400		unsigned long mask;
401
402		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
403		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
404		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
405			return -EFAULT;
		/* access_ok above covers the whole struct, so these
		 * remaining fetches go unchecked by design. */
406		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
407		__get_user(mask, &act->sa_mask);
408		siginitset(&new_ka.sa.sa_mask, mask);
409		new_ka.ka_restorer = NULL;
410	}
411
412	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
413
414	if (!ret && oact) {
415		/* In the clone() case we could copy half consistent
416		 * state to the user, however this could sleep and
417		 * deadlock us if we held the signal lock on SMP.  So for
418		 * now I take the easy way out and do no locking.
419		 */
420		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
421		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
422		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
423			return -EFAULT;
424		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
425		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
426	}
427
428	return ret;
429}
430
/*
 * rt_sigaction(2), SPARC flavour: takes an extra 'restorer' trampoline
 * pointer (stored in ka_restorer) that the generic syscall lacks, and
 * unconditionally marks the task as using new-style signals.
 */
431asmlinkage long
432sys_rt_sigaction(int sig,
433		 const struct sigaction __user *act,
434		 struct sigaction __user *oact,
435		 void __user *restorer,
436		 size_t sigsetsize)
437{
438	struct k_sigaction new_ka, old_ka;
439	int ret;
440
441	/* XXX: Don't preclude handling different sized sigset_t's.  */
442	if (sigsetsize != sizeof(sigset_t))
443		return -EINVAL;
444
445	/* All tasks which use RT signals (effectively) use
446	 * new style signals.
447	 */
448	current->thread.new_signal = 1;
449
450	if (act) {
451		new_ka.ka_restorer = restorer;
452		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
453			return -EFAULT;
454	}
455
456	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
457
458	if (!ret && oact) {
459		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
460			return -EFAULT;
461	}
462
463	return ret;
464}
465
/*
 * getdomainname(2): copy at most 'len' bytes of the NIS domain name
 * (including its NUL when it fits) to userspace under uts_sem.
 * Note: any failure path -- oversized len included -- reports -EFAULT.
 */
466asmlinkage int sys_getdomainname(char __user *name, int len)
467{
468 	int nlen;
469 	int err = -EFAULT;
470 	
471	down_read(&uts_sem);
472 	
473	nlen = strlen(system_utsname.domainname) + 1;	/* +1 for the NUL */
474
475	if (nlen < len)
476		len = nlen;
477	if (len > __NEW_UTS_LEN)
478		goto done;
479	if (copy_to_user(name, system_utsname.domainname, len))
480		goto done;
481	err = 0;
482done:
483	up_read(&uts_sem);
484	return err;
485}
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
new file mode 100644
index 000000000000..81c894acd0db
--- /dev/null
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -0,0 +1,1194 @@
1/* $Id: sys_sunos.c,v 1.137 2002/02/08 03:57:14 davem Exp $
2 * sys_sunos.c: SunOS specific syscall compatibility support.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/types.h>
16#include <linux/mman.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/resource.h>
22#include <linux/ipc.h>
23#include <linux/shm.h>
24#include <linux/msg.h>
25#include <linux/sem.h>
26#include <linux/signal.h>
27#include <linux/uio.h>
28#include <linux/utsname.h>
29#include <linux/major.h>
30#include <linux/stat.h>
31#include <linux/slab.h>
32#include <linux/pagemap.h>
33#include <linux/errno.h>
34#include <linux/smp.h>
35#include <linux/smp_lock.h>
36#include <linux/syscalls.h>
37
38#include <net/sock.h>
39
40#include <asm/uaccess.h>
41#ifndef KERNEL_DS
42#include <linux/segment.h>
43#endif
44
45#include <asm/page.h>
46#include <asm/pgtable.h>
47#include <asm/pconf.h>
48#include <asm/idprom.h> /* for gethostid() */
49#include <asm/unistd.h>
50#include <asm/system.h>
51
52/* For the nfs mount emulation */
53#include <linux/socket.h>
54#include <linux/in.h>
55#include <linux/nfs.h>
56#include <linux/nfs2.h>
57#include <linux/nfs_mount.h>
58
59/* for sunos_select */
60#include <linux/time.h>
61#include <linux/personality.h>
62
63/* NR_OPEN is now larger and dynamic in recent kernels. */
64#define SUNOS_NR_OPEN 256
65
66/* We use the SunOS mmap() semantics. */
/*
 * SunOS mmap(): like Linux mmap with three compat quirks --
 * MAP_NORESERVE is logged and dropped, mmaps of /dev/zero become
 * anonymous mappings, and without the SunOS _MAP_NEW flag the old
 * "return 0 on success" convention is emulated.
 */
67asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
68				    unsigned long prot, unsigned long flags,
69				    unsigned long fd, unsigned long off)
70{
71	struct file * file = NULL;
72	unsigned long retval, ret_type;
73
74	if (flags & MAP_NORESERVE) {
75		static int cnt;
76		if (cnt++ < 10)
77			printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
78			       current->comm);
79		flags &= ~MAP_NORESERVE;
80	}
81	retval = -EBADF;
82	if (!(flags & MAP_ANONYMOUS)) {
83		if (fd >= SUNOS_NR_OPEN)
84			goto out;
85 		file = fget(fd);
86		if (!file)
87			goto out;
88	}
89
90	retval = -EINVAL;
91	/* If this is ld.so or a shared library doing an mmap
92	 * of /dev/zero, transform it into an anonymous mapping.
93	 * SunOS is so stupid some times... hmph!
94	 */
95	if (file) {
		/* MEM_MAJOR minor 5 is /dev/zero. */
96		if (imajor(file->f_dentry->d_inode) == MEM_MAJOR &&
97		    iminor(file->f_dentry->d_inode) == 5) {
98			flags |= MAP_ANONYMOUS;
99			fput(file);
100			file = NULL;
101		}
102	}
103	ret_type = flags & _MAP_NEW;	/* remember SunOS-specific flag, strip it */
104	flags &= ~_MAP_NEW;
105
106	if (!(flags & MAP_FIXED))
107		addr = 0;
108	else {
109		if (ARCH_SUN4C_SUN4 &&
110		    (len > 0x20000000 ||
111		     ((flags & MAP_FIXED) &&
112		      addr < 0xe0000000 && addr + len > 0x20000000)))
113			goto out_putf;
114
115		/* See asm-sparc/uaccess.h */
116		if (len > TASK_SIZE - PAGE_SIZE ||
117		    addr + len > TASK_SIZE - PAGE_SIZE)
118			goto out_putf;
119	}
120
121	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
122	down_write(&current->mm->mmap_sem);
123	retval = do_mmap(file, addr, len, prot, flags, off);
124	up_write(&current->mm->mmap_sem);
	/* Old SunOS style: success (a user address) reports as 0. */
125	if (!ret_type)
126		retval = ((retval < PAGE_OFFSET) ? 0 : retval);
127
128out_putf:
129	if (file)
130		fput(file);
131out:
132	return retval;
133}
134
135/* lmbench calls this, just say "yeah, ok" */
/* SunOS mctl(): memory-control hints -- ignored, always succeeds. */
136asmlinkage int sunos_mctl(unsigned long addr, unsigned long len, int function, char *arg)
137{
138	return 0;
139}
140
141/* SunOS is completely broken... it returns 0 on success, otherwise
142 * ENOMEM.  For sys_sbrk() it wants the old brk value as a return
143 * on success and ENOMEM as before on failure.
144 */
/*
 * SunOS brk(): open-coded variant of sys_brk with SunOS return
 * conventions (0 / -ENOMEM) plus the sun4c VA-hole restriction and a
 * heuristic free-memory check before growing the heap.
 */
145asmlinkage int sunos_brk(unsigned long brk)
146{
147	int freepages, retval = -ENOMEM;
148	unsigned long rlim;
149	unsigned long newbrk, oldbrk;
150
151	down_write(&current->mm->mmap_sem);
152	if (ARCH_SUN4C_SUN4) {
		/* Never let the break wander into the sun4c VA hole. */
153		if (brk >= 0x20000000 && brk < 0xe0000000) {
154			goto out;
155		}
156	}
157
158	if (brk < current->mm->end_code)
159		goto out;
160
161	newbrk = PAGE_ALIGN(brk);
162	oldbrk = PAGE_ALIGN(current->mm->brk);
163	retval = 0;
164	if (oldbrk == newbrk) {
		/* Same page: just record the new break value. */
165		current->mm->brk = brk;
166		goto out;
167	}
168
169	/*
170	 * Always allow shrinking brk
171	 */
172	if (brk <= current->mm->brk) {
173		current->mm->brk = brk;
174		do_munmap(current->mm, newbrk, oldbrk-newbrk);
175		goto out;
176	}
177	/*
178	 * Check against rlimit and stack..
179	 */
180	retval = -ENOMEM;
181	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
182	if (rlim >= RLIM_INFINITY)
183		rlim = ~0;
184	if (brk - current->mm->end_code > rlim)
185		goto out;
186
187	/*
188	 * Check against existing mmap mappings.
189	 */
190	if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
191		goto out;
192
193	/*
194	 * stupid algorithm to decide if we have enough memory: while
195	 * simple, it hopefully works in most obvious cases.. Easy to
196	 * fool it, but this should catch most mistakes.
197	 */
198	freepages = get_page_cache_size();
199	freepages >>= 1;
200	freepages += nr_free_pages();
201	freepages += nr_swap_pages;
202	freepages -= num_physpages >> 4;	/* keep a reserve of 1/16 of RAM */
203	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
204	if (freepages < 0)
205		goto out;
206	/*
207	 * Ok, we have probably got enough memory - let it rip.
208	 */
209	current->mm->brk = brk;
210	do_brk(oldbrk, newbrk-oldbrk);
211	retval = 0;
212out:
213	up_write(&current->mm->mmap_sem);
214	return retval;
215}
216
/*
 * SunOS sbrk(): grow/shrink the break by 'increment' and return the
 * OLD break on success, or sunos_brk()'s -ENOMEM on failure.
 */
217asmlinkage unsigned long sunos_sbrk(int increment)
218{
219	int error;
220	unsigned long oldbrk;
221
222	/* This should do it hopefully... */
223	lock_kernel();
224	oldbrk = current->mm->brk;
225	error = sunos_brk(((int) current->mm->brk) + increment);
226	if (!error)
227		error = oldbrk;
228	unlock_kernel();
229	return error;
230}
231
232/* XXX Completely undocumented, and completely magic...
233 * XXX I believe it is to increase the size of the stack by
234 * XXX argument 'increment' and return the new end of stack
235 * XXX area.  Wheee...
236 */
/* Unsupported: log the attempt and fail with -1. */
237asmlinkage unsigned long sunos_sstk(int increment)
238{
239	lock_kernel();
240	printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
241	       current->comm, increment);
242	unlock_kernel();
243	return -1;
244}
245
246/* Give hints to the kernel as to what paging strategy to use...
247 * Completely bogus, don't remind me.
248 */
/* SunOS vadvise() strategy codes and their printable names (indexed
 * by the strategy value in sunos_vadvise below). */
249#define VA_NORMAL  0 /* Normal vm usage expected */
250#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
251#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
252#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
253static char *vstrings[] = {
254	"VA_NORMAL",
255	"VA_ABNORMAL",
256	"VA_SEQUENTIAL",
257	"VA_INVALIDATE",
258};
259
/* SunOS vadvise(): paging hint -- just logged, never acted upon. */
260asmlinkage void sunos_vadvise(unsigned long strategy)
261{
262	/* I wanna see who uses this... */
263	lock_kernel();
264	printk("%s: Advises us to use %s paging strategy\n",
265	       current->comm,
266	       strategy <= 3 ? vstrings[strategy] : "BOGUS");	/* bounds-checked index into vstrings */
267	unlock_kernel();
268}
269
270/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
271 * resource limit and is for backwards compatibility with older sunos
272 * revs.
273 */
/* Always report the fixed SunOS fd-table size, not the real rlimit. */
274asmlinkage long sunos_getdtablesize(void)
275{
276	return SUNOS_NR_OPEN;
277}
278
279#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
280
281asmlinkage unsigned long sunos_sigblock(unsigned long blk_mask)
282{
283 unsigned long old;
284
285 spin_lock_irq(&current->sighand->siglock);
286 old = current->blocked.sig[0];
287 current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
288 recalc_sigpending();
289 spin_unlock_irq(&current->sighand->siglock);
290 return old;
291}
292
293asmlinkage unsigned long sunos_sigsetmask(unsigned long newmask)
294{
295 unsigned long retval;
296
297 spin_lock_irq(&current->sighand->siglock);
298 retval = current->blocked.sig[0];
299 current->blocked.sig[0] = (newmask & _BLOCKABLE);
300 recalc_sigpending();
301 spin_unlock_irq(&current->sighand->siglock);
302 return retval;
303}
304
/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
/* getdents system call, the format of the structure just has a different */
/* layout (d_off+d_ino instead of d_ino+d_off) */
struct sunos_dirent {
	long d_off;		/* offset of the NEXT entry */
	unsigned long d_ino;	/* inode number */
	unsigned short d_reclen;	/* total record length, padded */
	unsigned short d_namlen;	/* name length, excluding NUL */
	char d_name[1];		/* NUL-terminated name follows inline */
};

/* Cursor state shared between sunos_getdents() and its filldir callback. */
struct sunos_dirent_callback {
	struct sunos_dirent __user *curr;	/* where the next entry goes */
	struct sunos_dirent __user *previous;	/* last entry, for d_off backpatch */
	int count;	/* bytes left in the user buffer */
	int error;	/* sticky error from the callback */
};

/* Byte offset of d_name within the record (computed via pointer math). */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
/* Pad a record length up to the next long boundary. */
#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
325
/* vfs_readdir() callback: append one entry in SunOS dirent layout to
 * the user buffer described by __buf (a struct sunos_dirent_callback).
 * Returns -EINVAL once an entry no longer fits, 0 otherwise.
 */
static int sunos_filldir(void * __buf, const char * name, int namlen,
			 loff_t offset, ino_t ino, unsigned int d_type)
{
	struct sunos_dirent __user *dirent;
	struct sunos_dirent_callback * buf = __buf;
	/* NAME_OFFSET() only needs the d_name member offset; 'dirent' is
	 * still uninitialized here (historic idiom -- offsetof() would be
	 * the clean way to write this). */
	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	/* This entry fits, so backpatch d_off of the previous entry. */
	dirent = buf->previous;
	if (dirent)
		put_user(offset, &dirent->d_off);
	dirent = buf->curr;
	buf->previous = dirent;
	/* NOTE(review): put_user/copy_to_user return values are ignored
	 * throughout, as in the original code. */
	put_user(ino, &dirent->d_ino);
	put_user(namlen, &dirent->d_namlen);
	put_user(reclen, &dirent->d_reclen);
	copy_to_user(dirent->d_name, name, namlen);
	put_user(0, dirent->d_name + namlen);
	dirent = (void __user *) dirent + reclen;
	buf->curr = dirent;
	buf->count -= reclen;
	return 0;
}
351
/* SunOS getdents(2): read directory entries from 'fd' into 'dirent'
 * (at most 'cnt' bytes) in SunOS layout.  Returns the number of bytes
 * written, 0 at EOF, or a negative errno.
 */
asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
{
	struct file * file;
	struct sunos_dirent __user *lastdirent;
	struct sunos_dirent_callback buf;
	int error = -EBADF;

	if (fd >= SUNOS_NR_OPEN)
		goto out;

	file = fget(fd);
	if (!file)
		goto out;

	/* Require room for at least one maximal-name entry. */
	error = -EINVAL;
	if (cnt < (sizeof(struct sunos_dirent) + 255))
		goto out_putf;

	buf.curr = (struct sunos_dirent __user *) dirent;
	buf.previous = NULL;
	buf.count = cnt;
	buf.error = 0;

	error = vfs_readdir(file, sunos_filldir, &buf);
	if (error < 0)
		goto out_putf;

	/* Point the final entry's d_off at the current file position. */
	lastdirent = buf.previous;
	error = buf.error;
	if (lastdirent) {
		put_user(file->f_pos, &lastdirent->d_off);
		error = cnt - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
391
/* Old sunos getdirentries, severely broken compatibility stuff here. */
/* Same as sunos_dirent but without the leading d_off field. */
struct sunos_direntry {
	unsigned long d_ino;	/* inode number */
	unsigned short d_reclen;	/* total record length, padded */
	unsigned short d_namlen;	/* name length, excluding NUL */
	char d_name[1];		/* NUL-terminated name follows inline */
};

/* Cursor state for sunos_getdirentries() and its filldir callback. */
struct sunos_direntry_callback {
	struct sunos_direntry __user *curr;	/* where the next entry goes */
	struct sunos_direntry __user *previous;	/* last entry written */
	int count;	/* bytes left in the user buffer */
	int error;	/* sticky error from the callback */
};
406
407static int sunos_filldirentry(void * __buf, const char * name, int namlen,
408 loff_t offset, ino_t ino, unsigned int d_type)
409{
410 struct sunos_direntry __user *dirent;
411 struct sunos_direntry_callback *buf = __buf;
412 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
413
414 buf->error = -EINVAL; /* only used if we fail.. */
415 if (reclen > buf->count)
416 return -EINVAL;
417 dirent = buf->previous;
418 dirent = buf->curr;
419 buf->previous = dirent;
420 put_user(ino, &dirent->d_ino);
421 put_user(namlen, &dirent->d_namlen);
422 put_user(reclen, &dirent->d_reclen);
423 copy_to_user(dirent->d_name, name, namlen);
424 put_user(0, dirent->d_name + namlen);
425 dirent = (void __user *) dirent + reclen;
426 buf->curr = dirent;
427 buf->count -= reclen;
428 return 0;
429}
430
/* SunOS getdirentries(2): like sunos_getdents() but emits the
 * d_off-less sunos_direntry records and stores the final file
 * position through 'basep'.
 */
asmlinkage int sunos_getdirentries(unsigned int fd, void __user *dirent,
				   int cnt, unsigned int __user *basep)
{
	struct file * file;
	struct sunos_direntry __user *lastdirent;
	struct sunos_direntry_callback buf;
	int error = -EBADF;

	if (fd >= SUNOS_NR_OPEN)
		goto out;

	file = fget(fd);
	if (!file)
		goto out;

	/* Require room for at least one maximal-name entry. */
	error = -EINVAL;
	if (cnt < (sizeof(struct sunos_direntry) + 255))
		goto out_putf;

	buf.curr = (struct sunos_direntry __user *) dirent;
	buf.previous = NULL;
	buf.count = cnt;
	buf.error = 0;

	error = vfs_readdir(file, sunos_filldirentry, &buf);
	if (error < 0)
		goto out_putf;

	/* If anything was emitted, report bytes used and the new position. */
	lastdirent = buf.previous;
	error = buf.error;
	if (lastdirent) {
		put_user(file->f_pos, basep);
		error = cnt - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
471
/* SunOS utsname layout: fixed 9-byte (8 chars + NUL) fields, with a
 * 56-byte continuation area for long nodenames. */
struct sunos_utsname {
	char sname[9];	/* sysname */
	char nname[9];	/* nodename, first 8 chars */
	char nnext[56];	/* nodename continuation */
	char rel[9];	/* release */
	char ver[9];	/* version */
	char mach[9];	/* machine */
};
480
481asmlinkage int sunos_uname(struct sunos_utsname __user *name)
482{
483 int ret;
484 down_read(&uts_sem);
485 ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0], sizeof(name->sname) - 1);
486 if (!ret) {
487 ret |= __copy_to_user(&name->nname[0], &system_utsname.nodename[0], sizeof(name->nname) - 1);
488 ret |= __put_user('\0', &name->nname[8]);
489 ret |= __copy_to_user(&name->rel[0], &system_utsname.release[0], sizeof(name->rel) - 1);
490 ret |= __copy_to_user(&name->ver[0], &system_utsname.version[0], sizeof(name->ver) - 1);
491 ret |= __copy_to_user(&name->mach[0], &system_utsname.machine[0], sizeof(name->mach) - 1);
492 }
493 up_read(&uts_sem);
494 return ret ? -EFAULT : 0;
495}
496
/* Handler for unimplemented SunOS syscalls: raise SIGSYS on the caller
 * and return -ENOSYS.  The first few offenders also get a register
 * dump in the kernel log to aid debugging.
 */
asmlinkage int sunos_nosys(void)
{
	struct pt_regs *regs;
	siginfo_t info;
	static int cnt;	/* number of register dumps printed so far */

	lock_kernel();
	regs = current->thread.kregs;
	info.si_signo = SIGSYS;
	info.si_errno = 0;
	info.si_code = __SI_FAULT|0x100;
	info.si_addr = (void __user *)regs->pc;
	/* The attempted syscall number is in global register %g1. */
	info.si_trapno = regs->u_regs[UREG_G1];
	send_sig_info(SIGSYS, &info, current);
	if (cnt++ < 4) {
		printk("Process makes ni_syscall number %d, register dump:\n",
		       (int) regs->u_regs[UREG_G1]);
		show_regs(regs);
	}
	unlock_kernel();
	return -ENOSYS;
}
519
520/* This is not a real and complete implementation yet, just to keep
521 * the easy SunOS binaries happy.
522 */
523asmlinkage int sunos_fpathconf(int fd, int name)
524{
525 int ret;
526
527 switch(name) {
528 case _PCONF_LINK:
529 ret = LINK_MAX;
530 break;
531 case _PCONF_CANON:
532 ret = MAX_CANON;
533 break;
534 case _PCONF_INPUT:
535 ret = MAX_INPUT;
536 break;
537 case _PCONF_NAME:
538 ret = NAME_MAX;
539 break;
540 case _PCONF_PATH:
541 ret = PATH_MAX;
542 break;
543 case _PCONF_PIPE:
544 ret = PIPE_BUF;
545 break;
546 case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
547 ret = 1;
548 break;
549 case _PCONF_NOTRUNC: /* XXX Investigate XXX */
550 case _PCONF_VDISABLE:
551 ret = 0;
552 break;
553 default:
554 ret = -EINVAL;
555 break;
556 }
557 return ret;
558}
559
560asmlinkage int sunos_pathconf(char __user *path, int name)
561{
562 int ret;
563
564 ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
565 return ret;
566}
567
568/* SunOS mount system call emulation */
569
/* SunOS select(2): same as sys_select(), except that an interrupted
 * call whose timeout has been fully consumed is reported as 0
 * (timeout) instead of -EINTR.
 */
asmlinkage int sunos_select(int width, fd_set __user *inp, fd_set __user *outp,
			    fd_set __user *exp, struct timeval __user *tvp)
{
	int ret;

	/* SunOS binaries expect that select won't change the tvp contents */
	ret = sys_select (width, inp, outp, exp, tvp);
	if (ret == -EINTR && tvp) {
		time_t sec, usec;

		/* NOTE(review): __get_user faults are ignored here, as in
		 * the original; sec/usec would then be unchecked garbage. */
		__get_user(sec, &tvp->tv_sec);
		__get_user(usec, &tvp->tv_usec);

		/* Timeout fully elapsed: report timeout, not interruption. */
		if (sec == 0 && usec == 0)
			ret = 0;
	}
	return ret;
}
588
589asmlinkage void sunos_nop(void)
590{
591 return;
592}
593
/* SunOS mount/umount. */
#define SMNT_RDONLY 1	/* mapped to MS_RDONLY */
#define SMNT_NOSUID 2	/* mapped to MS_NOSUID */
#define SMNT_NEWTYPE 4	/* fs type given as a string; required by sunos_mount() */
#define SMNT_GRPID 8	/* rejected by sunos_mount() */
#define SMNT_REMOUNT 16	/* mapped to MS_REMOUNT */
#define SMNT_NOSUB 32	/* rejected by sunos_mount() */
#define SMNT_MULTI 64	/* rejected by sunos_mount() */
#define SMNT_SYS5 128	/* rejected by sunos_mount() */

/* Opaque NFS file handle as SunOS passes it. */
struct sunos_fh_t {
	char fh_data [NFS_FHSIZE];
};

/* SunOS-side argument block for an NFS mount; translated into a Linux
 * struct nfs_mount_data by sunos_nfs_mount(). */
struct sunos_nfs_mount_args {
	struct sockaddr_in __user *addr; /* file server address */
	struct nfs_fh __user *fh; /* File handle to be mounted */
	int flags; /* flags */
	int wsize; /* write size in bytes */
	int rsize; /* read size in bytes */
	int timeo; /* initial timeout in .1 secs */
	int retrans; /* times to retry send */
	char __user *hostname; /* server's hostname */
	int acregmin; /* attr cache file min secs */
	int acregmax; /* attr cache file max secs */
	int acdirmin; /* attr cache dir min secs */
	int acdirmax; /* attr cache dir max secs */
	char __user *netname; /* server's netname */
};
623
624
/* Bind the socket on a local reserved port and connect it to the
 * remote server. This on Linux/i386 is done by the mount program,
 * not by the kernel.
 *
 * Returns 1 on success, 0 on any failure (boolean, not an errno).
 */
static int
sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
{
	struct sockaddr_in local;
	struct sockaddr_in server;
	int try_port;
	struct socket *socket;
	struct inode *inode;
	struct file *file;
	int ret, result = 0;

	file = fget(fd);
	if (!file)
		goto out;

	inode = file->f_dentry->d_inode;

	socket = SOCKET_I(inode);
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = INADDR_ANY;

	/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
	/* Walk downward from port 1023 until a bind succeeds or we hit 512. */
	try_port = 1024;
	do {
		local.sin_port = htons (--try_port);
		ret = socket->ops->bind(socket, (struct sockaddr*)&local,
					sizeof(local));
	} while (ret && try_port > (1024 / 2));

	if (ret)
		goto out_putf;

	server.sin_family = AF_INET;
	server.sin_addr = addr->sin_addr;
	/* NOTE(review): NFS_PORT is stored without htons() here, unlike
	 * the bind above -- confirm intended byte order before changing. */
	server.sin_port = NFS_PORT;

	/* Call sys_connect */
	ret = socket->ops->connect (socket, (struct sockaddr *) &server,
				    sizeof (server), file->f_flags);
	if (ret >= 0)
		result = 1;

out_putf:
	fput(file);
out:
	return result;
}
676
/* Return 'value' when it is nonzero, otherwise the fallback 'def_value'. */
static int get_default (int value, int def_value)
{
	return value ? value : def_value;
}
684
685static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
686{
687 int server_fd, err;
688 char *the_name, *mount_page;
689 struct nfs_mount_data linux_nfs_mount;
690 struct sunos_nfs_mount_args sunos_mount;
691
692 /* Ok, here comes the fun part: Linux's nfs mount needs a
693 * socket connection to the server, but SunOS mount does not
694 * require this, so we use the information on the destination
695 * address to create a socket and bind it to a reserved
696 * port on this system
697 */
698 if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
699 return -EFAULT;
700
701 server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
702 if (server_fd < 0)
703 return -ENXIO;
704
705 if (copy_from_user(&linux_nfs_mount.addr,sunos_mount.addr,
706 sizeof(*sunos_mount.addr)) ||
707 copy_from_user(&linux_nfs_mount.root,sunos_mount.fh,
708 sizeof(*sunos_mount.fh))) {
709 sys_close (server_fd);
710 return -EFAULT;
711 }
712
713 if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
714 sys_close (server_fd);
715 return -ENXIO;
716 }
717
718 /* Now, bind it to a locally reserved port */
719 linux_nfs_mount.version = NFS_MOUNT_VERSION;
720 linux_nfs_mount.flags = sunos_mount.flags;
721 linux_nfs_mount.fd = server_fd;
722
723 linux_nfs_mount.rsize = get_default (sunos_mount.rsize, 8192);
724 linux_nfs_mount.wsize = get_default (sunos_mount.wsize, 8192);
725 linux_nfs_mount.timeo = get_default (sunos_mount.timeo, 10);
726 linux_nfs_mount.retrans = sunos_mount.retrans;
727
728 linux_nfs_mount.acregmin = sunos_mount.acregmin;
729 linux_nfs_mount.acregmax = sunos_mount.acregmax;
730 linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
731 linux_nfs_mount.acdirmax = sunos_mount.acdirmax;
732
733 the_name = getname(sunos_mount.hostname);
734 if (IS_ERR(the_name))
735 return PTR_ERR(the_name);
736
737 strlcpy(linux_nfs_mount.hostname, the_name,
738 sizeof(linux_nfs_mount.hostname));
739 putname (the_name);
740
741 mount_page = (char *) get_zeroed_page(GFP_KERNEL);
742 if (!mount_page)
743 return -ENOMEM;
744
745 memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));
746
747 err = do_mount("", dir_name, "nfs", linux_flags, mount_page);
748
749 free_page((unsigned long) mount_page);
750 return err;
751}
752
/* SunOS mount(2) emulation.  Only string filesystem types
 * (SMNT_NEWTYPE) are accepted; ext2/iso9660/minix expect a device
 * name in 'data', "nfs" is handed to sunos_nfs_mount(), "proc" needs
 * no device, and everything else is -ENODEV.
 */
asmlinkage int
sunos_mount(char __user *type, char __user *dir, int flags, void __user *data)
{
	int linux_flags = 0;
	int ret = -EINVAL;
	char *dev_fname = NULL;
	char *dir_page, *type_page;

	if (!capable (CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();
	/* We don't handle the integer fs type */
	if ((flags & SMNT_NEWTYPE) == 0)
		goto out;

	/* Do not allow for those flags we don't support */
	if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
		goto out;

	if (flags & SMNT_REMOUNT)
		linux_flags |= MS_REMOUNT;
	if (flags & SMNT_RDONLY)
		linux_flags |= MS_RDONLY;
	if (flags & SMNT_NOSUID)
		linux_flags |= MS_NOSUID;

	dir_page = getname(dir);
	ret = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out;

	type_page = getname(type);
	ret = PTR_ERR(type_page);
	if (IS_ERR(type_page))
		goto out1;

	if (strcmp(type_page, "ext2") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "iso9660") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "minix") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "nfs") == 0) {
		ret = sunos_nfs_mount (dir_page, flags, data);
		goto out2;
	} else if (strcmp(type_page, "ufs") == 0) {
		printk("Warning: UFS filesystem mounts unsupported.\n");
		ret = -ENODEV;
		goto out2;
	} else if (strcmp(type_page, "proc")) {
		/* Any other type name is rejected... */
		ret = -ENODEV;
		goto out2;
	}
	/* ...while "proc" falls through with dev_fname == NULL, which
	 * IS_ERR() treats as success, so do_mount() gets a NULL device. */
	ret = PTR_ERR(dev_fname);
	if (IS_ERR(dev_fname))
		goto out2;
	ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
	if (dev_fname)
		putname(dev_fname);
out2:
	putname(type_page);
out1:
	putname(dir_page);
out:
	unlock_kernel();
	return ret;
}
821
822
823asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
824{
825 int ret;
826
827 /* So stupid... */
828 if ((!pid || pid == current->pid) &&
829 !pgid) {
830 sys_setsid();
831 ret = 0;
832 } else {
833 ret = sys_setpgid(pid, pgid);
834 }
835 return ret;
836}
837
838/* So stupid... */
839asmlinkage int sunos_wait4(pid_t pid, unsigned int __user *stat_addr,
840 int options, struct rusage __user*ru)
841{
842 int ret;
843
844 ret = sys_wait4((pid ? pid : -1), stat_addr, options, ru);
845 return ret;
846}
847
extern int kill_pg(int, int, int);

/* SunOS killpg(2): send 'sig' to every process in group 'pgrp'. */
asmlinkage int sunos_killpg(int pgrp, int sig)
{
	int ret;

	lock_kernel();
	ret = kill_pg(pgrp, sig, 0);
	unlock_kernel();
	return ret;
}
858
/* SunOS audit(2): unimplemented; log the attempt and fail with -1. */
asmlinkage int sunos_audit(void)
{
	lock_kernel();
	printk ("sys_audit\n");
	unlock_kernel();
	return -1;
}
866
867asmlinkage unsigned long sunos_gethostid(void)
868{
869 unsigned long ret;
870
871 lock_kernel();
872 ret = ((unsigned long)idprom->id_machtype << 24) |
873 (unsigned long)idprom->id_sernum;
874 unlock_kernel();
875 return ret;
876}
877
/* sysconf options, for SunOS compatibility */
/* These are the SunOS _SC_* numbers, which differ from Linux's;
 * they index the switch in sunos_sysconf() below. */
#define _SC_ARG_MAX 1
#define _SC_CHILD_MAX 2
#define _SC_CLK_TCK 3
#define _SC_NGROUPS_MAX 4
#define _SC_OPEN_MAX 5
#define _SC_JOB_CONTROL 6
#define _SC_SAVED_IDS 7
#define _SC_VERSION 8
887
888asmlinkage long sunos_sysconf (int name)
889{
890 long ret;
891
892 switch (name){
893 case _SC_ARG_MAX:
894 ret = ARG_MAX;
895 break;
896 case _SC_CHILD_MAX:
897 ret = CHILD_MAX;
898 break;
899 case _SC_CLK_TCK:
900 ret = HZ;
901 break;
902 case _SC_NGROUPS_MAX:
903 ret = NGROUPS_MAX;
904 break;
905 case _SC_OPEN_MAX:
906 ret = OPEN_MAX;
907 break;
908 case _SC_JOB_CONTROL:
909 ret = 1; /* yes, we do support job control */
910 break;
911 case _SC_SAVED_IDS:
912 ret = 1; /* yes, we do support saved uids */
913 break;
914 case _SC_VERSION:
915 /* mhm, POSIX_VERSION is in /usr/include/unistd.h
916 * should it go on /usr/include/linux?
917 */
918 ret = 199009L;
919 break;
920 default:
921 ret = -1;
922 break;
923 };
924 return ret;
925}
926
/* SunOS semsys(2) multiplexor: op 0 = semctl, 1 = semget, 2 = semop.
 * For semctl the SunOS command numbers are remapped onto the Linux
 * GETPID/GETVAL/... constants before calling sys_semctl().
 */
asmlinkage int sunos_semsys(int op, unsigned long arg1, unsigned long arg2,
			    unsigned long arg3, void *ptr)
{
	union semun arg4;
	int ret;

	switch (op) {
	case 0:
		/* Most arguments match on a 1:1 basis but cmd doesn't */
		switch(arg3) {
		case 4:
			arg3=GETPID; break;
		case 5:
			arg3=GETVAL; break;
		case 6:
			arg3=GETALL; break;
		case 3:
			arg3=GETNCNT; break;
		case 7:
			arg3=GETZCNT; break;
		case 8:
			arg3=SETVAL; break;
		case 9:
			arg3=SETALL; break;
		}
		/* sys_semctl(): */
		/* value to modify semaphore to */
		arg4.__pad = (void __user *) ptr;
		ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4 );
		break;
	case 1:
		/* sys_semget(): */
		ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
		break;
	case 2:
		/* sys_semop(): */
		ret = sys_semop((int)arg1, (struct sembuf __user *)arg2, (unsigned)arg3);
		break;
	default:
		ret = -EINVAL;
		break;
	};
	return ret;
}
971
/* SunOS msgsys(2) multiplexor: op 0 = msgget, 1 = msgctl, 2 = msgrcv,
 * 3 = msgsnd.  msgrcv needs a fifth argument, which is fished out of
 * the caller's register-window save area on the user stack.
 */
asmlinkage int sunos_msgsys(int op, unsigned long arg1, unsigned long arg2,
			    unsigned long arg3, unsigned long arg4)
{
	struct sparc_stackf *sp;
	unsigned long arg5;
	int rval;

	switch(op) {
	case 0:
		rval = sys_msgget((key_t)arg1, (int)arg2);
		break;
	case 1:
		rval = sys_msgctl((int)arg1, (int)arg2,
				  (struct msqid_ds __user *)arg3);
		break;
	case 2:
		/* Fifth argument lives in the user stack frame (%fp). */
		lock_kernel();
		sp = (struct sparc_stackf *)current->thread.kregs->u_regs[UREG_FP];
		arg5 = sp->xxargs[0];
		unlock_kernel();
		rval = sys_msgrcv((int)arg1, (struct msgbuf __user *)arg2,
				  (size_t)arg3, (long)arg4, (int)arg5);
		break;
	case 3:
		rval = sys_msgsnd((int)arg1, (struct msgbuf __user *)arg2,
				  (size_t)arg3, (int)arg4);
		break;
	default:
		rval = -EINVAL;
		break;
	}
	return rval;
}
1005
/* SunOS shmsys(2) multiplexor: op 0 = shmat, 1 = shmctl, 2 = shmdt,
 * 3 = shmget.  For shmat the attach address becomes the return value.
 */
asmlinkage int sunos_shmsys(int op, unsigned long arg1, unsigned long arg2,
			    unsigned long arg3)
{
	unsigned long raddr;
	int rval;

	switch(op) {
	case 0:
		/* do_shmat(): attach a shared memory area */
		rval = do_shmat((int)arg1,(char __user *)arg2,(int)arg3,&raddr);
		if (!rval)
			rval = (int) raddr;
		break;
	case 1:
		/* sys_shmctl(): modify shared memory area attr. */
		rval = sys_shmctl((int)arg1,(int)arg2,(struct shmid_ds __user *)arg3);
		break;
	case 2:
		/* sys_shmdt(): detach a shared memory area */
		rval = sys_shmdt((char __user *)arg1);
		break;
	case 3:
		/* sys_shmget(): get a shared memory area */
		rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
		break;
	default:
		rval = -EINVAL;
		break;
	};
	return rval;
}
1037
1038#define SUNOS_EWOULDBLOCK 35
1039
1040/* see the sunos man page read(2v) for an explanation
1041 of this garbage. We use O_NDELAY to mark
1042 file descriptors that have been set non-blocking
1043 using 4.2BSD style calls. (tridge) */
1044
1045static inline int check_nonblock(int ret, int fd)
1046{
1047 if (ret == -EAGAIN) {
1048 struct file * file = fget(fd);
1049 if (file) {
1050 if (file->f_flags & O_NDELAY)
1051 ret = -SUNOS_EWOULDBLOCK;
1052 fput(file);
1053 }
1054 }
1055 return ret;
1056}
1057
1058asmlinkage int sunos_read(unsigned int fd, char __user *buf, int count)
1059{
1060 int ret;
1061
1062 ret = check_nonblock(sys_read(fd,buf,count),fd);
1063 return ret;
1064}
1065
1066asmlinkage int sunos_readv(unsigned long fd, const struct iovec __user *vector,
1067 long count)
1068{
1069 int ret;
1070
1071 ret = check_nonblock(sys_readv(fd,vector,count),fd);
1072 return ret;
1073}
1074
1075asmlinkage int sunos_write(unsigned int fd, char __user *buf, int count)
1076{
1077 int ret;
1078
1079 ret = check_nonblock(sys_write(fd,buf,count),fd);
1080 return ret;
1081}
1082
1083asmlinkage int sunos_writev(unsigned long fd,
1084 const struct iovec __user *vector, long count)
1085{
1086 int ret;
1087
1088 ret = check_nonblock(sys_writev(fd,vector,count),fd);
1089 return ret;
1090}
1091
1092asmlinkage int sunos_recv(int fd, void __user *ubuf, int size, unsigned flags)
1093{
1094 int ret;
1095
1096 ret = check_nonblock(sys_recv(fd,ubuf,size,flags),fd);
1097 return ret;
1098}
1099
1100asmlinkage int sunos_send(int fd, void __user *buff, int len, unsigned flags)
1101{
1102 int ret;
1103
1104 ret = check_nonblock(sys_send(fd,buff,len,flags),fd);
1105 return ret;
1106}
1107
1108asmlinkage int sunos_accept(int fd, struct sockaddr __user *sa,
1109 int __user *addrlen)
1110{
1111 int ret;
1112
1113 while (1) {
1114 ret = check_nonblock(sys_accept(fd,sa,addrlen),fd);
1115 if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
1116 break;
1117 }
1118
1119 return ret;
1120}
1121
#define SUNOS_SV_INTERRUPT 2

/* SunOS sigaction(2): old_sigaction layout, with the SV_INTERRUPT flag
 * sense inverted relative to the Linux encoding -- hence the XOR on
 * the flags both going in and coming back out.
 */
asmlinkage int
sunos_sigaction(int sig, const struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
			return -EFAULT;
		/* NOTE(review): this __get_user result is ignored, unlike
		 * the ones above -- as in the original code. */
		__get_user(mask, &act->sa_mask);
		new_ka.sa.sa_restorer = NULL;
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
		new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* In the clone() case we could copy half consistent
		 * state to the user, however this could sleep and
		 * deadlock us if we held the signal lock on SMP. So for
		 * now I take the easy way out and do no locking.
		 * But then again we don't support SunOS lwp's anyways ;-)
		 */
		old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
			return -EFAULT;
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
1164
1165
1166asmlinkage int sunos_setsockopt(int fd, int level, int optname,
1167 char __user *optval, int optlen)
1168{
1169 int tr_opt = optname;
1170 int ret;
1171
1172 if (level == SOL_IP) {
1173 /* Multicast socketopts (ttl, membership) */
1174 if (tr_opt >=2 && tr_opt <= 6)
1175 tr_opt += 30;
1176 }
1177 ret = sys_setsockopt(fd, level, tr_opt, optval, optlen);
1178 return ret;
1179}
1180
1181asmlinkage int sunos_getsockopt(int fd, int level, int optname,
1182 char __user *optval, int __user *optlen)
1183{
1184 int tr_opt = optname;
1185 int ret;
1186
1187 if (level == SOL_IP) {
1188 /* Multicast socketopts (ttl, membership) */
1189 if (tr_opt >=2 && tr_opt <= 6)
1190 tr_opt += 30;
1191 }
1192 ret = sys_getsockopt(fd, level, tr_opt, optval, optlen);
1193 return ret;
1194}
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
new file mode 100644
index 000000000000..928ffeb0fabb
--- /dev/null
+++ b/arch/sparc/kernel/systbls.S
@@ -0,0 +1,186 @@
1/* $Id: systbls.S,v 1.103 2002/02/08 03:57:14 davem Exp $
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
#include <linux/config.h>

	.data
	.align 4

	/* First, the Linux native syscall table. */
	/* Each slot is a 32-bit handler address; the slot index is the
	 * syscall number (see the running /*N*/ markers). */

	.globl sys_call_table
sys_call_table:
/*0*/	.long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
/*5*/	.long sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/	.long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
/*15*/	.long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
/*20*/	.long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/	.long sys_time, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
/*30*/	.long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
/*35*/	.long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
/*40*/	.long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_getuid
/*45*/	.long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
/*50*/	.long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
/*55*/	.long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
/*60*/	.long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
/*65*/	.long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid
/*70*/	.long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
/*75*/	.long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16
/*80*/	.long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
/*85*/	.long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
/*90*/	.long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
/*95*/	.long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*100*/	.long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
/*105*/	.long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/	.long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
/*115*/	.long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd
/*120*/	.long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
/*125*/	.long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/	.long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
/*135*/	.long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/	.long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit
/*145*/	.long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/	.long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
/*155*/	.long sys_fcntl64, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/	.long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
/*165*/	.long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
/*170*/	.long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
/*175*/	.long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/	.long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall
/*185*/	.long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
/*190*/	.long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
/*195*/	.long sys_epoll_wait, sys_nis_syscall, sys_getppid, sparc_sigaction, sys_sgetmask
/*200*/	.long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
/*205*/	.long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
/*210*/	.long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
/*215*/	.long sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
/*220*/	.long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
/*225*/	.long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
/*230*/	.long sys_select, sys_time, sys_nis_syscall, sys_stime, sys_statfs64
	/* "We are the Knights of the Forest of Ni!!" */
/*235*/	.long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/	.long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/	.long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/	.long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
/*255*/	.long sys_nis_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/	.long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
/*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*270*/	.long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
/*275*/	.long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/	.long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
#ifdef CONFIG_SUNOS_EMUL
	/* Now the SunOS syscall table. */
	/* Three handlers per line; unimplemented SunOS calls are routed
	 * to sunos_nosys() (SIGSYS + -ENOSYS). */

	.align 4
	.globl sunos_sys_table
sunos_sys_table:
/*0*/	.long sunos_indir, sys_exit, sys_fork
	.long sunos_read, sunos_write, sys_open
	.long sys_close, sunos_wait4, sys_creat
	.long sys_link, sys_unlink, sunos_execv
	.long sys_chdir, sunos_nosys, sys_mknod
	.long sys_chmod, sys_lchown16, sunos_brk
	.long sunos_nosys, sys_lseek, sunos_getpid
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_getuid, sunos_nosys, sys_ptrace
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sys_access, sunos_nosys, sunos_nosys
	.long sys_sync, sys_kill, sys_newstat
	.long sunos_nosys, sys_newlstat, sys_dup
	.long sys_pipe, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_getgid
	.long sunos_nosys, sunos_nosys
/*50*/	.long sunos_nosys, sys_acct, sunos_nosys
	.long sunos_mctl, sunos_ioctl, sys_reboot
	.long sunos_nosys, sys_symlink, sys_readlink
	.long sys_execve, sys_umask, sys_chroot
	.long sys_newfstat, sunos_nosys, sys_getpagesize
	.long sys_msync, sys_vfork, sunos_nosys
	.long sunos_nosys, sunos_sbrk, sunos_sstk
	.long sunos_mmap, sunos_vadvise, sys_munmap
	.long sys_mprotect, sys_madvise, sys_vhangup
	.long sunos_nosys, sys_mincore, sys_getgroups16
	.long sys_setgroups16, sys_getpgrp, sunos_setpgrp
	.long sys_setitimer, sunos_nosys, sys_swapon
	.long sys_getitimer, sys_gethostname, sys_sethostname
	.long sunos_getdtablesize, sys_dup2, sunos_nop
	.long sys_fcntl, sunos_select, sunos_nop
	.long sys_fsync, sys_setpriority, sys_socket
	.long sys_connect, sunos_accept
/*100*/	.long sys_getpriority, sunos_send, sunos_recv
	.long sunos_nosys, sys_bind, sunos_setsockopt
	.long sys_listen, sunos_nosys, sunos_sigaction
	.long sunos_sigblock, sunos_sigsetmask, sys_sigpause
	.long sys_sigstack, sys_recvmsg, sys_sendmsg
	.long sunos_nosys, sys_gettimeofday, sys_getrusage
	.long sunos_getsockopt, sunos_nosys, sunos_readv
	.long sunos_writev, sys_settimeofday, sys_fchown16
	.long sys_fchmod, sys_recvfrom, sys_setreuid16
	.long sys_setregid16, sys_rename, sys_truncate
	.long sys_ftruncate, sys_flock, sunos_nosys
	.long sys_sendto, sys_shutdown, sys_socketpair
	.long sys_mkdir, sys_rmdir, sys_utimes
	.long sys_sigreturn, sunos_nosys, sys_getpeername
	.long sunos_gethostid, sunos_nosys, sys_getrlimit
	.long sys_setrlimit, sunos_killpg, sunos_nosys
	.long sunos_nosys, sunos_nosys
/*150*/	.long sys_getsockname, sunos_nosys, sunos_nosys
	.long sys_poll, sunos_nosys, sunos_nosys
	.long sunos_getdirentries, sys_statfs, sys_fstatfs
	.long sys_oldumount, sunos_nosys, sunos_nosys
	.long sys_getdomainname, sys_setdomainname
	.long sunos_nosys, sys_quotactl, sunos_nosys
	.long sunos_mount, sys_ustat, sunos_semsys
	.long sunos_msgsys, sunos_shmsys, sunos_audit
	.long sunos_nosys, sunos_getdents, sys_setsid
	.long sys_fchdir, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sys_sigpending, sunos_nosys
	.long sys_setpgid, sunos_pathconf, sunos_fpathconf
	.long sunos_sysconf, sunos_uname, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
/*200*/	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys
/*250*/	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys
/*260*/	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys
/*270*/	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys
/*280*/	.long sunos_nosys, sunos_nosys, sunos_nosys
	.long sunos_nosys

#endif
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
new file mode 100644
index 000000000000..f476a5f4af6a
--- /dev/null
+++ b/arch/sparc/kernel/tadpole.c
@@ -0,0 +1,126 @@
1/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
2 *
3 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
4 */
5
6#include <linux/string.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10
11#include <asm/asi.h>
12#include <asm/oplib.h>
13#include <asm/io.h>
14
/* Physical address of the MACIO SCSI control/status register, and the
 * bit within it that signals a DMA transfer is in flight.
 */
#define MACIO_SCSI_CSR_ADDR	0x78400000
#define MACIO_EN_DMA		0x00000200
/* Flag bit in clk_state: set once clk_init() has programmed the h/w. */
#define CLOCK_INIT_DONE		1

static int clk_state;				/* CLOCK_INIT_DONE once initialised */
static volatile unsigned char *clk_ctrl;	/* clock-control regs, or NULL if absent */
void (*cpu_pwr_save)(void);			/* idle-loop power-save hook, if installed */
22
/* Load a 32-bit word from the given physical address using the
 * MMU-bypass ASI, so no virtual mapping of the device is needed.
 */
static inline unsigned int ldphys(unsigned int addr)
{
	unsigned long data;

	__asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
			     "=r" (data) :
			     "r" (addr), "i" (ASI_M_BYPASS));
	return data;
}
32
/* Program the clock controller with its init sequence.
 * NOTE(review): the 0x6c/0x4c/0xdf bytes written to register offset 3
 * are undocumented Tadpole h/w magic -- do not reorder the stores.
 */
static void clk_init(void)
{
	__asm__ __volatile__("mov 0x6c, %%g1\n\t"
			     "mov 0x4c, %%g2\n\t"
			     "mov 0xdf, %%g3\n\t"
			     "stb %%g1, [%0+3]\n\t"
			     "stb %%g2, [%0+3]\n\t"
			     "stb %%g3, [%0+3]\n\t" : :
			     "r" (clk_ctrl) :
			     "g1", "g2", "g3");
}
44
/* Drop the CPU clock to its slow (power-saving) setting.
 * NOTE(review): the 0xcc/0x4c/0xcf/0xdf bytes written to register
 * offset 3 are undocumented Tadpole h/w magic -- keep the order.
 */
static void clk_slow(void)
{
	__asm__ __volatile__("mov 0xcc, %%g2\n\t"
			     "mov 0x4c, %%g3\n\t"
			     "mov 0xcf, %%g4\n\t"
			     "mov 0xdf, %%g5\n\t"
			     "stb %%g2, [%0+3]\n\t"
			     "stb %%g3, [%0+3]\n\t"
			     "stb %%g4, [%0+3]\n\t"
			     "stb %%g5, [%0+3]\n\t" : :
			     "r" (clk_ctrl) :
			     "g2", "g3", "g4", "g5");
}
58
59/*
60 * Tadpole is guaranteed to be UP, using local_irq_save.
61 */
static void tsu_clockstop(void)
{
	unsigned int mcsr;
	unsigned long flags;

	/* No clock-control registers found at probe time: nothing to do. */
	if (!clk_ctrl)
		return;
	/* First call only initialises the controller, then returns. */
	if (!(clk_state & CLOCK_INIT_DONE)) {
		local_irq_save(flags);
		clk_init();
		clk_state |= CLOCK_INIT_DONE;       /* all done */
		local_irq_restore(flags);
		return;
	}
	/* NOTE(review): bit 0 of register 2 appears to indicate the clock
	 * is currently running fast -- only then is slowing worthwhile.
	 */
	if (!(clk_ctrl[2] & 1))
		return;               /* no speed up yet */

	local_irq_save(flags);

	/* if SCSI DMA in progress, don't slow clock */
	mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
	if ((mcsr&MACIO_EN_DMA) != 0) {
		local_irq_restore(flags);
		return;
	}
	/* TODO... the minimum clock setting ought to increase the
	 * memory refresh interval..
	 */
	clk_slow();
	local_irq_restore(flags);
}
93
94static void swift_clockstop(void)
95{
96 if (!clk_ctrl)
97 return;
98 clk_ctrl[0] = 0;
99}
100
101void __init clock_stop_probe(void)
102{
103 unsigned int node, clk_nd;
104 char name[20];
105
106 prom_getstring(prom_root_node, "name", name, sizeof(name));
107 if (strncmp(name, "Tadpole", 7))
108 return;
109 node = prom_getchild(prom_root_node);
110 node = prom_searchsiblings(node, "obio");
111 node = prom_getchild(node);
112 clk_nd = prom_searchsiblings(node, "clk-ctrl");
113 if (!clk_nd)
114 return;
115 printk("Clock Stopping h/w detected... ");
116 clk_ctrl = (char *) prom_getint(clk_nd, "address");
117 clk_state = 0;
118 if (name[10] == '\0') {
119 cpu_pwr_save = tsu_clockstop;
120 printk("enabled (S3)\n");
121 } else if ((name[10] == 'X') || (name[10] == 'G')) {
122 cpu_pwr_save = swift_clockstop;
123 printk("enabled (%s)\n",name+7);
124 } else
125 printk("disabled %s\n",name+7);
126}
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
new file mode 100644
index 000000000000..fd8005a3e6bd
--- /dev/null
+++ b/arch/sparc/kernel/tick14.c
@@ -0,0 +1,85 @@
1/* tick14.c
2 * linux/arch/sparc/kernel/tick14.c
3 *
4 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
5 *
6 * This file handles the Sparc specific level14 ticker
7 * This is really useful for profiling OBP uses it for keyboard
8 * aborts and other stuff.
9 *
10 *
11 */
12#include <linux/errno.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/param.h>
16#include <linux/string.h>
17#include <linux/mm.h>
18#include <linux/timex.h>
19#include <linux/interrupt.h>
20
21#include <asm/oplib.h>
22#include <asm/segment.h>
23#include <asm/timer.h>
24#include <asm/mostek.h>
25#include <asm/system.h>
26#include <asm/irq.h>
27#include <asm/io.h>
28
/* NOTE(review): lvl14_save[0..3] presumably hold Linux's level-14
 * handler words and lvl14_save[4] the address of the live trap-table
 * slot -- set up elsewhere; confirm against the definition site.
 */
extern unsigned long lvl14_save[5];
static unsigned long *linux_lvl14 = NULL;	/* live trap-table slot */
static unsigned long obp_lvl14[4];		/* saved OBP handler words */
32
33/*
34 * Call with timer IRQ closed.
35 * First time we do it with disable_irq, later prom code uses spin_lock_irq().
36 */
37void install_linux_ticker(void)
38{
39
40 if (!linux_lvl14)
41 return;
42 linux_lvl14[0] = lvl14_save[0];
43 linux_lvl14[1] = lvl14_save[1];
44 linux_lvl14[2] = lvl14_save[2];
45 linux_lvl14[3] = lvl14_save[3];
46}
47
48void install_obp_ticker(void)
49{
50
51 if (!linux_lvl14)
52 return;
53 linux_lvl14[0] = obp_lvl14[0];
54 linux_lvl14[1] = obp_lvl14[1];
55 linux_lvl14[2] = obp_lvl14[2];
56 linux_lvl14[3] = obp_lvl14[3];
57}
58
/* Take over the level-14 profile timer from OBP: save the four OBP
 * handler words from the live trap-table slot (address in
 * lvl14_save[4]), install Linux's words, and request the IRQ.
 *
 * NOTE(review): disable_irq() runs before the NULL-handler check and
 * before request_irq(); on either failure path the IRQ stays disabled
 * with no matching enable_irq() -- confirm this is intentional.
 */
void claim_ticker14(irqreturn_t (*handler)(int, void *, struct pt_regs *),
		    int irq_nr, unsigned int timeout )
{
	int cpu = smp_processor_id();

	/* first we copy the obp handler instructions
	 */
	disable_irq(irq_nr);
	if (!handler)
		return;

	linux_lvl14 = (unsigned long *)lvl14_save[4];
	obp_lvl14[0] = linux_lvl14[0];
	obp_lvl14[1] = linux_lvl14[1];
	obp_lvl14[2] = linux_lvl14[2];
	obp_lvl14[3] = linux_lvl14[3];

	if (!request_irq(irq_nr,
			 handler,
			 (SA_INTERRUPT | SA_STATIC_ALLOC),
			 "counter14",
			 NULL)) {
		install_linux_ticker();
		load_profile_irq(cpu, timeout);
		enable_irq(irq_nr);
	}
}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
new file mode 100644
index 000000000000..6486cbf2efe9
--- /dev/null
+++ b/arch/sparc/kernel/time.c
@@ -0,0 +1,641 @@
1/* $Id: time.c,v 1.60 2002/01/23 14:33:55 davem Exp $
2 * linux/arch/sparc/kernel/time.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 *
7 * Chris Davis (cdavis@cois.on.ca) 03/27/1998
8 * Added support for the intersil on the sun4/4200
9 *
10 * Gleb Raiko (rajko@mech.math.msu.su) 08/18/1998
11 * Support for MicroSPARC-IIep, PCI CPU.
12 *
13 * This file handles the Sparc specific time handling details.
14 *
15 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
16 * "A Kernel Model for Precision Timekeeping" by Dave Mills
17 */
18#include <linux/config.h>
19#include <linux/errno.h>
20#include <linux/module.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/param.h>
24#include <linux/string.h>
25#include <linux/mm.h>
26#include <linux/interrupt.h>
27#include <linux/time.h>
28#include <linux/timex.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/ioport.h>
32#include <linux/profile.h>
33
34#include <asm/oplib.h>
35#include <asm/segment.h>
36#include <asm/timer.h>
37#include <asm/mostek.h>
38#include <asm/system.h>
39#include <asm/irq.h>
40#include <asm/io.h>
41#include <asm/idprom.h>
42#include <asm/machines.h>
43#include <asm/sun4paddr.h>
44#include <asm/page.h>
45#include <asm/pcic.h>
46
extern unsigned long wall_jiffies;

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

DEFINE_SPINLOCK(rtc_lock);
enum sparc_clock_type sp_clock_typ;
/* Serialises all accesses to the Mostek clock chip registers below. */
DEFINE_SPINLOCK(mostek_lock);
void __iomem *mstk48t02_regs = NULL;
static struct mostek48t08 *mstk48t08_regs = NULL;
static int set_rtc_mmss(unsigned long);
static int sbus_do_settimeofday(struct timespec *tv);

#ifdef CONFIG_SUN4
struct intersil *intersil_clock;
/* Write a command / interrupt-control byte to the Intersil chip. */
#define intersil_cmd(intersil_reg, intsil_cmd) intersil_reg->int_cmd_reg = \
	(intsil_cmd)

#define intersil_intr(intersil_reg, intsil_cmd) intersil_reg->int_intr_reg = \
	(intsil_cmd)

/* Start/stop the chip in 32K, normal, 24-hour mode with interrupts on. */
#define intersil_start(intersil_reg) intersil_cmd(intersil_reg, \
	( INTERSIL_START | INTERSIL_32K | INTERSIL_NORMAL | INTERSIL_24H |\
	  INTERSIL_INTR_ENABLE))

#define intersil_stop(intersil_reg) intersil_cmd(intersil_reg, \
	( INTERSIL_STOP | INTERSIL_32K | INTERSIL_NORMAL | INTERSIL_24H |\
	  INTERSIL_INTR_ENABLE))

/* Read (and thereby acknowledge) the chip's interrupt register. */
#define intersil_read_intr(intersil_reg, towhere) towhere = \
	intersil_reg->int_intr_reg

#endif
81
82unsigned long profile_pc(struct pt_regs *regs)
83{
84 extern char __copy_user_begin[], __copy_user_end[];
85 extern char __atomic_begin[], __atomic_end[];
86 extern char __bzero_begin[], __bzero_end[];
87 extern char __bitops_begin[], __bitops_end[];
88
89 unsigned long pc = regs->pc;
90
91 if (in_lock_functions(pc) ||
92 (pc >= (unsigned long) __copy_user_begin &&
93 pc < (unsigned long) __copy_user_end) ||
94 (pc >= (unsigned long) __atomic_begin &&
95 pc < (unsigned long) __atomic_end) ||
96 (pc >= (unsigned long) __bzero_begin &&
97 pc < (unsigned long) __bzero_end) ||
98 (pc >= (unsigned long) __bitops_begin &&
99 pc < (unsigned long) __bitops_end))
100 pc = regs->u_regs[UREG_RETPC];
101 return pc;
102}
103
/* Level-10 system counter/limit registers; read by do_gettimeoffset().
 * NOTE(review): presumably mapped by the platform timer init code --
 * confirm against sparc_init_timers().
 */
__volatile__ unsigned int *master_l10_counter;
__volatile__ unsigned int *master_l10_limit;

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */

/* Tick length in microseconds, derived from the NTP-adjusted tick_nsec. */
#define TICK_SIZE (tick_nsec / 1000)
113
/* Timer tick handler: advances jiffies/xtime under xtime_lock, drives
 * per-process accounting on UP, and writes the wall time back to the
 * hardware RTC (at most every 660 s) while NTP has us synchronized.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update;

#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING, regs);
#endif

	/* Protect counter clear so that do_gettimeoffset works */
	write_seqlock(&xtime_lock);
#ifdef CONFIG_SUN4
	if((idprom->id_machtype == (SM_SUN4 | SM_4_260)) ||
	   (idprom->id_machtype == (SM_SUN4 | SM_4_110))) {
		int temp;
		/* Reading the Intersil interrupt register acks the tick. */
		intersil_read_intr(intersil_clock, temp);
		/* re-enable the irq */
		enable_pil_irq(10);
	}
#endif
	clear_clock_irq();

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif


	/* Determine when to update the Mostek clock. */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
156
157/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void)
{
	struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
	unsigned char sec;
	int i, count;

	prom_printf("CLOCK: Clock was stopped. Kick start ");

	spin_lock_irq(&mostek_lock);

	/* Turn on the kick start bit to start the oscillator. */
	regs->creg |= MSTK_CREG_WRITE;
	regs->sec &= ~MSTK_STOP;
	regs->hour |= MSTK_KICK_START;
	regs->creg &= ~MSTK_CREG_WRITE;

	spin_unlock_irq(&mostek_lock);

	/* Delay to allow the clock oscillator to start. */
	/* Wait for the seconds register to tick three times, busy-waiting
	 * between polls; a dot is printed for each observed tick.
	 */
	sec = MSTK_REG_SEC(regs);
	for (i = 0; i < 3; i++) {
		while (sec == MSTK_REG_SEC(regs))
			for (count = 0; count < 100000; count++)
				/* nothing */ ;
		prom_printf(".");
		sec = regs->sec;
	}
	prom_printf("\n");

	spin_lock_irq(&mostek_lock);

	/* Turn off kick start and set a "valid" time and date. */
	regs->creg |= MSTK_CREG_WRITE;
	regs->hour &= ~MSTK_KICK_START;
	MSTK_SET_REG_SEC(regs,0);
	MSTK_SET_REG_MIN(regs,0);
	MSTK_SET_REG_HOUR(regs,0);
	MSTK_SET_REG_DOW(regs,5);
	MSTK_SET_REG_DOM(regs,1);
	MSTK_SET_REG_MONTH(regs,8);
	MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
	regs->creg &= ~MSTK_CREG_WRITE;

	spin_unlock_irq(&mostek_lock);

	/* Ensure the kick start bit is off. If it isn't, turn it off. */
	while (regs->hour & MSTK_KICK_START) {
		prom_printf("CLOCK: Kick start still on!\n");

		spin_lock_irq(&mostek_lock);
		regs->creg |= MSTK_CREG_WRITE;
		regs->hour &= ~MSTK_KICK_START;
		regs->creg &= ~MSTK_CREG_WRITE;
		spin_unlock_irq(&mostek_lock);
	}

	prom_printf("CLOCK: Kick start procedure successful.\n");
}
216
217/* Return nonzero if the clock chip battery is low. */
218static __inline__ int has_low_battery(void)
219{
220 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
221 unsigned char data1, data2;
222
223 spin_lock_irq(&mostek_lock);
224 data1 = regs->eeprom[0]; /* Read some data. */
225 regs->eeprom[0] = ~data1; /* Write back the complement. */
226 data2 = regs->eeprom[0]; /* Read back the complement. */
227 regs->eeprom[0] = data1; /* Restore the original value. */
228 spin_unlock_irq(&mostek_lock);
229
230 return (data1 == data2); /* Was the write blocked? */
231}
232
233/* Probe for the real time clock chip on Sun4 */
/* Probe the real time clock on sun4: a Mostek 48T02 on the 4/330, an
 * Intersil chip on the 4/260.  No-op on non-CONFIG_SUN4 builds.
 */
static __inline__ void sun4_clock_probe(void)
{
#ifdef CONFIG_SUN4
	int temp;
	struct resource r;

	memset(&r, 0, sizeof(r));
	if( idprom->id_machtype == (SM_SUN4 | SM_4_330) ) {
		sp_clock_typ = MSTK48T02;
		r.start = sun4_clock_physaddr;
		mstk48t02_regs = sbus_ioremap(&r, 0,
				       sizeof(struct mostek48t02), NULL);
		mstk48t08_regs = NULL;  /* To catch weirdness */
		intersil_clock = NULL;  /* just in case */

		/* Kick start the clock if it is completely stopped. */
		if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
			kick_start_clock();
	} else if( idprom->id_machtype == (SM_SUN4 | SM_4_260)) {
		/* intersil setup code */
		printk("Clock: INTERSIL at %8x ",sun4_clock_physaddr);
		sp_clock_typ = INTERSIL;
		r.start = sun4_clock_physaddr;
		intersil_clock = (struct intersil *)
		    sbus_ioremap(&r, 0, sizeof(*intersil_clock), "intersil");
		mstk48t02_regs = 0;  /* just be sure */
		mstk48t08_regs = NULL;  /* ditto */
		/* initialise the clock */

		intersil_intr(intersil_clock,INTERSIL_INT_100HZ);

		intersil_start(intersil_clock);

		/* Wait for two full interrupt periods (bit 7 of the
		 * interrupt register) so the chip settles before stopping.
		 */
		intersil_read_intr(intersil_clock, temp);
		while (!(temp & 0x80))
			intersil_read_intr(intersil_clock, temp);

		intersil_read_intr(intersil_clock, temp);
		while (!(temp & 0x80))
			intersil_read_intr(intersil_clock, temp);

		intersil_stop(intersil_clock);

	}
#endif
}
280
281/* Probe for the mostek real time clock chip. */
/* Locate the "eeprom" PROM node for this machine type, identify the
 * Mostek model (mk48t02 or mk48t08) and map its registers; halts via
 * the PROM on any failure since the kernel cannot keep time without it.
 */
static __inline__ void clock_probe(void)
{
	struct linux_prom_registers clk_reg[2];
	char model[128];
	register int node, cpuunit, bootbus;
	struct resource r;

	cpuunit = bootbus = 0;
	memset(&r, 0, sizeof(r));

	/* Determine the correct starting PROM node for the probe. */
	node = prom_getchild(prom_root_node);
	switch (sparc_cpu_model) {
	case sun4c:
		break;
	case sun4m:
		node = prom_getchild(prom_searchsiblings(node, "obio"));
		break;
	case sun4d:
		/* Clock hangs off the first cpu-unit's bootbus; remember
		 * both nodes for the generic-ranges fixup below.
		 */
		node = prom_getchild(bootbus = prom_searchsiblings(prom_getchild(cpuunit = prom_searchsiblings(node, "cpu-unit")), "bootbus"));
		break;
	default:
		prom_printf("CLOCK: Unsupported architecture!\n");
		prom_halt();
	}

	/* Find the PROM node describing the real time clock. */
	sp_clock_typ = MSTK_INVALID;
	node = prom_searchsiblings(node,"eeprom");
	if (!node) {
		prom_printf("CLOCK: No clock found!\n");
		prom_halt();
	}

	/* Get the model name and setup everything up. */
	model[0] = '\0';
	prom_getstring(node, "model", model, sizeof(model));
	if (strcmp(model, "mk48t02") == 0) {
		sp_clock_typ = MSTK48T02;
		if (prom_getproperty(node, "reg", (char *) clk_reg, sizeof(clk_reg)) == -1) {
			prom_printf("clock_probe: FAILED!\n");
			prom_halt();
		}
		/* Translate the PROM "reg" entry into a CPU physical address. */
		if (sparc_cpu_model == sun4d)
			prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
		else
			prom_apply_obio_ranges(clk_reg, 1);
		/* Map the clock register io area read-only */
		r.flags = clk_reg[0].which_io;
		r.start = clk_reg[0].phys_addr;
		mstk48t02_regs = sbus_ioremap(&r, 0,
		    sizeof(struct mostek48t02), "mk48t02");
		mstk48t08_regs = NULL;  /* To catch weirdness */
	} else if (strcmp(model, "mk48t08") == 0) {
		sp_clock_typ = MSTK48T08;
		if(prom_getproperty(node, "reg", (char *) clk_reg,
				    sizeof(clk_reg)) == -1) {
			prom_printf("clock_probe: FAILED!\n");
			prom_halt();
		}
		if (sparc_cpu_model == sun4d)
			prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
		else
			prom_apply_obio_ranges(clk_reg, 1);
		/* Map the clock register io area read-only */
		/* XXX r/o attribute is somewhere in r.flags */
		r.flags = clk_reg[0].which_io;
		r.start = clk_reg[0].phys_addr;
		mstk48t08_regs = (struct mostek48t08 *) sbus_ioremap(&r, 0,
		    sizeof(struct mostek48t08), "mk48t08");

		/* The 48T08 embeds a 48T02-compatible register block. */
		mstk48t02_regs = &mstk48t08_regs->regs;
	} else {
		prom_printf("CLOCK: Unknown model name '%s'\n",model);
		prom_halt();
	}

	/* Report a low battery voltage condition. */
	if (has_low_battery())
		printk(KERN_CRIT "NVRAM: Low battery voltage!\n");

	/* Kick start the clock if it is completely stopped. */
	if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
		kick_start_clock();
}
367
/* SBus/OBIO time init: probe the clock chip, register the tick handler,
 * then read the initial wall time from the chip into xtime.
 */
void __init sbus_time_init(void)
{
	unsigned int year, mon, day, hour, min, sec;
	struct mostek48t02 *mregs;

#ifdef CONFIG_SUN4
	int temp;
	struct intersil *iregs;
#endif

	BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
	btfixup();

	if (ARCH_SUN4)
		sun4_clock_probe();
	else
		clock_probe();

	sparc_init_timers(timer_interrupt);

	/* NOTE: the braces of the following if/else straddle the #ifdef;
	 * on !CONFIG_SUN4 builds the Mostek path is unconditional.
	 */
#ifdef CONFIG_SUN4
	if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
#endif
	mregs = (struct mostek48t02 *)mstk48t02_regs;
	if(!mregs) {
		prom_printf("Something wrong, clock regs not mapped yet.\n");
		prom_halt();
	}
	spin_lock_irq(&mostek_lock);
	/* MSTK_CREG_READ latches the time registers for a coherent read. */
	mregs->creg |= MSTK_CREG_READ;
	sec = MSTK_REG_SEC(mregs);
	min = MSTK_REG_MIN(mregs);
	hour = MSTK_REG_HOUR(mregs);
	day = MSTK_REG_DOM(mregs);
	mon = MSTK_REG_MONTH(mregs);
	year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
	set_normalized_timespec(&wall_to_monotonic,
	                        -xtime.tv_sec, -xtime.tv_nsec);
	mregs->creg &= ~MSTK_CREG_READ;
	spin_unlock_irq(&mostek_lock);
#ifdef CONFIG_SUN4
	} else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
		/* initialise the intersil on sun4 */

		iregs=intersil_clock;
		if(!iregs) {
			prom_printf("Something wrong, clock regs not mapped yet.\n");
			prom_halt();
		}

		intersil_intr(intersil_clock,INTERSIL_INT_100HZ);
		disable_pil_irq(10);
		intersil_stop(iregs);
		intersil_read_intr(intersil_clock, temp);

		/* Reading the centisecond register latches the time. */
		temp = iregs->clk.int_csec;

		sec = iregs->clk.int_sec;
		min = iregs->clk.int_min;
		hour = iregs->clk.int_hour;
		day = iregs->clk.int_day;
		mon = iregs->clk.int_month;
		year = MSTK_CVT_YEAR(iregs->clk.int_year);

		enable_pil_irq(10);
		intersil_start(iregs);

		xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
		set_normalized_timespec(&wall_to_monotonic,
               	                       -xtime.tv_sec, -xtime.tv_nsec);
		printk("%u/%u/%u %u:%u:%u\n",day,mon,year,hour,min,sec);
	}
#endif

	/* Now that OBP ticker has been silenced, it is safe to enable IRQ. */
	local_irq_enable();
}
448
/* Kernel time-init entry point: PCIC-based (PCI) machines have their
 * own timer setup in pci_time_init(); everyone else uses SBus/OBIO.
 */
void __init time_init(void)
{
#ifdef CONFIG_PCI
	extern void pci_time_init(void);
	if (pcic_present()) {
		pci_time_init();
		return;
	}
#endif
	sbus_time_init();
}
460
461extern __inline__ unsigned long do_gettimeoffset(void)
462{
463 return (*master_l10_counter >> 10) & 0x1fffff;
464}
465
466/*
467 * Returns nanoseconds
468 * XXX This is a suboptimal implementation.
469 */
470unsigned long long sched_clock(void)
471{
472 return (unsigned long long)jiffies * (1000000000 / HZ);
473}
474
475/* Ok, my cute asm atomicity trick doesn't work anymore.
476 * There are just too many variables that need to be protected
477 * now (both members of xtime, wall_jiffies, et al.)
478 */
/* Fill *tv with the current wall time: xtime plus the sub-tick offset
 * from the hardware counter, retried via the xtime seqlock if a tick
 * updates the time concurrently.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;
	/* Largest per-tick offset NTP allows while slewing the clock down. */
	unsigned long max_ntp_tick = tick_usec - tickadj;

	do {
		unsigned long lost;

		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = do_gettimeoffset();
		/* Ticks fired but not yet folded into xtime. */
		lost = jiffies - wall_jiffies;

		/*
		 * If time_adjust is negative then NTP is slowing the clock
		 * so make sure not to go into next possible interval.
		 * Better to lose some accuracy than have time go backwards..
		 */
		if (unlikely(time_adjust < 0)) {
			usec = min(usec, max_ntp_tick);

			if (lost)
				usec += lost * max_ntp_tick;
		}
		else if (unlikely(lost))
			usec += lost * tick_usec;

		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Normalise: carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
519
520EXPORT_SYMBOL(do_gettimeofday);
521
522int do_settimeofday(struct timespec *tv)
523{
524 int ret;
525
526 write_seqlock_irq(&xtime_lock);
527 ret = bus_do_settimeofday(tv);
528 write_sequnlock_irq(&xtime_lock);
529 clock_was_set();
530 return ret;
531}
532
533EXPORT_SYMBOL(do_settimeofday);
534
/* Bus-specific settimeofday: validate *tv, rewind it by the interval
 * gettimeofday() would have added since the last tick, then update
 * xtime/wall_to_monotonic and reset the NTP state.  Caller holds the
 * xtime_lock writer side.  Returns 0 or -EINVAL.
 */
static int sbus_do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * (do_gettimeoffset() +
			(jiffies - wall_jiffies) * (USEC_PER_SEC / HZ));

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Mark the clock unsynchronised so NTP starts over. */
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	return 0;
}
564
565/*
566 * BUG: This routine does not handle hour overflow properly; it just
567 * sets the minutes. Usually you won't notice until after reboot!
568 */
569static int set_rtc_mmss(unsigned long nowtime)
570{
571 int real_seconds, real_minutes, mostek_minutes;
572 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
573 unsigned long flags;
574#ifdef CONFIG_SUN4
575 struct intersil *iregs = intersil_clock;
576 int temp;
577#endif
578
579 /* Not having a register set can lead to trouble. */
580 if (!regs) {
581#ifdef CONFIG_SUN4
582 if(!iregs)
583 return -1;
584 else {
585 temp = iregs->clk.int_csec;
586
587 mostek_minutes = iregs->clk.int_min;
588
589 real_seconds = nowtime % 60;
590 real_minutes = nowtime / 60;
591 if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
592 real_minutes += 30; /* correct for half hour time zone */
593 real_minutes %= 60;
594
595 if (abs(real_minutes - mostek_minutes) < 30) {
596 intersil_stop(iregs);
597 iregs->clk.int_sec=real_seconds;
598 iregs->clk.int_min=real_minutes;
599 intersil_start(iregs);
600 } else {
601 printk(KERN_WARNING
602 "set_rtc_mmss: can't update from %d to %d\n",
603 mostek_minutes, real_minutes);
604 return -1;
605 }
606
607 return 0;
608 }
609#endif
610 }
611
612 spin_lock_irqsave(&mostek_lock, flags);
613 /* Read the current RTC minutes. */
614 regs->creg |= MSTK_CREG_READ;
615 mostek_minutes = MSTK_REG_MIN(regs);
616 regs->creg &= ~MSTK_CREG_READ;
617
618 /*
619 * since we're only adjusting minutes and seconds,
620 * don't interfere with hour overflow. This avoids
621 * messing with unknown time zones but requires your
622 * RTC not to be off by more than 15 minutes
623 */
624 real_seconds = nowtime % 60;
625 real_minutes = nowtime / 60;
626 if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
627 real_minutes += 30; /* correct for half hour time zone */
628 real_minutes %= 60;
629
630 if (abs(real_minutes - mostek_minutes) < 30) {
631 regs->creg |= MSTK_CREG_WRITE;
632 MSTK_SET_REG_SEC(regs,real_seconds);
633 MSTK_SET_REG_MIN(regs,real_minutes);
634 regs->creg &= ~MSTK_CREG_WRITE;
635 spin_unlock_irqrestore(&mostek_lock, flags);
636 return 0;
637 } else {
638 spin_unlock_irqrestore(&mostek_lock, flags);
639 return -1;
640 }
641}
diff --git a/arch/sparc/kernel/trampoline.S b/arch/sparc/kernel/trampoline.S
new file mode 100644
index 000000000000..2dcdaa1fd8cd
--- /dev/null
+++ b/arch/sparc/kernel/trampoline.S
@@ -0,0 +1,162 @@
1/* $Id: trampoline.S,v 1.14 2002/01/11 08:45:38 davem Exp $
2 * trampoline.S: SMP cpu boot-up trampoline code.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/init.h>
9#include <asm/head.h>
10#include <asm/psr.h>
11#include <asm/page.h>
12#include <asm/asi.h>
13#include <asm/ptrace.h>
14#include <asm/vaddrs.h>
15#include <asm/contregs.h>
16#include <asm/thread_info.h>
17
18 .globl sun4m_cpu_startup, __smp4m_processor_id
19 .globl sun4d_cpu_startup, __smp4d_processor_id
20
21 __INIT
22 .align 4
23
24/* When we start up a cpu for the first time it enters this routine.
25 * This initializes the chip from whatever state the prom left it
26 * in and sets PIL in %psr to 15, no irqs.
27 */
28
/* Each possible sun4m secondary enters at its own label, loading %g3
 * with that cpu's private trap-table address; %g3 later programmed
 * into %tbr also identifies the cpu.
 */
sun4m_cpu_startup:
cpu1_startup:
	sethi %hi(trapbase_cpu1), %g3
	b 1f
	 or %g3, %lo(trapbase_cpu1), %g3	! delay slot

cpu2_startup:
	sethi %hi(trapbase_cpu2), %g3
	b 1f
	 or %g3, %lo(trapbase_cpu2), %g3	! delay slot

cpu3_startup:
	sethi %hi(trapbase_cpu3), %g3
	b 1f
	 or %g3, %lo(trapbase_cpu3), %g3	! delay slot

1:
	/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
	set (PSR_PIL | PSR_S | PSR_PS), %g1
	wr %g1, 0x0, %psr		! traps off though
	WRITE_PAUSE

	/* Our %wim is one behind CWP */
	mov 2, %g1
	wr %g1, 0x0, %wim
	WRITE_PAUSE

	/* This identifies "this cpu". */
	wr %g3, 0x0, %tbr
	WRITE_PAUSE

	/* Give ourselves a stack and curptr. */
	/* Index current_set by cpu id (tbr bits 13:12, scaled to a
	 * word offset) to find this cpu's task, then place the initial
	 * stack at the top of its thread area.
	 */
	set current_set, %g5
	srl %g3, 10, %g4
	and %g4, 0xc, %g4
	ld [%g5 + %g4], %g6

	sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
	or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
	add %g6, %sp, %sp

	/* Turn on traps (PSR_ET). */
	rd %psr, %g1
	wr %g1, PSR_ET, %psr		! traps on
	WRITE_PAUSE

	/* Init our caches, etc. */
	/* poke_srmmu is a function pointer filled in by the srmmu setup. */
	set poke_srmmu, %g5
	ld [%g5], %g5
	call %g5
	 nop

	/* Start this processor. */
	call smp4m_callin
	 nop

	b,a smp_do_cpu_idle
86
	.text
	.align 4

/* Fall into the idle loop; if cpu_idle ever returns, panic. */
smp_do_cpu_idle:
	call cpu_idle
	 mov 0, %o0			! delay slot: argument 0

	call cpu_panic
	 nop

/* Return this cpu's id in %g2.  The delay slot restores %o7 from %g1,
 * where the caller is expected to have stashed its return address.
 * sun4m: id is bits 13:12 of the per-cpu %tbr value.
 */
__smp4m_processor_id:
	rd %tbr, %g2
	srl %g2, 12, %g2
	and %g2, 3, %g2
	retl
	 mov %g1, %o7

/* sun4d: id was stored in the ASI_M_VIKING_TMP1 scratch register at boot. */
__smp4d_processor_id:
	lda [%g0] ASI_M_VIKING_TMP1, %g2
	retl
	 mov %g1, %o7
108
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID	0xf0140000

	__INIT
	.align 4

/* Secondary-cpu entry for sun4d.  Unlike sun4m there is one shared
 * trap table; the cpu id comes from the bootbus CPUID register and is
 * cached in ASI_M_VIKING_TMP1 for __smp4d_processor_id.
 */
sun4d_cpu_startup:
	/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
	set (PSR_PIL | PSR_S | PSR_PS), %g1
	wr %g1, 0x0, %psr		! traps off though
	WRITE_PAUSE

	/* Our %wim is one behind CWP */
	mov 2, %g1
	wr %g1, 0x0, %wim
	WRITE_PAUSE

	/* Set tbr - we use just one trap table. */
	set trapbase, %g1
	wr %g1, 0x0, %tbr
	WRITE_PAUSE

	/* Get our CPU id out of bootbus */
	/* The id lives in bits 7:3 of the CPUID byte; keep the masked
	 * byte in %g3 (used as a scaled index below) and the shifted id
	 * in %g1 for the scratch register.
	 */
	set SUN4D_BOOTBUS_CPUID, %g3
	lduba [%g3] ASI_M_CTL, %g3
	and %g3, 0xf8, %g3
	srl %g3, 3, %g1
	sta %g1, [%g0] ASI_M_VIKING_TMP1

	/* Give ourselves a stack and curptr. */
	set current_set, %g5
	srl %g3, 1, %g4
	ld [%g5 + %g4], %g6

	sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
	or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
	add %g6, %sp, %sp

	/* Turn on traps (PSR_ET). */
	rd %psr, %g1
	wr %g1, PSR_ET, %psr		! traps on
	WRITE_PAUSE

	/* Init our caches, etc. */
	set poke_srmmu, %g5
	ld [%g5], %g5
	call %g5
	 nop

	/* Start this processor. */
	call smp4d_callin
	 nop

	b,a smp_do_cpu_idle
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
new file mode 100644
index 000000000000..3f451ae66482
--- /dev/null
+++ b/arch/sparc/kernel/traps.c
@@ -0,0 +1,515 @@
1/* $Id: traps.c,v 1.64 2000/09/03 15:00:49 anton Exp $
2 * arch/sparc/kernel/traps.c
3 *
4 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I hate traps on the sparc, grrr...
10 */
11
12#include <linux/config.h>
13#include <linux/sched.h> /* for jiffies */
14#include <linux/kernel.h>
15#include <linux/kallsyms.h>
16#include <linux/signal.h>
17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19
20#include <asm/delay.h>
21#include <asm/system.h>
22#include <asm/ptrace.h>
23#include <asm/oplib.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/kdebug.h>
27#include <asm/unistd.h>
28#include <asm/traps.h>
29
30/* #define TRAP_DEBUG */
31
/* Ad-hoc trap tracing ring buffer.  NOTE(review): no writer is
 * visible in this file -- presumably filled from assembly when
 * TRAP_DEBUG-style tracing is enabled; confirm before removing. */
struct trap_trace_entry {
	unsigned long pc;	/* %pc at trap time */
	unsigned long type;	/* hardware trap type */
};

int trap_curbuf = 0;		/* next free slot in trapbuf */
struct trap_trace_entry trapbuf[1024];
39
/* Debug hook: log the syscall a task is entering.  On sparc %g1
 * carries the syscall number and %i0 the first argument; the
 * "could be" covers indirect-syscall conventions where %i0 holds
 * the real number (NOTE(review): assumption -- confirm against the
 * syscall entry path). */
void syscall_trace_entry(struct pt_regs *regs)
{
	printk("%s[%d]: ", current->comm, current->pid);
	printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1],
	       (int) regs->u_regs[UREG_I0]);
}
46
/* Syscall-exit counterpart of syscall_trace_entry(); intentionally
 * empty, kept so the hook pair stays symmetrical. */
void syscall_trace_exit(struct pt_regs *regs)
{
}
50
/* Non-maskable interrupt on sun4m: dump the asynchronous fault
 * status/address registers, show state, and halt to the PROM.
 * Never returns. */
void sun4m_nmi(struct pt_regs *regs)
{
	unsigned long afsr, afar;

	printk("Aieee: sun4m NMI received!\n");
	/* XXX HyperSparc hack XXX */
	__asm__ __volatile__("mov 0x500, %%g1\n\t"
			     "lda [%%g1] 0x4, %0\n\t"	/* read AFSR */
			     "mov 0x600, %%g1\n\t"
			     "lda [%%g1] 0x4, %1\n\t" :	/* read AFAR */
			     "=r" (afsr), "=r" (afar));
	printk("afsr=%08lx afar=%08lx\n", afsr, afar);
	printk("you lose buddy boy...\n");
	show_regs(regs);
	prom_halt();
}
67
/* Non-maskable interrupt on sun4d: no fault registers to report,
 * just dump state and halt to the PROM.  Never returns. */
void sun4d_nmi(struct pt_regs *regs)
{
	printk("Aieee: sun4d NMI received!\n");
	printk("you lose buddy boy...\n");
	show_regs(regs);
	prom_halt();
}
75
/* Dump the nine instruction words surrounding a faulting pc, with the
 * word at pc itself bracketed by '<' and '>'.  A misaligned pointer is
 * ignored silently -- dereferencing it would only trap again.
 */
void instruction_dump (unsigned long *pc)
{
	int off;

	if (((unsigned long) pc) & 3)
		return;

	for (off = -3; off < 6; off++) {
		char lmark = off ? ' ' : '<';
		char rmark = off ? ' ' : '>';

		printk("%c%08lx%c", lmark, pc[off], rmark);
	}
	printk("\n");
}
87
/* Window-ring push/pop used to spill all live register windows to the
 * stack so the backtrace below can read them from memory. */
#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")

/* Print an oops (registers plus a backtrace walked through saved
 * register windows), then terminate the current task.  Despite the
 * name it always exits: with SIGKILL when the trap came from kernel
 * mode (PSR_PS set in the saved psr), SIGSEGV otherwise. */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;	/* numbers successive oopses in the log */
	int count = 0;

	/* Amuse the user. */
	printk(
" \\|/ ____ \\|/\n"
" \"@'/ ,. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	show_regs(regs);

	/* Flush our windows out to the stack (see __SAVE above). */
	__SAVE; __SAVE; __SAVE; __SAVE;
	__SAVE; __SAVE; __SAVE; __SAVE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;

	{
		struct reg_window *rw = (struct reg_window *)regs->u_regs[UREG_FP];

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack. Set an upper
		 * bound in case our stack is trashed and we loop.
		 */
		while(rw &&
		      count++ < 30 &&
		      (((unsigned long) rw) >= PAGE_OFFSET) &&
		      !(((unsigned long) rw) & 0x7)) {
			printk("Caller[%08lx]", rw->ins[7]);
			print_symbol(": %s\n", rw->ins[7]);
			rw = (struct reg_window *)rw->ins[6];	/* caller's frame */
		}
	}
	printk("Instruction DUMP:");
	instruction_dump ((unsigned long *) regs->pc);
	if(regs->psr & PSR_PS)
		do_exit(SIGKILL);	/* kernel-mode fault: kill hard */
	do_exit(SIGSEGV);
}
133
/* Catch-all for trap types with no dedicated handler.  Types below
 * 0x80 are hardware traps we never expect to see here; 0x80 and up
 * are software traps, reflected to the offender as SIGILL/ILL_ILLTRP
 * with the software trap number in si_trapno. */
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
{
	siginfo_t info;

	if(type < 0x80) {
		/* Sun OS's puke from bad traps, Linux survives! */
		printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
		die_if_kernel("Whee... Hello Mr. Penguin", regs);
	}

	if(regs->psr & PSR_PS)	/* software trap from kernel mode: fatal */
		die_if_kernel("Kernel bad trap", regs);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->pc;
	info.si_trapno = type - 0x80;	/* ta N -> si_trapno == N */
	force_sig_info(SIGILL, &info, current);
}
154
/* Illegal-instruction trap.  First give do_user_muldiv() a chance to
 * emulate integer mul/div on CPUs lacking the instructions; only if
 * it declines do we deliver SIGILL/ILL_ILLOPC.  NOTE(review): assumes
 * do_user_muldiv() returns 0 on successful emulation -- confirm
 * against muldiv.c. */
void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
			    unsigned long psr)
{
	extern int do_user_muldiv (struct pt_regs *, unsigned long);
	siginfo_t info;

	if(psr & PSR_PS)
		die_if_kernel("Kernel illegal instruction", regs);
#ifdef TRAP_DEBUG
	printk("Ill instr. at pc=%08lx instruction is %08lx\n",
	       regs->pc, *(unsigned long *)regs->pc);
#endif
	if (!do_user_muldiv (regs, pc))
		return;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	send_sig_info(SIGILL, &info, current);
}
177
178void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
179 unsigned long psr)
180{
181 siginfo_t info;
182
183 if(psr & PSR_PS)
184 die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
185 info.si_signo = SIGILL;
186 info.si_errno = 0;
187 info.si_code = ILL_PRVOPC;
188 info.si_addr = (void __user *)pc;
189 info.si_trapno = 0;
190 send_sig_info(SIGILL, &info, current);
191}
192
/* XXX User may want to be allowed to do this. XXX */

/* Memory-address-not-aligned trap that reached the generic handler.
 * Kernel mode is treated as a bug and oopses; user mode gets
 * SIGBUS/BUS_ADRALN, with the fault address reported as 0 (see the
 * FIXME below). */
void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
			    unsigned long psr)
{
	siginfo_t info;

	if(regs->psr & PSR_PS) {
		printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
		       regs->u_regs[UREG_RETPC]);
		die_if_kernel("BOGUS", regs);
		/* die_if_kernel("Kernel MNA access", regs); */
	}
#if 0
	show_regs (regs);
	instruction_dump ((unsigned long *) regs->pc);
	printk ("do_MNA!\n");
#endif
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = /* FIXME: Should dig out mna address */ (void *)0;
	info.si_trapno = 0;
	send_sig_info(SIGBUS, &info, current);
}
218
/* Low-level FPU state movers, defined elsewhere (assembly). */
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

/* Pristine FPU state loaded on a task's first FPU use: a clear %fsr
 * and all-ones register contents. */
static unsigned long init_fsr = 0x0UL;
static unsigned long init_fregs[32] __attribute__ ((aligned (8))) =
		{ ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
		  ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
		  ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
		  ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL };
229
/* FPU-disabled trap: the task executed an FP instruction with PSR_EF
 * clear.  Turn the FPU on and make its state current for this task,
 * loading fresh initial state on first use.  The UP path does lazy
 * FPU switching keyed off last_task_used_math (saving the previous
 * owner's state if needed); the SMP path reloads from the task struct
 * and marks the thread as an FPU user via _TIF_USEDFPU. */
void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
		 unsigned long psr)
{
	/* Sanity check... */
	if(psr & PSR_PS)
		die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);

	put_psr(get_psr() | PSR_EF);	/* Allow FPU ops. */
	regs->psr |= PSR_EF;		/* keep EF set when the trap returns */
#ifndef CONFIG_SMP
	if(last_task_used_math == current)
		return;			/* FPU already holds our state */
	if(last_task_used_math) {
		/* Other processes fpu state, save away */
		struct task_struct *fptask = last_task_used_math;
		fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr,
		       &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth);
	}
	last_task_used_math = current;
	if(used_math()) {
		fpload(&current->thread.float_regs[0], &current->thread.fsr);
	} else {
		/* Set initial sane state. */
		fpload(&init_fregs[0], &init_fsr);
		set_used_math();
	}
#else
	if(!used_math()) {
		fpload(&init_fregs[0], &init_fsr);
		set_used_math();
	} else {
		fpload(&current->thread.float_regs[0], &current->thread.fsr);
	}
	current_thread_info()->flags |= _TIF_USEDFPU;
#endif
}
266
/* Scratch FPU save area for the case where an FP exception arrives
 * while no task owns the FPU: the state is dumped here and discarded
 * (see do_fpe_trap in this file). */
static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
static unsigned long fake_fsr;
static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
static unsigned long fake_depth;

/* FP instruction emulator; nonzero return means it completed the op. */
extern int do_mathemu(struct pt_regs *, struct task_struct *);
273
/* FP-exception trap.  Save the FPU owner's state, inspect the fsr
 * trap-type field (ftt, fsr bits 16:14) and, for unfinished or
 * unimplemented FPops, let do_mathemu() complete the instruction in
 * software.  On successful emulation the corrected state is loaded
 * back and the trap is invisible to the task; otherwise the owner
 * gets SIGFPE with si_code decoded from the exception bits of the
 * saved %fsr.  A kernel-mode FP exception is skipped (pc <- npc) a
 * couple of times, counted by `calls', before being declared fatal. */
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
		 unsigned long psr)
{
	static int calls;
	siginfo_t info;
	unsigned long fsr;
	int ret = 0;
#ifndef CONFIG_SMP
	struct task_struct *fpt = last_task_used_math;	/* lazy-FPU owner */
#else
	struct task_struct *fpt = current;
#endif
	put_psr(get_psr() | PSR_EF);	/* FPU must be on to fpsave() */
	/* If nobody owns the fpu right now, just clear the
	 * error into our fake static buffer and hope it don't
	 * happen again. Thank you crashme...
	 */
#ifndef CONFIG_SMP
	if(!fpt) {
#else
	if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
#endif
		fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
		regs->psr &= ~PSR_EF;
		return;
	}
	fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
	       &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
#ifdef DEBUG_FPU
	printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
#endif

	switch ((fpt->thread.fsr & 0x1c000)) {
	/* switch on the contents of the ftt [floating point trap type] field */
#ifdef DEBUG_FPU
	case (1 << 14):
		printk("IEEE_754_exception\n");
		break;
#endif
	case (2 << 14):  /* unfinished_FPop (underflow & co) */
	case (3 << 14):  /* unimplemented_FPop (quad stuff, maybe sqrt) */
		ret = do_mathemu(regs, fpt);
		break;
#ifdef DEBUG_FPU
	case (4 << 14):
		printk("sequence_error (OS bug...)\n");
		break;
	case (5 << 14):
		printk("hardware_error (uhoh!)\n");
		break;
	case (6 << 14):
		printk("invalid_fp_register (user error)\n");
		break;
#endif /* DEBUG_FPU */
	}
	/* If we successfully emulated the FPop, we pretend the trap never happened :-> */
	if (ret) {
		fpload(&current->thread.float_regs[0], &current->thread.fsr);
		return;
	}
	/* nope, better SIGFPE the offending process... */

#ifdef CONFIG_SMP
	fpt->thread_info->flags &= ~_TIF_USEDFPU;
#endif
	if(psr & PSR_PS) {
		/* The first fsr store/load we tried trapped,
		 * the second one will not (we hope).
		 */
		printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
		       regs->pc);
		regs->pc = regs->npc;
		regs->npc += 4;
		calls++;
		if(calls > 2)
			die_if_kernel("Too many Penguin-FPU traps from kernel mode",
				      regs);
		return;
	}

	fsr = fpt->thread.fsr;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	info.si_code = __SI_FAULT;
	if ((fsr & 0x1c000) == (1 << 14)) {	/* IEEE_754_exception */
		if (fsr & 0x10)
			info.si_code = FPE_FLTINV;
		else if (fsr & 0x08)
			info.si_code = FPE_FLTOVF;
		else if (fsr & 0x04)
			info.si_code = FPE_FLTUND;
		else if (fsr & 0x02)
			info.si_code = FPE_FLTDIV;
		else if (fsr & 0x01)
			info.si_code = FPE_FLTRES;
	}
	send_sig_info(SIGFPE, &info, fpt);
#ifndef CONFIG_SMP
	last_task_used_math = NULL;
#endif
	regs->psr &= ~PSR_EF;	/* next FP use re-traps through do_fpd_trap */
	if(calls > 0)
		calls=0;
}
380
381void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
382 unsigned long psr)
383{
384 siginfo_t info;
385
386 if(psr & PSR_PS)
387 die_if_kernel("Penguin overflow trap from kernel mode", regs);
388 info.si_signo = SIGEMT;
389 info.si_errno = 0;
390 info.si_code = EMT_TAGOVF;
391 info.si_addr = (void __user *)pc;
392 info.si_trapno = 0;
393 send_sig_info(SIGEMT, &info, current);
394}
395
/* Watchpoint trap.  User-mode occurrences are silently ignored; a
 * kernel-mode one is unexpected (we never program watchpoints), so
 * panic with a request for enlightenment. */
void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
		       unsigned long psr)
{
#ifdef TRAP_DEBUG
	printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
	       pc, npc, psr);
#endif
	if(psr & PSR_PS)
		panic("Tell me what a watchpoint trap is, and I'll then deal "
		      "with such a beast...");
}
407
408void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
409 unsigned long psr)
410{
411 siginfo_t info;
412
413#ifdef TRAP_DEBUG
414 printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
415 pc, npc, psr);
416#endif
417 info.si_signo = SIGBUS;
418 info.si_errno = 0;
419 info.si_code = BUS_OBJERR;
420 info.si_addr = (void __user *)pc;
421 info.si_trapno = 0;
422 force_sig_info(SIGBUS, &info, current);
423}
424
425void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc,
426 unsigned long psr)
427{
428 siginfo_t info;
429
430 info.si_signo = SIGILL;
431 info.si_errno = 0;
432 info.si_code = ILL_COPROC;
433 info.si_addr = (void __user *)pc;
434 info.si_trapno = 0;
435 send_sig_info(SIGILL, &info, current);
436}
437
438void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
439 unsigned long psr)
440{
441 siginfo_t info;
442
443#ifdef TRAP_DEBUG
444 printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
445 pc, npc, psr);
446#endif
447 info.si_signo = SIGILL;
448 info.si_errno = 0;
449 info.si_code = ILL_COPROC;
450 info.si_addr = (void __user *)pc;
451 info.si_trapno = 0;
452 send_sig_info(SIGILL, &info, current);
453}
454
455void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
456 unsigned long psr)
457{
458 siginfo_t info;
459
460 info.si_signo = SIGFPE;
461 info.si_errno = 0;
462 info.si_code = FPE_INTDIV;
463 info.si_addr = (void __user *)pc;
464 info.si_trapno = 0;
465 send_sig_info(SIGFPE, &info, current);
466}
467
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the source location of a BUG() when verbose reporting is
 * configured; deliberately does nothing else. */
void do_BUG(const char *file, int line)
{
	// bust_spinlocks(1); XXX Not in our original BUG()
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
475
/* Since we have our mappings set up, on multiprocessors we can spin them
 * up here so that timer interrupts work during initialization.
 */

extern void sparc_cpu_startup(void);

/* Hand-off variables for secondary-cpu bringup.  NOTE(review): no
 * reader or writer is visible in this file -- confirm against the
 * SMP startup code before assuming they are live. */
int linux_smp_still_initting;
unsigned int thiscpus_tbr;
int thiscpus_mid;
485
/* Late trap-handling setup for the boot cpu.  The offsetof() checks
 * pin the TI_* constants used by assembly to the real struct
 * thread_info layout: thread_info_offsets_are_bolixed_pete() is
 * declared but never defined, so if any offset drifts the call
 * survives optimization and the kernel fails to link -- a poor man's
 * static assert. */
void trap_init(void)
{
	extern void thread_info_offsets_are_bolixed_pete(void);

	/* Force linker to barf if mismatched */
	if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
	    TI_TASK != offsetof(struct thread_info, task) ||
	    TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
	    TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) ||
	    TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_KPC != offsetof(struct thread_info, kpc) ||
	    TI_KPSR != offsetof(struct thread_info, kpsr) ||
	    TI_KWIM != offsetof(struct thread_info, kwim) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_W_SAVED != offsetof(struct thread_info, w_saved))
		thread_info_offsets_are_bolixed_pete();

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* NOTE: Other cpus have this done as they are started
	 * up on SMP.
	 */
}
diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c
new file mode 100644
index 000000000000..a6330fbc9dd9
--- /dev/null
+++ b/arch/sparc/kernel/unaligned.c
@@ -0,0 +1,548 @@
1/* $Id: unaligned.c,v 1.23 2001/12/21 00:54:31 davem Exp $
2 * unaligned.c: Unaligned load/store trap handling with special
3 * cases for the kernel to do them more quickly.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <asm/ptrace.h>
15#include <asm/processor.h>
16#include <asm/system.h>
17#include <asm/uaccess.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20
21/* #define DEBUG_MNA */
22
/* What kind of memory access a decoded load/store instruction does. */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, etc. */
	fpload,
	fpstore,
	invalid,
};

#ifdef DEBUG_MNA
/* Printable names indexed by enum direction, for the debug printk. */
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
37
38static inline enum direction decode_direction(unsigned int insn)
39{
40 unsigned long tmp = (insn >> 21) & 1;
41
42 if(!tmp)
43 return load;
44 else {
45 if(((insn>>19)&0x3f) == 15)
46 return both;
47 else
48 return store;
49 }
50}
51
52/* 8 = double-word, 4 = word, 2 = half-word */
53static inline int decode_access_size(unsigned int insn)
54{
55 insn = (insn >> 19) & 3;
56
57 if(!insn)
58 return 4;
59 else if(insn == 3)
60 return 8;
61 else if(insn == 2)
62 return 2;
63 else {
64 printk("Impossible unaligned trap. insn=%08x\n", insn);
65 die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
66 return 4; /* just to keep gcc happy. */
67 }
68}
69
/* 0x400000 = signed, 0 = unsigned */
/* Returns the sign bit itself (nonzero) for signed load variants,
 * zero for unsigned ones. */
static inline int decode_signedness(unsigned int insn)
{
	const unsigned int sign_bit = 0x400000;

	return insn & sign_bit;
}
75
/* If any register the instruction names is a windowed one (%l*/%i*,
 * i.e. regnum >= 16), push the window ring far enough that every live
 * window spills to the stack, so fetch_reg() can read the values from
 * the saved frames. */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd)
{
	if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		/* Wheee... */
		__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "save %sp, -0x40, %sp\n\t"
				     "restore; restore; restore; restore;\n\t"
				     "restore; restore; restore;\n\t");
	}
}
92
/* Sign-extend the 13-bit immediate field of an instruction word to a
 * full int.  The traditional `imm << 19 >> 19' form left-shifts into
 * the sign bit of a signed int, which is undefined behavior in ISO C;
 * compute the identical value with well-defined arithmetic instead:
 * keep the low 12 bits and subtract bit 12 back out when set. */
static inline int sign_extend_imm13(int imm)
{
	return (imm & 0xfff) - (imm & 0x1000);
}
97
/* Read register `reg' as the trapped context saw it.  Globals/outs
 * (0-15) come straight from pt_regs, with %g0 reading as zero;
 * locals/ins (16-31) live in the register window saved at %fp, which
 * maybe_flush_windows() must already have spilled.  No fault checking
 * -- this is the kernel-frame variant. */
static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	struct reg_window *win;

	if(reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	/* Ho hum, the slightly complicated case. */
	win = (struct reg_window *) regs->u_regs[UREG_FP];
	return win->locals[reg - 16]; /* yes, I know what this does... */
}
109
/* Fault-safe variant of fetch_reg() for user frames: checks the
 * window pointer's alignment and reads via get_user() so a bogus
 * stack yields -1 instead of an oops.  NOTE(review): -1 is also a
 * legal register value, so faults are indistinguishable -- tolerable
 * here since the result only feeds the best-effort siginfo address. */
static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	struct reg_window __user *win;
	unsigned long ret;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	/* Ho hum, the slightly complicated case. */
	win = (struct reg_window __user *) regs->u_regs[UREG_FP];

	if ((unsigned long)win & 3)
		return -1;

	if (get_user(ret, &win->locals[reg - 16]))
		return -1;

	return ret;
}
129
130static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
131{
132 struct reg_window *win;
133
134 if(reg < 16)
135 return &regs->u_regs[reg];
136 win = (struct reg_window *) regs->u_regs[UREG_FP];
137 return &win->locals[reg - 16];
138}
139
/* Recompute the address an instruction accessed: rs1 + simm13 for the
 * immediate form (bit 13 set), rs1 + rs2 otherwise.  Windows are
 * flushed first so windowed registers can be read from the stack. */
static unsigned long compute_effective_address(struct pt_regs *regs,
					       unsigned int insn)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned int rd = (insn >> 25) & 0x1f;
	unsigned long ea;

	if (insn & 0x2000) {
		/* Immediate form. */
		maybe_flush_windows(rs1, 0, rd);
		ea = fetch_reg(rs1, regs) + sign_extend_imm13(insn);
	} else {
		/* Register + register form. */
		maybe_flush_windows(rs1, rs2, rd);
		ea = fetch_reg(rs1, regs) + fetch_reg(rs2, regs);
	}
	return ea;
}
155
/* Like compute_effective_address() but reads registers through the
 * fault-safe accessors, for use on possibly-bogus user frames.
 * NOTE(review): a faulted safe_fetch_reg() contributes -1 to the sum,
 * so the result is best-effort only -- fine for siginfo reporting. */
unsigned long safe_compute_effective_address(struct pt_regs *regs,
					     unsigned int insn)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned int rd = (insn >> 25) & 0x1f;

	if(insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd);
		return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd);
		return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
	}
}
171
/* This is just to make gcc think panic does return... */
/* (callers fall through into code that must still compile even
 * though it can never execute) */
static void unaligned_panic(char *str)
{
	panic(str);
}
177
/* Emulate an unaligned integer load byte-by-byte, entirely in asm.
 * dest_reg points at the saved register slot (two words written for
 * size 8), size is 2/4/8, saddr is the unaligned source, is_signed
 * requests sign extension for the halfword case, and errh is the asm
 * label the __ex_table entries branch to if any ldub faults. */
#define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({ \
__asm__ __volatile__ ( \
	"cmp %1, 8\n\t" \
	"be 9f\n\t" \
	" cmp %1, 4\n\t" \
	"be 6f\n" \
"4:\t" " ldub [%2], %%l1\n" \
"5:\t" "ldub [%2 + 1], %%l2\n\t" \
	"sll %%l1, 8, %%l1\n\t" \
	"tst %3\n\t" \
	"be 3f\n\t" \
	" add %%l1, %%l2, %%l1\n\t" \
	"sll %%l1, 16, %%l1\n\t" \
	"sra %%l1, 16, %%l1\n" \
"3:\t" "b 0f\n\t" \
	" st %%l1, [%0]\n" \
"6:\t" "ldub [%2 + 1], %%l2\n\t" \
	"sll %%l1, 24, %%l1\n" \
"7:\t" "ldub [%2 + 2], %%g7\n\t" \
	"sll %%l2, 16, %%l2\n" \
"8:\t" "ldub [%2 + 3], %%g1\n\t" \
	"sll %%g7, 8, %%g7\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n\t" \
	"or %%l1, %%g7, %%l1\n\t" \
	"b 0f\n\t" \
	" st %%l1, [%0]\n" \
"9:\t" "ldub [%2], %%l1\n" \
"10:\t" "ldub [%2 + 1], %%l2\n\t" \
	"sll %%l1, 24, %%l1\n" \
"11:\t" "ldub [%2 + 2], %%g7\n\t" \
	"sll %%l2, 16, %%l2\n" \
"12:\t" "ldub [%2 + 3], %%g1\n\t" \
	"sll %%g7, 8, %%g7\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n\t" \
	"or %%l1, %%g7, %%g7\n" \
"13:\t" "ldub [%2 + 4], %%l1\n\t" \
	"st %%g7, [%0]\n" \
"14:\t" "ldub [%2 + 5], %%l2\n\t" \
	"sll %%l1, 24, %%l1\n" \
"15:\t" "ldub [%2 + 6], %%g7\n\t" \
	"sll %%l2, 16, %%l2\n" \
"16:\t" "ldub [%2 + 7], %%g1\n\t" \
	"sll %%g7, 8, %%g7\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n\t" \
	"or %%l1, %%g7, %%g7\n\t" \
	"st %%g7, [%0 + 4]\n" \
"0:\n\n\t" \
	".section __ex_table,#alloc\n\t" \
	".word 4b, " #errh "\n\t" \
	".word 5b, " #errh "\n\t" \
	".word 6b, " #errh "\n\t" \
	".word 7b, " #errh "\n\t" \
	".word 8b, " #errh "\n\t" \
	".word 9b, " #errh "\n\t" \
	".word 10b, " #errh "\n\t" \
	".word 11b, " #errh "\n\t" \
	".word 12b, " #errh "\n\t" \
	".word 13b, " #errh "\n\t" \
	".word 14b, " #errh "\n\t" \
	".word 15b, " #errh "\n\t" \
	".word 16b, " #errh "\n\n\t" \
	".previous\n\t" \
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \
	: "l1", "l2", "g7", "g1", "cc"); \
})
246
/* Emulate an unaligned integer store byte-by-byte.  dst_addr is the
 * unaligned destination, size is 2/4/8, src_val points at the word(s)
 * to store, and errh is the fault label wired up via __ex_table for
 * every stb. */
#define store_common(dst_addr, size, src_val, errh) ({ \
__asm__ __volatile__ ( \
	"ld [%2], %%l1\n" \
	"cmp %1, 2\n\t" \
	"be 2f\n\t" \
	" cmp %1, 4\n\t" \
	"be 1f\n\t" \
	" srl %%l1, 24, %%l2\n\t" \
	"srl %%l1, 16, %%g7\n" \
"4:\t" "stb %%l2, [%0]\n\t" \
	"srl %%l1, 8, %%l2\n" \
"5:\t" "stb %%g7, [%0 + 1]\n\t" \
	"ld [%2 + 4], %%g7\n" \
"6:\t" "stb %%l2, [%0 + 2]\n\t" \
	"srl %%g7, 24, %%l2\n" \
"7:\t" "stb %%l1, [%0 + 3]\n\t" \
	"srl %%g7, 16, %%l1\n" \
"8:\t" "stb %%l2, [%0 + 4]\n\t" \
	"srl %%g7, 8, %%l2\n" \
"9:\t" "stb %%l1, [%0 + 5]\n" \
"10:\t" "stb %%l2, [%0 + 6]\n\t" \
	"b 0f\n" \
"11:\t" " stb %%g7, [%0 + 7]\n" \
"1:\t" "srl %%l1, 16, %%g7\n" \
"12:\t" "stb %%l2, [%0]\n\t" \
	"srl %%l1, 8, %%l2\n" \
"13:\t" "stb %%g7, [%0 + 1]\n" \
"14:\t" "stb %%l2, [%0 + 2]\n\t" \
	"b 0f\n" \
"15:\t" " stb %%l1, [%0 + 3]\n" \
"2:\t" "srl %%l1, 8, %%l2\n" \
"16:\t" "stb %%l2, [%0]\n" \
"17:\t" "stb %%l1, [%0 + 1]\n" \
"0:\n\n\t" \
	".section __ex_table,#alloc\n\t" \
	".word 4b, " #errh "\n\t" \
	".word 5b, " #errh "\n\t" \
	".word 6b, " #errh "\n\t" \
	".word 7b, " #errh "\n\t" \
	".word 8b, " #errh "\n\t" \
	".word 9b, " #errh "\n\t" \
	".word 10b, " #errh "\n\t" \
	".word 11b, " #errh "\n\t" \
	".word 12b, " #errh "\n\t" \
	".word 13b, " #errh "\n\t" \
	".word 14b, " #errh "\n\t" \
	".word 15b, " #errh "\n\t" \
	".word 16b, " #errh "\n\t" \
	".word 17b, " #errh "\n\n\t" \
	".previous\n\t" \
	: : "r" (dst_addr), "r" (size), "r" (src_val) \
	: "l1", "l2", "g7", "g1", "cc"); \
})
300
/* Store the value of register reg_num through store_common().  %g0 is
 * special: it has no saved slot, so a static zero pair is used; for a
 * doubleword store of %g0 the second word is %g1 (register 1), which
 * is fetched into zero[1]. */
#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \
	unsigned long *src_val; \
	static unsigned long zero[2] = { 0, }; \
	\
	if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
	else { \
		src_val = &zero[0]; \
		if (size == 8) \
			zero[1] = fetch_reg(1, regs); \
	} \
	store_common(dst_addr, size, src_val, errh); \
})
313
extern void smp_capture(void);
extern void smp_release(void);

/* Emulate an unaligned swap-style read-modify-write: capture the
 * other cpus and disable local interrupts to make the load+store pair
 * atomic, then exchange the register slot with memory via the two
 * emulation macros above.  Currently compiled out at both call sites
 * (#if 0). */
#define do_atomic(srcdest_reg, mem, errh) ({ \
	unsigned long flags, tmp; \
	\
	smp_capture(); \
	local_irq_save(flags); \
	tmp = *srcdest_reg; \
	do_integer_load(srcdest_reg, 4, mem, 0, errh); \
	store_common(mem, 4, &tmp, errh); \
	local_irq_restore(flags); \
	smp_release(); \
})
328
/* Step the saved program counters past the emulated instruction. */
static inline void advance(struct pt_regs *regs)
{
	regs->pc = regs->npc;
	regs->npc += 4;
}
334
/* True when bit 24 marks the instruction as an FP/coprocessor
 * load-store rather than an integer one. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & (1U << 24)) != 0;
}
339
/* The kernel-mode fixup handles integer accesses only; FP loads and
 * stores are rejected. */
static inline int ok_for_kernel(unsigned int insn)
{
	if (floating_point_load_or_store_p(insn))
		return 0;
	return 1;
}
344
/* The asm name matters: the inline-asm stub in kernel_unaligned_trap()
 * calls this symbol directly. */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

/* A byte access performed by the kernel-mode fixup itself faulted.
 * Try the exception tables first so uaccess-style callers just take
 * their error exit; with no fixup entry this is a genuine kernel bug,
 * logged Oops-style before dying. */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs [UREG_G2];
	unsigned long fixup = search_extables_range(regs->pc, &g2);

	if (!fixup) {
		unsigned long address = compute_effective_address(regs, insn);
		if(address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
		printk(KERN_ALERT " at virtual address %08lx\n",address);
		printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
			(current->mm ? current->mm->context :
			current->active_mm->context));
		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->pc = fixup;	/* resume at the fixup stub */
	regs->npc = regs->pc + 4;
	regs->u_regs [UREG_G2] = g2;
}
372
/* Trap-table entry point for unaligned accesses taken in kernel mode.
 * Integer loads and stores are emulated byte-by-byte; FP and atomic
 * (swap/ldstub) forms are unsupported and panic.  The inline asm in
 * the error branch is never executed -- it exists only to emit the
 * kernel_unaligned_trap_fault: stub that the emulation macros'
 * __ex_table entries branch to on a faulting access. */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	if(!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
		       regs->pc);
		unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");

		__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
		"mov %0, %%o0\n\t"
		"call kernel_mna_trap_fault\n\t"
		" mov %1, %%o1\n\t"
		:
		: "r" (regs), "r" (insn)
		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
		  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
	} else {
		unsigned long addr = compute_effective_address(regs, insn);

#ifdef DEBUG_MNA
		printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
		       regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
		switch(dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn),
					kernel_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn>>25)&0x1f), size,
					 (unsigned long *) addr, regs,
					 kernel_unaligned_trap_fault);
			break;
#if 0 /* unsupported */
		case both:
			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
				  (unsigned long *) addr,
				  kernel_unaligned_trap_fault);
			break;
#endif
		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		advance(regs);	/* skip the emulated instruction */
	}
}
426
427static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
428 enum direction dir)
429{
430 unsigned int reg;
431 int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
432 int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
433
434 if ((regs->pc | regs->npc) & 3)
435 return 0;
436
437 /* Must access_ok() in all the necessary places. */
438#define WINREG_ADDR(regnum) \
439 ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
440
441 reg = (insn >> 25) & 0x1f;
442 if (reg >= 16) {
443 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
444 return -EFAULT;
445 }
446 reg = (insn >> 14) & 0x1f;
447 if (reg >= 16) {
448 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
449 return -EFAULT;
450 }
451 if (!(insn & 0x2000)) {
452 reg = (insn & 0x1f);
453 if (reg >= 16) {
454 if (!access_ok(check, WINREG_ADDR(reg - 16), size))
455 return -EFAULT;
456 }
457 }
458#undef WINREG_ADDR
459 return 0;
460}
461
/* Asm-visible symbol: called from the user_unaligned_trap_fault:
 * stub emitted in user_unaligned_trap(). */
void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");

/* Deliver SIGBUS/BUS_ADRALN for an unfixable user unaligned access,
 * with a best-effort fault address recomputed from the instruction. */
void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
	info.si_trapno = 0;
	send_sig_info(SIGBUS, &info, current);
}
475
/* Trap-table entry point for unaligned accesses taken in user mode.
 * Emulation is opt-in per task via SPARC_FLAG_UNALIGNED and covers
 * only integer format-3 (op == 3) loads and stores; FP and swap
 * forms, rejected window frames, and opted-out tasks all get SIGBUS
 * through user_mna_trap_fault().  Note the check below treats a zero
 * return from ok_for_user() as "reject".  The inline asm in the
 * default branch is never executed -- it only emits the
 * user_unaligned_trap_fault: stub used by the emulation macros'
 * __ex_table entries. */
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir;

	lock_kernel();
	if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
	   (((insn >> 30) & 3) != 3))	/* op != 3: not a load/store format */
		goto kill_user;
	dir = decode_direction(insn);
	if(!ok_for_user(regs, insn, dir)) {
		goto kill_user;
	} else {
		int size = decode_access_size(insn);
		unsigned long addr;

		if(floating_point_load_or_store_p(insn)) {
			printk("User FPU load/store unaligned unsupported.\n");
			goto kill_user;
		}

		addr = compute_effective_address(regs, insn);
		switch(dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn),
					user_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn>>25)&0x1f), size,
					 (unsigned long *) addr, regs,
					 user_unaligned_trap_fault);
			break;

		case both:
#if 0 /* unsupported */
			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
				  (unsigned long *) addr,
				  user_unaligned_trap_fault);
#else
			/*
			 * This was supported in 2.4. However, we question
			 * the value of SWAP instruction across word boundaries.
			 */
			printk("Unaligned SWAP unsupported.\n");
			goto kill_user;
#endif
			break;

		default:
			unaligned_panic("Impossible user unaligned trap.");

			__asm__ __volatile__ ("\n"
"user_unaligned_trap_fault:\n\t"
			"mov %0, %%o0\n\t"
			"call user_mna_trap_fault\n\t"
			" mov %1, %%o1\n\t"
			:
			: "r" (regs), "r" (insn)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
			  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
			goto out;
		}
		advance(regs);	/* skip the emulated instruction */
		goto out;
	}

kill_user:
	user_mna_trap_fault(regs, insn);	/* SIGBUS the task */
out:
	unlock_kernel();
}
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..38938d2e63aa
--- /dev/null
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -0,0 +1,103 @@
/* ld script to make SparcLinux kernel */

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
OUTPUT_ARCH(sparc)
ENTRY(_start)
/* jiffies is an alias for the low word of the 64-bit jiffies_64
 * (big-endian: low word sits 4 bytes in). */
jiffies = jiffies_64 + 4;
SECTIONS
{
  . = 0x10000 + SIZEOF_HEADERS;
  /* Kernel text is linked at the fixed virtual base 0xf0004000. */
  .text 0xf0004000 :
  {
    *(.text)
    SCHED_TEXT
    LOCK_TEXT
    *(.gnu.warning)
  } =0
  _etext = .;
  PROVIDE (etext = .);
  RODATA
  .data :
  {
    *(.data)
    CONSTRUCTORS
  }
  .data1 : { *(.data1) }
  _edata = .;
  PROVIDE (edata = .);
  /* uaccess fixup stubs and the exception table that points at them */
  __start___fixup = .;
  .fixup : { *(.fixup) }
  __stop___fixup = .;
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  /* Everything between __init_begin and __init_end can be freed
   * after boot. */
  . = ALIGN(4096);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  __init_text_end = .;
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  /* initcalls, ordered by level 1..7 */
  __initcall_start = .;
  .initcall.init : {
	*(.initcall1.init)
	*(.initcall2.init)
	*(.initcall3.init)
	*(.initcall4.init)
	*(.initcall5.init)
	*(.initcall6.init)
	*(.initcall7.init)
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(4096);
  __init_end = .;
  . = ALIGN(32);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  __bss_start = .;
  .sbss : { *(.sbss) *(.scommon) }
  .bss :
  {
   *(.dynbss)
   *(.bss)
   *(COMMON)
  }
  _end = . ;
  PROVIDE (end = .);
  /* Stabs debugging sections. */
  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0 : { *(.comment) }
  .debug 0 : { *(.debug) }
  .debug_srcinfo 0 : { *(.debug_srcinfo) }
  .debug_aranges 0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  .debug_sfnames 0 : { *(.debug_sfnames) }
  .line 0 : { *(.line) }
  /* The kernel proper never unloads, so exit code is discarded. */
  /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
new file mode 100644
index 000000000000..9cc93eaa4abf
--- /dev/null
+++ b/arch/sparc/kernel/windows.c
@@ -0,0 +1,127 @@
1/* windows.c: Routines to deal with register window management
2 * at the C-code level.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/string.h>
10#include <linux/mm.h>
11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13
14#include <asm/uaccess.h>
15
/* Do save's until all user register windows are out of the cpu. */
void flush_user_windows(void)
{
	/* The counter lives in a fixed global register (%g5) so that it
	 * survives the window rotation done by the save/restore pairs
	 * in the asm below.
	 */
	register int ctr asm("g5");

	ctr = 0;
	__asm__ __volatile__(
		/* Phase 1: while the uwinmask word (read via %g6, the
		 * per-cpu current thread_info pointer, at offset
		 * TI_UWINMASK) is non-zero, user windows are still in the
		 * register file.  Each 'save' rotates to the next window,
		 * forcing overflow traps to spill them; ctr counts the
		 * saves performed.
		 */
		"\n1:\n\t"
		"ld [%%g6 + %2], %%g4\n\t"
		"orcc %%g0, %%g4, %%g0\n\t"
		"add %0, 1, %0\n\t"
		"bne 1b\n\t"
		" save %%sp, -64, %%sp\n"
		/* Phase 2: unwind with exactly as many restores, ending up
		 * back in the window we started from.
		 */
		"2:\n\t"
		"subcc %0, 1, %0\n\t"
		"bne 2b\n\t"
		" restore %%g0, %%g0, %%g0\n"
		: "=&r" (ctr)
		: "0" (ctr),
		  "i" ((const unsigned long)TI_UWINMASK)
		: "g4", "cc");
}
38
39static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *tp)
40{
41 int i;
42
43 for(i = first_win; i < last_win; i++) {
44 tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1];
45 memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window));
46 }
47}
48
49/* Place as many of the user's current register windows
50 * on the stack that we can. Even if the %sp is unaligned
51 * we still copy the window there, the only case that we don't
52 * succeed is if the %sp points to a bum mapping altogether.
53 * setup_frame() and do_sigreturn() use this before shifting
54 * the user stack around. Future instruction and hardware
55 * bug workaround routines will need this functionality as
56 * well.
57 */
58void synchronize_user_stack(void)
59{
60 struct thread_info *tp = current_thread_info();
61 int window;
62
63 flush_user_windows();
64 if(!tp->w_saved)
65 return;
66
67 /* Ok, there is some dirty work to do. */
68 for(window = tp->w_saved - 1; window >= 0; window--) {
69 unsigned long sp = tp->rwbuf_stkptrs[window];
70
71 /* Ok, let it rip. */
72 if (copy_to_user((char __user *) sp, &tp->reg_window[window],
73 sizeof(struct reg_window)))
74 continue;
75
76 shift_window_buffer(window, tp->w_saved - 1, tp);
77 tp->w_saved--;
78 }
79}
80
#if 0
/* An optimization. */
static inline void copy_aligned_window(void *dest, const void *src)
{
	/* Copy one 64-byte register window as eight doubleword
	 * (ldd/std) pairs.  Each ldd into %g2 or %g4 also fills the odd
	 * partner register (%g3/%g5), which is why all four appear in
	 * the clobber list.
	 *
	 * NOTE(review): dead code (#if 0).  If this is ever revived it
	 * should also declare a "memory" clobber so the compiler cannot
	 * cache *dest/*src values across the asm — confirm before use.
	 */
	__asm__ __volatile__("ldd [%1], %%g2\n\t"
			     "ldd [%1 + 0x8], %%g4\n\t"
			     "std %%g2, [%0]\n\t"
			     "std %%g4, [%0 + 0x8]\n\t"
			     "ldd [%1 + 0x10], %%g2\n\t"
			     "ldd [%1 + 0x18], %%g4\n\t"
			     "std %%g2, [%0 + 0x10]\n\t"
			     "std %%g4, [%0 + 0x18]\n\t"
			     "ldd [%1 + 0x20], %%g2\n\t"
			     "ldd [%1 + 0x28], %%g4\n\t"
			     "std %%g2, [%0 + 0x20]\n\t"
			     "std %%g4, [%0 + 0x28]\n\t"
			     "ldd [%1 + 0x30], %%g2\n\t"
			     "ldd [%1 + 0x38], %%g4\n\t"
			     "std %%g2, [%0 + 0x30]\n\t"
			     "std %%g4, [%0 + 0x38]\n\t" : :
			     "r" (dest), "r" (src) :
			     "g2", "g3", "g4", "g5");
}
#endif
105
106/* Try to push the windows in a threads window buffer to the
107 * user stack. Unaligned %sp's are not allowed here.
108 */
109
110void try_to_clear_window_buffer(struct pt_regs *regs, int who)
111{
112 struct thread_info *tp = current_thread_info();
113 int window;
114
115 lock_kernel();
116 flush_user_windows();
117 for(window = 0; window < tp->w_saved; window++) {
118 unsigned long sp = tp->rwbuf_stkptrs[window];
119
120 if ((sp & 7) ||
121 copy_to_user((char __user *) sp, &tp->reg_window[window],
122 sizeof(struct reg_window)))
123 do_exit(SIGILL);
124 }
125 tp->w_saved = 0;
126 unlock_kernel();
127}
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
new file mode 100644
index 000000000000..083b1215d515
--- /dev/null
+++ b/arch/sparc/kernel/wof.S
@@ -0,0 +1,428 @@
1/* $Id: wof.S,v 1.40 2000/01/08 16:38:18 anton Exp $
2 * wof.S: Sparc window overflow handler.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/contregs.h>
8#include <asm/page.h>
9#include <asm/ptrace.h>
10#include <asm/psr.h>
11#include <asm/smp.h>
12#include <asm/asi.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
17/* WARNING: This routine is hairy and _very_ complicated, but it
18 * must be as fast as possible as it handles the allocation
19 * of register windows to the user and kernel. If you touch
20 * this code be _very_ careful as many other pieces of the
21 * kernel depend upon how this code behaves. You have been
22 * duly warned...
23 */
24
/* We define macro's for registers which have a fixed
 * meaning throughout this entire routine. The 'T' in
 * the comments means that the register can only be
 * accessed when in the 'trap' window, 'G' means
 * accessible in any window. Do not change these registers
 * after they have been set, until you are ready to return
 * from the trap.
 */
#define t_psr		l0 /* %psr at trap time			T */
#define t_pc		l1 /* PC for trap return		T */
#define t_npc		l2 /* NPC for trap return		T */
#define t_wim		l3 /* %wim at trap time			T */
#define saved_g5	l5 /* Global save register		T */
#define saved_g6	l6 /* Global save register		T */
#define curptr		g6 /* Gets set to 'current' then stays	G */

/* Now registers whose values can change within the handler. */
#define twin_tmp	l4 /* Temp reg, only usable in trap window	T */
#define glob_tmp	g5 /* Global temporary reg, usable anywhere	G */

	.text
	.align	4
	/* BEGINNING OF PATCH INSTRUCTIONS */
	/* On a 7-window Sparc the boot code patches spnwin_*
	 * instructions with the following ones.
	 */
	.globl	spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win
spnwin_patch1_7win:	sll	%t_wim, 6, %glob_tmp
spnwin_patch2_7win:	and	%glob_tmp, 0x7f, %glob_tmp
spnwin_patch3_7win:	and	%twin_tmp, 0x7f, %twin_tmp
	/* END OF PATCH INSTRUCTIONS */

	/* The trap entry point has done the following:
	 *
	 * rd	%psr, %l0
	 * rd	%wim, %l3
	 * b	spill_window_entry
	 * andcc	%l0, PSR_PS, %g0
	 */

	/* Datum current_thread_info->uwinmask contains at all times a bitmask
	 * where if any user windows are active, at least one bit will
	 * be set in the mask. If no user windows are active, the bitmask
	 * will be all zeroes.
	 */
	.globl	spill_window_entry
	.globl	spnwin_patch1, spnwin_patch2, spnwin_patch3
spill_window_entry:
	/* LOCATION: Trap Window */

	mov	%g5, %saved_g5		! save away global temp register
	mov	%g6, %saved_g6		! save away 'current' ptr register

	/* Compute what the new %wim will be if we save the
	 * window properly in this trap handler.
	 *
	 * newwim = ((%wim>>1) | (%wim<<(nwindows - 1)));
	 */
	srl	%t_wim, 0x1, %twin_tmp
spnwin_patch1:	sll	%t_wim, 7, %glob_tmp
	or	%glob_tmp, %twin_tmp, %glob_tmp
spnwin_patch2:	and	%glob_tmp, 0xff, %glob_tmp

	/* The trap entry point has set the condition codes
	 * up for us to see if this is from user or kernel.
	 * Get the load of 'curptr' out of the way.
	 */
	LOAD_CURRENT(curptr, twin_tmp)

	andcc	%t_psr, PSR_PS, %g0
	be,a	spwin_fromuser		! all user wins, branch
	 save	%g0, %g0, %g0		! Go where saving will occur

	/* See if any user windows are active in the set. */
	ld	[%curptr + TI_UWINMASK], %twin_tmp	! grab win mask
	orcc	%g0, %twin_tmp, %g0			! check for set bits
	bne	spwin_exist_uwins			! yep, there are some
	 andn	%twin_tmp, %glob_tmp, %twin_tmp		! compute new uwinmask

	/* Save into the window which must be saved and do it.
	 * Basically if we are here, this means that we trapped
	 * from kernel mode with only kernel windows in the register
	 * file.
	 */
	save	%g0, %g0, %g0		! save into the window to stash away
	wr	%glob_tmp, 0x0, %wim	! set new %wim, this is safe now

spwin_no_userwins_from_kernel:
	/* LOCATION: Window to be saved */

	STORE_WINDOW(sp)		! stash the window
	restore	%g0, %g0, %g0		! go back into trap window

	/* LOCATION: Trap window */
	mov	%saved_g5, %g5		! restore %glob_tmp
	mov	%saved_g6, %g6		! restore %curptr
	wr	%t_psr, 0x0, %psr	! restore condition codes in %psr
	WRITE_PAUSE			! waste some time
	jmp	%t_pc			! Return from trap
	rett	%t_npc			! we are done

spwin_exist_uwins:
	/* LOCATION: Trap window */

	/* Wow, user windows have to be dealt with, this is dirty
	 * and messy as all hell. And difficult to follow if you
	 * are approaching the infamous register window trap handling
	 * problem for the first time. DON'T LOOK!
	 *
	 * Note that how the execution path works out, the new %wim
	 * will be left for us in the global temporary register,
	 * %glob_tmp. We cannot set the new %wim first because we
	 * need to save into the appropriate window without inducing
	 * a trap (traps are off, we'd get a watchdog wheee)...
	 * But first, store the new user window mask calculated
	 * above.
	 */
	st	%twin_tmp, [%curptr + TI_UWINMASK]
	save	%g0, %g0, %g0		! Go to where the saving will occur

spwin_fromuser:
	/* LOCATION: Window to be saved */
	wr	%glob_tmp, 0x0, %wim	! Now it is safe to set new %wim

	/* LOCATION: Window to be saved */

	/* This instruction branches to a routine which will check
	 * the validity of the users stack pointer by whatever means
	 * are necessary. This means that this is architecture
	 * specific and thus this branch instruction will need to
	 * be patched at boot time once the machine type is known.
	 * This routine _shall not_ touch %curptr under any
	 * circumstances whatsoever! It will branch back to the
	 * label 'spwin_good_ustack' if the stack is ok but still
	 * needs to be dumped (SRMMU for instance will not need to
	 * do this) or 'spwin_finish_up' if the stack is ok and the
	 * registers have already been saved. If the stack is found
	 * to be bogus for some reason the routine shall branch to
	 * the label 'spwin_user_stack_is_bolixed' which will take
	 * care of things at that point.
	 */
	.globl	spwin_mmu_patchme
spwin_mmu_patchme:	b	spwin_sun4c_stackchk
	 andcc	%sp, 0x7, %g0

spwin_good_ustack:
	/* LOCATION: Window to be saved */

	/* The users stack is ok and we can safely save it at
	 * %sp.
	 */
	STORE_WINDOW(sp)

spwin_finish_up:
	restore	%g0, %g0, %g0		/* Back to trap window. */

	/* LOCATION: Trap window */

	/* We have spilled successfully, and we have properly stored
	 * the appropriate window onto the stack.
	 */

	/* Restore saved globals */
	mov	%saved_g5, %g5
	mov	%saved_g6, %g6

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE
	jmp	%t_pc
	rett	%t_npc

spwin_user_stack_is_bolixed:
	/* LOCATION: Window to be saved */

	/* Wheee, user has trashed his/her stack. We have to decide
	 * how to proceed based upon whether we came from kernel mode
	 * or not. If we came from kernel mode, toss the window into
	 * a special buffer and proceed, the kernel _needs_ a window
	 * and we could be in an interrupt handler so timing is crucial.
	 * If we came from user land we build a full stack frame and call
	 * c-code to gun down the process.
	 */
	rd	%psr, %glob_tmp
	andcc	%glob_tmp, PSR_PS, %g0
	bne	spwin_bad_ustack_from_kernel
	 nop

	/* Oh well, throw this one window into the per-task window
	 * buffer, the first one.
	 */
	st	%sp, [%curptr + TI_RWIN_SPTRS]
	STORE_WINDOW(curptr + TI_REG_WINDOW)
	restore	%g0, %g0, %g0

	/* LOCATION: Trap Window */

	/* Back in the trap window, update winbuffer save count. */
	mov	1, %twin_tmp
	st	%twin_tmp, [%curptr + TI_W_SAVED]

	/* Compute new user window mask. What we are basically
	 * doing is taking two windows, the invalid one at trap
	 * time and the one we attempted to throw onto the users
	 * stack, and saying that everything else is an ok user
	 * window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
	 */
	rd	%wim, %twin_tmp
	or	%twin_tmp, %t_wim, %twin_tmp
	not	%twin_tmp
spnwin_patch3:	and	%twin_tmp, 0xff, %twin_tmp	! patched on 7win Sparcs
	st	%twin_tmp, [%curptr + TI_UWINMASK]

/* Offset from thread_info to the pt_regs area at the top of the kernel
 * stack: THREAD_SIZE minus room for pt_regs and one call frame.
 */
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)

	sethi	%hi(STACK_OFFSET), %sp
	or	%sp, %lo(STACK_OFFSET), %sp
	add	%curptr, %sp, %sp

	/* Restore the saved globals and build a pt_regs frame. */
	mov	%saved_g5, %g5
	mov	%saved_g6, %g6
	STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

	sethi	%hi(STACK_OFFSET), %g6
	or	%g6, %lo(STACK_OFFSET), %g6
	sub	%sp, %g6, %g6		! curptr

	/* Turn on traps and call c-code to deal with it. */
	wr	%t_psr, PSR_ET, %psr
	nop
	call	window_overflow_fault
	 nop

	/* Return from trap if C-code actually fixes things, if it
	 * doesn't then we never get this far as the process will
	 * be given the look of death from Commander Peanut.
	 */
	b	ret_trap_entry
	 clr	%l6

spwin_bad_ustack_from_kernel:
	/* LOCATION: Window to be saved */

	/* The kernel provoked a spill window trap, but the window we
	 * need to save is a user one and the process has trashed its
	 * stack pointer. We need to be quick, so we throw it into
	 * a per-process window buffer until we can properly handle
	 * this later on.
	 */
	SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
	restore	%g0, %g0, %g0

	/* LOCATION: Trap window */

	/* Restore globals, condition codes in the %psr and
	 * return from trap. Note, restoring %g6 when returning
	 * to kernel mode is not necessary these days. ;-)
	 */
	mov	%saved_g5, %g5
	mov	%saved_g6, %g6

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

/* Undefine the register macros which would only cause trouble
 * if used below. This helps find 'stupid' coding errors that
 * produce 'odd' behavior. The routines below are allowed to
 * make usage of glob_tmp and t_psr so we leave them defined.
 */
#undef twin_tmp
#undef curptr
#undef t_pc
#undef t_npc
#undef t_wim
#undef saved_g5
#undef saved_g6
304
305/* Now come the per-architecture window overflow stack checking routines.
306 * As noted above %curptr cannot be touched by this routine at all.
307 */
308
	.globl	spwin_sun4c_stackchk
spwin_sun4c_stackchk:
	/* LOCATION: Window to be saved on the stack */

	/* See if the stack is in the address space hole but first,
	 * check results of callers andcc %sp, 0x7, %g0
	 */
	be	1f
	 sra	%sp, 29, %glob_tmp

	/* %sp was not 8-byte aligned: pre-load %psr and enter the
	 * failure path just past its first instruction (which would
	 * otherwise redo this same rd %psr).
	 */
	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4
	 nop

	/* A valid sun4c user address has its top three bits all clear
	 * or all set; anything in between lies in the VMA hole, so
	 * ((bits + 1) & ~1) must be zero.
	 */
1:
	add	%glob_tmp, 0x1, %glob_tmp
	andncc	%glob_tmp, 0x1, %g0
	be	1f
	 and	%sp, 0xfff, %glob_tmp	! delay slot

	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4
	 nop

	/* See if our dump area will be on more than one
	 * page.  The window dump is 64 bytes; if the last doubleword
	 * (%sp + 0x38) crosses a 4K page boundary we must check both
	 * pages.
	 */
1:
	add	%glob_tmp, 0x38, %glob_tmp
	andncc	%glob_tmp, 0xff8, %g0
	be	spwin_sun4c_onepage	! only one page to check
	 lda	[%sp] ASI_PTE, %glob_tmp	! have to check first page anyways

spwin_sun4c_twopages:
	/* Is first page ok permission wise?  The top three PTE bits
	 * equal to 0x6 indicate a user-writable mapping here —
	 * NOTE(review): confirm against the sun4c PTE bit layout.
	 */
	srl	%glob_tmp, 29, %glob_tmp
	cmp	%glob_tmp, 0x6
	be	1f
	 add	%sp, 0x38, %glob_tmp	/* Is second page in vma hole? */

	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4
	 nop

1:
	/* Same top-three-bits VMA hole test, now on the address of the
	 * last doubleword of the dump area.
	 */
	sra	%glob_tmp, 29, %glob_tmp
	add	%glob_tmp, 0x1, %glob_tmp
	andncc	%glob_tmp, 0x1, %g0
	be	1f
	 add	%sp, 0x38, %glob_tmp

	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4
	 nop

1:
	lda	[%glob_tmp] ASI_PTE, %glob_tmp

spwin_sun4c_onepage:
	srl	%glob_tmp, 29, %glob_tmp
	cmp	%glob_tmp, 0x6	! can user write to it?
	be	spwin_good_ustack	! success
	 nop

	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4
	 nop
376
	/* This is a generic SRMMU routine. As far as I know this
	 * works for all current v8/srmmu implementations, we'll
	 * see...
	 */
	.globl	spwin_srmmu_stackchk
spwin_srmmu_stackchk:
	/* LOCATION: Window to be saved on the stack */

	/* Because of SMP concerns and speed we play a trick.
	 * We disable fault traps in the MMU control register,
	 * execute the stores, then check the fault registers
	 * to see what happens. I can hear Linus now
	 * "disgusting... broken hardware...".
	 *
	 * But first, check to see if the users stack has ended
	 * up in kernel vma, then we would succeed for the 'wrong'
	 * reason... ;( Note that the 'sethi' below assumes the
	 * kernel is page aligned, which should always be the case.
	 */
	/* Check results of callers andcc %sp, 0x7, %g0 */
	bne	spwin_user_stack_is_bolixed
	 sethi	%hi(PAGE_OFFSET), %glob_tmp
	cmp	%glob_tmp, %sp
	bleu	spwin_user_stack_is_bolixed
	 mov	AC_M_SFSR, %glob_tmp

	/* Clear the fault status and turn on the no_fault bit. */
	lda	[%glob_tmp] ASI_M_MMUREGS, %g0	! eat SFSR

	lda	[%g0] ASI_M_MMUREGS, %glob_tmp	! read MMU control
	or	%glob_tmp, 0x2, %glob_tmp	! or in no_fault bit
	sta	%glob_tmp, [%g0] ASI_M_MMUREGS	! set it

	/* Dump the registers and cross fingers. */
	STORE_WINDOW(sp)

	/* Clear the no_fault bit and check the status. */
	andn	%glob_tmp, 0x2, %glob_tmp
	sta	%glob_tmp, [%g0] ASI_M_MMUREGS

	/* Reading SFAR then SFSR also clears any latched fault state. */
	mov	AC_M_SFAR, %glob_tmp
	lda	[%glob_tmp] ASI_M_MMUREGS, %g0

	mov	AC_M_SFSR, %glob_tmp
	lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp
	andcc	%glob_tmp, 0x2, %g0	! did we fault?
	/* On success the annulled delay slot performs the 'restore'
	 * back to the trap window, and the '+ 0x4' target skips the
	 * identical restore that is spwin_finish_up's first insn.
	 */
	be,a	spwin_finish_up + 0x4	! cool beans, success
	 restore %g0, %g0, %g0

	rd	%psr, %glob_tmp
	b	spwin_user_stack_is_bolixed + 0x4	! we faulted, ugh
	 nop
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
new file mode 100644
index 000000000000..d1a266bf103a
--- /dev/null
+++ b/arch/sparc/kernel/wuf.S
@@ -0,0 +1,360 @@
1/* $Id: wuf.S,v 1.39 2000/01/08 16:38:18 anton Exp $
2 * wuf.S: Window underflow trap handler for the Sparc.
3 *
4 * Copyright (C) 1995 David S. Miller
5 */
6
7#include <asm/contregs.h>
8#include <asm/page.h>
9#include <asm/ptrace.h>
10#include <asm/psr.h>
11#include <asm/smp.h>
12#include <asm/asi.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
/* Just like the overflow handler we define macros for registers
 * with fixed meanings in this routine.
 */
#define t_psr		l0
#define t_pc		l1
#define t_npc		l2
#define t_wim		l3
/* Don't touch the above registers or else you die horribly... */

/* Now macros for the available scratch registers in this routine. */
#define twin_tmp1	l4
#define twin_tmp2	l5

#define curptr		g6

	.text
	.align	4

	/* The trap entry point has executed the following:
	 *
	 * rd	%psr, %l0
	 * rd	%wim, %l3
	 * b	fill_window_entry
	 * andcc	%l0, PSR_PS, %g0
	 */

	/* Datum current_thread_info->uwinmask contains at all times a bitmask
	 * where if any user windows are active, at least one bit will
	 * be set in the mask. If no user windows are active, the bitmask
	 * will be all zeroes.
	 */

	/* To get an idea of what has just happened to cause this
	 * trap take a look at this diagram:
	 *
	 * 1 2 3 4     <--  Window number
	 * ----------
	 * T O W I     <--  Symbolic name
	 *
	 * O == the window that execution was in when
	 *      the restore was attempted
	 *
	 * T == the trap itself has save'd us into this
	 *      window
	 *
	 * W == this window is the one which is now invalid
	 *      and must be made valid plus loaded from the
	 *      stack
	 *
	 * I == this window will be the invalid one when we
	 *      are done and return from trap if successful
	 */

	/* BEGINNING OF PATCH INSTRUCTIONS */

	/* On 7-window Sparc the boot code patches fnwin_patch1
	 * with the following instruction.
	 */
	.globl	fnwin_patch1_7win, fnwin_patch2_7win
fnwin_patch1_7win:	srl	%t_wim, 6, %twin_tmp2
fnwin_patch2_7win:	and	%twin_tmp1, 0x7f, %twin_tmp1
	/* END OF PATCH INSTRUCTIONS */

	.globl	fill_window_entry, fnwin_patch1, fnwin_patch2
fill_window_entry:
	/* LOCATION: Window 'T' */

	/* Compute what the new %wim is going to be if we retrieve
	 * the proper window off of the stack: rotate %wim left by one,
	 * i.e. newwim = (%wim << 1) | (%wim >> (nwindows - 1)).
	 */
	sll	%t_wim, 1, %twin_tmp1
fnwin_patch1:	srl	%t_wim, 7, %twin_tmp2
	or	%twin_tmp1, %twin_tmp2, %twin_tmp1
fnwin_patch2:	and	%twin_tmp1, 0xff, %twin_tmp1

	wr	%twin_tmp1, 0x0, %wim	/* Make window 'I' invalid */

	andcc	%t_psr, PSR_PS, %g0
	be	fwin_from_user
	 restore	%g0, %g0, %g0	/* Restore to window 'O' */

	/* Trapped from kernel, we trust that the kernel does not
	 * 'over restore' sorta speak and just grab the window
	 * from the stack and return. Easy enough.
	 */
fwin_from_kernel:
	/* LOCATION: Window 'O' */

	restore	%g0, %g0, %g0

	/* LOCATION: Window 'W' */

	LOAD_WINDOW(sp)	/* Load it up */

	/* Spin the wheel... */
	save	%g0, %g0, %g0
	save	%g0, %g0, %g0
	/* I'd like to buy a vowel please... */

	/* LOCATION: Window 'T' */

	/* Now preserve the condition codes in %psr, pause, and
	 * return from trap. This is the simplest case of all.
	 */
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

fwin_from_user:
	/* LOCATION: Window 'O' */

	restore	%g0, %g0, %g0	/* Restore to window 'W' */

	/* LOCATION: Window 'W' */

	/* Branch to the architecture specific stack validation
	 * routine. They can be found below...
	 */
	.globl	fwin_mmu_patchme
fwin_mmu_patchme:	b	sun4c_fwin_stackchk
	 andcc	%sp, 0x7, %g0

/* Offset from thread_info to the pt_regs area at the top of the kernel
 * stack: THREAD_SIZE minus room for pt_regs and one call frame.
 */
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)

fwin_user_stack_is_bolixed:
	/* LOCATION: Window 'W' */

	/* Place a pt_regs frame on the kernel stack, save back
	 * to the trap window and call c-code to deal with this.
	 */
	LOAD_CURRENT(l4, l5)

	sethi	%hi(STACK_OFFSET), %l5
	or	%l5, %lo(STACK_OFFSET), %l5
	add	%l4, %l5, %l5

	/* Store globals into pt_regs frame. */
	STORE_PT_GLOBALS(l5)
	STORE_PT_YREG(l5, g3)

	/* Save current in a global while we change windows. */
	mov	%l4, %curptr

	save	%g0, %g0, %g0

	/* LOCATION: Window 'O' */

	rd	%psr, %g3	/* Read %psr in live user window */
	mov	%fp, %g4	/* Save bogus frame pointer. */

	save	%g0, %g0, %g0

	/* LOCATION: Window 'T' */

	sethi	%hi(STACK_OFFSET), %l5
	or	%l5, %lo(STACK_OFFSET), %l5
	add	%curptr, %l5, %sp

	/* Build rest of pt_regs. */
	STORE_PT_INS(sp)
	STORE_PT_PRIV(sp, t_psr, t_pc, t_npc)

	/* re-set trap time %wim value */
	wr	%t_wim, 0x0, %wim

	/* Fix users window mask and buffer save count.
	 * %g3 holds the user window's %psr; 'sll' uses only its low
	 * five bits, i.e. the CWP field, so %g5 = 1 << user_cwp.
	 */
	mov	0x1, %g5
	sll	%g5, %g3, %g5
	st	%g5, [%curptr + TI_UWINMASK]	! one live user window still
	st	%g0, [%curptr + TI_W_SAVED]	! no windows in the buffer

	wr	%t_psr, PSR_ET, %psr	! enable traps
	nop
	call	window_underflow_fault
	 mov	%g4, %o0

	b	ret_trap_entry
	 clr	%l6

fwin_user_stack_is_ok:
	/* LOCATION: Window 'W' */

	/* The users stack area is kosher and mapped, load the
	 * window and fall through to the finish up routine.
	 */
	LOAD_WINDOW(sp)

	/* Round and round she goes... */
	save	%g0, %g0, %g0	/* Save to window 'O' */
	save	%g0, %g0, %g0	/* Save to window 'T' */
	/* Where she'll trap nobody knows... */

	/* LOCATION: Window 'T' */

fwin_user_finish_up:
	/* LOCATION: Window 'T' */

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc
221
	/* Here come the architecture specific checks for stack
	 * mappings. Note that unlike the window overflow handler
224 * we only need to check whether the user can read from
225 * the appropriate addresses. Also note that we are in
226 * an invalid window which will be loaded, and this means
227 * that until we actually load the window up we are free
228 * to use any of the local registers contained within.
229 *
	 * On success these routines branch to fwin_user_stack_is_ok
231 * if the area at %sp is user readable and the window still
232 * needs to be loaded, else fwin_user_finish_up if the
233 * routine has done the loading itself. On failure (bogus
234 * user stack) the routine shall branch to the label called
235 * fwin_user_stack_is_bolixed.
236 *
237 * Contrary to the arch-specific window overflow stack
238 * check routines in wof.S, these routines are free to use
239 * any of the local registers they want to as this window
240 * does not belong to anyone at this point, however the
241 * outs and ins are still verboten as they are part of
242 * 'someone elses' window possibly.
243 */
244
	.align	4
	.globl	sun4c_fwin_stackchk
sun4c_fwin_stackchk:
	/* LOCATION: Window 'W' */

	/* Caller did 'andcc %sp, 0x7, %g0' */
	be	1f
	 and	%sp, 0xfff, %l0		! delay slot

	b,a	fwin_user_stack_is_bolixed

	/* See if we have to check the sanity of one page or two.
	 * The vma-hole test: a valid sun4c user address has its top
	 * three bits all clear or all set, so ((bits + 1) & ~1) == 0.
	 * The page test: the last doubleword of the 64-byte window is
	 * at %sp + 0x38; if that crosses a 4K boundary, two pages.
	 */
1:
	add	%l0, 0x38, %l0
	sra	%sp, 29, %l5
	add	%l5, 0x1, %l5
	andncc	%l5, 0x1, %g0
	be	1f
	 andncc	%l0, 0xff8, %g0

	b,a	fwin_user_stack_is_bolixed	/* %sp is in vma hole, yuck */

1:
	be	sun4c_fwin_onepage	/* Only one page to check */
	 lda	[%sp] ASI_PTE, %l1
sun4c_fwin_twopages:
	add	%sp, 0x38, %l0
	sra	%l0, 29, %l5
	add	%l5, 0x1, %l5
	andncc	%l5, 0x1, %g0
	be	1f
	 lda	[%l0] ASI_PTE, %l1

	b,a	fwin_user_stack_is_bolixed	/* Second page in vma hole */

1:
	/* Bit 0x4 of the top three PTE bits must be set for the user
	 * to read the page — NOTE(review): confirm against the sun4c
	 * PTE bit layout.
	 */
	srl	%l1, 29, %l1
	andcc	%l1, 0x4, %g0
	bne	sun4c_fwin_onepage
	 lda	[%sp] ASI_PTE, %l1

	b,a	fwin_user_stack_is_bolixed	/* Second page has bad perms */

sun4c_fwin_onepage:
	srl	%l1, 29, %l1
	andcc	%l1, 0x4, %g0
	bne	fwin_user_stack_is_ok
	 nop

	/* A page had bad page permissions, losing... */
	b,a	fwin_user_stack_is_bolixed
296
	.globl	srmmu_fwin_stackchk
srmmu_fwin_stackchk:
	/* LOCATION: Window 'W' */

	/* Caller did 'andcc %sp, 0x7, %g0' */
	bne	fwin_user_stack_is_bolixed
	 sethi	%hi(PAGE_OFFSET), %l5

	/* Check if the users stack is in kernel vma, then our
	 * trial and error technique below would succeed for
	 * the 'wrong' reason.
	 */
	mov	AC_M_SFSR, %l4
	cmp	%l5, %sp
	bleu	fwin_user_stack_is_bolixed
	 lda	[%l4] ASI_M_MMUREGS, %g0	! clear fault status

	/* The technique is, turn off faults on this processor,
	 * just let the load rip, then check the sfsr to see if
	 * a fault did occur. Then we turn on fault traps again
	 * and branch conditionally based upon what happened.
	 */
	lda	[%g0] ASI_M_MMUREGS, %l5	! read mmu-ctrl reg
	or	%l5, 0x2, %l5			! turn on no-fault bit
	sta	%l5, [%g0] ASI_M_MMUREGS	! store it

	/* Cross fingers and go for it. */
	LOAD_WINDOW(sp)

	/* A penny 'saved'... */
	save	%g0, %g0, %g0
	save	%g0, %g0, %g0
	/* Is a BADTRAP earned... */

	/* LOCATION: Window 'T' */

	lda	[%g0] ASI_M_MMUREGS, %twin_tmp1	! load mmu-ctrl again
	andn	%twin_tmp1, 0x2, %twin_tmp1	! clear no-fault bit
	sta	%twin_tmp1, [%g0] ASI_M_MMUREGS	! store it

	mov	AC_M_SFAR, %twin_tmp2
	lda	[%twin_tmp2] ASI_M_MMUREGS, %g0	! read fault address

	mov	AC_M_SFSR, %twin_tmp2
	lda	[%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2	! read fault status
	andcc	%twin_tmp2, 0x2, %g0	! did fault occur?

	bne	1f	! yep, cleanup
	 nop

	/* No fault: restore the condition codes ourselves and join
	 * the common return path; '+ 0x4' skips its leading
	 * 'wr %t_psr' which we have just done here.
	 */
	wr	%t_psr, 0x0, %psr
	nop
	b	fwin_user_finish_up + 0x4
	 nop

	/* Did I ever tell you about my window lobotomy?
	 * anyways... fwin_user_stack_is_bolixed expects
	 * to be in window 'W' so make it happy or else
	 * we watchdog badly.
	 */
1:
	restore	%g0, %g0, %g0
	b	fwin_user_stack_is_bolixed	! oh well
	 restore %g0, %g0, %g0
diff --git a/arch/sparc/lib/COPYING.LIB b/arch/sparc/lib/COPYING.LIB
new file mode 100644
index 000000000000..eb685a5ec981
--- /dev/null
+++ b/arch/sparc/lib/COPYING.LIB
@@ -0,0 +1,481 @@
1 GNU LIBRARY GENERAL PUBLIC LICENSE
2 Version 2, June 1991
3
4 Copyright (C) 1991 Free Software Foundation, Inc.
5 675 Mass Ave, Cambridge, MA 02139, USA
6 Everyone is permitted to copy and distribute verbatim copies
7 of this license document, but changing it is not allowed.
8
9[This is the first released version of the library GPL. It is
10 numbered 2 because it goes with version 2 of the ordinary GPL.]
11
12 Preamble
13
14 The licenses for most software are designed to take away your
15freedom to share and change it. By contrast, the GNU General Public
16Licenses are intended to guarantee your freedom to share and change
17free software--to make sure the software is free for all its users.
18
19 This license, the Library General Public License, applies to some
20specially designated Free Software Foundation software, and to any
21other libraries whose authors decide to use it. You can use it for
22your libraries, too.
23
24 When we speak of free software, we are referring to freedom, not
25price. Our General Public Licenses are designed to make sure that you
26have the freedom to distribute copies of free software (and charge for
27this service if you wish), that you receive source code or can get it
28if you want it, that you can change the software or use pieces of it
29in new free programs; and that you know you can do these things.
30
31 To protect your rights, we need to make restrictions that forbid
32anyone to deny you these rights or to ask you to surrender the rights.
33These restrictions translate to certain responsibilities for you if
34you distribute copies of the library, or if you modify it.
35
36 For example, if you distribute copies of the library, whether gratis
37or for a fee, you must give the recipients all the rights that we gave
38you. You must make sure that they, too, receive or can get the source
39code. If you link a program with the library, you must provide
40complete object files to the recipients so that they can relink them
41with the library, after making changes to the library and recompiling
42it. And you must show them these terms so they know their rights.
43
44 Our method of protecting your rights has two steps: (1) copyright
45the library, and (2) offer you this license which gives you legal
46permission to copy, distribute and/or modify the library.
47
48 Also, for each distributor's protection, we want to make certain
49that everyone understands that there is no warranty for this free
50library. If the library is modified by someone else and passed on, we
51want its recipients to know that what they have is not the original
52version, so that any problems introduced by others will not reflect on
53the original authors' reputations.
54
55 Finally, any free program is threatened constantly by software
56patents. We wish to avoid the danger that companies distributing free
57software will individually obtain patent licenses, thus in effect
58transforming the program into proprietary software. To prevent this,
59we have made it clear that any patent must be licensed for everyone's
60free use or not licensed at all.
61
62 Most GNU software, including some libraries, is covered by the ordinary
63GNU General Public License, which was designed for utility programs. This
64license, the GNU Library General Public License, applies to certain
65designated libraries. This license is quite different from the ordinary
66one; be sure to read it in full, and don't assume that anything in it is
67the same as in the ordinary license.
68
69 The reason we have a separate public license for some libraries is that
70they blur the distinction we usually make between modifying or adding to a
71program and simply using it. Linking a program with a library, without
72changing the library, is in some sense simply using the library, and is
73analogous to running a utility program or application program. However, in
74a textual and legal sense, the linked executable is a combined work, a
75derivative of the original library, and the ordinary General Public License
76treats it as such.
77
78 Because of this blurred distinction, using the ordinary General
79Public License for libraries did not effectively promote software
80sharing, because most developers did not use the libraries. We
81concluded that weaker conditions might promote sharing better.
82
83 However, unrestricted linking of non-free programs would deprive the
84users of those programs of all benefit from the free status of the
85libraries themselves. This Library General Public License is intended to
86permit developers of non-free programs to use free libraries, while
87preserving your freedom as a user of such programs to change the free
88libraries that are incorporated in them. (We have not seen how to achieve
89this as regards changes in header files, but we have achieved it as regards
90changes in the actual functions of the Library.) The hope is that this
91will lead to faster development of free libraries.
92
93 The precise terms and conditions for copying, distribution and
94modification follow. Pay close attention to the difference between a
95"work based on the library" and a "work that uses the library". The
96former contains code derived from the library, while the latter only
97works together with the library.
98
99 Note that it is possible for a library to be covered by the ordinary
100General Public License rather than by this special one.
101
102 GNU LIBRARY GENERAL PUBLIC LICENSE
103 TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
104
105 0. This License Agreement applies to any software library which
106contains a notice placed by the copyright holder or other authorized
107party saying it may be distributed under the terms of this Library
108General Public License (also called "this License"). Each licensee is
109addressed as "you".
110
111 A "library" means a collection of software functions and/or data
112prepared so as to be conveniently linked with application programs
113(which use some of those functions and data) to form executables.
114
115 The "Library", below, refers to any such software library or work
116which has been distributed under these terms. A "work based on the
117Library" means either the Library or any derivative work under
118copyright law: that is to say, a work containing the Library or a
119portion of it, either verbatim or with modifications and/or translated
120straightforwardly into another language. (Hereinafter, translation is
121included without limitation in the term "modification".)
122
123 "Source code" for a work means the preferred form of the work for
124making modifications to it. For a library, complete source code means
125all the source code for all modules it contains, plus any associated
126interface definition files, plus the scripts used to control compilation
127and installation of the library.
128
129 Activities other than copying, distribution and modification are not
130covered by this License; they are outside its scope. The act of
131running a program using the Library is not restricted, and output from
132such a program is covered only if its contents constitute a work based
133on the Library (independent of the use of the Library in a tool for
134writing it). Whether that is true depends on what the Library does
135and what the program that uses the Library does.
136
137 1. You may copy and distribute verbatim copies of the Library's
138complete source code as you receive it, in any medium, provided that
139you conspicuously and appropriately publish on each copy an
140appropriate copyright notice and disclaimer of warranty; keep intact
141all the notices that refer to this License and to the absence of any
142warranty; and distribute a copy of this License along with the
143Library.
144
145 You may charge a fee for the physical act of transferring a copy,
146and you may at your option offer warranty protection in exchange for a
147fee.
148
149 2. You may modify your copy or copies of the Library or any portion
150of it, thus forming a work based on the Library, and copy and
151distribute such modifications or work under the terms of Section 1
152above, provided that you also meet all of these conditions:
153
154 a) The modified work must itself be a software library.
155
156 b) You must cause the files modified to carry prominent notices
157 stating that you changed the files and the date of any change.
158
159 c) You must cause the whole of the work to be licensed at no
160 charge to all third parties under the terms of this License.
161
162 d) If a facility in the modified Library refers to a function or a
163 table of data to be supplied by an application program that uses
164 the facility, other than as an argument passed when the facility
165 is invoked, then you must make a good faith effort to ensure that,
166 in the event an application does not supply such function or
167 table, the facility still operates, and performs whatever part of
168 its purpose remains meaningful.
169
170 (For example, a function in a library to compute square roots has
171 a purpose that is entirely well-defined independent of the
172 application. Therefore, Subsection 2d requires that any
173 application-supplied function or table used by this function must
174 be optional: if the application does not supply it, the square
175 root function must still compute square roots.)
176
177These requirements apply to the modified work as a whole. If
178identifiable sections of that work are not derived from the Library,
179and can be reasonably considered independent and separate works in
180themselves, then this License, and its terms, do not apply to those
181sections when you distribute them as separate works. But when you
182distribute the same sections as part of a whole which is a work based
183on the Library, the distribution of the whole must be on the terms of
184this License, whose permissions for other licensees extend to the
185entire whole, and thus to each and every part regardless of who wrote
186it.
187
188Thus, it is not the intent of this section to claim rights or contest
189your rights to work written entirely by you; rather, the intent is to
190exercise the right to control the distribution of derivative or
191collective works based on the Library.
192
193In addition, mere aggregation of another work not based on the Library
194with the Library (or with a work based on the Library) on a volume of
195a storage or distribution medium does not bring the other work under
196the scope of this License.
197
198 3. You may opt to apply the terms of the ordinary GNU General Public
199License instead of this License to a given copy of the Library. To do
200this, you must alter all the notices that refer to this License, so
201that they refer to the ordinary GNU General Public License, version 2,
202instead of to this License. (If a newer version than version 2 of the
203ordinary GNU General Public License has appeared, then you can specify
204that version instead if you wish.) Do not make any other change in
205these notices.
206
207 Once this change is made in a given copy, it is irreversible for
208that copy, so the ordinary GNU General Public License applies to all
209subsequent copies and derivative works made from that copy.
210
211 This option is useful when you wish to copy part of the code of
212the Library into a program that is not a library.
213
214 4. You may copy and distribute the Library (or a portion or
215derivative of it, under Section 2) in object code or executable form
216under the terms of Sections 1 and 2 above provided that you accompany
217it with the complete corresponding machine-readable source code, which
218must be distributed under the terms of Sections 1 and 2 above on a
219medium customarily used for software interchange.
220
221 If distribution of object code is made by offering access to copy
222from a designated place, then offering equivalent access to copy the
223source code from the same place satisfies the requirement to
224distribute the source code, even though third parties are not
225compelled to copy the source along with the object code.
226
227 5. A program that contains no derivative of any portion of the
228Library, but is designed to work with the Library by being compiled or
229linked with it, is called a "work that uses the Library". Such a
230work, in isolation, is not a derivative work of the Library, and
231therefore falls outside the scope of this License.
232
233 However, linking a "work that uses the Library" with the Library
234creates an executable that is a derivative of the Library (because it
235contains portions of the Library), rather than a "work that uses the
236library". The executable is therefore covered by this License.
237Section 6 states terms for distribution of such executables.
238
239 When a "work that uses the Library" uses material from a header file
240that is part of the Library, the object code for the work may be a
241derivative work of the Library even though the source code is not.
242Whether this is true is especially significant if the work can be
243linked without the Library, or if the work is itself a library. The
244threshold for this to be true is not precisely defined by law.
245
246 If such an object file uses only numerical parameters, data
247structure layouts and accessors, and small macros and small inline
248functions (ten lines or less in length), then the use of the object
249file is unrestricted, regardless of whether it is legally a derivative
250work. (Executables containing this object code plus portions of the
251Library will still fall under Section 6.)
252
253 Otherwise, if the work is a derivative of the Library, you may
254distribute the object code for the work under the terms of Section 6.
255Any executables containing that work also fall under Section 6,
256whether or not they are linked directly with the Library itself.
257
258 6. As an exception to the Sections above, you may also compile or
259link a "work that uses the Library" with the Library to produce a
260work containing portions of the Library, and distribute that work
261under terms of your choice, provided that the terms permit
262modification of the work for the customer's own use and reverse
263engineering for debugging such modifications.
264
265 You must give prominent notice with each copy of the work that the
266Library is used in it and that the Library and its use are covered by
267this License. You must supply a copy of this License. If the work
268during execution displays copyright notices, you must include the
269copyright notice for the Library among them, as well as a reference
270directing the user to the copy of this License. Also, you must do one
271of these things:
272
273 a) Accompany the work with the complete corresponding
274 machine-readable source code for the Library including whatever
275 changes were used in the work (which must be distributed under
276 Sections 1 and 2 above); and, if the work is an executable linked
277 with the Library, with the complete machine-readable "work that
278 uses the Library", as object code and/or source code, so that the
279 user can modify the Library and then relink to produce a modified
280 executable containing the modified Library. (It is understood
281 that the user who changes the contents of definitions files in the
282 Library will not necessarily be able to recompile the application
283 to use the modified definitions.)
284
285 b) Accompany the work with a written offer, valid for at
286 least three years, to give the same user the materials
287 specified in Subsection 6a, above, for a charge no more
288 than the cost of performing this distribution.
289
290 c) If distribution of the work is made by offering access to copy
291 from a designated place, offer equivalent access to copy the above
292 specified materials from the same place.
293
294 d) Verify that the user has already received a copy of these
295 materials or that you have already sent this user a copy.
296
297 For an executable, the required form of the "work that uses the
298Library" must include any data and utility programs needed for
299reproducing the executable from it. However, as a special exception,
300the source code distributed need not include anything that is normally
301distributed (in either source or binary form) with the major
302components (compiler, kernel, and so on) of the operating system on
303which the executable runs, unless that component itself accompanies
304the executable.
305
306 It may happen that this requirement contradicts the license
307restrictions of other proprietary libraries that do not normally
308accompany the operating system. Such a contradiction means you cannot
309use both them and the Library together in an executable that you
310distribute.
311
312 7. You may place library facilities that are a work based on the
313Library side-by-side in a single library together with other library
314facilities not covered by this License, and distribute such a combined
315library, provided that the separate distribution of the work based on
316the Library and of the other library facilities is otherwise
317permitted, and provided that you do these two things:
318
319 a) Accompany the combined library with a copy of the same work
320 based on the Library, uncombined with any other library
321 facilities. This must be distributed under the terms of the
322 Sections above.
323
324 b) Give prominent notice with the combined library of the fact
325 that part of it is a work based on the Library, and explaining
326 where to find the accompanying uncombined form of the same work.
327
328 8. You may not copy, modify, sublicense, link with, or distribute
329the Library except as expressly provided under this License. Any
330attempt otherwise to copy, modify, sublicense, link with, or
331distribute the Library is void, and will automatically terminate your
332rights under this License. However, parties who have received copies,
333or rights, from you under this License will not have their licenses
334terminated so long as such parties remain in full compliance.
335
336 9. You are not required to accept this License, since you have not
337signed it. However, nothing else grants you permission to modify or
338distribute the Library or its derivative works. These actions are
339prohibited by law if you do not accept this License. Therefore, by
340modifying or distributing the Library (or any work based on the
341Library), you indicate your acceptance of this License to do so, and
342all its terms and conditions for copying, distributing or modifying
343the Library or works based on it.
344
345 10. Each time you redistribute the Library (or any work based on the
346Library), the recipient automatically receives a license from the
347original licensor to copy, distribute, link with or modify the Library
348subject to these terms and conditions. You may not impose any further
349restrictions on the recipients' exercise of the rights granted herein.
350You are not responsible for enforcing compliance by third parties to
351this License.
352
353 11. If, as a consequence of a court judgment or allegation of patent
354infringement or for any other reason (not limited to patent issues),
355conditions are imposed on you (whether by court order, agreement or
356otherwise) that contradict the conditions of this License, they do not
357excuse you from the conditions of this License. If you cannot
358distribute so as to satisfy simultaneously your obligations under this
359License and any other pertinent obligations, then as a consequence you
360may not distribute the Library at all. For example, if a patent
361license would not permit royalty-free redistribution of the Library by
362all those who receive copies directly or indirectly through you, then
363the only way you could satisfy both it and this License would be to
364refrain entirely from distribution of the Library.
365
366If any portion of this section is held invalid or unenforceable under any
367particular circumstance, the balance of the section is intended to apply,
368and the section as a whole is intended to apply in other circumstances.
369
370It is not the purpose of this section to induce you to infringe any
371patents or other property right claims or to contest validity of any
372such claims; this section has the sole purpose of protecting the
373integrity of the free software distribution system which is
374implemented by public license practices. Many people have made
375generous contributions to the wide range of software distributed
376through that system in reliance on consistent application of that
377system; it is up to the author/donor to decide if he or she is willing
378to distribute software through any other system and a licensee cannot
379impose that choice.
380
381This section is intended to make thoroughly clear what is believed to
382be a consequence of the rest of this License.
383
384 12. If the distribution and/or use of the Library is restricted in
385certain countries either by patents or by copyrighted interfaces, the
386original copyright holder who places the Library under this License may add
387an explicit geographical distribution limitation excluding those countries,
388so that distribution is permitted only in or among countries not thus
389excluded. In such case, this License incorporates the limitation as if
390written in the body of this License.
391
392 13. The Free Software Foundation may publish revised and/or new
393versions of the Library General Public License from time to time.
394Such new versions will be similar in spirit to the present version,
395but may differ in detail to address new problems or concerns.
396
397Each version is given a distinguishing version number. If the Library
398specifies a version number of this License which applies to it and
399"any later version", you have the option of following the terms and
400conditions either of that version or of any later version published by
401the Free Software Foundation. If the Library does not specify a
402license version number, you may choose any version ever published by
403the Free Software Foundation.
404
405 14. If you wish to incorporate parts of the Library into other free
406programs whose distribution conditions are incompatible with these,
407write to the author to ask for permission. For software which is
408copyrighted by the Free Software Foundation, write to the Free
409Software Foundation; we sometimes make exceptions for this. Our
410decision will be guided by the two goals of preserving the free status
411of all derivatives of our free software and of promoting the sharing
412and reuse of software generally.
413
414 NO WARRANTY
415
416 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
417WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
418EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
419OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
420KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
421IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
422PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
423LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
424THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
425
426 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
427WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
428AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
429FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
430CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
431LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
432RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
433FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
434SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
435DAMAGES.
436
437 END OF TERMS AND CONDITIONS
438
439 Appendix: How to Apply These Terms to Your New Libraries
440
441 If you develop a new library, and you want it to be of the greatest
442possible use to the public, we recommend making it free software that
443everyone can redistribute and change. You can do so by permitting
444redistribution under these terms (or, alternatively, under the terms of the
445ordinary General Public License).
446
447 To apply these terms, attach the following notices to the library. It is
448safest to attach them to the start of each source file to most effectively
449convey the exclusion of warranty; and each file should have at least the
450"copyright" line and a pointer to where the full notice is found.
451
452 <one line to give the library's name and a brief idea of what it does.>
453 Copyright (C) <year> <name of author>
454
455 This library is free software; you can redistribute it and/or
456 modify it under the terms of the GNU Library General Public
457 License as published by the Free Software Foundation; either
458 version 2 of the License, or (at your option) any later version.
459
460 This library is distributed in the hope that it will be useful,
461 but WITHOUT ANY WARRANTY; without even the implied warranty of
462 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
463 Library General Public License for more details.
464
465 You should have received a copy of the GNU Library General Public
466 License along with this library; if not, write to the Free
467 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
468
469Also add information on how to contact you by electronic and paper mail.
470
471You should also get your employer (if you work as a programmer) or your
472school, if any, to sign a "copyright disclaimer" for the library, if
473necessary. Here is a sample; alter the names:
474
475 Yoyodyne, Inc., hereby disclaims all copyright interest in the
476 library `Frob' (a library for tweaking knobs) written by James Random Hacker.
477
478 <signature of Ty Coon>, 1 April 1990
479 Ty Coon, President of Vice
480
481That's all there is to it!
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
new file mode 100644
index 000000000000..2296ff9dc47a
--- /dev/null
+++ b/arch/sparc/lib/Makefile
@@ -0,0 +1,13 @@
# $Id: Makefile,v 1.35 2000/12/15 00:41:18 davem Exp $
# Makefile for Sparc library files..
#

# Assembler flags: strict ANSI preprocessing; ST_DIV0 is the software
# trap number the division helpers raise on divide-by-zero.
EXTRA_AFLAGS := -ansi -DST_DIV0=0x02

# Objects always linked into lib.a: gcc arithmetic helpers (mul/div/shift),
# string/memory primitives, user-copy routines, and atomic/bitop support.
lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
         strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
         strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
         copy_user.o locks.o atomic.o atomic32.o bitops.o \
         lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o

# Instrumented lock implementations, only when spinlock debugging is on.
lib-$(CONFIG_DEBUG_SPINLOCK) +=	debuglocks.o
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
new file mode 100644
index 000000000000..52418a0cb3dd
--- /dev/null
+++ b/arch/sparc/lib/ashldi3.S
@@ -0,0 +1,34 @@
/* $Id: ashldi3.S,v 1.2 1999/11/19 04:11:46 davem Exp $
 * ashldi3.S: GCC emits these for certain drivers playing
 * with long longs.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

	/* 64-bit logical shift left.  Per the 32-bit SPARC ABI the long
	 * long operand arrives as %o0 (high word) / %o1 (low word) and
	 * the shift count in %o2; the result is returned in %o0/%o1.
	 * Assumes 0 <= count < 64 (anything else is UB for the C
	 * operator gcc lowered to this call).
	 * Instructions immediately after a branch execute in its delay
	 * slot and so run regardless of the branch outcome.
	 */
	.text
	.align	4
	.globl	__ashldi3
__ashldi3:
	cmp	%o2, 0			! shift by zero?
	be	9f			! yes -> value unchanged, return
	 mov	0x20, %g2		! (delay) %g2 = 32

	sub	%g2, %o2, %g2		! %g2 = 32 - count
	cmp	%g2, 0
	bg	7f			! count < 32: both words contribute
	 sll	%o0, %o2, %g3		! (delay) high word << count

	neg	%g2			! count >= 32: %g2 = count - 32
	clr	%o5			! new low word is all zero
	b	8f
	 sll	%o1, %g2, %o4		! (delay) new high = low << (count-32)
7:
	srl	%o1, %g2, %g2		! low bits carried up into high word
	sll	%o1, %o2, %o5		! new low word
	or	%g3, %g2, %o4		! new high = (high << count) | carry
8:
	mov	%o4, %o0		! move result into return registers
	mov	%o5, %o1
9:
	retl
	 nop
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
new file mode 100644
index 000000000000..2848237598a4
--- /dev/null
+++ b/arch/sparc/lib/ashrdi3.S
@@ -0,0 +1,36 @@
/* $Id: ashrdi3.S,v 1.4 1999/11/19 04:11:49 davem Exp $
 * ashrdi3.S: The filesystem code creates all kinds of references to
 * this little routine on the sparc with gcc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

	/* 64-bit arithmetic (sign-propagating) shift right.  Operand in
	 * %o0 (high word) / %o1 (low word), shift count in %o2; result
	 * returned in %o0/%o1.  Assumes 0 <= count < 64.
	 * Note the synthetic forms used throughout: 'or %g0, x, y' is
	 * mov, and 'jmpl %o7 + 8, %g0' is retl.  The instruction after
	 * each branch sits in its delay slot.
	 */
	.text
	.align	4
	.globl	__ashrdi3
__ashrdi3:
	tst	%o2			! shift by zero?
	be	3f			! yes -> value unchanged, return
	 or	%g0, 32, %g2		! (delay) %g2 = 32

	sub	%g2, %o2, %g2		! %g2 = 32 - count

	tst	%g2
	bg	1f			! count < 32: both words contribute
	 sra	%o0, %o2, %o4		! (delay) new high = high >> count

	sra	%o0, 31, %o4		! count >= 32: high word is pure sign fill
	sub	%g0, %g2, %g2		! %g2 = count - 32
	ba	2f
	 sra	%o0, %g2, %o5		! (delay) new low = high >> (count-32)

1:
	sll	%o0, %g2, %g3		! high bits carried down into low word
	srl	%o1, %o2, %g2		! low word logically shifted
	or	%g2, %g3, %o5		! new low = carry | (low >> count)
2:
	or	%g0, %o4, %o0		! move result into return registers
	or	%g0, %o5, %o1
3:
	jmpl	%o7 + 8, %g0		! retl
	 nop
diff --git a/arch/sparc/lib/atomic.S b/arch/sparc/lib/atomic.S
new file mode 100644
index 000000000000..f48ad0c4dadb
--- /dev/null
+++ b/arch/sparc/lib/atomic.S
@@ -0,0 +1,100 @@
/* atomic.S: Move this stuff here for better ICACHE hit rates.
 *
 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
 */

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/psr.h>

	.text
	.align	4

	/* Symbols bracketing this block of atomic support code.
	 * NOTE(review): presumably used elsewhere to identify/relocate
	 * this region as a unit -- confirm against the users of
	 * __atomic_begin/__atomic_end.
	 */
	.globl	__atomic_begin
__atomic_begin:

#ifndef CONFIG_SMP
	/* ___xchg32_sun4c: atomically exchange %g2 with the word at
	 * [%g1] on sun4c, which lacks the 'swap' instruction.  Mutual
	 * exclusion is achieved by raising PIL in %psr to mask
	 * interrupts around the load/store pair -- skipped when the
	 * caller already had interrupts masked.  Returns the old value
	 * in %g2.  Nonstandard calling convention: caller's %o7 was
	 * stashed in %g4 and is restored in the return delay slot.
	 */
	.globl	___xchg32_sun4c
___xchg32_sun4c:
	rd	%psr, %g3
	andcc	%g3, PSR_PIL, %g0	! interrupts already masked?
	bne	1f
	 nop
	wr	%g3, PSR_PIL, %psr	! no: mask them now
	nop; nop; nop			! let the %psr write settle
1:
	andcc	%g3, PSR_PIL, %g0	! were they masked on entry?
	ld	[%g1], %g7		! fetch old value
	bne	1f			! yes: leave %psr untouched
	 st	%g2, [%g1]		! (delay) store new value
	wr	%g3, 0x0, %psr		! restore original interrupt level
	nop; nop; nop
1:
	mov	%g7, %g2		! return old value in %g2
	jmpl	%o7 + 8, %g0
	 mov	%g4, %o7

	/* ___xchg32_sun4md: same exchange on sun4m/sun4d, which do have
	 * the atomic 'swap' instruction.
	 */
	.globl	___xchg32_sun4md
___xchg32_sun4md:
	swap	[%g1], %g2
	jmpl	%o7 + 8, %g0
	 mov	%g4, %o7
#endif

	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
	 * Really, some things here for SMP are overly clever, go read the header.
	 *
	 * Layout (see the header): the atomic24_t word keeps a signed
	 * 24-bit counter in the top bits and, on SMP, a ldstub byte
	 * lock in the low byte; sra/sll by 8 convert between the two.
	 */
	.globl	___atomic24_add
___atomic24_add:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	 nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	 nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	 ld	[%g1], %g7		! Load locked atomic24_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	add	%g7, %g2, %g2		! Add in argument
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well.
#else
	ld	[%g1], %g7		! Load locked atomic24_t
	add	%g7, %g2, %g2		! Add in argument
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	 nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7

	/* ___atomic24_sub: identical structure to ___atomic24_add but
	 * subtracts the argument instead.
	 */
	.globl	___atomic24_sub
___atomic24_sub:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	 nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	 nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	 ld	[%g1], %g7		! Load locked atomic24_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	sub	%g7, %g2, %g2		! Subtract argument
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well
#else
	ld	[%g1], %g7		! Load locked atomic24_t
	sub	%g7, %g2, %g2		! Subtract argument
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	 nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7

	.globl  __atomic_end
__atomic_end:
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
new file mode 100644
index 000000000000..19724c5800a7
--- /dev/null
+++ b/arch/sparc/lib/atomic32.c
@@ -0,0 +1,53 @@
1/*
2 * atomic32.c: 32-bit atomic_t implementation
3 *
4 * Copyright (C) 2004 Keith M Wesolowski
5 *
6 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
7 */
8
9#include <asm/atomic.h>
10#include <linux/spinlock.h>
11#include <linux/module.h>
12
13#ifdef CONFIG_SMP
14#define ATOMIC_HASH_SIZE 4
15#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
16
17spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
18 [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
19};
20
21#else /* SMP */
22
23static spinlock_t dummy = SPIN_LOCK_UNLOCKED;
24#define ATOMIC_HASH_SIZE 1
25#define ATOMIC_HASH(a) (&dummy)
26
27#endif /* SMP */
28
29int __atomic_add_return(int i, atomic_t *v)
30{
31 int ret;
32 unsigned long flags;
33 spin_lock_irqsave(ATOMIC_HASH(v), flags);
34
35 ret = (v->counter += i);
36
37 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
38 return ret;
39}
40
41void atomic_set(atomic_t *v, int i)
42{
43 unsigned long flags;
44 spin_lock_irqsave(ATOMIC_HASH(v), flags);
45
46 v->counter = i;
47
48 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
49}
50
51EXPORT_SYMBOL(__atomic_add_return);
52EXPORT_SYMBOL(atomic_set);
53
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
new file mode 100644
index 000000000000..94b05e8c906c
--- /dev/null
+++ b/arch/sparc/lib/bitext.c
@@ -0,0 +1,132 @@
1/*
2 * bitext.c: kernel little helper (of bit shuffling variety).
3 *
4 * Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
5 *
6 * The algorithm to search a zero bit string is geared towards its application.
7 * We expect a couple of fixed sizes of requests, so a rotating counter, reset
8 * by align size, should provide fast enough search while maintaining low
9 * fragmentation.
10 */
11
12#include <linux/smp_lock.h>
13#include <linux/bitops.h>
14
15#include <asm/bitext.h>
16
17/**
18 * bit_map_string_get - find and set a bit string in bit map.
19 * @t: the bit map.
20 * @len: requested string length
21 * @align: requested alignment
22 *
23 * Returns offset in the map or -1 if out of space.
24 *
25 * Not safe to call from an interrupt (uses spin_lock).
26 */
27int bit_map_string_get(struct bit_map *t, int len, int align)
28{
29 int offset, count; /* siamese twins */
30 int off_new;
31 int align1;
32 int i, color;
33
34 if (t->num_colors) {
35 /* align is overloaded to be the page color */
36 color = align;
37 align = t->num_colors;
38 } else {
39 color = 0;
40 if (align == 0)
41 align = 1;
42 }
43 align1 = align - 1;
	/* Alignment must be a power of two and requests must fit the map. */
44 if ((align & align1) != 0)
45 BUG();
46 if (align < 0 || align >= t->size)
47 BUG();
48 if (len <= 0 || len > t->size)
49 BUG();
50 color &= align1;
51
52 spin_lock(&t->lock);
	/*
	 * Rotating first-fit: resume from last_off (rounded down to the
	 * alignment) for requests at least as large as the previous one,
	 * otherwise restart at first_free.  'count' accumulates the bits
	 * scanned so a full wrap of the map is detected as out-of-space.
	 */
53 if (len < t->last_size)
54 offset = t->first_free;
55 else
56 offset = t->last_off & ~align1;
57 count = 0;
58 for (;;) {
59 off_new = find_next_zero_bit(t->map, t->size, offset);
60 off_new = ((off_new + align1) & ~align1) + color;
61 count += off_new - offset;
62 offset = off_new;
63 if (offset >= t->size)
64 offset = 0;
65 if (count + len > t->size) {
66 spin_unlock(&t->lock);
	/* "P3" marks a debugging diagnostic left in deliberately. */
67/* P3 */ printk(KERN_ERR
68 "bitmap out: size %d used %d off %d len %d align %d count %d\n",
69 t->size, t->used, offset, len, align, count);
70 return -1;
71 }
72
	/* A string may not wrap around the end of the map. */
73 if (offset + len > t->size) {
74 count += t->size - offset;
75 offset = 0;
76 continue;
77 }
78
	/* Verify that 'len' consecutive bits are free at 'offset'. */
79 i = 0;
80 while (test_bit(offset + i, t->map) == 0) {
81 i++;
82 if (i == len) {
	/* Found: mark the string allocated and update the hints. */
83 for (i = 0; i < len; i++)
84 __set_bit(offset + i, t->map);
85 if (offset == t->first_free)
86 t->first_free = find_next_zero_bit
87 (t->map, t->size,
88 t->first_free + len);
89 if ((t->last_off = offset + len) >= t->size)
90 t->last_off = 0;
91 t->used += len;
92 t->last_size = len;
93 spin_unlock(&t->lock);
94 return offset;
95 }
96 }
97 count += i + 1;
98 if ((offset += i + 1) >= t->size)
99 offset = 0;
100 }
101}
102
103void bit_map_clear(struct bit_map *t, int offset, int len)
104{
105 int i;
106
107 if (t->used < len)
108 BUG(); /* Much too late to do any good, but alas... */
109 spin_lock(&t->lock);
110 for (i = 0; i < len; i++) {
111 if (test_bit(offset + i, t->map) == 0)
112 BUG();
113 __clear_bit(offset + i, t->map);
114 }
115 if (offset < t->first_free)
116 t->first_free = offset;
117 t->used -= len;
118 spin_unlock(&t->lock);
119}
120
121void bit_map_init(struct bit_map *t, unsigned long *map, int size)
122{
123
124 if ((size & 07) != 0)
125 BUG();
126 memset(map, 0, size>>3);
127
128 memset(t, 0, sizeof *t);
129 spin_lock_init(&t->lock);
130 t->map = map;
131 t->size = size;
132}
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
new file mode 100644
index 000000000000..3e9399769075
--- /dev/null
+++ b/arch/sparc/lib/bitops.S
@@ -0,0 +1,110 @@
1/* bitops.S: Low level assembler bit operations.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/config.h>
7#include <asm/ptrace.h>
8#include <asm/psr.h>
9
10 .text
11 .align 4
12
13 .globl __bitops_begin
14__bitops_begin:
15
 16 /* Take bits in %g2 and set them in word at %g1,
 17 * return whether bits were set in original value
 18 * in %g2. %g4 holds value to restore into %o7
 19 * in delay slot of jmpl return, %g3 + %g5 + %g7 can be
 20 * used as temporaries and thus is considered clobbered
 21 * by all callers.
 22 */
 23 .globl ___set_bit
 24___set_bit:
 25 rd %psr, %g3 ! save current PSR for restore below
 26 nop; nop; nop; ! let the read settle
 27 or %g3, PSR_PIL, %g5 ! mask interrupts via PIL
 28 wr %g5, 0x0, %psr
 29 nop; nop; nop
 30#ifdef CONFIG_SMP
 31 set bitops_spinlock, %g5
 322: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
 33 orcc %g7, 0x0, %g0 ! Did we get it?
 34 bne 2b ! Nope...
 35#endif
 36 ld [%g1], %g7 ! fetch the word
 37 or %g7, %g2, %g5 ! %g5 = word with the bits set
 38 and %g7, %g2, %g2 ! %g2 = old state of those bits
 39#ifdef CONFIG_SMP
 40 st %g5, [%g1] ! store while the lock is still held
 41 set bitops_spinlock, %g5
 42 stb %g0, [%g5] ! release bitops_spinlock
 43#else
 44 st %g5, [%g1]
 45#endif
 46 wr %g3, 0x0, %psr ! restore original PSR (re-enables IRQs)
 47 nop; nop; nop
 48 jmpl %o7, %g0 ! return; caller set up %o7 and %g4
 49 mov %g4, %o7
50
 51 /* Same as above, but clears the bits from %g2 instead. */
 52 .globl ___clear_bit
 53___clear_bit:
 54 rd %psr, %g3 ! save current PSR for restore below
 55 nop; nop; nop
 56 or %g3, PSR_PIL, %g5 ! mask interrupts via PIL
 57 wr %g5, 0x0, %psr
 58 nop; nop; nop
 59#ifdef CONFIG_SMP
 60 set bitops_spinlock, %g5
 612: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
 62 orcc %g7, 0x0, %g0 ! Did we get it?
 63 bne 2b ! Nope...
 64#endif
 65 ld [%g1], %g7 ! fetch the word
 66 andn %g7, %g2, %g5 ! %g5 = word with the bits cleared
 67 and %g7, %g2, %g2 ! %g2 = old state of those bits
 68#ifdef CONFIG_SMP
 69 st %g5, [%g1] ! store while the lock is still held
 70 set bitops_spinlock, %g5
 71 stb %g0, [%g5] ! release bitops_spinlock
 72#else
 73 st %g5, [%g1]
 74#endif
 75 wr %g3, 0x0, %psr ! restore original PSR (re-enables IRQs)
 76 nop; nop; nop
 77 jmpl %o7, %g0 ! return; caller set up %o7 and %g4
 78 mov %g4, %o7
79
 80 /* Same thing again, but this time toggles the bits from %g2. */
 81 .globl ___change_bit
 82___change_bit:
 83 rd %psr, %g3 ! save current PSR for restore below
 84 nop; nop; nop
 85 or %g3, PSR_PIL, %g5 ! mask interrupts via PIL
 86 wr %g5, 0x0, %psr
 87 nop; nop; nop
 88#ifdef CONFIG_SMP
 89 set bitops_spinlock, %g5
 902: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
 91 orcc %g7, 0x0, %g0 ! Did we get it?
 92 bne 2b ! Nope...
 93#endif
 94 ld [%g1], %g7 ! fetch the word
 95 xor %g7, %g2, %g5 ! %g5 = word with the bits toggled
 96 and %g7, %g2, %g2 ! %g2 = old state of those bits
 97#ifdef CONFIG_SMP
 98 st %g5, [%g1] ! store while the lock is still held
 99 set bitops_spinlock, %g5
 100 stb %g0, [%g5] ! release bitops_spinlock
 101#else
 102 st %g5, [%g1]
 103#endif
 104 wr %g3, 0x0, %psr ! restore original PSR (re-enables IRQs)
 105 nop; nop; nop
 106 jmpl %o7, %g0 ! return; caller set up %o7 and %g4
 107 mov %g4, %o7
108
109 .globl __bitops_end
110__bitops_end:
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
new file mode 100644
index 000000000000..a7c7ffaa4a94
--- /dev/null
+++ b/arch/sparc/lib/blockops.S
@@ -0,0 +1,89 @@
1/* $Id: blockops.S,v 1.8 1998/01/30 10:58:44 jj Exp $
2 * blockops.S: Common block zero optimized routines.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/page.h>
8
 9 /* Zero out 64 bytes of memory at (buf + offset).
 10 * Assumes %g1 contains zero.
	 * (std of %g0 writes the even/odd register pair %g0/%g1 as one
	 * doubleword, which is why the caller must clear %g1 first.)
 11 */
 12#define BLAST_BLOCK(buf, offset) \
 13 std %g0, [buf + offset + 0x38]; \
 14 std %g0, [buf + offset + 0x30]; \
 15 std %g0, [buf + offset + 0x28]; \
 16 std %g0, [buf + offset + 0x20]; \
 17 std %g0, [buf + offset + 0x18]; \
 18 std %g0, [buf + offset + 0x10]; \
 19 std %g0, [buf + offset + 0x08]; \
 20 std %g0, [buf + offset + 0x00];
 21
 22 /* Copy 32 bytes of memory at (src + offset) to
 23 * (dst + offset).
	 * The t0..t7 temporaries are used as four even/odd register
	 * pairs for the doubleword loads/stores.
 24 */
 25#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
 26 ldd [src + offset + 0x18], t0; \
 27 ldd [src + offset + 0x10], t2; \
 28 ldd [src + offset + 0x08], t4; \
 29 ldd [src + offset + 0x00], t6; \
 30 std t0, [dst + offset + 0x18]; \
 31 std t2, [dst + offset + 0x10]; \
 32 std t4, [dst + offset + 0x08]; \
 33 std t6, [dst + offset + 0x00];
34
35 /* Profiling evidence indicates that memset() is
36 * commonly called for blocks of size PAGE_SIZE,
37 * and (2 * PAGE_SIZE) (for kernel stacks)
38 * and with a second arg of zero. We assume in
39 * all of these cases that the buffer is aligned
40 * on at least an 8 byte boundary.
41 *
42 * Therefore we special case them to make them
43 * as fast as possible.
44 */
45
46 .text
47 .align 4
48 .globl bzero_1page, __copy_1page
49
 50bzero_1page:
 51/* NOTE: If you change the number of insns of this routine, please check
 52 * arch/sparc/mm/hypersparc.S */
 53 /* %o0 = buf */
 54 or %g0, %g0, %g1 ! %g1 = 0 (std %g0 stores the %g0/%g1 pair)
 55 or %o0, %g0, %o1 ! keep a copy of buf in %o1
 56 or %g0, (PAGE_SIZE >> 8), %g2 ! %g2 = number of 256-byte chunks per page
 571:
 58 BLAST_BLOCK(%o0, 0x00)
 59 BLAST_BLOCK(%o0, 0x40)
 60 BLAST_BLOCK(%o0, 0x80)
 61 BLAST_BLOCK(%o0, 0xc0)
 62 subcc %g2, 1, %g2 ! 256 bytes zeroed per iteration
 63 bne 1b
 64 add %o0, 0x100, %o0
 65
 66 retl
 67 nop
68
 69__copy_1page:
 70/* NOTE: If you change the number of insns of this routine, please check
 71 * arch/sparc/mm/hypersparc.S */
 72 /* %o0 = dst, %o1 = src */
 73 or %g0, (PAGE_SIZE >> 8), %g1 ! %g1 = number of 256-byte chunks per page
 741:
 75 MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 76 MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 77 MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 78 MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 79 MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 80 MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 81 MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 82 MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
 83 subcc %g1, 1, %g1 ! 256 bytes copied per iteration
 84 add %o0, 0x100, %o0
 85 bne 1b
 86 add %o1, 0x100, %o1
 87
 88 retl
 89 nop
diff --git a/arch/sparc/lib/checksum.S b/arch/sparc/lib/checksum.S
new file mode 100644
index 000000000000..77f228533d47
--- /dev/null
+++ b/arch/sparc/lib/checksum.S
@@ -0,0 +1,583 @@
1/* checksum.S: Sparc optimized checksum code.
2 *
3 * Copyright(C) 1995 Linus Torvalds
4 * Copyright(C) 1995 Miguel de Icaza
5 * Copyright(C) 1996 David S. Miller
6 * Copyright(C) 1997 Jakub Jelinek
7 *
8 * derived from:
9 * Linux/Alpha checksum c-code
10 * Linux/ix86 inline checksum assembly
11 * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
12 * David Mosberger-Tang for optimized reference c-code
13 * BSD4.4 portable checksum routine
14 */
15
16#include <asm/errno.h>
17
	/* Fold 32 bytes at (buf + offset) into 'sum' with end-around carry
	 * (addxcc).  t0..t5 are even/odd pairs for the ldd doubleword loads;
	 * t0/t1 are reused for the last load.
	 */
 18#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
 19 ldd [buf + offset + 0x00], t0; \
 20 ldd [buf + offset + 0x08], t2; \
 21 addxcc t0, sum, sum; \
 22 addxcc t1, sum, sum; \
 23 ldd [buf + offset + 0x10], t4; \
 24 addxcc t2, sum, sum; \
 25 addxcc t3, sum, sum; \
 26 ldd [buf + offset + 0x18], t0; \
 27 addxcc t4, sum, sum; \
 28 addxcc t5, sum, sum; \
 29 addxcc t0, sum, sum; \
 30 addxcc t1, sum, sum;
 31
	/* Fold the 16 bytes ending at (buf - offset) into 'sum'; used by the
	 * Duff-style jump table where 'buf' has already been advanced.
	 */
 32#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \
 33 ldd [buf - offset - 0x08], t0; \
 34 ldd [buf - offset - 0x00], t2; \
 35 addxcc t0, sum, sum; \
 36 addxcc t1, sum, sum; \
 37 addxcc t2, sum, sum; \
 38 addxcc t3, sum, sum;
39
 40 /* Do end cruft out of band to get better cache patterns. */
	! Entry: %o0 = buf, %o1 = remaining length (< 16), %o2 = partial sum.
	! The caller (cpte) has already executed andcc %o1, 8 in its delay slot.
 41csum_partial_end_cruft:
 42 be 1f ! caller asks %o1 & 0x8
 43 andcc %o1, 4, %g0 ! nope, check for word remaining
 44 ldd [%o0], %g2 ! load two
 45 addcc %g2, %o2, %o2 ! add first word to sum
 46 addxcc %g3, %o2, %o2 ! add second word as well
 47 add %o0, 8, %o0 ! advance buf ptr
 48 addx %g0, %o2, %o2 ! add in final carry
 49 andcc %o1, 4, %g0 ! check again for word remaining
 501: be 1f ! nope, skip this code
 51 andcc %o1, 3, %o1 ! check for trailing bytes
 52 ld [%o0], %g2 ! load it
 53 addcc %g2, %o2, %o2 ! add to sum
 54 add %o0, 4, %o0 ! advance buf ptr
 55 addx %g0, %o2, %o2 ! add in final carry
 56 andcc %o1, 3, %g0 ! check again for trailing bytes
 571: be 1f ! no trailing bytes, return
 58 addcc %o1, -1, %g0 ! only one byte remains?
 59 bne 2f ! at least two bytes more
 60 subcc %o1, 2, %o1 ! only two bytes more?
 61 b 4f ! only one byte remains
 62 or %g0, %g0, %o4 ! clear fake hword value
 632: lduh [%o0], %o4 ! get hword
 64 be 6f ! jmp if only hword remains
 65 add %o0, 2, %o0 ! advance buf ptr either way
 66 sll %o4, 16, %o4 ! create upper hword
 674: ldub [%o0], %o5 ! get final byte
 68 sll %o5, 8, %o5 ! put into place
 69 or %o5, %o4, %o4 ! coalesce with hword (if any)
 706: addcc %o4, %o2, %o2 ! add to sum
 711: retl ! get outta here
 72 addx %g0, %o2, %o0 ! add final carry into retval
73
 74 /* Also do alignment out of band to get better cache patterns. */
	! Align %o0 to an 8-byte boundary, folding the odd halfword/word into
	! the running sum in %o2 (with a 16-bit swap fixup for the halfword
	! case), then rejoin the main loop at cpa.  Very short buffers go
	! straight to the tail handling via cpte - 0x4.
 75csum_partial_fix_alignment:
 76 cmp %o1, 6
 77 bl cpte - 0x4
 78 andcc %o0, 0x2, %g0
 79 be 1f
 80 andcc %o0, 0x4, %g0
 81 lduh [%o0 + 0x00], %g2
 82 sub %o1, 2, %o1
 83 add %o0, 2, %o0
 84 sll %g2, 16, %g2
 85 addcc %g2, %o2, %o2
 86 srl %o2, 16, %g3
 87 addx %g0, %g3, %g2
 88 sll %o2, 16, %o2
 89 sll %g2, 16, %g3
 90 srl %o2, 16, %o2
 91 andcc %o0, 0x4, %g0
 92 or %g3, %o2, %o2
 931: be cpa
 94 andcc %o1, 0xffffff80, %o3
 95 ld [%o0 + 0x00], %g2
 96 sub %o1, 4, %o1
 97 addcc %g2, %o2, %o2
 98 add %o0, 4, %o0
 99 addx %g0, %o2, %o2
 100 b cpa
 101 andcc %o1, 0xffffff80, %o3
102
 103 /* The common case is to get called with a nicely aligned
 104 * buffer of size 0x20. Follow the code path for that case.
 105 */
 106 .globl csum_partial
 107csum_partial: /* %o0=buf, %o1=len, %o2=sum */
 108 andcc %o0, 0x7, %g0 ! alignment problems?
 109 bne csum_partial_fix_alignment ! yep, handle it
 110 sethi %hi(cpte - 8), %g7 ! prepare table jmp ptr
 111 andcc %o1, 0xffffff80, %o3 ! num loop iterations
 112cpa: be 3f ! none to do
 113 andcc %o1, 0x70, %g1 ! clears carry flag too
 1145: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
 115 CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
 116 CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
 117 CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
 118 addx %g0, %o2, %o2 ! sink in final carry
 119 subcc %o3, 128, %o3 ! detract from loop iters
 120 bne 5b ! more to do
 121 add %o0, 128, %o0 ! advance buf ptr
 122 andcc %o1, 0x70, %g1 ! clears carry flag too
 1233: be cpte ! nope
 124 andcc %o1, 0xf, %g0 ! anything left at all?
	! Duff-style entry: each CSUM_LASTCHUNK below is 6 insns (24 bytes)
	! handling 16 bytes, so the jump target is derived from len & 0x70.
 125 srl %g1, 1, %o4 ! compute offset
 126 sub %g7, %g1, %g7 ! adjust jmp ptr
 127 sub %g7, %o4, %g7 ! final jmp ptr adjust
 128 jmp %g7 + %lo(cpte - 8) ! enter the table
 129 add %o0, %g1, %o0 ! advance buf ptr
 130cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
 131 CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
 132 CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
 133 CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
 134 CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
 135 CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
 136 CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
 137 addx %g0, %o2, %o2 ! fetch final carry
 138 andcc %o1, 0xf, %g0 ! anything left at all?
 139cpte: bne csum_partial_end_cruft ! yep, handle it
 140 andcc %o1, 8, %g0 ! check how much
 141cpout: retl ! get outta here
 142 mov %o2, %o0 ! return computed csum
143
144 .globl __csum_partial_copy_start, __csum_partial_copy_end
145__csum_partial_copy_start:
146
 147/* Work around cpp -rob */
 148#define ALLOC #alloc
 149#define EXECINSTR #execinstr
/* EX(insn, operand, a, b): run insn; if it faults, the fixup computes
 * %o3 = "a b %o3"-style remaining-byte count and branches to label 30f
 * (the common recovery code at the end of this file).
 */
 150#define EX(x,y,a,b) \
 15198: x,y; \
 152 .section .fixup,ALLOC,EXECINSTR; \
 153 .align 4; \
 15499: ba 30f; \
 155 a, b, %o3; \
 156 .section __ex_table,ALLOC; \
 157 .align 4; \
 158 .word 98b, 99b; \
 159 .text; \
 160 .align 4
 161
/* EX2: like EX but a fault jumps straight to 30f with no count fixup. */
 162#define EX2(x,y) \
 16398: x,y; \
 164 .section __ex_table,ALLOC; \
 165 .align 4; \
 166 .word 98b, 30f; \
 167 .text; \
 168 .align 4
 169
/* EX3: like EX2 but faults are routed to handler 96f (ccslow loads). */
 170#define EX3(x,y) \
 17198: x,y; \
 172 .section __ex_table,ALLOC; \
 173 .align 4; \
 174 .word 98b, 96f; \
 175 .text; \
 176 .align 4
 177
/* EXT: range entry (start, 0, end, handler) covering a whole unrolled
 * span; the zero second word distinguishes it from a single-insn entry.
 */
 178#define EXT(start,end,handler) \
 179 .section __ex_table,ALLOC; \
 180 .align 4; \
 181 .word start, 0, end, handler; \
 182 .text; \
 183 .align 4
184
 185 /* This aligned version executes typically in 8.5 superscalar cycles, this
 186 * is the best I can do. I say 8.5 because the final add will pair with
 187 * the next ldd in the main unrolled loop. Thus the pipe is always full.
 188 * If you change these macros (including order of instructions),
 189 * please check the fixup code below as well.
 190 */
	/* Copy+sum 32 bytes, dst 8-byte aligned: 16 insns (ldd/std pairs). */
 191#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
 192 ldd [src + off + 0x00], t0; \
 193 ldd [src + off + 0x08], t2; \
 194 addxcc t0, sum, sum; \
 195 ldd [src + off + 0x10], t4; \
 196 addxcc t1, sum, sum; \
 197 ldd [src + off + 0x18], t6; \
 198 addxcc t2, sum, sum; \
 199 std t0, [dst + off + 0x00]; \
 200 addxcc t3, sum, sum; \
 201 std t2, [dst + off + 0x08]; \
 202 addxcc t4, sum, sum; \
 203 std t4, [dst + off + 0x10]; \
 204 addxcc t5, sum, sum; \
 205 std t6, [dst + off + 0x18]; \
 206 addxcc t6, sum, sum; \
 207 addxcc t7, sum, sum;
 208
 209 /* 12 superscalar cycles seems to be the limit for this case,
 210 * because of this we thus do all the ldd's together to get
 211 * Viking MXCC into streaming mode. Ho hum...
 212 */
	/* Copy+sum 32 bytes, dst only 4-byte aligned: 20 insns (word stores). */
 213#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
 214 ldd [src + off + 0x00], t0; \
 215 ldd [src + off + 0x08], t2; \
 216 ldd [src + off + 0x10], t4; \
 217 ldd [src + off + 0x18], t6; \
 218 st t0, [dst + off + 0x00]; \
 219 addxcc t0, sum, sum; \
 220 st t1, [dst + off + 0x04]; \
 221 addxcc t1, sum, sum; \
 222 st t2, [dst + off + 0x08]; \
 223 addxcc t2, sum, sum; \
 224 st t3, [dst + off + 0x0c]; \
 225 addxcc t3, sum, sum; \
 226 st t4, [dst + off + 0x10]; \
 227 addxcc t4, sum, sum; \
 228 st t5, [dst + off + 0x14]; \
 229 addxcc t5, sum, sum; \
 230 st t6, [dst + off + 0x18]; \
 231 addxcc t6, sum, sum; \
 232 st t7, [dst + off + 0x1c]; \
 233 addxcc t7, sum, sum;
 234
 235 /* Yuck, 6 superscalar cycles... */
	/* Copy+sum the 16 bytes ending at (src - off): 10 insns. */
 236#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
 237 ldd [src - off - 0x08], t0; \
 238 ldd [src - off - 0x00], t2; \
 239 addxcc t0, sum, sum; \
 240 st t0, [dst - off - 0x08]; \
 241 addxcc t1, sum, sum; \
 242 st t1, [dst - off - 0x04]; \
 243 addxcc t2, sum, sum; \
 244 st t2, [dst - off - 0x00]; \
 245 addxcc t3, sum, sum; \
 246 st t3, [dst - off + 0x04];
247
 248 /* Handle the end cruft code out of band for better cache patterns. */
	! Copy+sum the final (< 16) bytes.  %o0 = src, %o1 = dst,
	! %o3 = remaining length, %g7 = running sum; entered from ccte with
	! condition codes from "andcc %o3, 8" already set.
 249cc_end_cruft:
 250 be 1f
 251 andcc %o3, 4, %g0
 252 EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
 253 add %o1, 8, %o1
 254 addcc %g2, %g7, %g7
 255 add %o0, 8, %o0
 256 addxcc %g3, %g7, %g7
 257 EX2(st %g2, [%o1 - 0x08])
 258 addx %g0, %g7, %g7
 259 andcc %o3, 4, %g0
 260 EX2(st %g3, [%o1 - 0x04])
 2611: be 1f
 262 andcc %o3, 3, %o3
 263 EX(ld [%o0 + 0x00], %g2, add %o3, 4)
 264 add %o1, 4, %o1
 265 addcc %g2, %g7, %g7
 266 EX2(st %g2, [%o1 - 0x04])
 267 addx %g0, %g7, %g7
 268 andcc %o3, 3, %g0
 269 add %o0, 4, %o0
 2701: be 1f
 271 addcc %o3, -1, %g0
 272 bne 2f
 273 subcc %o3, 2, %o3
 274 b 4f
 275 or %g0, %g0, %o4
 2762: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
 277 add %o0, 2, %o0
 278 EX2(sth %o4, [%o1 + 0x00])
 279 be 6f
 280 add %o1, 2, %o1
 281 sll %o4, 16, %o4
 2824: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
 283 EX2(stb %o5, [%o1 + 0x00])
 284 sll %o5, 8, %o5
 285 or %o5, %o4, %o4
 2866: addcc %o4, %g7, %g7
 2871: retl
 288 addx %g0, %g7, %o0
289
 290 /* Also, handle the alignment code out of band. */
	! Bring %o0 (src) up to an 8-byte boundary, copying and summing the
	! odd halfword/word on the way (with a 16-bit swap fixup of %g7 for
	! the halfword case), then rejoin the main routine at 3f.  Short
	! lengths bail to ccte; odd src addresses go to ccslow.
 291cc_dword_align:
 292 cmp %g1, 6
 293 bl,a ccte
 294 andcc %g1, 0xf, %o3
 295 andcc %o0, 0x1, %g0
 296 bne ccslow
 297 andcc %o0, 0x2, %g0
 298 be 1f
 299 andcc %o0, 0x4, %g0
 300 EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
 301 sub %g1, 2, %g1
 302 EX2(sth %g4, [%o1 + 0x00])
 303 add %o0, 2, %o0
 304 sll %g4, 16, %g4
 305 addcc %g4, %g7, %g7
 306 add %o1, 2, %o1
 307 srl %g7, 16, %g3
 308 addx %g0, %g3, %g4
 309 sll %g7, 16, %g7
 310 sll %g4, 16, %g3
 311 srl %g7, 16, %g7
 312 andcc %o0, 0x4, %g0
 313 or %g3, %g7, %g7
 3141: be 3f
 315 andcc %g1, 0xffffff80, %g0
 316 EX(ld [%o0 + 0x00], %g4, add %g1, 0)
 317 sub %g1, 4, %g1
 318 EX2(st %g4, [%o1 + 0x00])
 319 add %o0, 4, %o0
 320 addcc %g4, %g7, %g7
 321 add %o1, 4, %o1
 322 addx %g0, %g7, %g7
 323 b 3f
 324 andcc %g1, 0xffffff80, %g0
325
 326 /* Sun, you just can't beat me, you just can't. Stop trying,
 327 * give up. I'm serious, I am going to kick the living shit
 328 * out of you, game over, lights out.
 329 */
 330 .align 8
 331 .globl __csum_partial_copy_sparc_generic
 332__csum_partial_copy_sparc_generic:
 333 /* %o0=src, %o1=dest, %g1=len, %g7=sum */
 334 xor %o0, %o1, %o4 ! get changing bits
 335 andcc %o4, 3, %g0 ! check for mismatched alignment
 336 bne ccslow ! better this than unaligned/fixups
 337 andcc %o0, 7, %g0 ! need to align things?
 338 bne cc_dword_align ! yes, we check for short lengths there
 339 andcc %g1, 0xffffff80, %g0 ! can we use unrolled loop?
 3403: be 3f ! nope, less than one loop remains
 341 andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundary?
	! Both BIGCHUNK macros start with the same ldd, which executes in the
	! delay slot below; "ccdbl + 4" skips the aligned loop's copy of it.
 342 be ccdbl + 4 ! 8 byte aligned, kick ass
 3435: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 344 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 345 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 346 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 34710: EXT(5b, 10b, 20f) ! note for exception handling
 348 sub %g1, 128, %g1 ! detract from length
 349 addx %g0, %g7, %g7 ! add in last carry bit
 350 andcc %g1, 0xffffff80, %g0 ! more to csum?
 351 add %o0, 128, %o0 ! advance src ptr
 352 bne 5b ! we did not go negative, continue looping
 353 add %o1, 128, %o1 ! advance dest ptr
 3543: andcc %g1, 0x70, %o2 ! can use table?
 355ccmerge:be ccte ! nope, go and check for end cruft
 356 andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw)
 357 srl %o2, 1, %o4 ! begin negative offset computation
 358 sethi %hi(12f), %o5 ! set up table ptr end
 359 add %o0, %o2, %o0 ! advance src ptr
 360 sub %o5, %o4, %o5 ! continue table calculation
 361 sll %o2, 1, %g2 ! constant multiplies are fun...
 362 sub %o5, %g2, %o5 ! some more adjustments
 363 jmp %o5 + %lo(12f) ! jump into it, duff style, wheee...
 364 add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
 365cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 366 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
 367 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
 368 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
 369 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 370 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 371 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
 37212: EXT(cctbl, 12b, 22f) ! note for exception table handling
 373 addx %g0, %g7, %g7
 374 andcc %o3, 0xf, %g0 ! check for low bits set
 375ccte: bne cc_end_cruft ! something left, handle it out of band
 376 andcc %o3, 8, %g0 ! begin checks for that code
 377 retl ! return
 378 mov %g7, %o0 ! give em the computed checksum
 379ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 380 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 381 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 382 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 38311: EXT(ccdbl, 11b, 21f) ! note for exception table handling
 384 sub %g1, 128, %g1 ! detract from length
 385 addx %g0, %g7, %g7 ! add in last carry bit
 386 andcc %g1, 0xffffff80, %g0 ! more to csum?
 387 add %o0, 128, %o0 ! advance src ptr
 388 bne ccdbl ! we did not go negative, continue looping
 389 add %o1, 128, %o1 ! advance dest ptr
 390 b ccmerge ! finish it off, above
 391 andcc %g1, 0x70, %o2 ! can use table? (clears carry btw)
392
	! ccslow: byte-at-a-time copy+checksum for mismatched/odd alignment.
	! %o0 = src, %o1 = dst, %g1 = len, %g7 = running sum.  %o5 records
	! whether src started on an odd address so the final partial sum in
	! %g5 can be byte-swapped back before being folded into %g7.
 393ccslow: cmp %g1, 0
 394 mov 0, %g5
 395 bleu 4f
 396 andcc %o0, 1, %o5
 397 be,a 1f
 398 srl %g1, 1, %g4
 399 sub %g1, 1, %g1
 400 EX(ldub [%o0], %g5, add %g1, 1)
 401 add %o0, 1, %o0
 402 EX2(stb %g5, [%o1])
 403 srl %g1, 1, %g4
 404 add %o1, 1, %o1
 4051: cmp %g4, 0
 406 be,a 3f
 407 andcc %g1, 1, %g0
 408 andcc %o0, 2, %g0
 409 be,a 1f
 410 srl %g4, 1, %g4
 411 EX(lduh [%o0], %o4, add %g1, 0)
 412 sub %g1, 2, %g1
 413 srl %o4, 8, %g2
 414 sub %g4, 1, %g4
 415 EX2(stb %g2, [%o1])
 416 add %o4, %g5, %g5
 417 EX2(stb %o4, [%o1 + 1])
 418 add %o0, 2, %o0
 419 srl %g4, 1, %g4
 420 add %o1, 2, %o1
 4211: cmp %g4, 0
 422 be,a 2f
 423 andcc %g1, 2, %g0
 424 EX3(ld [%o0], %o4)
 4255: srl %o4, 24, %g2
 426 srl %o4, 16, %g3
 427 EX2(stb %g2, [%o1])
 428 srl %o4, 8, %g2
 429 EX2(stb %g3, [%o1 + 1])
 430 add %o0, 4, %o0
 431 EX2(stb %g2, [%o1 + 2])
 432 addcc %o4, %g5, %g5
 433 EX2(stb %o4, [%o1 + 3])
 434 addx %g5, %g0, %g5 ! I am now too lazy to optimize this (question it
 435 add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
 436 subcc %g4, 1, %g4 ! tricks
 437 bne,a 5b
 438 EX3(ld [%o0], %o4)
 439 sll %g5, 16, %g2
 440 srl %g5, 16, %g5
 441 srl %g2, 16, %g2
 442 andcc %g1, 2, %g0
 443 add %g2, %g5, %g5
 4442: be,a 3f
 445 andcc %g1, 1, %g0
 446 EX(lduh [%o0], %o4, and %g1, 3)
 447 andcc %g1, 1, %g0
 448 srl %o4, 8, %g2
 449 add %o0, 2, %o0
 450 EX2(stb %g2, [%o1])
 451 add %g5, %o4, %g5
 452 EX2(stb %o4, [%o1 + 1])
 453 add %o1, 2, %o1
 4543: be,a 1f
 455 sll %g5, 16, %o4
 456 EX(ldub [%o0], %g2, add %g0, 1)
 457 sll %g2, 8, %o4
 458 EX2(stb %g2, [%o1])
 459 add %g5, %o4, %g5
 460 sll %g5, 16, %o4
 4611: addcc %o4, %g5, %g5
 462 srl %g5, 16, %o4
 463 addx %g0, %o4, %g5
 464 orcc %o5, %g0, %g0
 465 be 4f
 466 srl %g5, 8, %o4
 467 and %g5, 0xff, %g2
 468 and %o4, 0xff, %o4
 469 sll %g2, 8, %g2
 470 or %g2, %o4, %g5
 4714: addcc %g7, %g5, %g7
 472 retl
 473 addx %g0, %g7, %o0
 474__csum_partial_copy_end:
475
 476/* We do these strange calculations for the csum_*_from_user case only, ie.
 477 * we only bother with faults on loads... */
 478
/* Handler 20: fault in the unaligned BIGCHUNK loop.  Each
 * CSUMCOPY_BIGCHUNK is 20 insns per 32 bytes, hence the divide-by-20.
 * o2 = ((g2%20)&3)*8
 * o3 = g1 - (g2/20)*32 - o2 */
 48120:
 482 cmp %g2, 20
 483 blu,a 1f
 484 and %g2, 3, %o2
 485 sub %g1, 32, %g1
 486 b 20b
 487 sub %g2, 20, %g2
 4881:
 489 sll %o2, 3, %o2
 490 b 31f
 491 sub %g1, %o2, %o3
 492
/* Handler 21: fault in the aligned BIGCHUNK loop (16 insns per 32 bytes).
 * o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
 * o3 = g1 - (g2/16)*32 - o2 */
 49521:
 496 andcc %g2, 15, %o3
 497 srl %g2, 4, %g2
 498 be,a 1f
 499 clr %o2
 500 add %o3, 1, %o3
 501 and %o3, 14, %o3
 502 sll %o3, 3, %o2
 5031:
 504 sll %g2, 5, %g2
 505 sub %g1, %g2, %o3
 506 b 31f
 507 sub %o3, %o2, %o3
 508
/* Handler 22: fault in the LASTCHUNK table (10 insns per 16 bytes),
 * where src/dst were pre-advanced past the whole table region.
 * o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
 * o2 = (g2 % 10) ? 8 : 0
 * o3 += 0x70 - (g2/10)*16 - o2 */
 51322:
 514 cmp %g2, 10
 515 blu,a 1f
 516 sub %o0, 0x70, %o0
 517 add %o0, 16, %o0
 518 add %o1, 16, %o1
 519 sub %o3, 16, %o3
 520 b 22b
 521 sub %g2, 10, %g2
 5221:
 523 sub %o1, 0x70, %o1
 524 add %o3, 0x70, %o3
 525 clr %o2
 526 tst %g2
 527 bne,a 1f
 528 mov 8, %o2
 5291:
 530 b 31f
 531 sub %o3, %o2, %o3
/* Handler 96: fault on an EX3 load in the ccslow word loop. */
 53296:
 533 and %g1, 3, %g1
 534 sll %g4, 2, %g4
 535 add %g1, %g4, %o3
/* Common recovery: zero the destination tail (optionally after copying
 * the already-summed prefix) and report -EFAULT through the parent's
 * struct pointer on the stack. */
 53630:
 537/* %o1 is dst
 538 * %o3 is # bytes to zero out
 539 * %o4 is faulting address
 540 * %o5 is %pc where fault occurred */
 541 clr %o2
 54231:
 543/* %o0 is src
 544 * %o1 is dst
 545 * %o2 is # of bytes to copy from src to dst
 546 * %o3 is # bytes to zero out
 547 * %o4 is faulting address
 548 * %o5 is %pc where fault occurred */
 549 save %sp, -104, %sp
 550 mov %i5, %o0
 551 mov %i7, %o1
 552 mov %i4, %o2
 553 call lookup_fault
 554 mov %g7, %i4
 555 cmp %o0, 2
 556 bne 1f
 557 add %g0, -EFAULT, %i5
 558 tst %i2
 559 be 2f
 560 mov %i0, %o1
 561 mov %i1, %o0
 5625:
 563 call __memcpy
 564 mov %i2, %o2
 565 tst %o0
 566 bne,a 2f
 567 add %i3, %i2, %i3
 568 add %i1, %i2, %i1
 5692:
 570 mov %i1, %o0
 5716:
 572 call __bzero
 573 mov %i3, %o1
 5741:
 575 ld [%sp + 168], %o2 ! struct_ptr of parent
 576 st %i5, [%o2]
 577 ret
 578 restore
 579
 580 .section __ex_table,#alloc
 581 .align 4
 582 .word 5b,2
 583 .word 6b,2
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
new file mode 100644
index 000000000000..577505b692ae
--- /dev/null
+++ b/arch/sparc/lib/copy_user.S
@@ -0,0 +1,492 @@
1/* copy_user.S: Sparc optimized copy_from_user and copy_to_user code.
2 *
3 * Copyright(C) 1995 Linus Torvalds
4 * Copyright(C) 1996 David S. Miller
5 * Copyright(C) 1996 Eddie C. Dost
6 * Copyright(C) 1996,1998 Jakub Jelinek
7 *
8 * derived from:
9 * e-mail between David and Eddie.
10 *
11 * Returns 0 if successful, otherwise count of bytes not copied yet
12 */
13
14#include <asm/ptrace.h>
15#include <asm/asmmacro.h>
16#include <asm/page.h>
17
18/* Work around cpp -rob */
19#define ALLOC #alloc
20#define EXECINSTR #execinstr
/* EX(x,y,a,b): emit one faulting insn "x,y" with an __ex_table entry.
 * On a fault the fixup computes "a, b -> %g3" (the not-yet-copied byte
 * count) and branches to fixupretl, which returns it to the caller.
 */
21#define EX(x,y,a,b) \
2298: x,y; \
23 .section .fixup,ALLOC,EXECINSTR; \
24 .align 4; \
2599: ba fixupretl; \
26 a, b, %g3; \
27 .section __ex_table,ALLOC; \
28 .align 4; \
29 .word 98b, 99b; \
30 .text; \
31 .align 4
32
/* EX2: like EX, but runs one extra set-up insn "c, d, e" in the fixup
 * before computing the remaining-byte count into %g3.
 */
33#define EX2(x,y,c,d,e,a,b) \
3498: x,y; \
35 .section .fixup,ALLOC,EXECINSTR; \
36 .align 4; \
3799: c, d, e; \
38 ba fixupretl; \
39 a, b, %g3; \
40 .section __ex_table,ALLOC; \
41 .align 4; \
42 .word 98b, 99b; \
43 .text; \
44 .align 4
45
/* EXO2: faulting insn whose fixup is the shared label 97 (the whole
 * remaining count is still in %o2 at that point).
 */
46#define EXO2(x,y) \
4798: x, y; \
48 .section __ex_table,ALLOC; \
49 .align 4; \
50 .word 98b, 97f; \
51 .text; \
52 .align 4
53
/* EXT(start,end,handler): range entry - a fault anywhere in [start,end)
 * goes to 'handler'; the trap code passes the insn offset in %g2 (see
 * the "(broken_insn - first_insn)>>2" note ahead of the 50: handler).
 */
54#define EXT(start,end,handler) \
55 .section __ex_table,ALLOC; \
56 .align 4; \
57 .word start, 0, end, handler; \
58 .text; \
59 .align 4
60
61/* Please do not change following macros unless you change logic used
62 * in .fixup at the end of this file as well
63 */
64
65/* Both these macros have to start with exactly the same insn */
/* MOVE_BIGCHUNK: copy 32 bytes - four 8-byte loads, eight 4-byte
 * stores (dst may be only word-aligned).  12 insns; the fixup handlers
 * decode fault progress from that insn count.
 */
66#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
67 ldd [%src + (offset) + 0x00], %t0; \
68 ldd [%src + (offset) + 0x08], %t2; \
69 ldd [%src + (offset) + 0x10], %t4; \
70 ldd [%src + (offset) + 0x18], %t6; \
71 st %t0, [%dst + (offset) + 0x00]; \
72 st %t1, [%dst + (offset) + 0x04]; \
73 st %t2, [%dst + (offset) + 0x08]; \
74 st %t3, [%dst + (offset) + 0x0c]; \
75 st %t4, [%dst + (offset) + 0x10]; \
76 st %t5, [%dst + (offset) + 0x14]; \
77 st %t6, [%dst + (offset) + 0x18]; \
78 st %t7, [%dst + (offset) + 0x1c];
79
/* MOVE_BIGALIGNCHUNK: 32 bytes with doubleword stores - used when both
 * src and dst are 8-byte aligned.  8 insns per chunk.
 */
80#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
81 ldd [%src + (offset) + 0x00], %t0; \
82 ldd [%src + (offset) + 0x08], %t2; \
83 ldd [%src + (offset) + 0x10], %t4; \
84 ldd [%src + (offset) + 0x18], %t6; \
85 std %t0, [%dst + (offset) + 0x00]; \
86 std %t2, [%dst + (offset) + 0x08]; \
87 std %t4, [%dst + (offset) + 0x10]; \
88 std %t6, [%dst + (offset) + 0x18];
89
/* MOVE_LASTCHUNK: 16 bytes at descending offsets; the jump table below
 * enters partway through a run of these.  6 insns per entry.
 */
90#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
91 ldd [%src - (offset) - 0x10], %t0; \
92 ldd [%src - (offset) - 0x08], %t2; \
93 st %t0, [%dst - (offset) - 0x10]; \
94 st %t1, [%dst - (offset) - 0x0c]; \
95 st %t2, [%dst - (offset) - 0x08]; \
96 st %t3, [%dst - (offset) - 0x04];
97
/* MOVE_HALFCHUNK: 8 bytes as four halfword load/store pairs. */
98#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
99 lduh [%src + (offset) + 0x00], %t0; \
100 lduh [%src + (offset) + 0x02], %t1; \
101 lduh [%src + (offset) + 0x04], %t2; \
102 lduh [%src + (offset) + 0x06], %t3; \
103 sth %t0, [%dst + (offset) + 0x00]; \
104 sth %t1, [%dst + (offset) + 0x02]; \
105 sth %t2, [%dst + (offset) + 0x04]; \
106 sth %t3, [%dst + (offset) + 0x06];
107
/* MOVE_SHORTCHUNK: 2 bytes, byte at a time (no alignment assumed). */
108#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
109 ldub [%src - (offset) - 0x02], %t0; \
110 ldub [%src - (offset) - 0x01], %t1; \
111 stb %t0, [%dst - (offset) - 0x02]; \
112 stb %t1, [%dst - (offset) - 0x01];
114 .text
115 .align 4
116
117 .globl __copy_user_begin
118__copy_user_begin:
119
120 .globl __copy_user
/* dword_align: entered when src (%o1) is not word aligned but src and
 * dst share alignment mod 4; copies 1-3 leading bytes, then rejoins
 * the word-aligned path at 3:.
 */
121dword_align:
122 andcc %o1, 1, %g0
123 be 4f
124 andcc %o1, 2, %g0
125
126 EXO2(ldub [%o1], %g2)
127 add %o1, 1, %o1
128 EXO2(stb %g2, [%o0])
129 sub %o2, 1, %o2
130 bne 3f
131 add %o0, 1, %o0
132
133 EXO2(lduh [%o1], %g2)
134 add %o1, 2, %o1
135 EXO2(sth %g2, [%o0])
136 sub %o2, 2, %o2
137 b 3f
138 add %o0, 2, %o0
1394:
140 EXO2(lduh [%o1], %g2)
141 add %o1, 2, %o1
142 EXO2(sth %g2, [%o0])
143 sub %o2, 2, %o2
144 b 3f
145 add %o0, 2, %o0
146
/* Main entry.  Fast path requires (dst ^ src) word-congruent; otherwise
 * fall to cannot_optimize.  Returns 0 on success, else bytes not copied.
 */
147__copy_user: /* %o0=dst %o1=src %o2=len */
148 xor %o0, %o1, %o4
1491:
150 andcc %o4, 3, %o5
1512:
152 bne cannot_optimize
153 cmp %o2, 15
154
155 bleu short_aligned_end
156 andcc %o1, 3, %g0
157
158 bne dword_align
1593:
160 andcc %o1, 4, %g0
161
162 be 2f
163 mov %o2, %g1
164
165 EXO2(ld [%o1], %o4)
166 sub %g1, 4, %g1
167 EXO2(st %o4, [%o0])
168 add %o1, 4, %o1
169 add %o0, 4, %o0
1702:
/* %g7 = count of 128-byte chunks; if dst is 8-byte aligned, jump into
 * the std-based loop (ldd_std + 4 skips its first, identical insn).
 */
171 andcc %g1, 0xffffff80, %g7
172 be 3f
173 andcc %o0, 4, %g0
174
175 be ldd_std + 4
1765:
177 MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
178 MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
179 MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
180 MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
18180:
182 EXT(5b, 80b, 50f)
183 subcc %g7, 128, %g7
184 add %o1, 128, %o1
185 bne 5b
186 add %o0, 128, %o0
1873:
/* Tail: jump backwards into copy_user_table so exactly (%g1 & 0x70)/16
 * MOVE_LASTCHUNK entries run; each entry is 6 insns = 24 bytes, hence
 * the offset computation %o4 = %g7 + %g7/2 (i.e. %g7 * 1.5).
 */
188 andcc %g1, 0x70, %g7
189 be copy_user_table_end
190 andcc %g1, 8, %g0
191
192 sethi %hi(copy_user_table_end), %o5
193 srl %g7, 1, %o4
194 add %g7, %o4, %o4
195 add %o1, %g7, %o1
196 sub %o5, %o4, %o5
197 jmpl %o5 + %lo(copy_user_table_end), %g0
198 add %o0, %g7, %o0
199
200copy_user_table:
201 MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
202 MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
203 MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
204 MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
205 MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
206 MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
207 MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
208copy_user_table_end:
209 EXT(copy_user_table, copy_user_table_end, 51f)
210 be copy_user_last7
211 andcc %g1, 4, %g0
212
/* Final 8/4/2/1-byte stragglers; the EX fixup args give the bytes left. */
213 EX(ldd [%o1], %g2, and %g1, 0xf)
214 add %o0, 8, %o0
215 add %o1, 8, %o1
216 EX(st %g2, [%o0 - 0x08], and %g1, 0xf)
217 EX2(st %g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4)
218copy_user_last7:
219 be 1f
220 andcc %g1, 2, %g0
221
222 EX(ld [%o1], %g2, and %g1, 7)
223 add %o1, 4, %o1
224 EX(st %g2, [%o0], and %g1, 7)
225 add %o0, 4, %o0
2261:
227 be 1f
228 andcc %g1, 1, %g0
229
230 EX(lduh [%o1], %g2, and %g1, 3)
231 add %o1, 2, %o1
232 EX(sth %g2, [%o0], and %g1, 3)
233 add %o0, 2, %o0
2341:
235 be 1f
236 nop
237
238 EX(ldub [%o1], %g2, add %g0, 1)
239 EX(stb %g2, [%o0], add %g0, 1)
2401:
241 retl
242 clr %o0
243
/* ldd_std: 128-byte loop using doubleword stores, for the 8-byte
 * aligned dst case; shares the copy_user_table tail with the main path.
 */
244ldd_std:
245 MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
246 MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
247 MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
248 MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
24981:
250 EXT(ldd_std, 81b, 52f)
251 subcc %g7, 128, %g7
252 add %o1, 128, %o1
253 bne ldd_std
254 add %o0, 128, %o0
255
256 andcc %g1, 0x70, %g7
257 be copy_user_table_end
258 andcc %g1, 8, %g0
259
260 sethi %hi(copy_user_table_end), %o5
261 srl %g7, 1, %o4
262 add %g7, %o4, %o4
263 add %o1, %g7, %o1
264 sub %o5, %o4, %o5
265 jmpl %o5 + %lo(copy_user_table_end), %g0
266 add %o0, %g7, %o0
267
/* cannot_optimize: src/dst differ in alignment mod 4.  %o5 holds that
 * difference; == 2 allows halfword copies, otherwise bytes only.
 */
268cannot_optimize:
269 bleu short_end
270 cmp %o5, 2
271
272 bne byte_chunk
273 and %o2, 0xfffffff0, %o3
274
275 andcc %o1, 1, %g0
276 be 10f
277 nop
278
279 EXO2(ldub [%o1], %g2)
280 add %o1, 1, %o1
281 EXO2(stb %g2, [%o0])
282 sub %o2, 1, %o2
283 andcc %o2, 0xfffffff0, %o3
284 be short_end
285 add %o0, 1, %o0
28610:
287 MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
288 MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
28982:
290 EXT(10b, 82b, 53f)
291 subcc %o3, 0x10, %o3
292 add %o1, 0x10, %o1
293 bne 10b
294 add %o0, 0x10, %o0
295 b 2f
296 and %o2, 0xe, %o3
297
/* byte_chunk: 16 bytes per iteration, byte at a time; note the negative
 * offsets cancel against MOVE_SHORTCHUNK's internal -0x02/-0x01.
 */
298byte_chunk:
299 MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
300 MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
301 MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
302 MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
303 MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
304 MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
305 MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
306 MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
30783:
308 EXT(byte_chunk, 83b, 54f)
309 subcc %o3, 0x10, %o3
310 add %o1, 0x10, %o1
311 bne byte_chunk
312 add %o0, 0x10, %o0
313
/* short_end: at most 15 bytes left; computed jump into the
 * MOVE_SHORTCHUNK table (each entry 4 insns = 16 bytes, scale %o3*8).
 */
314short_end:
315 and %o2, 0xe, %o3
3162:
317 sethi %hi(short_table_end), %o5
318 sll %o3, 3, %o4
319 add %o0, %o3, %o0
320 sub %o5, %o4, %o5
321 add %o1, %o3, %o1
322 jmpl %o5 + %lo(short_table_end), %g0
323 andcc %o2, 1, %g0
32484:
325 MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
326 MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
327 MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
328 MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
329 MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
330 MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
331 MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
332short_table_end:
333 EXT(84b, short_table_end, 55f)
334 be 1f
335 nop
336 EX(ldub [%o1], %g2, add %g0, 1)
337 EX(stb %g2, [%o0], add %g0, 1)
3381:
339 retl
340 clr %o0
341
/* short_aligned_end: <= 15 bytes with compatible alignment; move up to
 * 8+4 bytes as words, then share copy_user_last7 for the rest.
 */
342short_aligned_end:
343 bne short_end
344 andcc %o2, 8, %g0
345
346 be 1f
347 andcc %o2, 4, %g0
348
349 EXO2(ld [%o1 + 0x00], %g2)
350 EXO2(ld [%o1 + 0x04], %g3)
351 add %o1, 8, %o1
352 EXO2(st %g2, [%o0 + 0x00])
353 EX(st %g3, [%o0 + 0x04], sub %o2, 4)
354 add %o0, 8, %o0
3551:
356 b copy_user_last7
357 mov %o2, %g1
358
359 .section .fixup,#alloc,#execinstr
360 .align 4
36197:
362 mov %o2, %g3
/* fixupretl: common fixup exit.  %g3 = bytes not copied.  If dst looks
 * like a kernel address and src like a user one (i.e. copy_from_user,
 * presumably - PAGE_OFFSET is the user/kernel boundary), bzero the
 * unwritten tail of the kernel buffer before returning %g3.
 */
363fixupretl:
364 sethi %hi(PAGE_OFFSET), %g1
365 cmp %o0, %g1
366 blu 1f
367 cmp %o1, %g1
368 bgeu 1f
369 nop
370 save %sp, -64, %sp
371 mov %i0, %o0
372 call __bzero
373 mov %g3, %o1
374 restore
3751: retl
376 mov %g3, %o0
377
/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
37950:
/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
 * happens. This is derived from the amount ldd reads, st stores, etc.
 * x = g2 % 12;
 * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
 * o0 += (g2 / 12) * 32;
 */
386 cmp %g2, 12
387 add %o0, %g7, %o0
388 bcs 1f
389 cmp %g2, 24
390 bcs 2f
391 cmp %g2, 36
392 bcs 3f
393 nop
394 sub %g2, 12, %g2
395 sub %g7, 32, %g7
3963: sub %g2, 12, %g2
397 sub %g7, 32, %g7
3982: sub %g2, 12, %g2
399 sub %g7, 32, %g7
4001: cmp %g2, 4
401 bcs,a 60f
402 clr %g2
403 sub %g2, 4, %g2
404 sll %g2, 2, %g2
40560: and %g1, 0x7f, %g3
406 sub %o0, %g7, %o0
407 add %g3, %g7, %g3
408 ba fixupretl
409 sub %g3, %g2, %g3
41051:
/* Fault inside copy_user_table (entered mid-table, hence the 41-%g2
 * back-count):
 * i = 41 - g2; j = i % 6;
 * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
 * o0 -= (i / 6) * 16 + 16;
 */
415 neg %g2
416 and %g1, 0xf, %g1
417 add %g2, 41, %g2
418 add %o0, %g1, %o0
4191: cmp %g2, 6
420 bcs,a 2f
421 cmp %g2, 4
422 add %g1, 16, %g1
423 b 1b
424 sub %g2, 6, %g2
4252: bcc,a 2f
426 mov 16, %g2
427 inc %g2
428 sll %g2, 2, %g2
4292: add %g1, %g2, %g3
430 ba fixupretl
431 sub %o0, %g3, %o0
43252:
/* Fault in MOVE_BIGALIGNCHUNK (8 insns per 32 bytes):
 * g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
 * o0 += (g2 / 8) * 32 */
435 andn %g2, 7, %g4
436 add %o0, %g7, %o0
437 andcc %g2, 4, %g0
438 and %g2, 3, %g2
439 sll %g4, 2, %g4
440 sll %g2, 3, %g2
441 bne 60b
442 sub %g7, %g4, %g7
443 ba 60b
444 clr %g2
44553:
/* Fault in the MOVE_HALFCHUNK loop:
 * g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
 * o0 += (g2 & 8) */
448 and %g2, 3, %g4
449 andcc %g2, 4, %g0
450 and %g2, 8, %g2
451 sll %g4, 1, %g4
452 be 1f
453 add %o0, %g2, %o0
454 add %g2, %g4, %g2
4551: and %o2, 0xf, %g3
456 add %g3, %o3, %g3
457 ba fixupretl
458 sub %g3, %g2, %g3
45954:
/* Fault in the byte_chunk loop:
 * g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
 * o0 += (g2 / 4) * 2 */
462 srl %g2, 2, %o4
463 and %g2, 1, %o5
464 srl %g2, 1, %g2
465 add %o4, %o4, %o4
466 and %o5, %g2, %o5
467 and %o2, 0xf, %o2
468 add %o0, %o4, %o0
469 sub %o3, %o5, %o3
470 sub %o2, %o4, %o2
471 ba fixupretl
472 add %o2, %o3, %g3
47355:
/* Fault in the short table (entered mid-table; 27-%g2 back-count):
 * i = 27 - g2;
 * g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
 * o0 -= i / 4 * 2 + 1 */
477 neg %g2
478 and %o2, 1, %o2
479 add %g2, 27, %g2
480 srl %g2, 2, %o5
481 andcc %g2, 3, %g0
482 mov 1, %g2
483 add %o5, %o5, %o5
484 be,a 1f
485 clr %g2
4861: add %g2, %o5, %g3
487 sub %o0, %g3, %o0
488 ba fixupretl
489 add %g3, %o2, %g3
490
491 .globl __copy_user_end
492__copy_user_end:
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
new file mode 100644
index 000000000000..fb182352782c
--- /dev/null
+++ b/arch/sparc/lib/debuglocks.c
@@ -0,0 +1,202 @@
1/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
2 * debuglocks.c: Debugging versions of SMP locking primitives.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
6 */
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/threads.h> /* For NR_CPUS */
11#include <linux/spinlock.h>
12#include <asm/psr.h>
13#include <asm/system.h>
14
15#ifdef CONFIG_SMP
16
17/* Some notes on how these debugging routines work. When a lock is acquired
18 * an extra debugging member lock->owner_pc is set to the caller of the lock
19 * acquisition routine. Right before releasing a lock, the debugging program
20 * counter is cleared to zero.
21 *
22 * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
23 * number of the owner in the lowest two bits.
24 */
25
/* Capture the caller's PC: %i7 holds the return address of the current
 * (non-leaf) function on SPARC.
 */
26#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
27
/* Print a "stuck spinlock" diagnostic: where we are spinning and the
 * owner's PC/CPU, which are packed together in lock->owner_pc (PC in
 * the upper bits, CPU number in the low two bits).
 */
28static inline void show(char *str, spinlock_t *lock, unsigned long caller)
29{
30 int cpu = smp_processor_id();
31
32 printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
33 lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
34}
35
/* Same diagnostic as show(), but for a reader stuck on an rwlock. */
36static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
37{
38 int cpu = smp_processor_id();
39
40 printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
41 lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
42}
43
/* Diagnostic for a stuck writer: also dump every CPU's recorded reader
 * PC so the culprit holding the read side can be identified.
 */
44static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
45{
46 int cpu = smp_processor_id();
47 int i;
48
49 printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
50 lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
51
52 for(i = 0; i < NR_CPUS; i++)
53 printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
54
55 printk("\n");
56}
57
58#undef INIT_STUCK
59#define INIT_STUCK 100000000
60
/* Debugging spin_lock: test-and-set via ldstub, then spin reading the
 * lock (test-and-test-and-set) until free.  Every INIT_STUCK failed
 * polls, print a diagnostic instead of silently hanging.  On success
 * record caller PC + CPU in owner_pc.
 */
61void _do_spin_lock(spinlock_t *lock, char *str)
62{
63 unsigned long caller;
64 unsigned long val;
65 int cpu = smp_processor_id();
66 int stuck = INIT_STUCK;
67
68 STORE_CALLER(caller);
69
70again:
71 __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
72 if(val) {
73 while(lock->lock) {
74 if (!--stuck) {
75 show(str, lock, caller);
76 stuck = INIT_STUCK;
77 }
78 barrier();
79 }
80 goto again;
81 }
82 lock->owner_pc = (cpu & 3) | (caller & ~3);
83}
84
/* Debugging spin_trylock: single ldstub attempt, no spinning.
 * Returns 1 if the lock was acquired (and records the owner), 0 if it
 * was already held.
 */
85int _spin_trylock(spinlock_t *lock)
86{
87 unsigned long val;
88 unsigned long caller;
89 int cpu = smp_processor_id();
90
91 STORE_CALLER(caller);
92
93 __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
94 if(!val) {
95 /* We got it, record our identity for debugging. */
96 lock->owner_pc = (cpu & 3) | (caller & ~3);
97 }
98 return val == 0;
99}
100
/* Debugging spin_unlock: clear the owner record first, then release.
 * The barrier() keeps the compiler from reordering the stores.
 */
101void _do_spin_unlock(spinlock_t *lock)
102{
103 lock->owner_pc = 0;
104 barrier();
105 lock->lock = 0;
106}
107
/* Debugging read_lock.  The ldstub grabs the byte-sized write lock in
 * the low byte of rw->lock (byte 3; SPARC is big-endian), setting it to
 * 0xff.  The rw->lock++ then both carries that byte back to zero
 * (releasing the wlock) and bumps the reader count held in the upper
 * bits.  Records this CPU's caller PC for show_write() diagnostics.
 */
108void _do_read_lock(rwlock_t *rw, char *str)
109{
110 unsigned long caller;
111 unsigned long val;
112 int cpu = smp_processor_id();
113 int stuck = INIT_STUCK;
114
115 STORE_CALLER(caller);
116
117wlock_again:
118 __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
119 if(val) {
120 while(rw->lock & 0xff) {
121 if (!--stuck) {
122 show_read(str, rw, caller);
123 stuck = INIT_STUCK;
124 }
125 barrier();
126 }
127 goto wlock_again;
128 }
129
130 rw->reader_pc[cpu] = caller;
131 barrier();
132 rw->lock++;
133}
134
/* Debugging read_unlock.  Grabs the low-byte wlock with ldstub (0xff),
 * then the single "-= 0x1ff" both clears that byte (-0xff) and drops
 * one reader from the upper-bit count (-0x100) - mirroring the
 * "sub 0x1ff" in the locks.S fast path.  (The wlock_again label name is
 * inherited from the write-side template.)
 */
135void _do_read_unlock(rwlock_t *rw, char *str)
136{
137 unsigned long caller;
138 unsigned long val;
139 int cpu = smp_processor_id();
140 int stuck = INIT_STUCK;
141
142 STORE_CALLER(caller);
143
144wlock_again:
145 __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
146 if(val) {
147 while(rw->lock & 0xff) {
148 if (!--stuck) {
149 show_read(str, rw, caller);
150 stuck = INIT_STUCK;
151 }
152 barrier();
153 }
154 goto wlock_again;
155 }
156
157 rw->reader_pc[cpu] = 0;
158 barrier();
159 rw->lock -= 0x1ff;
160}
161
/* Debugging write_lock.  First acquire the byte-sized wlock via ldstub;
 * then, if readers are still present (bits above the low byte), drop
 * the byte lock again and jump back into the wait loop (the goto
 * wlock_wait enters the spin-until-zero loop) before retrying.  A
 * writer holds the lock when the wlock byte is set and no readers
 * remain.
 */
162void _do_write_lock(rwlock_t *rw, char *str)
163{
164 unsigned long caller;
165 unsigned long val;
166 int cpu = smp_processor_id();
167 int stuck = INIT_STUCK;
168
169 STORE_CALLER(caller);
170
171wlock_again:
172 __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
173 if(val) {
174wlock_wait:
175 while(rw->lock) {
176 if (!--stuck) {
177 show_write(str, rw, caller);
178 stuck = INIT_STUCK;
179 }
180 barrier();
181 }
182 goto wlock_again;
183 }
184
185 if (rw->lock & ~0xff) {
186 *(((unsigned char *)&rw->lock)+3) = 0;
187 barrier();
188 goto wlock_wait;
189 }
190
191 barrier();
192 rw->owner_pc = (cpu & 3) | (caller & ~3);
193}
194
/* Debugging write_unlock: clear the owner record, then release the
 * whole lock word (a writer owns all of it).
 */
195void _do_write_unlock(rwlock_t *rw)
196{
197 rw->owner_pc = 0;
198 barrier();
199 rw->lock = 0;
200}
201
202#endif /* SMP */
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
new file mode 100644
index 000000000000..681b3683da9e
--- /dev/null
+++ b/arch/sparc/lib/divdi3.S
@@ -0,0 +1,295 @@
1/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
2
3This file is part of GNU CC.
4
5GNU CC is free software; you can redistribute it and/or modify
6it under the terms of the GNU General Public License as published by
7the Free Software Foundation; either version 2, or (at your option)
8any later version.
9
10GNU CC is distributed in the hope that it will be useful,
11but WITHOUT ANY WARRANTY; without even the implied warranty of
12MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13GNU General Public License for more details.
14
15You should have received a copy of the GNU General Public License
16along with GNU CC; see the file COPYING. If not, write to
17the Free Software Foundation, 59 Temple Place - Suite 330,
18Boston, MA 02111-1307, USA. */
19
20 .data
21 .align 8
/* __clz_tab[k] = position of the highest set bit of k, 1-based
 * (0 for k == 0); used below to find the divisor's bit length when
 * normalizing before the shift-and-subtract division loops.
 */
22 .globl __clz_tab
23__clz_tab:
24 .byte 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
25 .byte 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6
26 .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
27 .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
28 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
29 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
30 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
31 .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
32 .size __clz_tab,256
33 .global .udiv
35 .text
36 .align 4
37 .globl __divdi3
/* __divdi3: signed 64-bit divide (compiler support routine).
 * Dividend in %i0:%i1 (hi:lo), divisor in %i2:%i3; quotient returned
 * in %i0:%i1.  Both operands are made non-negative first; %l4 records
 * whether the result must be negated at the end (.LL81).  The repeated
 * "1:/2:/3:/4:/5:" sequences are 32-step restoring shift-and-subtract
 * division loops (quotient bits collected inverted, fixed by the
 * trailing xnor).
 */
38__divdi3:
39 save %sp,-104,%sp
40 cmp %i0,0
41 bge .LL40
42 mov 0,%l4
43 mov -1,%l4
44 sub %g0,%i1,%o0
45 mov %o0,%o5
46 subcc %g0,%o0,%g0
47 sub %g0,%i0,%o0
48 subx %o0,0,%o4
49 mov %o4,%i0
50 mov %o5,%i1
51.LL40:
52 cmp %i2,0
53 bge .LL84
54 mov %i3,%o4
55 xnor %g0,%l4,%l4
56 sub %g0,%i3,%o0
57 mov %o0,%o3
58 subcc %g0,%o0,%g0
59 sub %g0,%i2,%o0
60 subx %o0,0,%o2
61 mov %o2,%i2
62 mov %o3,%i3
63 mov %i3,%o4
/* Divisor fits in 32 bits: one or two 32-bit division loops suffice. */
64.LL84:
65 cmp %i2,0
66 bne .LL45
67 mov %i1,%i3
68 cmp %o4,%i0
69 bleu .LL46
70 mov %i3,%o1
71 mov 32,%g1
72 subcc %i0,%o4,%g0
731: bcs 5f
74 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
75 sub %i0,%o4,%i0 ! this kills msb of n
76 addx %i0,%i0,%i0 ! so this cannot give carry
77 subcc %g1,1,%g1
782: bne 1b
79 subcc %i0,%o4,%g0
80 bcs 3f
81 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
82 b 3f
83 sub %i0,%o4,%i0 ! this kills msb of n
844: sub %i0,%o4,%i0
855: addxcc %i0,%i0,%i0
86 bcc 2b
87 subcc %g1,1,%g1
88! Got carry from n. Subtract next step to cancel this carry.
89 bne 4b
90 addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
91 sub %i0,%o4,%i0
923: xnor %o1,0,%o1
93 b .LL50
94 mov 0,%o2
95.LL46:
96 cmp %o4,0
97 bne .LL85
98 mov %i0,%o2
/* Divisor is zero: force a divide-by-zero via 1/0 in .udiv so the
 * usual trap semantics apply.
 */
99 mov 1,%o0
100 call .udiv,0
101 mov 0,%o1
102 mov %o0,%o4
103 mov %i0,%o2
104.LL85:
105 mov 0,%g3
106 mov 32,%g1
107 subcc %g3,%o4,%g0
1081: bcs 5f
109 addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
110 sub %g3,%o4,%g3 ! this kills msb of n
111 addx %g3,%g3,%g3 ! so this cannot give carry
112 subcc %g1,1,%g1
1132: bne 1b
114 subcc %g3,%o4,%g0
115 bcs 3f
116 addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
117 b 3f
118 sub %g3,%o4,%g3 ! this kills msb of n
1194: sub %g3,%o4,%g3
1205: addxcc %g3,%g3,%g3
121 bcc 2b
122 subcc %g1,1,%g1
123! Got carry from n. Subtract next step to cancel this carry.
124 bne 4b
125 addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb
126 sub %g3,%o4,%g3
1273: xnor %o2,0,%o2
128 mov %g3,%i0
129 mov %i3,%o1
130 mov 32,%g1
131 subcc %i0,%o4,%g0
1321: bcs 5f
133 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
134 sub %i0,%o4,%i0 ! this kills msb of n
135 addx %i0,%i0,%i0 ! so this cannot give carry
136 subcc %g1,1,%g1
1372: bne 1b
138 subcc %i0,%o4,%g0
139 bcs 3f
140 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
141 b 3f
142 sub %i0,%o4,%i0 ! this kills msb of n
1434: sub %i0,%o4,%i0
1445: addxcc %i0,%i0,%i0
145 bcc 2b
146 subcc %g1,1,%g1
147! Got carry from n. Subtract next step to cancel this carry.
148 bne 4b
149 addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
150 sub %i0,%o4,%i0
1513: xnor %o1,0,%o1
152 b .LL86
153 mov %o1,%l1
/* .LL45: divisor does not fit in 32 bits.  Normalize both operands
 * left by the divisor's leading-zero count (found via __clz_tab), run
 * one 32-step division loop, then correct the single-bit quotient
 * estimate with a mulscc-based 32x32 multiply check.
 */
154.LL45:
155 cmp %i2,%i0
156 bleu .LL51
157 sethi %hi(65535),%o0
158 b .LL78
159 mov 0,%o1
160.LL51:
161 or %o0,%lo(65535),%o0
162 cmp %i2,%o0
163 bgu .LL58
164 mov %i2,%o1
165 cmp %i2,256
166 addx %g0,-1,%o0
167 b .LL64
168 and %o0,8,%o2
169.LL58:
170 sethi %hi(16777215),%o0
171 or %o0,%lo(16777215),%o0
172 cmp %i2,%o0
173 bgu .LL64
174 mov 24,%o2
175 mov 16,%o2
176.LL64:
177 srl %o1,%o2,%o0
178 sethi %hi(__clz_tab),%o1
179 or %o1,%lo(__clz_tab),%o1
180 ldub [%o0+%o1],%o0
181 add %o0,%o2,%o0
182 mov 32,%o1
183 subcc %o1,%o0,%o3
184 bne,a .LL72
185 sub %o1,%o3,%o1
186 cmp %i0,%i2
187 bgu .LL74
188 cmp %i3,%o4
189 blu .LL78
190 mov 0,%o1
191.LL74:
192 b .LL78
193 mov 1,%o1
194.LL72:
195 sll %i2,%o3,%o2
196 srl %o4,%o1,%o0
197 or %o2,%o0,%i2
198 sll %o4,%o3,%o4
199 srl %i0,%o1,%o2
200 sll %i0,%o3,%o0
201 srl %i3,%o1,%o1
202 or %o0,%o1,%i0
203 sll %i3,%o3,%i3
204 mov %i0,%o1
205 mov 32,%g1
206 subcc %o2,%i2,%g0
2071: bcs 5f
208 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
209 sub %o2,%i2,%o2 ! this kills msb of n
210 addx %o2,%o2,%o2 ! so this cannot give carry
211 subcc %g1,1,%g1
2122: bne 1b
213 subcc %o2,%i2,%g0
214 bcs 3f
215 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
216 b 3f
217 sub %o2,%i2,%o2 ! this kills msb of n
2184: sub %o2,%i2,%o2
2195: addxcc %o2,%o2,%o2
220 bcc 2b
221 subcc %g1,1,%g1
222! Got carry from n. Subtract next step to cancel this carry.
223 bne 4b
224 addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
225 sub %o2,%i2,%o2
2263: xnor %o1,0,%o1
227 mov %o2,%i0
/* 32x32 signed-corrected multiply of quotient estimate * low divisor
 * word via the V8 mulscc step instruction (result in %g1:%y).
 */
228 wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
229 sra %o4,31,%g2 ! Do not move this insn
230 and %o1,%g2,%g2 ! Do not move this insn
231 andcc %g0,0,%g1 ! Do not move this insn
232 mulscc %g1,%o4,%g1
233 mulscc %g1,%o4,%g1
234 mulscc %g1,%o4,%g1
235 mulscc %g1,%o4,%g1
236 mulscc %g1,%o4,%g1
237 mulscc %g1,%o4,%g1
238 mulscc %g1,%o4,%g1
239 mulscc %g1,%o4,%g1
240 mulscc %g1,%o4,%g1
241 mulscc %g1,%o4,%g1
242 mulscc %g1,%o4,%g1
243 mulscc %g1,%o4,%g1
244 mulscc %g1,%o4,%g1
245 mulscc %g1,%o4,%g1
246 mulscc %g1,%o4,%g1
247 mulscc %g1,%o4,%g1
248 mulscc %g1,%o4,%g1
249 mulscc %g1,%o4,%g1
250 mulscc %g1,%o4,%g1
251 mulscc %g1,%o4,%g1
252 mulscc %g1,%o4,%g1
253 mulscc %g1,%o4,%g1
254 mulscc %g1,%o4,%g1
255 mulscc %g1,%o4,%g1
256 mulscc %g1,%o4,%g1
257 mulscc %g1,%o4,%g1
258 mulscc %g1,%o4,%g1
259 mulscc %g1,%o4,%g1
260 mulscc %g1,%o4,%g1
261 mulscc %g1,%o4,%g1
262 mulscc %g1,%o4,%g1
263 mulscc %g1,%o4,%g1
264 mulscc %g1,0,%g1
265 add %g1,%g2,%o0
266 rd %y,%o2
267 cmp %o0,%i0
268 bgu,a .LL78
269 add %o1,-1,%o1
270 bne,a .LL50
271 mov 0,%o2
272 cmp %o2,%i3
273 bleu .LL50
274 mov 0,%o2
275 add %o1,-1,%o1
276.LL78:
277 mov 0,%o2
278.LL50:
279 mov %o1,%l1
/* .LL86: assemble quotient in %i0:%i1; negate if %l4 says the input
 * signs differed.
 */
280.LL86:
281 mov %o2,%l0
282 mov %l0,%i0
283 mov %l1,%i1
284 cmp %l4,0
285 be .LL81
286 sub %g0,%i1,%o0
287 mov %o0,%l3
288 subcc %g0,%o0,%g0
289 sub %g0,%i0,%o0
290 subx %o0,0,%l2
291 mov %l2,%i0
292 mov %l3,%i1
293.LL81:
294 ret
295 restore
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
new file mode 100644
index 000000000000..95fa48424967
--- /dev/null
+++ b/arch/sparc/lib/locks.S
@@ -0,0 +1,72 @@
1/* $Id: locks.S,v 1.16 2000/02/26 11:02:47 anton Exp $
2 * locks.S: SMP low-level lock primitives on Sparc.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/ptrace.h>
10#include <asm/psr.h>
11#include <asm/smp.h>
12#include <asm/spinlock.h>
13
14 .text
15 .align 4
16
17 /* Read/writer locks, as usual this is overly clever to make it
18 * as fast as possible.
19 */
20
/* Lock word layout (see also debuglocks.c): low byte (offset 3,
 * big-endian LSB) is a byte-sized write lock set via ldstub; the upper
 * bits count readers.  Contract (non-standard calling convention,
 * presumably set up by the callers): %g1 = lock address, %g4 = original
 * caller's return address, restored into %o7 in the retl delay slot.
 */
21 /* caches... */
22___rw_read_enter_spin_on_wlock:
23 orcc %g2, 0x0, %g0
24 be,a ___rw_read_enter
25 ldstub [%g1 + 3], %g2
26 b ___rw_read_enter_spin_on_wlock
27 ldub [%g1 + 3], %g2
28___rw_read_exit_spin_on_wlock:
29 orcc %g2, 0x0, %g0
30 be,a ___rw_read_exit
31 ldstub [%g1 + 3], %g2
32 b ___rw_read_exit_spin_on_wlock
33 ldub [%g1 + 3], %g2
34___rw_write_enter_spin_on_wlock:
35 orcc %g2, 0x0, %g0
36 be,a ___rw_write_enter
37 ldstub [%g1 + 3], %g2
38 b ___rw_write_enter_spin_on_wlock
39 ld [%g1], %g2
40
/* Reader acquire: wlock byte is 0xff from the caller's ldstub; the
 * word-wide +1 carries it to zero and bumps the reader count in one
 * store.
 */
41 .globl ___rw_read_enter
42___rw_read_enter:
43 orcc %g2, 0x0, %g0
44 bne,a ___rw_read_enter_spin_on_wlock
45 ldub [%g1 + 3], %g2
46 ld [%g1], %g2
47 add %g2, 1, %g2
48 st %g2, [%g1]
49 retl
50 mov %g4, %o7
51
/* Reader release: -0x1ff clears the 0xff wlock byte and drops one
 * reader (0x100) in a single store.
 */
52 .globl ___rw_read_exit
53___rw_read_exit:
54 orcc %g2, 0x0, %g0
55 bne,a ___rw_read_exit_spin_on_wlock
56 ldub [%g1 + 3], %g2
57 ld [%g1], %g2
58 sub %g2, 0x1ff, %g2
59 st %g2, [%g1]
60 retl
61 mov %g4, %o7
62
/* Writer acquire: byte lock already held; if readers remain, drop the
 * byte lock (stb in the annulled delay slot) and spin again.
 */
63 .globl ___rw_write_enter
64___rw_write_enter:
65 orcc %g2, 0x0, %g0
66 bne ___rw_write_enter_spin_on_wlock
67 ld [%g1], %g2
68 andncc %g2, 0xff, %g0
69 bne,a ___rw_write_enter_spin_on_wlock
70 stb %g0, [%g1 + 3]
71 retl
72 mov %g4, %o7
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
new file mode 100644
index 000000000000..35abf5b2bd15
--- /dev/null
+++ b/arch/sparc/lib/lshrdi3.S
@@ -0,0 +1,27 @@
1/* $Id: lshrdi3.S,v 1.1 1999/03/21 06:37:45 davem Exp $ */
2
/* __lshrdi3: 64-bit logical shift right (compiler support routine).
 * Value in %o0:%o1 (hi:lo), shift count in %o2; result in %o0:%o1.
 * Three cases: count == 0 (return unchanged), count < 32 (split bits
 * across the words), count >= 32 (hi word shifted into lo, hi = 0).
 */
3 .globl __lshrdi3
4__lshrdi3:
5 cmp %o2, 0
6 be 3f
7 mov 0x20, %g2
8
9 sub %g2, %o2, %g2 ! %g2 = 32 - count
10 cmp %g2, 0
11 bg 1f
12 srl %o0, %o2, %o4
13
14 clr %o4 ! count >= 32: new hi is 0
15 neg %g2
16 b 2f
17 srl %o0, %g2, %o5 ! new lo = hi >> (count - 32)
181:
19 sll %o0, %g2, %g3 ! count < 32: hi bits entering lo
20 srl %o1, %o2, %g2
21 or %g2, %g3, %o5
222:
23 mov %o4, %o0
24 mov %o5, %o1
253:
26 retl
27 nop
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
new file mode 100644
index 000000000000..cb4bdb0cc2af
--- /dev/null
+++ b/arch/sparc/lib/memcmp.S
@@ -0,0 +1,312 @@
1 .text
2 .align 4
3 .global __memcmp, memcmp
/* memcmp(%o0, %o1, %o2): the "#if 1" arm is a simple byte-at-a-time
 * compare.  %g3 carries the last byte difference; L3 sign-extends its
 * low byte (sll/sra by 24) so the return has the proper sign.  Returns
 * 0 for count <= 0 or equal buffers.
 */
4__memcmp:
5memcmp:
6#if 1
7 cmp %o2, 0
8 ble L3
9 mov 0, %g3
10L5:
11 ldub [%o0], %g2
12 ldub [%o1], %g3
13 sub %g2, %g3, %g2
14 mov %g2, %g3
15 sll %g2, 24, %g2
16
17 cmp %g2, 0
18 bne L3
19 add %o0, 1, %o0
20
21 add %o2, -1, %o2
22
23 cmp %o2, 0
24 bg L5
25 add %o1, 1, %o1
L3:
27 sll %g3, 24, %o0
28 sra %o0, 24, %o0
29
30 retl
31 nop
32#else
/* Disabled alternative: word-at-a-time compare (compiler-generated
 * style).  Aligns both pointers, then compares 16 bytes per unrolled
 * iteration, falling back to byte compares for the tail (L72/L165).
 * L163 converts the first unequal word pair to -1/+1 via unsigned
 * compare.
 */
33 save %sp, -104, %sp
34 mov %i2, %o4
35 mov %i0, %o0
36
37 cmp %o4, 15
38 ble L72
39 mov %i1, %i2
40
41 andcc %i2, 3, %g0
42 be L161
43 andcc %o0, 3, %g2
44L75:
45 ldub [%o0], %g3
46 ldub [%i2], %g2
47 add %o0,1, %o0
48
49 subcc %g3, %g2, %i0
50 bne L156
51 add %i2, 1, %i2
52
53 andcc %i2, 3, %g0
54 bne L75
55 add %o4, -1, %o4
56
57 andcc %o0, 3, %g2
58L161:
59 bne,a L78
60 mov %i2, %i1
61
62 mov %o0, %i5
63 mov %i2, %i3
64 srl %o4, 2, %i4
65
66 cmp %i4, 0
67 bge L93
68 mov %i4, %g2
69
70 add %i4, 3, %g2
71L93:
72 sra %g2, 2, %g2
73 sll %g2, 2, %g2
74 sub %i4, %g2, %g2
75
76 cmp %g2, 1
77 be,a L88
78 add %o0, 4, %i5
79
80 bg L94
81 cmp %g2, 2
82
83 cmp %g2, 0
84 be,a L86
85 ld [%o0], %g3
86
87 b L162
88 ld [%i5], %g3
89L94:
90 be L81
91 cmp %g2, 3
92
93 be,a L83
94 add %o0, -4, %i5
95
96 b L162
97 ld [%i5], %g3
98L81:
99 add %o0, -8, %i5
100 ld [%o0], %g3
101 add %i2, -8, %i3
102 ld [%i2], %g2
103
104 b L82
105 add %i4, 2, %i4
106L83:
107 ld [%o0], %g4
108 add %i2, -4, %i3
109 ld [%i2], %g1
110
111 b L84
112 add %i4, 1, %i4
113L86:
114 b L87
115 ld [%i2], %g2
116L88:
117 add %i2, 4, %i3
118 ld [%o0], %g4
119 add %i4, -1, %i4
120 ld [%i2], %g1
121L95:
122 ld [%i5], %g3
123L162:
124 cmp %g4, %g1
125 be L87
126 ld [%i3], %g2
127
128 cmp %g4, %g1
129L163:
130 bleu L114
131 mov -1, %i0
132
133 b L114
134 mov 1, %i0
135L87:
136 ld [%i5 + 4], %g4
137 cmp %g3, %g2
138 bne L163
139 ld [%i3 + 4], %g1
140L84:
141 ld [%i5 + 8], %g3
142
143 cmp %g4, %g1
144 bne L163
145 ld [%i3 + 8], %g2
146L82:
147 ld [%i5 + 12], %g4
148 cmp %g3, %g2
149 bne L163
150 ld [%i3 + 12], %g1
151
152 add %i5, 16, %i5
153
154 addcc %i4, -4, %i4
155 bne L95
156 add %i3, 16, %i3
157
158 cmp %g4, %g1
159 bne L163
160 nop
161
162 b L114
163 mov 0, %i0
/* L78: pointers misaligned relative to each other - read src words
 * aligned and reassemble dst-shaped words with sll/srl pairs (%o7 and
 * %o1 hold the two shift amounts) before comparing.
 */
164L78:
165 srl %o4, 2, %i0
166 and %o0, -4, %i3
167 orcc %i0, %g0, %g3
168 sll %g2, 3, %o7
169 mov 32, %g2
170
171 bge L129
172 sub %g2, %o7, %o1
173
174 add %i0, 3, %g3
175L129:
176 sra %g3, 2, %g2
177 sll %g2, 2, %g2
178 sub %i0, %g2, %g2
179
180 cmp %g2, 1
181 be,a L124
182 ld [%i3], %o3
183
184 bg L130
185 cmp %g2, 2
186
187 cmp %g2, 0
188 be,a L122
189 ld [%i3], %o2
190
191 b L164
192 sll %o3, %o7, %g3
193L130:
194 be L117
195 cmp %g2, 3
196
197 be,a L119
198 ld [%i3], %g1
199
200 b L164
201 sll %o3, %o7, %g3
202L117:
203 ld [%i3], %g4
204 add %i2, -8, %i1
205 ld [%i3 + 4], %o3
206 add %i0, 2, %i0
207 ld [%i2], %i4
208
209 b L118
210 add %i3, -4, %i3
211L119:
212 ld [%i3 + 4], %g4
213 add %i2, -4, %i1
214 ld [%i2], %i5
215
216 b L120
217 add %i0, 1, %i0
218L122:
219 ld [%i3 + 4], %g1
220 ld [%i2], %i4
221
222 b L123
223 add %i3, 4, %i3
224L124:
225 add %i2, 4, %i1
226 ld [%i3 + 4], %o2
227 add %i0, -1, %i0
228 ld [%i2], %i5
229 add %i3, 8, %i3
230L131:
231 sll %o3, %o7, %g3
232L164:
233 srl %o2, %o1, %g2
234 ld [%i3], %g1
235 or %g3, %g2, %g3
236
237 cmp %g3, %i5
238 bne L163
239 ld [%i1], %i4
240L123:
241 sll %o2, %o7, %g3
242 srl %g1, %o1, %g2
243 ld [%i3 + 4], %g4
244 or %g3, %g2, %g3
245
246 cmp %g3, %i4
247 bne L163
248 ld [%i1 + 4], %i5
249L120:
250 sll %g1, %o7, %g3
251 srl %g4, %o1, %g2
252 ld [%i3 + 8], %o3
253 or %g3, %g2, %g3
254
255 cmp %g3, %i5
256 bne L163
257 ld [%i1 + 8], %i4
258L118:
259 sll %g4, %o7, %g3
260 srl %o3, %o1, %g2
261 ld [%i3 + 12], %o2
262 or %g3, %g2, %g3
263
264 cmp %g3, %i4
265 bne L163
266 ld [%i1 + 12], %i5
267
268 add %i3, 16, %i3
269 addcc %i0, -4, %i0
270 bne L131
271 add %i1, 16, %i1
272
273 sll %o3, %o7, %g3
274 srl %o2, %o1, %g2
275 or %g3, %g2, %g3
276
277 cmp %g3, %i5
278 be,a L114
279 mov 0, %i0
280
281 b,a L163
/* L114/L72: word compares done (or skipped for short counts) - finish
 * the remaining 0-3 bytes one at a time; %i0 is the return value.
 */
282L114:
283 cmp %i0, 0
284 bne L156
285 and %o4, -4, %g2
286
287 add %o0, %g2, %o0
288 add %i2, %g2, %i2
289 and %o4, 3, %o4
290L72:
291 cmp %o4, 0
292 be L156
293 mov 0, %i0
294
295 ldub [%o0], %g3
296L165:
297 ldub [%i2], %g2
298 add %o0, 1, %o0
299
300 subcc %g3, %g2, %i0
301 bne L156
302 add %i2, 1, %i2
303
304 addcc %o4, -1, %o4
305 bne,a L165
306 ldub [%o0], %g3
307
308 mov 0, %i0
309L156:
310 ret
311 restore
312#endif
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
new file mode 100644
index 000000000000..ce10bc869af9
--- /dev/null
+++ b/arch/sparc/lib/memcpy.S
@@ -0,0 +1,1150 @@
1/* memcpy.S: Sparc optimized memcpy and memmove code
2 * Hand optimized from GNU libc's memcpy and memmove
3 * Copyright (C) 1991,1996 Free Software Foundation
4 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#ifdef __KERNEL__
11
12#define FUNC(x) \
13 .globl x; \
14 .type x,@function; \
15 .align 4; \
16x:
17
18#undef FASTER_REVERSE
19#undef FASTER_NONALIGNED
20#define FASTER_ALIGNED
21
22/* In kernel these functions don't return a value.
23 * One should use macros in asm/string.h for that purpose.
24 * We return 0, so that bugs are more apparent.
25 */
26#define SETUP_RETL
27#define RETL_INSN clr %o0
28
29#else
30
31/* libc */
32
33#include "DEFS.h"
34
35#define FASTER_REVERSE
36#define FASTER_NONALIGNED
37#define FASTER_ALIGNED
38
39#define SETUP_RETL mov %o0, %g6
40#define RETL_INSN mov %g6, %o0
41
42#endif
43
44/* Both these macros have to start with exactly the same insn */
45#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
46 ldd [%src + (offset) + 0x00], %t0; \
47 ldd [%src + (offset) + 0x08], %t2; \
48 ldd [%src + (offset) + 0x10], %t4; \
49 ldd [%src + (offset) + 0x18], %t6; \
50 st %t0, [%dst + (offset) + 0x00]; \
51 st %t1, [%dst + (offset) + 0x04]; \
52 st %t2, [%dst + (offset) + 0x08]; \
53 st %t3, [%dst + (offset) + 0x0c]; \
54 st %t4, [%dst + (offset) + 0x10]; \
55 st %t5, [%dst + (offset) + 0x14]; \
56 st %t6, [%dst + (offset) + 0x18]; \
57 st %t7, [%dst + (offset) + 0x1c];
58
59#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
60 ldd [%src + (offset) + 0x00], %t0; \
61 ldd [%src + (offset) + 0x08], %t2; \
62 ldd [%src + (offset) + 0x10], %t4; \
63 ldd [%src + (offset) + 0x18], %t6; \
64 std %t0, [%dst + (offset) + 0x00]; \
65 std %t2, [%dst + (offset) + 0x08]; \
66 std %t4, [%dst + (offset) + 0x10]; \
67 std %t6, [%dst + (offset) + 0x18];
68
69#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
70 ldd [%src - (offset) - 0x10], %t0; \
71 ldd [%src - (offset) - 0x08], %t2; \
72 st %t0, [%dst - (offset) - 0x10]; \
73 st %t1, [%dst - (offset) - 0x0c]; \
74 st %t2, [%dst - (offset) - 0x08]; \
75 st %t3, [%dst - (offset) - 0x04];
76
77#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
78 ldd [%src - (offset) - 0x10], %t0; \
79 ldd [%src - (offset) - 0x08], %t2; \
80 std %t0, [%dst - (offset) - 0x10]; \
81 std %t2, [%dst - (offset) - 0x08];
82
83#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
84 ldub [%src - (offset) - 0x02], %t0; \
85 ldub [%src - (offset) - 0x01], %t1; \
86 stb %t0, [%dst - (offset) - 0x02]; \
87 stb %t1, [%dst - (offset) - 0x01];
88
89/* Both these macros have to start with exactly the same insn */
90#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
91 ldd [%src - (offset) - 0x20], %t0; \
92 ldd [%src - (offset) - 0x18], %t2; \
93 ldd [%src - (offset) - 0x10], %t4; \
94 ldd [%src - (offset) - 0x08], %t6; \
95 st %t0, [%dst - (offset) - 0x20]; \
96 st %t1, [%dst - (offset) - 0x1c]; \
97 st %t2, [%dst - (offset) - 0x18]; \
98 st %t3, [%dst - (offset) - 0x14]; \
99 st %t4, [%dst - (offset) - 0x10]; \
100 st %t5, [%dst - (offset) - 0x0c]; \
101 st %t6, [%dst - (offset) - 0x08]; \
102 st %t7, [%dst - (offset) - 0x04];
103
104#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
105 ldd [%src - (offset) - 0x20], %t0; \
106 ldd [%src - (offset) - 0x18], %t2; \
107 ldd [%src - (offset) - 0x10], %t4; \
108 ldd [%src - (offset) - 0x08], %t6; \
109 std %t0, [%dst - (offset) - 0x20]; \
110 std %t2, [%dst - (offset) - 0x18]; \
111 std %t4, [%dst - (offset) - 0x10]; \
112 std %t6, [%dst - (offset) - 0x08];
113
114#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
115 ldd [%src + (offset) + 0x00], %t0; \
116 ldd [%src + (offset) + 0x08], %t2; \
117 st %t0, [%dst + (offset) + 0x00]; \
118 st %t1, [%dst + (offset) + 0x04]; \
119 st %t2, [%dst + (offset) + 0x08]; \
120 st %t3, [%dst + (offset) + 0x0c];
121
122#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
123 ldub [%src + (offset) + 0x00], %t0; \
124 ldub [%src + (offset) + 0x01], %t1; \
125 stb %t0, [%dst + (offset) + 0x00]; \
126 stb %t1, [%dst + (offset) + 0x01];
127
128#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
129 ldd [%src + (offset) + 0x00], %t0; \
130 ldd [%src + (offset) + 0x08], %t2; \
131 srl %t0, shir, %t5; \
132 srl %t1, shir, %t6; \
133 sll %t0, shil, %t0; \
134 or %t5, %prev, %t5; \
135 sll %t1, shil, %prev; \
136 or %t6, %t0, %t0; \
137 srl %t2, shir, %t1; \
138 srl %t3, shir, %t6; \
139 sll %t2, shil, %t2; \
140 or %t1, %prev, %t1; \
141 std %t4, [%dst + (offset) + (offset2) - 0x04]; \
142 std %t0, [%dst + (offset) + (offset2) + 0x04]; \
143 sll %t3, shil, %prev; \
144 or %t6, %t2, %t4;
145
146#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
147 ldd [%src + (offset) + 0x00], %t0; \
148 ldd [%src + (offset) + 0x08], %t2; \
149 srl %t0, shir, %t4; \
150 srl %t1, shir, %t5; \
151 sll %t0, shil, %t6; \
152 or %t4, %prev, %t0; \
153 sll %t1, shil, %prev; \
154 or %t5, %t6, %t1; \
155 srl %t2, shir, %t4; \
156 srl %t3, shir, %t5; \
157 sll %t2, shil, %t6; \
158 or %t4, %prev, %t2; \
159 sll %t3, shil, %prev; \
160 or %t5, %t6, %t3; \
161 std %t0, [%dst + (offset) + (offset2) + 0x00]; \
162 std %t2, [%dst + (offset) + (offset2) + 0x08];
163
164 .text
165 .align 4
166
167#ifdef FASTER_REVERSE
168
16970: /* rdword_align */
170
171 andcc %o1, 1, %g0
172 be 4f
173 andcc %o1, 2, %g0
174
175 ldub [%o1 - 1], %g2
176 sub %o1, 1, %o1
177 stb %g2, [%o0 - 1]
178 sub %o2, 1, %o2
179 be 3f
180 sub %o0, 1, %o0
1814:
182 lduh [%o1 - 2], %g2
183 sub %o1, 2, %o1
184 sth %g2, [%o0 - 2]
185 sub %o2, 2, %o2
186 b 3f
187 sub %o0, 2, %o0
188
189#endif /* FASTER_REVERSE */
190
1910:
192 retl
193	nop ! Only bcopy returns here and it returns void...
194
195#ifdef __KERNEL__
196FUNC(amemmove)
197FUNC(__memmove)
198#endif
199FUNC(memmove)
200 cmp %o0, %o1
201 SETUP_RETL
202 bleu 9f
203 sub %o0, %o1, %o4
204
205 add %o1, %o2, %o3
206 cmp %o3, %o0
207 bleu 0f
208 andcc %o4, 3, %o5
209
210#ifndef FASTER_REVERSE
211
212 add %o1, %o2, %o1
213 add %o0, %o2, %o0
214 sub %o1, 1, %o1
215 sub %o0, 1, %o0
216
2171: /* reverse_bytes */
218
219 ldub [%o1], %o4
220 subcc %o2, 1, %o2
221 stb %o4, [%o0]
222 sub %o1, 1, %o1
223 bne 1b
224 sub %o0, 1, %o0
225
226 retl
227 RETL_INSN
228
229#else /* FASTER_REVERSE */
230
231 add %o1, %o2, %o1
232 add %o0, %o2, %o0
233 bne 77f
234 cmp %o2, 15
235 bleu 91f
236 andcc %o1, 3, %g0
237 bne 70b
2383:
239 andcc %o1, 4, %g0
240
241 be 2f
242 mov %o2, %g1
243
244 ld [%o1 - 4], %o4
245 sub %g1, 4, %g1
246 st %o4, [%o0 - 4]
247 sub %o1, 4, %o1
248 sub %o0, 4, %o0
2492:
250 andcc %g1, 0xffffff80, %g7
251 be 3f
252 andcc %o0, 4, %g0
253
254 be 74f + 4
2555:
256 RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
257 RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
258 RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
259 RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
260 subcc %g7, 128, %g7
261 sub %o1, 128, %o1
262 bne 5b
263 sub %o0, 128, %o0
2643:
265 andcc %g1, 0x70, %g7
266 be 72f
267 andcc %g1, 8, %g0
268
269 sethi %hi(72f), %o5
270 srl %g7, 1, %o4
271 add %g7, %o4, %o4
272 sub %o1, %g7, %o1
273 sub %o5, %o4, %o5
274 jmpl %o5 + %lo(72f), %g0
275 sub %o0, %g7, %o0
276
27771: /* rmemcpy_table */
278 RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
279 RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
280 RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
281 RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
282 RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
283 RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
284 RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
285
28672: /* rmemcpy_table_end */
287
288 be 73f
289 andcc %g1, 4, %g0
290
291 ldd [%o1 - 0x08], %g2
292 sub %o0, 8, %o0
293 sub %o1, 8, %o1
294 st %g2, [%o0]
295 st %g3, [%o0 + 0x04]
296
29773: /* rmemcpy_last7 */
298
299 be 1f
300 andcc %g1, 2, %g0
301
302 ld [%o1 - 4], %g2
303 sub %o1, 4, %o1
304 st %g2, [%o0 - 4]
305 sub %o0, 4, %o0
3061:
307 be 1f
308 andcc %g1, 1, %g0
309
310 lduh [%o1 - 2], %g2
311 sub %o1, 2, %o1
312 sth %g2, [%o0 - 2]
313 sub %o0, 2, %o0
3141:
315 be 1f
316 nop
317
318 ldub [%o1 - 1], %g2
319 stb %g2, [%o0 - 1]
3201:
321 retl
322 RETL_INSN
323
32474: /* rldd_std */
325 RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
326 RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
327 RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
328 RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
329 subcc %g7, 128, %g7
330 sub %o1, 128, %o1
331 bne 74b
332 sub %o0, 128, %o0
333
334 andcc %g1, 0x70, %g7
335 be 72b
336 andcc %g1, 8, %g0
337
338 sethi %hi(72b), %o5
339 srl %g7, 1, %o4
340 add %g7, %o4, %o4
341 sub %o1, %g7, %o1
342 sub %o5, %o4, %o5
343 jmpl %o5 + %lo(72b), %g0
344 sub %o0, %g7, %o0
345
34675: /* rshort_end */
347
348 and %o2, 0xe, %o3
3492:
350 sethi %hi(76f), %o5
351 sll %o3, 3, %o4
352 sub %o0, %o3, %o0
353 sub %o5, %o4, %o5
354 sub %o1, %o3, %o1
355 jmpl %o5 + %lo(76f), %g0
356 andcc %o2, 1, %g0
357
358 RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
359 RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
360 RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
361 RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
362 RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
363 RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
364 RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
365
36676: /* rshort_table_end */
367
368 be 1f
369 nop
370 ldub [%o1 - 1], %g2
371 stb %g2, [%o0 - 1]
3721:
373 retl
374 RETL_INSN
375
37691: /* rshort_aligned_end */
377
378 bne 75b
379 andcc %o2, 8, %g0
380
381 be 1f
382 andcc %o2, 4, %g0
383
384 ld [%o1 - 0x08], %g2
385 ld [%o1 - 0x04], %g3
386 sub %o1, 8, %o1
387 st %g2, [%o0 - 0x08]
388 st %g3, [%o0 - 0x04]
389 sub %o0, 8, %o0
3901:
391 b 73b
392 mov %o2, %g1
393
39477: /* rnon_aligned */
395 cmp %o2, 15
396 bleu 75b
397 andcc %o0, 3, %g0
398 be 64f
399 andcc %o0, 1, %g0
400 be 63f
401 andcc %o0, 2, %g0
402 ldub [%o1 - 1], %g5
403 sub %o1, 1, %o1
404 stb %g5, [%o0 - 1]
405 sub %o0, 1, %o0
406 be 64f
407 sub %o2, 1, %o2
40863:
409 ldub [%o1 - 1], %g5
410 sub %o1, 2, %o1
411 stb %g5, [%o0 - 1]
412 sub %o0, 2, %o0
413 ldub [%o1], %g5
414 sub %o2, 2, %o2
415 stb %g5, [%o0]
41664:
417 and %o1, 3, %g2
418 and %o1, -4, %o1
419 and %o2, 0xc, %g3
420 add %o1, 4, %o1
421 cmp %g3, 4
422 sll %g2, 3, %g4
423 mov 32, %g2
424 be 4f
425 sub %g2, %g4, %g7
426
427 blu 3f
428 cmp %g3, 8
429
430 be 2f
431 srl %o2, 2, %g3
432
433 ld [%o1 - 4], %o3
434 add %o0, -8, %o0
435 ld [%o1 - 8], %o4
436 add %o1, -16, %o1
437 b 7f
438 add %g3, 1, %g3
4392:
440 ld [%o1 - 4], %o4
441 add %o0, -4, %o0
442 ld [%o1 - 8], %g1
443 add %o1, -12, %o1
444 b 8f
445 add %g3, 2, %g3
4463:
447 ld [%o1 - 4], %o5
448 add %o0, -12, %o0
449 ld [%o1 - 8], %o3
450 add %o1, -20, %o1
451 b 6f
452 srl %o2, 2, %g3
4534:
454 ld [%o1 - 4], %g1
455 srl %o2, 2, %g3
456 ld [%o1 - 8], %o5
457 add %o1, -24, %o1
458 add %o0, -16, %o0
459 add %g3, -1, %g3
460
461 ld [%o1 + 12], %o3
4625:
463 sll %o5, %g4, %g2
464 srl %g1, %g7, %g5
465 or %g2, %g5, %g2
466 st %g2, [%o0 + 12]
4676:
468 ld [%o1 + 8], %o4
469 sll %o3, %g4, %g2
470 srl %o5, %g7, %g5
471 or %g2, %g5, %g2
472 st %g2, [%o0 + 8]
4737:
474 ld [%o1 + 4], %g1
475 sll %o4, %g4, %g2
476 srl %o3, %g7, %g5
477 or %g2, %g5, %g2
478 st %g2, [%o0 + 4]
4798:
480 ld [%o1], %o5
481 sll %g1, %g4, %g2
482 srl %o4, %g7, %g5
483 addcc %g3, -4, %g3
484 or %g2, %g5, %g2
485 add %o1, -16, %o1
486 st %g2, [%o0]
487 add %o0, -16, %o0
488 bne,a 5b
489 ld [%o1 + 12], %o3
490 sll %o5, %g4, %g2
491 srl %g1, %g7, %g5
492 srl %g4, 3, %g3
493 or %g2, %g5, %g2
494 add %o1, %g3, %o1
495 andcc %o2, 2, %g0
496 st %g2, [%o0 + 12]
497 be 1f
498 andcc %o2, 1, %g0
499
500 ldub [%o1 + 15], %g5
501 add %o1, -2, %o1
502 stb %g5, [%o0 + 11]
503 add %o0, -2, %o0
504 ldub [%o1 + 16], %g5
505 stb %g5, [%o0 + 12]
5061:
507 be 1f
508 nop
509 ldub [%o1 + 15], %g5
510 stb %g5, [%o0 + 11]
5111:
512 retl
513 RETL_INSN
514
515#endif /* FASTER_REVERSE */
516
517/* NOTE: This code is executed just for the cases,
518 where %src (=%o1) & 3 is != 0.
519 We need to align it to 4. So, for (%src & 3)
520 1 we need to do ldub,lduh
521 2 lduh
522 3 just ldub
523 so even if it looks weird, the branches
524 are correct here. -jj
525 */
52678: /* dword_align */
527
528 andcc %o1, 1, %g0
529 be 4f
530 andcc %o1, 2, %g0
531
532 ldub [%o1], %g2
533 add %o1, 1, %o1
534 stb %g2, [%o0]
535 sub %o2, 1, %o2
536 bne 3f
537 add %o0, 1, %o0
5384:
539 lduh [%o1], %g2
540 add %o1, 2, %o1
541 sth %g2, [%o0]
542 sub %o2, 2, %o2
543 b 3f
544 add %o0, 2, %o0
545
546#ifdef __KERNEL__
547FUNC(__memcpy)
548#endif
549FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
550
551 sub %o0, %o1, %o4
552 SETUP_RETL
5539:
554 andcc %o4, 3, %o5
5550:
556 bne 86f
557 cmp %o2, 15
558
559 bleu 90f
560 andcc %o1, 3, %g0
561
562 bne 78b
5633:
564 andcc %o1, 4, %g0
565
566 be 2f
567 mov %o2, %g1
568
569 ld [%o1], %o4
570 sub %g1, 4, %g1
571 st %o4, [%o0]
572 add %o1, 4, %o1
573 add %o0, 4, %o0
5742:
575 andcc %g1, 0xffffff80, %g7
576 be 3f
577 andcc %o0, 4, %g0
578
579 be 82f + 4
5805:
581 MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
582 MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
583 MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
584 MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
585 subcc %g7, 128, %g7
586 add %o1, 128, %o1
587 bne 5b
588 add %o0, 128, %o0
5893:
590 andcc %g1, 0x70, %g7
591 be 80f
592 andcc %g1, 8, %g0
593
594 sethi %hi(80f), %o5
595 srl %g7, 1, %o4
596 add %g7, %o4, %o4
597 add %o1, %g7, %o1
598 sub %o5, %o4, %o5
599 jmpl %o5 + %lo(80f), %g0
600 add %o0, %g7, %o0
601
60279: /* memcpy_table */
603
604 MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
605 MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
606 MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
607 MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
608 MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
609 MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
610 MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
611
61280: /* memcpy_table_end */
613 be 81f
614 andcc %g1, 4, %g0
615
616 ldd [%o1], %g2
617 add %o0, 8, %o0
618 st %g2, [%o0 - 0x08]
619 add %o1, 8, %o1
620 st %g3, [%o0 - 0x04]
621
62281: /* memcpy_last7 */
623
624 be 1f
625 andcc %g1, 2, %g0
626
627 ld [%o1], %g2
628 add %o1, 4, %o1
629 st %g2, [%o0]
630 add %o0, 4, %o0
6311:
632 be 1f
633 andcc %g1, 1, %g0
634
635 lduh [%o1], %g2
636 add %o1, 2, %o1
637 sth %g2, [%o0]
638 add %o0, 2, %o0
6391:
640 be 1f
641 nop
642
643 ldub [%o1], %g2
644 stb %g2, [%o0]
6451:
646 retl
647 RETL_INSN
648
64982: /* ldd_std */
650 MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
651 MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
652 MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
653 MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
654 subcc %g7, 128, %g7
655 add %o1, 128, %o1
656 bne 82b
657 add %o0, 128, %o0
658
659#ifndef FASTER_ALIGNED
660
661 andcc %g1, 0x70, %g7
662 be 80b
663 andcc %g1, 8, %g0
664
665 sethi %hi(80b), %o5
666 srl %g7, 1, %o4
667 add %g7, %o4, %o4
668 add %o1, %g7, %o1
669 sub %o5, %o4, %o5
670 jmpl %o5 + %lo(80b), %g0
671 add %o0, %g7, %o0
672
673#else /* FASTER_ALIGNED */
674
675 andcc %g1, 0x70, %g7
676 be 84f
677 andcc %g1, 8, %g0
678
679 sethi %hi(84f), %o5
680 add %o1, %g7, %o1
681 sub %o5, %g7, %o5
682 jmpl %o5 + %lo(84f), %g0
683 add %o0, %g7, %o0
684
68583: /* amemcpy_table */
686
687 MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
688 MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
689 MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
690 MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
691 MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
692 MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
693 MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
694
69584: /* amemcpy_table_end */
696 be 85f
697 andcc %g1, 4, %g0
698
699 ldd [%o1], %g2
700 add %o0, 8, %o0
701 std %g2, [%o0 - 0x08]
702 add %o1, 8, %o1
70385: /* amemcpy_last7 */
704 be 1f
705 andcc %g1, 2, %g0
706
707 ld [%o1], %g2
708 add %o1, 4, %o1
709 st %g2, [%o0]
710 add %o0, 4, %o0
7111:
712 be 1f
713 andcc %g1, 1, %g0
714
715 lduh [%o1], %g2
716 add %o1, 2, %o1
717 sth %g2, [%o0]
718 add %o0, 2, %o0
7191:
720 be 1f
721 nop
722
723 ldub [%o1], %g2
724 stb %g2, [%o0]
7251:
726 retl
727 RETL_INSN
728
729#endif /* FASTER_ALIGNED */
730
73186: /* non_aligned */
732 cmp %o2, 6
733 bleu 88f
734
735#ifdef FASTER_NONALIGNED
736
737 cmp %o2, 256
738 bcc 87f
739
740#endif /* FASTER_NONALIGNED */
741
742 andcc %o0, 3, %g0
743 be 61f
744 andcc %o0, 1, %g0
745 be 60f
746 andcc %o0, 2, %g0
747
748 ldub [%o1], %g5
749 add %o1, 1, %o1
750 stb %g5, [%o0]
751 sub %o2, 1, %o2
752 bne 61f
753 add %o0, 1, %o0
75460:
755 ldub [%o1], %g3
756 add %o1, 2, %o1
757 stb %g3, [%o0]
758 sub %o2, 2, %o2
759 ldub [%o1 - 1], %g3
760 add %o0, 2, %o0
761 stb %g3, [%o0 - 1]
76261:
763 and %o1, 3, %g2
764 and %o2, 0xc, %g3
765 and %o1, -4, %o1
766 cmp %g3, 4
767 sll %g2, 3, %g4
768 mov 32, %g2
769 be 4f
770 sub %g2, %g4, %g7
771
772 blu 3f
773 cmp %g3, 0x8
774
775 be 2f
776 srl %o2, 2, %g3
777
778 ld [%o1], %o3
779 add %o0, -8, %o0
780 ld [%o1 + 4], %o4
781 b 8f
782 add %g3, 1, %g3
7832:
784 ld [%o1], %o4
785 add %o0, -12, %o0
786 ld [%o1 + 4], %o5
787 add %g3, 2, %g3
788 b 9f
789 add %o1, -4, %o1
7903:
791 ld [%o1], %g1
792 add %o0, -4, %o0
793 ld [%o1 + 4], %o3
794 srl %o2, 2, %g3
795 b 7f
796 add %o1, 4, %o1
7974:
798 ld [%o1], %o5
799 cmp %o2, 7
800 ld [%o1 + 4], %g1
801 srl %o2, 2, %g3
802 bleu 10f
803 add %o1, 8, %o1
804
805 ld [%o1], %o3
806 add %g3, -1, %g3
8075:
808 sll %o5, %g4, %g2
809 srl %g1, %g7, %g5
810 or %g2, %g5, %g2
811 st %g2, [%o0]
8127:
813 ld [%o1 + 4], %o4
814 sll %g1, %g4, %g2
815 srl %o3, %g7, %g5
816 or %g2, %g5, %g2
817 st %g2, [%o0 + 4]
8188:
819 ld [%o1 + 8], %o5
820 sll %o3, %g4, %g2
821 srl %o4, %g7, %g5
822 or %g2, %g5, %g2
823 st %g2, [%o0 + 8]
8249:
825 ld [%o1 + 12], %g1
826 sll %o4, %g4, %g2
827 srl %o5, %g7, %g5
828 addcc %g3, -4, %g3
829 or %g2, %g5, %g2
830 add %o1, 16, %o1
831 st %g2, [%o0 + 12]
832 add %o0, 16, %o0
833 bne,a 5b
834 ld [%o1], %o3
83510:
836 sll %o5, %g4, %g2
837 srl %g1, %g7, %g5
838 srl %g7, 3, %g3
839 or %g2, %g5, %g2
840 sub %o1, %g3, %o1
841 andcc %o2, 2, %g0
842 st %g2, [%o0]
843 be 1f
844 andcc %o2, 1, %g0
845
846 ldub [%o1], %g2
847 add %o1, 2, %o1
848 stb %g2, [%o0 + 4]
849 add %o0, 2, %o0
850 ldub [%o1 - 1], %g2
851 stb %g2, [%o0 + 3]
8521:
853 be 1f
854 nop
855 ldub [%o1], %g2
856 stb %g2, [%o0 + 4]
8571:
858 retl
859 RETL_INSN
860
861#ifdef FASTER_NONALIGNED
862
86387: /* faster_nonaligned */
864
865 andcc %o1, 3, %g0
866 be 3f
867 andcc %o1, 1, %g0
868
869 be 4f
870 andcc %o1, 2, %g0
871
872 ldub [%o1], %g2
873 add %o1, 1, %o1
874 stb %g2, [%o0]
875 sub %o2, 1, %o2
876 bne 3f
877 add %o0, 1, %o0
8784:
879 lduh [%o1], %g2
880 add %o1, 2, %o1
881 srl %g2, 8, %g3
882 sub %o2, 2, %o2
883 stb %g3, [%o0]
884 add %o0, 2, %o0
885 stb %g2, [%o0 - 1]
8863:
887 andcc %o1, 4, %g0
888
889 bne 2f
890 cmp %o5, 1
891
892 ld [%o1], %o4
893 srl %o4, 24, %g2
894 stb %g2, [%o0]
895 srl %o4, 16, %g3
896 stb %g3, [%o0 + 1]
897 srl %o4, 8, %g2
898 stb %g2, [%o0 + 2]
899 sub %o2, 4, %o2
900 stb %o4, [%o0 + 3]
901 add %o1, 4, %o1
902 add %o0, 4, %o0
9032:
904 be 33f
905 cmp %o5, 2
906 be 32f
907 sub %o2, 4, %o2
90831:
909 ld [%o1], %g2
910 add %o1, 4, %o1
911 srl %g2, 24, %g3
912 and %o0, 7, %g5
913 stb %g3, [%o0]
914 cmp %g5, 7
915 sll %g2, 8, %g1
916 add %o0, 4, %o0
917 be 41f
918 and %o2, 0xffffffc0, %o3
919 ld [%o0 - 7], %o4
9204:
921 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
922 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
923 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
924 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
925 subcc %o3, 64, %o3
926 add %o1, 64, %o1
927 bne 4b
928 add %o0, 64, %o0
929
930 andcc %o2, 0x30, %o3
931 be,a 1f
932 srl %g1, 16, %g2
9334:
934 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
935 subcc %o3, 16, %o3
936 add %o1, 16, %o1
937 bne 4b
938 add %o0, 16, %o0
939
940 srl %g1, 16, %g2
9411:
942 st %o4, [%o0 - 7]
943 sth %g2, [%o0 - 3]
944 srl %g1, 8, %g4
945 b 88f
946 stb %g4, [%o0 - 1]
94732:
948 ld [%o1], %g2
949 add %o1, 4, %o1
950 srl %g2, 16, %g3
951 and %o0, 7, %g5
952 sth %g3, [%o0]
953 cmp %g5, 6
954 sll %g2, 16, %g1
955 add %o0, 4, %o0
956 be 42f
957 and %o2, 0xffffffc0, %o3
958 ld [%o0 - 6], %o4
9594:
960 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
961 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
962 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
963 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
964 subcc %o3, 64, %o3
965 add %o1, 64, %o1
966 bne 4b
967 add %o0, 64, %o0
968
969 andcc %o2, 0x30, %o3
970 be,a 1f
971 srl %g1, 16, %g2
9724:
973 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
974 subcc %o3, 16, %o3
975 add %o1, 16, %o1
976 bne 4b
977 add %o0, 16, %o0
978
979 srl %g1, 16, %g2
9801:
981 st %o4, [%o0 - 6]
982 b 88f
983 sth %g2, [%o0 - 2]
98433:
985 ld [%o1], %g2
986 sub %o2, 4, %o2
987 srl %g2, 24, %g3
988 and %o0, 7, %g5
989 stb %g3, [%o0]
990 cmp %g5, 5
991 srl %g2, 8, %g4
992 sll %g2, 24, %g1
993 sth %g4, [%o0 + 1]
994 add %o1, 4, %o1
995 be 43f
996 and %o2, 0xffffffc0, %o3
997
998 ld [%o0 - 1], %o4
999 add %o0, 4, %o0
10004:
1001 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1002 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1003 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1004 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1005 subcc %o3, 64, %o3
1006 add %o1, 64, %o1
1007 bne 4b
1008 add %o0, 64, %o0
1009
1010 andcc %o2, 0x30, %o3
1011 be,a 1f
1012 srl %g1, 24, %g2
10134:
1014 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1015 subcc %o3, 16, %o3
1016 add %o1, 16, %o1
1017 bne 4b
1018 add %o0, 16, %o0
1019
1020 srl %g1, 24, %g2
10211:
1022 st %o4, [%o0 - 5]
1023 b 88f
1024 stb %g2, [%o0 - 1]
102541:
1026 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1027 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1028 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1029 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1030 subcc %o3, 64, %o3
1031 add %o1, 64, %o1
1032 bne 41b
1033 add %o0, 64, %o0
1034
1035 andcc %o2, 0x30, %o3
1036 be,a 1f
1037 srl %g1, 16, %g2
10384:
1039 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1040 subcc %o3, 16, %o3
1041 add %o1, 16, %o1
1042 bne 4b
1043 add %o0, 16, %o0
1044
1045 srl %g1, 16, %g2
10461:
1047 sth %g2, [%o0 - 3]
1048 srl %g1, 8, %g4
1049 b 88f
1050 stb %g4, [%o0 - 1]
105143:
1052 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1053 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1054 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1055 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1056 subcc %o3, 64, %o3
1057 add %o1, 64, %o1
1058 bne 43b
1059 add %o0, 64, %o0
1060
1061 andcc %o2, 0x30, %o3
1062 be,a 1f
1063 srl %g1, 24, %g2
10644:
1065 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1066 subcc %o3, 16, %o3
1067 add %o1, 16, %o1
1068 bne 4b
1069 add %o0, 16, %o0
1070
1071 srl %g1, 24, %g2
10721:
1073 stb %g2, [%o0 + 3]
1074 b 88f
1075 add %o0, 4, %o0
107642:
1077 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1078 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1079 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1080 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1081 subcc %o3, 64, %o3
1082 add %o1, 64, %o1
1083 bne 42b
1084 add %o0, 64, %o0
1085
1086 andcc %o2, 0x30, %o3
1087 be,a 1f
1088 srl %g1, 16, %g2
10894:
1090 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1091 subcc %o3, 16, %o3
1092 add %o1, 16, %o1
1093 bne 4b
1094 add %o0, 16, %o0
1095
1096 srl %g1, 16, %g2
10971:
1098 sth %g2, [%o0 - 2]
1099
1100 /* Fall through */
1101
1102#endif /* FASTER_NONALIGNED */
1103
110488: /* short_end */
1105
1106 and %o2, 0xe, %o3
110720:
1108 sethi %hi(89f), %o5
1109 sll %o3, 3, %o4
1110 add %o0, %o3, %o0
1111 sub %o5, %o4, %o5
1112 add %o1, %o3, %o1
1113 jmpl %o5 + %lo(89f), %g0
1114 andcc %o2, 1, %g0
1115
1116 MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
1117 MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
1118 MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
1119 MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
1120 MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
1121 MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
1122 MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
1123
112489: /* short_table_end */
1125
1126 be 1f
1127 nop
1128
1129 ldub [%o1], %g2
1130 stb %g2, [%o0]
11311:
1132 retl
1133 RETL_INSN
1134
113590: /* short_aligned_end */
1136 bne 88b
1137 andcc %o2, 8, %g0
1138
1139 be 1f
1140 andcc %o2, 4, %g0
1141
1142 ld [%o1 + 0x00], %g2
1143 ld [%o1 + 0x04], %g3
1144 add %o1, 8, %o1
1145 st %g2, [%o0 + 0x00]
1146 st %g3, [%o0 + 0x04]
1147 add %o0, 8, %o0
11481:
1149 b 81b
1150 mov %o2, %g1
diff --git a/arch/sparc/lib/memscan.S b/arch/sparc/lib/memscan.S
new file mode 100644
index 000000000000..28e78ff090ac
--- /dev/null
+++ b/arch/sparc/lib/memscan.S
@@ -0,0 +1,133 @@
1/* $Id: memscan.S,v 1.4 1996/09/08 02:01:20 davem Exp $
2 * memscan.S: Optimized memscan for the Sparc.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7/* In essence, this is just a fancy strlen. */
8
9#define LO_MAGIC 0x01010101
10#define HI_MAGIC 0x80808080
11
12 .text
13 .align 4
14 .globl __memscan_zero, __memscan_generic
15 .globl memscan
16__memscan_zero:
17	/* %o0 = addr, %o1 = size */
	/* Scan up to %o1 bytes starting at %o0 for a zero byte and
	 * return a pointer to it.  If the count runs out first, the
	 * current (advanced) pointer is returned.  The main loop uses
	 * the classic (word - 0x01010101) & 0x80808080 zero-byte test
	 * on one 32-bit word per iteration; that test can fire falsely,
	 * so mzero_byte_zero re-checks the word byte-by-byte and
	 * resumes the word loop when no zero byte was actually there.
	 * NOTE(review): when size is not a multiple of 4 the word loop
	 * may read a few bytes past addr+size -- presumably relied upon
	 * to be harmless here; confirm against callers.
	 */
18	cmp %o1, 0
19	bne,a 1f
20	 andcc %o0, 3, %g0
21
	/* size == 0: nothing to scan, return the start address. */
22	retl
23	 nop
24
251:
26	be mzero_scan_word
27	 sethi %hi(HI_MAGIC), %g2
28
	/* Byte-at-a-time until %o0 is word aligned, checking both for
	 * a zero byte and for the count running out. */
29	ldsb [%o0], %g3
30mzero_still_not_word_aligned:
31	cmp %g3, 0
32	bne 1f
33	 add %o0, 1, %o0
34
	/* Found a zero byte during the alignment prologue. */
35	retl
36	 sub %o0, 1, %o0
37
381:
39	subcc %o1, 1, %o1
40	bne,a 1f
41	 andcc %o0, 3, %g0
42
43	retl
44	 nop
45
461:
47	bne,a mzero_still_not_word_aligned
48	 ldsb [%o0], %g3
49
50	sethi %hi(HI_MAGIC), %g2
51mzero_scan_word:
	/* Word loop setup: %o3 = 0x80808080 (HI_MAGIC),
	 * %o2 = 0x01010101 (LO_MAGIC). */
52	or %g2, %lo(HI_MAGIC), %o3
53	sethi %hi(LO_MAGIC), %g3
54	or %g3, %lo(LO_MAGIC), %o2
55mzero_next_word:
56	ld [%o0], %g2
57mzero_next_word_preloaded:
58	sub %g2, %o2, %g2
59mzero_next_word_preloaded_next:
	/* (word - LO_MAGIC) & HI_MAGIC != 0 => word may hold a zero
	 * byte; go verify byte-by-byte. */
60	andcc %g2, %o3, %g0
61	bne mzero_byte_zero
62	 add %o0, 4, %o0
63
64mzero_check_out_of_fuel:
	/* Count down four bytes per iteration; stop when exhausted. */
65	subcc %o1, 4, %o1
66	bg,a 1f
67	 ld [%o0], %g2
68
69	retl
70	 nop
71
721:
73	b mzero_next_word_preloaded_next
74	 sub %g2, %o2, %g2
75
76	/* Check every byte. */
77mzero_byte_zero:
	/* %o0 already points past the candidate word, hence -4..-1. */
78	ldsb [%o0 - 4], %g2
79	cmp %g2, 0
80	bne mzero_byte_one
81	 sub %o0, 4, %g3
82
83	retl
84	 mov %g3, %o0
85
86mzero_byte_one:
87	ldsb [%o0 - 3], %g2
88	cmp %g2, 0
89	bne,a mzero_byte_two_and_three
90	 ldsb [%o0 - 2], %g2
91
92	retl
93	 sub %o0, 3, %o0
94
95mzero_byte_two_and_three:
96	cmp %g2, 0
97	bne,a 1f
98	 ldsb [%o0 - 1], %g2
99
100	retl
101	 sub %o0, 2, %o0
102
1031:
104	cmp %g2, 0
	/* False positive from the magic test: no zero byte in this
	 * word after all, resume the word loop with the next word
	 * preloaded. */
105	bne,a mzero_next_word_preloaded
106	 ld [%o0], %g2
107
108	retl
109	 sub %o0, 1, %o0
110
111mzero_found_it:
	/* NOTE(review): no visible branch targets this label --
	 * apparently dead code. */
112	retl
113	 sub %o0, 2, %o0
114
115memscan:
116__memscan_generic:
117	/* %o0 = addr, %o1 = c, %o2 = size */
	/* Plain byte-at-a-time scan of %o2 bytes at %o0 for the byte
	 * value %o1.  Returns a pointer to the matching byte (the
	 * match branch below is taken before the pointer increment),
	 * or a pointer just past the scanned area when there is no
	 * match or size is zero. */
118	cmp %o2, 0
119	bne,a 0f
120	 ldub [%o0], %g2
121
122	b,a 2f
1231:
124	ldub [%o0], %g2
1250:
126	cmp %g2, %o1
	/* Delay slots: the counter decrement and pointer increment
	 * execute under the two branches below. */
127	be 2f
128	 addcc %o2, -1, %o2
129	bne 1b
130	 add %o0, 1, %o0
1312:
132	retl
133	 nop
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
new file mode 100644
index 000000000000..a65eba41097c
--- /dev/null
+++ b/arch/sparc/lib/memset.S
@@ -0,0 +1,203 @@
1/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
2 * Copyright (C) 1991,1996 Free Software Foundation
3 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 *
6 * Returns 0, if ok, and number of bytes not yet set if exception
7 * occurs and we were called as clear_user.
8 */
9
10#include <asm/ptrace.h>
11
12/* Work around cpp -rob */
13#define ALLOC #alloc
14#define EXECINSTR #execinstr
15#define EX(x,y,a,b) \
1698: x,y; \
17 .section .fixup,ALLOC,EXECINSTR; \
18 .align 4; \
1999: ba 30f; \
20 a, b, %o0; \
21 .section __ex_table,ALLOC; \
22 .align 4; \
23 .word 98b, 99b; \
24 .text; \
25 .align 4
26
27#define EXT(start,end,handler) \
28 .section __ex_table,ALLOC; \
29 .align 4; \
30 .word start, 0, end, handler; \
31 .text; \
32 .align 4
33
34/* Please don't change these macros, unless you change the logic
35 * in the .fixup section below as well.
36 * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
37#define ZERO_BIG_BLOCK(base, offset, source) \
38 std source, [base + offset + 0x00]; \
39 std source, [base + offset + 0x08]; \
40 std source, [base + offset + 0x10]; \
41 std source, [base + offset + 0x18]; \
42 std source, [base + offset + 0x20]; \
43 std source, [base + offset + 0x28]; \
44 std source, [base + offset + 0x30]; \
45 std source, [base + offset + 0x38];
46
47#define ZERO_LAST_BLOCKS(base, offset, source) \
48 std source, [base - offset - 0x38]; \
49 std source, [base - offset - 0x30]; \
50 std source, [base - offset - 0x28]; \
51 std source, [base - offset - 0x20]; \
52 std source, [base - offset - 0x18]; \
53 std source, [base - offset - 0x10]; \
54 std source, [base - offset - 0x08]; \
55 std source, [base - offset - 0x00];
56
57	.text
58	.align 4
59
60	.globl __bzero_begin
61__bzero_begin:
62
	/* Fix: the original line carried a stray trailing comma after
	 * __memset, which the assembler rejects ("expected symbol name"). */
63	.globl __bzero, __memset
64	.globl memset
65	.globl __memset_start, __memset_end
66__memset_start:
67__memset:
68memset:
	/* memset(%o0 = dst, %o1 = char, %o2 = len): replicate the low
	 * byte of %o1 into all four bytes of %g3 (the fill pattern),
	 * then join the common __bzero tail with %o1 = byte count.
	 * Returns 0 in %o0 on success; on a fault the fixup code below
	 * returns the number of bytes not yet set (clear_user contract,
	 * see the file header). */
69	and %o1, 0xff, %g3
70	sll %g3, 8, %g2
71	or %g3, %g2, %g3
72	sll %g3, 16, %g2
73	or %g3, %g2, %g3
74	b 1f
75	 mov %o2, %o1
763:
	/* Store 1-3 bytes to reach 4-byte alignment of %o0 (alignment
	 * remainder is in %o2 here). */
77	cmp %o2, 3
78	be 2f
79	 EX(stb %g3, [%o0], sub %o1, 0)
80
81	cmp %o2, 2
82	be 2f
83	 EX(stb %g3, [%o0 + 0x01], sub %o1, 1)
84
85	EX(stb %g3, [%o0 + 0x02], sub %o1, 2)
862:
87	sub %o2, 4, %o2
88	add %o1, %o2, %o1
89	b 4f
90	 sub %o0, %o2, %o0
91
92__bzero:
	/* bzero entry: fill pattern is zero. */
93	mov %g0, %g3
941:
95	cmp %o1, 7
96	bleu 7f
97	 andcc %o0, 3, %o2
98
99	bne 3b
1004:
	/* Reach 8-byte alignment so std can be used below. */
101	andcc %o0, 4, %g0
102
103	be 2f
104	 mov %g3, %g2
105
106	EX(st %g3, [%o0], sub %o1, 0)
107	sub %o1, 4, %o1
108	add %o0, 4, %o0
1092:
110	andcc %o1, 0xffffff80, %o3	! Now everything is 8 aligned and o1 is len to run
111	be 9f
112	 andcc %o1, 0x78, %o2
11310:
	/* 128 bytes per iteration via std; the EXT entry registers this
	 * span in __ex_table with fixup 20 below. */
114	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
115	subcc %o3, 128, %o3
116	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
11711:
118	EXT(10b, 11b, 20f)
119	bne 10b
120	 add %o0, 128, %o0
121
122	orcc %o2, %g0, %g0
1239:
124	be 13f
125	 andcc %o1, 7, %o1
126
	/* Computed jump into the ZERO_LAST_BLOCKS table: skip the stores
	 * that are not needed for the remaining 0x78-masked byte count. */
127	srl %o2, 1, %o3
128	set 13f, %o4
129	sub %o4, %o3, %o4
130	jmp %o4
131	 add %o0, %o2, %o0
132
13312:
134	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
135	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13613:
	/* Tail: at most 7 bytes left (word, halfword, byte). */
137	be 8f
138	 andcc %o1, 4, %g0
139
140	be 1f
141	 andcc %o1, 2, %g0
142
143	EX(st %g3, [%o0], and %o1, 7)
144	add %o0, 4, %o0
1451:
146	be 1f
147	 andcc %o1, 1, %g0
148
149	EX(sth %g3, [%o0], and %o1, 3)
150	add %o0, 2, %o0
1511:
152	bne,a 8f
153	 EX(stb %g3, [%o0], and %o1, 1)
1548:
155	retl
156	 clr %o0
1577:
	/* Short (<= 7 byte) case: simple byte loop. */
158	be 13b
159	 orcc %o1, 0, %g0
160
161	be 0f
1628:
163	add %o0, 1, %o0
164	subcc %o1, 1, %o1
165	bne,a 8b
166	 EX(stb %g3, [%o0 - 1], add %o1, 1)
1670:
168	retl
169	 clr %o0
170__memset_end:
171
172	.section .fixup,#alloc,#execinstr
173	.align 4
17420:
	/* Fault inside a ZERO_BIG_BLOCK: %g2 is the insn index within
	 * the 128-byte block (from the extended __ex_table entry);
	 * reconstruct the count of bytes not yet stored. */
175	cmp %g2, 8
176	bleu 1f
177	 and %o1, 0x7f, %o1
178	sub %g2, 9, %g2
179	add %o3, 64, %o3
1801:
181	sll %g2, 3, %g2
182	add %o3, %o1, %o0
183	b 30f
184	 sub %o0, %g2, %o0
18521:
	/* NOTE(review): no EX/EXT entry in this view references label 21
	 * -- possibly unused fixup code; confirm before removing. */
186	mov 8, %o0
187	and %o1, 7, %o1
188	sub %o0, %g2, %o0
189	sll %o0, 3, %o0
190	b 30f
191	 add %o0, %o1, %o0
19230:
193/* %o4 is faulting address, %o5 is %pc where fault occurred */
194	save %sp, -104, %sp
195	mov %i5, %o0
196	mov %i7, %o1
197	call lookup_fault
198	 mov %i4, %o2
199	ret
200	 restore
201
202	.globl __bzero_end
203__bzero_end:
diff --git a/arch/sparc/lib/mul.S b/arch/sparc/lib/mul.S
new file mode 100644
index 000000000000..83dffbc2f62f
--- /dev/null
+++ b/arch/sparc/lib/mul.S
@@ -0,0 +1,135 @@
1/* $Id: mul.S,v 1.4 1996/09/30 02:22:32 davem Exp $
2 * mul.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6/*
7 * Signed multiply, from Appendix E of the Sparc Version 8
8 * Architecture Manual.
9 */
10
11/*
12 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
13 * the 64-bit product).
14 *
15 * This code optimizes short (less than 13-bit) multiplies.
16 */
17
18	.globl .mul
19.mul:
	/* Signed 32x32 -> 64 multiply using the V7 mulscc step instruction:
	 * low word returned in %o0, high word in %o1.  Short (<13-bit)
	 * multipliers take the 12-step path below.  .mul_patch at the end
	 * is the V8 hardware-multiply replacement, presumably patched over
	 * this entry at boot on smul-capable CPUs -- TODO confirm against
	 * the cpu setup/patch code. */
20	mov	%o0, %y		! multiplier -> Y
21	andncc	%o0, 0xfff, %g0	! test bits 12..31
22	be	Lmul_shortway	! if zero, can do it the short way
23	andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
24
25	/*
26	 * Long multiply.  32 steps, followed by a final shift step.
27	 */
28	mulscc	%o4, %o1, %o4	! 1
29	mulscc	%o4, %o1, %o4	! 2
30	mulscc	%o4, %o1, %o4	! 3
31	mulscc	%o4, %o1, %o4	! 4
32	mulscc	%o4, %o1, %o4	! 5
33	mulscc	%o4, %o1, %o4	! 6
34	mulscc	%o4, %o1, %o4	! 7
35	mulscc	%o4, %o1, %o4	! 8
36	mulscc	%o4, %o1, %o4	! 9
37	mulscc	%o4, %o1, %o4	! 10
38	mulscc	%o4, %o1, %o4	! 11
39	mulscc	%o4, %o1, %o4	! 12
40	mulscc	%o4, %o1, %o4	! 13
41	mulscc	%o4, %o1, %o4	! 14
42	mulscc	%o4, %o1, %o4	! 15
43	mulscc	%o4, %o1, %o4	! 16
44	mulscc	%o4, %o1, %o4	! 17
45	mulscc	%o4, %o1, %o4	! 18
46	mulscc	%o4, %o1, %o4	! 19
47	mulscc	%o4, %o1, %o4	! 20
48	mulscc	%o4, %o1, %o4	! 21
49	mulscc	%o4, %o1, %o4	! 22
50	mulscc	%o4, %o1, %o4	! 23
51	mulscc	%o4, %o1, %o4	! 24
52	mulscc	%o4, %o1, %o4	! 25
53	mulscc	%o4, %o1, %o4	! 26
54	mulscc	%o4, %o1, %o4	! 27
55	mulscc	%o4, %o1, %o4	! 28
56	mulscc	%o4, %o1, %o4	! 29
57	mulscc	%o4, %o1, %o4	! 30
58	mulscc	%o4, %o1, %o4	! 31
59	mulscc	%o4, %o1, %o4	! 32
60	mulscc	%o4, %g0, %o4	! final shift
61
62	! If %o0 was negative, the result is
63	!	(%o0 * %o1) + (%o1 << 32))
64	! We fix that here.
65
66#if 0
67	tst	%o0
68	bge	1f
69	rd	%y, %o0
70
71	! %o0 was indeed negative; fix upper 32 bits of result by subtracting
72	! %o1 (i.e., return %o4 - %o1 in %o1).
73	retl
74	sub	%o4, %o1, %o1
75
761:
77	retl
78	mov	%o4, %o1
79#else
80	/* Faster code adapted from tege@sics.se's code for umul.S.  */
81	sra	%o0, 31, %o2	! make mask from sign bit
82	and	%o1, %o2, %o2	! %o2 = 0 or %o1, depending on sign of %o0
83	rd	%y, %o0		! get lower half of product
84	retl
85	sub	%o4, %o2, %o1	! subtract compensation
86				!  and put upper half in place
87#endif
88
89Lmul_shortway:
90	/*
91	 * Short multiply.  12 steps, followed by a final shift step.
92	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
93	 * but there is no problem with %o0 being negative (unlike above).
94	 */
95	mulscc	%o4, %o1, %o4	! 1
96	mulscc	%o4, %o1, %o4	! 2
97	mulscc	%o4, %o1, %o4	! 3
98	mulscc	%o4, %o1, %o4	! 4
99	mulscc	%o4, %o1, %o4	! 5
100	mulscc	%o4, %o1, %o4	! 6
101	mulscc	%o4, %o1, %o4	! 7
102	mulscc	%o4, %o1, %o4	! 8
103	mulscc	%o4, %o1, %o4	! 9
104	mulscc	%o4, %o1, %o4	! 10
105	mulscc	%o4, %o1, %o4	! 11
106	mulscc	%o4, %o1, %o4	! 12
107	mulscc	%o4, %g0, %o4	! final shift
108
109	/*
110	 *  %o4 has 20 of the bits that should be in the low part of the
111	 * result; %y has the bottom 12 (as %y's top 12).  That is:
112	 *
113	 *	  %o4		    %y
114	 * +----------------+----------------+
115	 * | -12- |   -20-  | -12- |   -20-  |
116	 * +------(---------+------)---------+
117	 *  --hi-- ----low-part----
118	 *
119	 * The upper 12 bits of %o4 should be sign-extended to form the
120	 * high part of the product (i.e., highpart = %o4 >> 20).
121	 */
122
123	rd	%y, %o5
124	sll	%o4, 12, %o0	! shift middle bits left 12
125	srl	%o5, 20, %o5	! shift low bits right 20, zero fill at left
126	or	%o5, %o0, %o0	! construct low part of result
127	retl
128	sra	%o4, 20, %o1	! ... and extract high part of result
129
130	.globl	.mul_patch
131.mul_patch:
	! V8 replacement: hardware smul; trailing nop presumably keeps the
	! patch site instruction count fixed -- TODO confirm patch size.
132	smul	%o0, %o1, %o0
133	retl
134	rd	%y, %o1
135	nop
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
new file mode 100644
index 000000000000..7f17872d0603
--- /dev/null
+++ b/arch/sparc/lib/muldi3.S
@@ -0,0 +1,76 @@
1/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
2
3This file is part of GNU CC.
4
5GNU CC is free software; you can redistribute it and/or modify
6it under the terms of the GNU General Public License as published by
7the Free Software Foundation; either version 2, or (at your option)
8any later version.
9
10GNU CC is distributed in the hope that it will be useful,
11but WITHOUT ANY WARRANTY; without even the implied warranty of
12MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13GNU General Public License for more details.
14
15You should have received a copy of the GNU General Public License
16along with GNU CC; see the file COPYING. If not, write to
17the Free Software Foundation, 59 Temple Place - Suite 330,
18Boston, MA 02111-1307, USA. */
19
20	.text
21	.align 4
22	.globl __muldi3
23__muldi3:
	/* 64x64 -> 64 signed multiply for gcc:
	 *   (%i0:%i1) * (%i2:%i3) -> result high word in %i0, low in %o1
	 *   (via the restore).
	 * low(a)*low(b) is computed with 32 mulscc steps (with a sign
	 * correction in %g2 since mulscc treats %i3 as signed), then the
	 * two 32x32 cross products are folded into the high word using
	 * .umul (defined elsewhere in this library). */
24	save  %sp, -104, %sp
25	wr  %g0, %i1, %y
26	sra  %i3, 0x1f, %g2	! %g2 = (%i3 < 0) ? %i1 : 0 -- unsigned correction
27	and  %i1, %g2, %g2
28	andcc  %g0, 0, %g1	! clear partial product and condition codes
29	mulscc  %g1, %i3, %g1	! 32 multiply steps: low(%i1) * low(%i3)
30	mulscc  %g1, %i3, %g1
31	mulscc  %g1, %i3, %g1
32	mulscc  %g1, %i3, %g1
33	mulscc  %g1, %i3, %g1
34	mulscc  %g1, %i3, %g1
35	mulscc  %g1, %i3, %g1
36	mulscc  %g1, %i3, %g1
37	mulscc  %g1, %i3, %g1
38	mulscc  %g1, %i3, %g1
39	mulscc  %g1, %i3, %g1
40	mulscc  %g1, %i3, %g1
41	mulscc  %g1, %i3, %g1
42	mulscc  %g1, %i3, %g1
43	mulscc  %g1, %i3, %g1
44	mulscc  %g1, %i3, %g1
45	mulscc  %g1, %i3, %g1
46	mulscc  %g1, %i3, %g1
47	mulscc  %g1, %i3, %g1
48	mulscc  %g1, %i3, %g1
49	mulscc  %g1, %i3, %g1
50	mulscc  %g1, %i3, %g1
51	mulscc  %g1, %i3, %g1
52	mulscc  %g1, %i3, %g1
53	mulscc  %g1, %i3, %g1
54	mulscc  %g1, %i3, %g1
55	mulscc  %g1, %i3, %g1
56	mulscc  %g1, %i3, %g1
57	mulscc  %g1, %i3, %g1
58	mulscc  %g1, %i3, %g1
59	mulscc  %g1, %i3, %g1
60	mulscc  %g1, %i3, %g1
61	mulscc  %g1, 0, %g1	! final shift
62	add  %g1, %g2, %l2	! %l2 = high word of %i1*%i3, sign-corrected
63	rd  %y, %o1
64	mov  %o1, %l3		! %l3 = low word of the final result
65	mov  %i1, %o0
66	call  .umul		! cross product low(a) * high(b)
67	mov  %i2, %o1
68	mov  %o0, %l0
69	mov  %i0, %o0
70	call  .umul		! cross product high(a) * low(b)
71	mov  %i3, %o1
72	add  %l0, %o0, %l0	! sum of both cross products
	! (dead "mov %l2, %i0" removed here: %i0 was unconditionally
	!  overwritten by the add on the next line)
74	add  %l2, %l0, %i0	! high word = carry word + cross products
75	ret
76	restore  %g0, %l3, %o1	! low word handed back in caller's %o1
diff --git a/arch/sparc/lib/rem.S b/arch/sparc/lib/rem.S
new file mode 100644
index 000000000000..44508148d055
--- /dev/null
+++ b/arch/sparc/lib/rem.S
@@ -0,0 +1,382 @@
1/* $Id: rem.S,v 1.7 1996/09/30 02:22:34 davem Exp $
2 * rem.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6
7/* This file is generated from divrem.m4; DO NOT EDIT! */
8/*
9 * Division and remainder, from Appendix E of the Sparc Version 8
10 * Architecture Manual, with fixes from Gordon Irlam.
11 */
12
13/*
14 * Input: dividend and divisor in %o0 and %o1 respectively.
15 *
16 * m4 parameters:
17 * .rem name of function to generate
18 * rem rem=div => %o0 / %o1; rem=rem => %o0 % %o1
19 * true true=true => signed; true=false => unsigned
20 *
21 * Algorithm parameters:
22 * N how many bits per iteration we try to get (4)
23 * WORDSIZE total number of bits (32)
24 *
25 * Derived constants:
26 * TOPBITS number of bits in the top decade of a number
27 *
28 * Important variables:
29 * Q the partial quotient under development (initially 0)
30 * R the remainder so far, initially the dividend
31 * ITER number of main division loop iterations required;
32 * equal to ceil(log2(quotient) / N). Note that this
33 * is the log base (2^N) of the quotient.
34 * V the current comparand, initially divisor*2^(ITER*N-1)
35 *
36 * Cost:
37 * Current estimate for non-large dividend is
38 * ceil(log2(quotient) / N) * (10 + 7N/2) + C
39 * A large dividend is one greater than 2^(31-TOPBITS) and takes a
40 * different path, as the upper bits of the quotient must be developed
41 * one bit at a time.
42 */
43
44
45	.globl .rem
46.rem:
	/* Signed remainder: %o0 % %o1 -> %o0 (C99-style, result takes the
	 * sign of the dividend, which is saved in %g2 below).  Generated
	 * from divrem.m4 -- the real fix for any defect belongs in the m4
	 * source, not here.  Register roles: %o3 = R (remainder), %o5 = V
	 * (scaled divisor), %o2 = Q, %o4 = ITER, %g7 = single-bit step
	 * count.  .rem_patch at the end is the V8 sdiv replacement. */
47	! compute sign of result; if neither is negative, no problem
48	orcc	%o1, %o0, %g0	! either negative?
49	bge	2f			! no, go do the divide
50	mov	%o0, %g2	! compute sign in any case
51
52	tst	%o1
53	bge	1f
54	tst	%o0
55	! %o1 is definitely negative; %o0 might also be negative
56	bge	2f			! if %o0 not negative...
57	sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
581:	! %o0 is negative, %o1 is nonnegative
59	sub	%g0, %o0, %o0	! make %o0 nonnegative
602:
61
62	! Ready to divide.  Compute size of quotient; scale comparand.
63	orcc	%o1, %g0, %o5
64	bne	1f
65	mov	%o0, %o3
66
67		! Divide by zero trap.  If it returns, return 0 (about as
68		! wrong as possible, but that is what SunOS does...).
69	ta	ST_DIV0
70	retl
71	clr	%o0
72
731:
74	cmp	%o3, %o5			! if %o1 exceeds %o0, done
75	blu	Lgot_result		! (and algorithm fails otherwise)
76	clr	%o2
77
78	sethi	%hi(1 << (32 - 4 - 1)), %g1
79
80	cmp	%o3, %g1
81	blu	Lnot_really_big
82	clr	%o4
83
84	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
85	! as our usual N-at-a-shot divide step will cause overflow and havoc.
86	! The number of bits in the result here is N*ITER+SC, where SC <= N.
87	! Compute ITER in an unorthodox manner: know we need to shift V into
88	! the top decade: so do not even bother to compare to R.
89	1:
90	cmp	%o5, %g1
91	bgeu	3f
92	mov	1, %g7
93
94	sll	%o5, 4, %o5
95
96	b	1b
97	add	%o4, 1, %o4
98
99	! Now compute %g7.
100	2:
101	addcc	%o5, %o5, %o5
102
103	bcc	Lnot_too_big
104	add	%g7, 1, %g7
105
106	! We get here if the %o1 overflowed while shifting.
107	! This means that %o3 has the high-order bit set.
108	! Restore %o5 and subtract from %o3.
109	sll	%g1, 4, %g1	! high order bit
110	srl	%o5, 1, %o5		! rest of %o5
111	add	%o5, %g1, %o5
112
113	b	Ldo_single_div
114	sub	%g7, 1, %g7
115
116	Lnot_too_big:
117	3:
118	cmp	%o5, %o3
119	blu	2b
120	nop
121
122	be	Ldo_single_div
123	nop
124	/* NB: these are commented out in the V8-Sparc manual as well */
125	/* (I do not understand this) */
126	! %o5 > %o3: went too far: back up 1 step
127	!	srl	%o5, 1, %o5
128	!	dec	%g7
129	! do single-bit divide steps
130	!
131	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
132	! first divide step without thinking.  BUT, the others are conditional,
133	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
134	! order bit set in the first step, just falling into the regular
135	! division loop will mess up the first time around.
136	! So we unroll slightly...
137	Ldo_single_div:
138	subcc	%g7, 1, %g7
139	bl	Lend_regular_divide
140	nop
141
142	sub	%o3, %o5, %o3
143	mov	1, %o2
144
145	b	Lend_single_divloop
146	nop
147	Lsingle_divloop:
148	sll	%o2, 1, %o2
149
150	bl	1f
151	srl	%o5, 1, %o5
152	! %o3 >= 0
153	sub	%o3, %o5, %o3
154
155	b	2f
156	add	%o2, 1, %o2
157	1:	! %o3 < 0
158	add	%o3, %o5, %o3
159	sub	%o2, 1, %o2
160	2:
161	Lend_single_divloop:
162	subcc	%g7, 1, %g7
163	bge	Lsingle_divloop
164	tst	%o3
165
166	b,a	Lend_regular_divide
167
168Lnot_really_big:
1691:
170	sll	%o5, 4, %o5
171	cmp	%o5, %o3
172	bleu	1b
173	addcc	%o4, 1, %o4
174	be	Lgot_result
175	sub	%o4, 1, %o4
176
177	tst	%o3	! set up for initial iteration
178Ldivloop:
	! 4-bits-per-iteration non-restoring divide: the unrolled tree below
	! accumulates +/-1 quotient digits; labels encode depth/accumulator.
179	sll	%o2, 4, %o2
180	! depth 1, accumulated bits 0
181	bl	L.1.16
182	srl	%o5,1,%o5
183	! remainder is positive
184	subcc	%o3,%o5,%o3
185	! depth 2, accumulated bits 1
186	bl	L.2.17
187	srl	%o5,1,%o5
188	! remainder is positive
189	subcc	%o3,%o5,%o3
190	! depth 3, accumulated bits 3
191	bl	L.3.19
192	srl	%o5,1,%o5
193	! remainder is positive
194	subcc	%o3,%o5,%o3
195	! depth 4, accumulated bits 7
196	bl	L.4.23
197	srl	%o5,1,%o5
198	! remainder is positive
199	subcc	%o3,%o5,%o3
200
201	b	9f
202	add	%o2, (7*2+1), %o2
203
204L.4.23:
205	! remainder is negative
206	addcc	%o3,%o5,%o3
207	b	9f
208	add	%o2, (7*2-1), %o2
209
210L.3.19:
211	! remainder is negative
212	addcc	%o3,%o5,%o3
213	! depth 4, accumulated bits 5
214	bl	L.4.21
215	srl	%o5,1,%o5
216	! remainder is positive
217	subcc	%o3,%o5,%o3
218	b	9f
219	add	%o2, (5*2+1), %o2
220
221L.4.21:
222	! remainder is negative
223	addcc	%o3,%o5,%o3
224	b	9f
225	add	%o2, (5*2-1), %o2
226
227L.2.17:
228	! remainder is negative
229	addcc	%o3,%o5,%o3
230	! depth 3, accumulated bits 1
231	bl	L.3.17
232	srl	%o5,1,%o5
233	! remainder is positive
234	subcc	%o3,%o5,%o3
235	! depth 4, accumulated bits 3
236	bl	L.4.19
237	srl	%o5,1,%o5
238	! remainder is positive
239	subcc	%o3,%o5,%o3
240	b	9f
241	add	%o2, (3*2+1), %o2
242
243L.4.19:
244	! remainder is negative
245	addcc	%o3,%o5,%o3
246	b	9f
247	add	%o2, (3*2-1), %o2
248
249L.3.17:
250	! remainder is negative
251	addcc	%o3,%o5,%o3
252	! depth 4, accumulated bits 1
253	bl	L.4.17
254	srl	%o5,1,%o5
255	! remainder is positive
256	subcc	%o3,%o5,%o3
257	b	9f
258	add	%o2, (1*2+1), %o2
259
260L.4.17:
261	! remainder is negative
262	addcc	%o3,%o5,%o3
263	b	9f
264	add	%o2, (1*2-1), %o2
265
266L.1.16:
267	! remainder is negative
268	addcc	%o3,%o5,%o3
269	! depth 2, accumulated bits -1
270	bl	L.2.15
271	srl	%o5,1,%o5
272	! remainder is positive
273	subcc	%o3,%o5,%o3
274	! depth 3, accumulated bits -1
275	bl	L.3.15
276	srl	%o5,1,%o5
277	! remainder is positive
278	subcc	%o3,%o5,%o3
279	! depth 4, accumulated bits -1
280	bl	L.4.15
281	srl	%o5,1,%o5
282	! remainder is positive
283	subcc	%o3,%o5,%o3
284	b	9f
285	add	%o2, (-1*2+1), %o2
286
287L.4.15:
288	! remainder is negative
289	addcc	%o3,%o5,%o3
290	b	9f
291	add	%o2, (-1*2-1), %o2
292
293L.3.15:
294	! remainder is negative
295	addcc	%o3,%o5,%o3
296	! depth 4, accumulated bits -3
297	bl	L.4.13
298	srl	%o5,1,%o5
299	! remainder is positive
300	subcc	%o3,%o5,%o3
301	b	9f
302	add	%o2, (-3*2+1), %o2
303
304L.4.13:
305	! remainder is negative
306	addcc	%o3,%o5,%o3
307	b	9f
308	add	%o2, (-3*2-1), %o2
309
310L.2.15:
311	! remainder is negative
312	addcc	%o3,%o5,%o3
313	! depth 3, accumulated bits -3
314	bl	L.3.13
315	srl	%o5,1,%o5
316	! remainder is positive
317	subcc	%o3,%o5,%o3
318	! depth 4, accumulated bits -5
319	bl	L.4.11
320	srl	%o5,1,%o5
321	! remainder is positive
322	subcc	%o3,%o5,%o3
323	b	9f
324	add	%o2, (-5*2+1), %o2
325
326L.4.11:
327	! remainder is negative
328	addcc	%o3,%o5,%o3
329	b	9f
330	add	%o2, (-5*2-1), %o2
331
332
333L.3.13:
334	! remainder is negative
335	addcc	%o3,%o5,%o3
336	! depth 4, accumulated bits -7
337	bl	L.4.9
338	srl	%o5,1,%o5
339	! remainder is positive
340	subcc	%o3,%o5,%o3
341	b	9f
342	add	%o2, (-7*2+1), %o2
343
344L.4.9:
345	! remainder is negative
346	addcc	%o3,%o5,%o3
347	b	9f
348	add	%o2, (-7*2-1), %o2
349
350	9:
351Lend_regular_divide:
352	subcc	%o4, 1, %o4
353	bge	Ldivloop
354	tst	%o3
355
356	bl,a	Lgot_result
357	! non-restoring fixup here (one instruction only!)
358	add	%o3, %o1, %o3
359
360Lgot_result:
361	! check to see if answer should be < 0
362	tst	%g2
363	bl,a	1f
364	sub	%g0, %o3, %o3
3651:
366	retl
367	mov	%o3, %o0
368
369	.globl	.rem_patch
370.rem_patch:
	! V8 replacement: sdivcc-based remainder; nops presumably pad the
	! Y-register write hazard and the patch site -- TODO confirm.
371	sra	%o0, 0x1f, %o4
372	wr	%o4, 0x0, %y
373	nop
374	nop
375	nop
376	sdivcc	%o0, %o1, %o2
377	bvs,a	1f
378	xnor	%o2, %g0, %o2
3791:	smul	%o2, %o1, %o2
380	retl
381	sub	%o0, %o2, %o0
382	nop
diff --git a/arch/sparc/lib/rwsem.S b/arch/sparc/lib/rwsem.S
new file mode 100644
index 000000000000..e7578dc600b8
--- /dev/null
+++ b/arch/sparc/lib/rwsem.S
@@ -0,0 +1,205 @@
1/* $Id: rwsem.S,v 1.5 2000/05/09 17:40:13 davem Exp $
2 * Assembly part of rw semaphores.
3 *
4 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <asm/ptrace.h>
9#include <asm/psr.h>
10
11 .section .sched.text
12 .align 4
13
14	.globl	___down_read
15___down_read:
	/* rw-semaphore down_read slow-path helper.
	 * NOTE(review): non-standard calling convention -- %g1 appears to
	 * hold the semaphore pointer and %g4 the caller's real return
	 * address (return is "jmpl %o7" restoring %g4 into %o7); confirm
	 * against the inline stubs in asm/semaphore.h.  The nop runs pad
	 * the %psr read/write hazards. */
16	rd	%psr, %g3
17	nop
18	nop
19	nop
20	or	%g3, PSR_PIL, %g7	! mask interrupts while touching the count
21	wr	%g7, 0, %psr
22	nop
23	nop
24	nop
25#ifdef CONFIG_SMP
261:	ldstub	[%g1 + 4], %g7		! spin on the byte lock at sem+4
27	tst	%g7
28	bne	1b
29	ld	[%g1], %g7
30	sub	%g7, 1, %g7		! reader: decrement the count
31	st	%g7, [%g1]
32	stb	%g0, [%g1 + 4]		! release the byte lock
33#else
34	ld	[%g1], %g7
35	sub	%g7, 1, %g7
36	st	%g7, [%g1]
37#endif
38	wr	%g3, 0, %psr		! restore the original PIL
39	add	%g7, 1, %g7
40	nop
41	nop
42	subcc	%g7, 1, %g7		! redo the decrement to regenerate flags
43	bneg	3f			! went negative: contended, slow path
44	nop
452:	jmpl	%o7, %g0
46	mov	%g4, %o7		! hand the real return address back
473:	save	%sp, -64, %sp
48	mov	%g1, %l1
49	mov	%g4, %l4
50	bcs	4f			! carry from the subcc above: presumably the biased case -- confirm
51	mov	%g5, %l5
52	call	down_read_failed
53	mov	%l1, %o0
54	mov	%l1, %g1
55	mov	%l4, %g4
56	ba	___down_read		! retry the whole acquire
57	restore	%l5, %g0, %g5
584:	call	down_read_failed_biased
59	mov	%l1, %o0
60	mov	%l1, %g1
61	mov	%l4, %g4
62	ba	2b
63	restore	%l5, %g0, %g5
64
65	.globl	___down_write
66___down_write:
	/* rw-semaphore down_write slow-path helper; same special calling
	 * convention as ___down_read above (%g1 = sem, %g4 = real return
	 * address -- NOTE(review): confirm against asm/semaphore.h).
	 * A writer subtracts the bias 0x01000000 instead of 1. */
67	rd	%psr, %g3
68	nop
69	nop
70	nop
71	or	%g3, PSR_PIL, %g7	! mask interrupts around the update
72	wr	%g7, 0, %psr
73	sethi	%hi(0x01000000), %g2	! %g2 = writer bias
74	nop
75	nop
76#ifdef CONFIG_SMP
771:	ldstub	[%g1 + 4], %g7		! spin on the byte lock at sem+4
78	tst	%g7
79	bne	1b
80	ld	[%g1], %g7
81	sub	%g7, %g2, %g7
82	st	%g7, [%g1]
83	stb	%g0, [%g1 + 4]		! release the byte lock
84#else
85	ld	[%g1], %g7
86	sub	%g7, %g2, %g7
87	st	%g7, [%g1]
88#endif
89	wr	%g3, 0, %psr		! restore the original PIL
90	add	%g7, %g2, %g7
91	nop
92	nop
93	subcc	%g7, %g2, %g7		! redo the subtract to regenerate flags
94	bne	3f			! nonzero: semaphore was held, slow path
95	nop
962:	jmpl	%o7, %g0
97	mov	%g4, %o7
983:	save	%sp, -64, %sp
99	mov	%g1, %l1
100	mov	%g4, %l4
101	bcs	4f			! carry: presumably the biased case -- confirm
102	mov	%g5, %l5
103	call	down_write_failed
104	mov	%l1, %o0
105	mov	%l1, %g1
106	mov	%l4, %g4
107	ba	___down_write		! retry the whole acquire
108	restore	%l5, %g0, %g5
1094:	call	down_write_failed_biased
110	mov	%l1, %o0
111	mov	%l1, %g1
112	mov	%l4, %g4
113	ba	2b
114	restore	%l5, %g0, %g5
115
116	.text
117	.globl	___up_read
118___up_read:
	/* rw-semaphore up_read helper; same special calling convention as
	 * the down routines (%g1 = sem, %g4 = real return address --
	 * NOTE(review): confirm against asm/semaphore.h). */
119	rd	%psr, %g3
120	nop
121	nop
122	nop
123	or	%g3, PSR_PIL, %g7	! mask interrupts around the update
124	wr	%g7, 0, %psr
125	nop
126	nop
127	nop
128#ifdef CONFIG_SMP
1291:	ldstub	[%g1 + 4], %g7		! spin on the byte lock at sem+4
130	tst	%g7
131	bne	1b
132	ld	[%g1], %g7
133	add	%g7, 1, %g7		! reader leaving: increment the count
134	st	%g7, [%g1]
135	stb	%g0, [%g1 + 4]		! release the byte lock
136#else
137	ld	[%g1], %g7
138	add	%g7, 1, %g7
139	st	%g7, [%g1]
140#endif
141	wr	%g3, 0, %psr		! restore the original PIL
142	nop
143	nop
144	nop
145	cmp	%g7, 0
146	be	3f			! count hit zero: waiters may need waking
147	nop
1482:	jmpl	%o7, %g0
149	mov	%g4, %o7
1503:	save	%sp, -64, %sp
151	mov	%g1, %l1
152	mov	%g4, %l4
153	mov	%g5, %l5
154	clr	%o1			! second arg 0: reader-side wakeup
155	call	__rwsem_wake
156	mov	%l1, %o0
157	mov	%l1, %g1
158	mov	%l4, %g4
159	ba	2b
160	restore	%l5, %g0, %g5
161
162	.globl	___up_write
163___up_write:
	/* rw-semaphore up_write helper; adds the writer bias 0x01000000
	 * back.  Same special calling convention as the routines above
	 * (%g1 = sem, %g4 = real return address -- NOTE(review): confirm
	 * against asm/semaphore.h). */
164	rd	%psr, %g3
165	nop
166	nop
167	nop
168	or	%g3, PSR_PIL, %g7	! mask interrupts around the update
169	wr	%g7, 0, %psr
170	sethi	%hi(0x01000000), %g2	! %g2 = writer bias
171	nop
172	nop
173#ifdef CONFIG_SMP
1741:	ldstub	[%g1 + 4], %g7		! spin on the byte lock at sem+4
175	tst	%g7
176	bne	1b
177	ld	[%g1], %g7
178	add	%g7, %g2, %g7
179	st	%g7, [%g1]
180	stb	%g0, [%g1 + 4]		! release the byte lock
181#else
182	ld	[%g1], %g7
183	add	%g7, %g2, %g7
184	st	%g7, [%g1]
185#endif
186	wr	%g3, 0, %psr		! restore the original PIL
187	sub	%g7, %g2, %g7
188	nop
189	nop
190	addcc	%g7, %g2, %g7		! redo the add to regenerate flags
191	bcs	3f			! carry: waiters present, go wake them
192	nop
1932:	jmpl	%o7, %g0
194	mov	%g4, %o7
1953:	save	%sp, -64, %sp
196	mov	%g1, %l1
197	mov	%g4, %l4
198	mov	%g5, %l5
199	mov	%g7, %o1		! pass the resulting count to the waker
200	call	__rwsem_wake
201	mov	%l1, %o0
202	mov	%l1, %g1
203	mov	%l4, %g4
204	ba	2b
205	restore	%l5, %g0, %g5
diff --git a/arch/sparc/lib/sdiv.S b/arch/sparc/lib/sdiv.S
new file mode 100644
index 000000000000..e0ad80b6f63d
--- /dev/null
+++ b/arch/sparc/lib/sdiv.S
@@ -0,0 +1,379 @@
1/* $Id: sdiv.S,v 1.6 1996/10/02 17:37:00 davem Exp $
2 * sdiv.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6
7/* This file is generated from divrem.m4; DO NOT EDIT! */
8/*
9 * Division and remainder, from Appendix E of the Sparc Version 8
10 * Architecture Manual, with fixes from Gordon Irlam.
11 */
12
13/*
14 * Input: dividend and divisor in %o0 and %o1 respectively.
15 *
16 * m4 parameters:
17 * .div name of function to generate
18 * div div=div => %o0 / %o1; div=rem => %o0 % %o1
19 * true true=true => signed; true=false => unsigned
20 *
21 * Algorithm parameters:
22 * N how many bits per iteration we try to get (4)
23 * WORDSIZE total number of bits (32)
24 *
25 * Derived constants:
26 * TOPBITS number of bits in the top decade of a number
27 *
28 * Important variables:
29 * Q the partial quotient under development (initially 0)
30 * R the remainder so far, initially the dividend
31 * ITER number of main division loop iterations required;
32 * equal to ceil(log2(quotient) / N). Note that this
33 * is the log base (2^N) of the quotient.
34 * V the current comparand, initially divisor*2^(ITER*N-1)
35 *
36 * Cost:
37 * Current estimate for non-large dividend is
38 * ceil(log2(quotient) / N) * (10 + 7N/2) + C
39 * A large dividend is one greater than 2^(31-TOPBITS) and takes a
40 * different path, as the upper bits of the quotient must be developed
41 * one bit at a time.
42 */
43
44
45	.globl .div
46.div:
	/* Signed division: %o0 / %o1 -> %o0, truncating toward zero.
	 * Generated from divrem.m4 -- any defect should be fixed in the m4
	 * source, not here.  Differs from .rem only in the sign rule (%g2
	 * = XOR of operand signs) and the final fixup/result register
	 * (%o2 = quotient).  Register roles: %o3 = R, %o5 = V, %o2 = Q,
	 * %o4 = ITER, %g7 = single-bit step count.  .div_patch at the end
	 * is the V8 sdiv replacement. */
47	! compute sign of result; if neither is negative, no problem
48	orcc	%o1, %o0, %g0	! either negative?
49	bge	2f			! no, go do the divide
50	xor	%o1, %o0, %g2	! compute sign in any case
51
52	tst	%o1
53	bge	1f
54	tst	%o0
55	! %o1 is definitely negative; %o0 might also be negative
56	bge	2f			! if %o0 not negative...
57	sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
581:	! %o0 is negative, %o1 is nonnegative
59	sub	%g0, %o0, %o0	! make %o0 nonnegative
602:
61
62	! Ready to divide.  Compute size of quotient; scale comparand.
63	orcc	%o1, %g0, %o5
64	bne	1f
65	mov	%o0, %o3
66
67		! Divide by zero trap.  If it returns, return 0 (about as
68		! wrong as possible, but that is what SunOS does...).
69	ta	ST_DIV0
70	retl
71	clr	%o0
72
731:
74	cmp	%o3, %o5			! if %o1 exceeds %o0, done
75	blu	Lgot_result		! (and algorithm fails otherwise)
76	clr	%o2
77
78	sethi	%hi(1 << (32 - 4 - 1)), %g1
79
80	cmp	%o3, %g1
81	blu	Lnot_really_big
82	clr	%o4
83
84	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
85	! as our usual N-at-a-shot divide step will cause overflow and havoc.
86	! The number of bits in the result here is N*ITER+SC, where SC <= N.
87	! Compute ITER in an unorthodox manner: know we need to shift V into
88	! the top decade: so do not even bother to compare to R.
89	1:
90	cmp	%o5, %g1
91	bgeu	3f
92	mov	1, %g7
93
94	sll	%o5, 4, %o5
95
96	b	1b
97	add	%o4, 1, %o4
98
99	! Now compute %g7.
100	2:
101	addcc	%o5, %o5, %o5
102	bcc	Lnot_too_big
103	add	%g7, 1, %g7
104
105	! We get here if the %o1 overflowed while shifting.
106	! This means that %o3 has the high-order bit set.
107	! Restore %o5 and subtract from %o3.
108	sll	%g1, 4, %g1	! high order bit
109	srl	%o5, 1, %o5		! rest of %o5
110	add	%o5, %g1, %o5
111
112	b	Ldo_single_div
113	sub	%g7, 1, %g7
114
115	Lnot_too_big:
116	3:
117	cmp	%o5, %o3
118	blu	2b
119	nop
120
121	be	Ldo_single_div
122	nop
123	/* NB: these are commented out in the V8-Sparc manual as well */
124	/* (I do not understand this) */
125	! %o5 > %o3: went too far: back up 1 step
126	!	srl	%o5, 1, %o5
127	!	dec	%g7
128	! do single-bit divide steps
129	!
130	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
131	! first divide step without thinking.  BUT, the others are conditional,
132	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
133	! order bit set in the first step, just falling into the regular
134	! division loop will mess up the first time around.
135	! So we unroll slightly...
136	Ldo_single_div:
137	subcc	%g7, 1, %g7
138	bl	Lend_regular_divide
139	nop
140
141	sub	%o3, %o5, %o3
142	mov	1, %o2
143
144	b	Lend_single_divloop
145	nop
146	Lsingle_divloop:
147	sll	%o2, 1, %o2
148
149	bl	1f
150	srl	%o5, 1, %o5
151	! %o3 >= 0
152	sub	%o3, %o5, %o3
153
154	b	2f
155	add	%o2, 1, %o2
156	1:	! %o3 < 0
157	add	%o3, %o5, %o3
158	sub	%o2, 1, %o2
159	2:
160	Lend_single_divloop:
161	subcc	%g7, 1, %g7
162	bge	Lsingle_divloop
163	tst	%o3
164
165	b,a	Lend_regular_divide
166
167Lnot_really_big:
1681:
169	sll	%o5, 4, %o5
170	cmp	%o5, %o3
171	bleu	1b
172	addcc	%o4, 1, %o4
173
174	be	Lgot_result
175	sub	%o4, 1, %o4
176
177	tst	%o3	! set up for initial iteration
178Ldivloop:
	! 4-bits-per-iteration non-restoring divide: the unrolled tree below
	! accumulates +/-1 quotient digits; labels encode depth/accumulator.
179	sll	%o2, 4, %o2
180	! depth 1, accumulated bits 0
181	bl	L.1.16
182	srl	%o5,1,%o5
183	! remainder is positive
184	subcc	%o3,%o5,%o3
185	! depth 2, accumulated bits 1
186	bl	L.2.17
187	srl	%o5,1,%o5
188	! remainder is positive
189	subcc	%o3,%o5,%o3
190	! depth 3, accumulated bits 3
191	bl	L.3.19
192	srl	%o5,1,%o5
193	! remainder is positive
194	subcc	%o3,%o5,%o3
195	! depth 4, accumulated bits 7
196	bl	L.4.23
197	srl	%o5,1,%o5
198	! remainder is positive
199	subcc	%o3,%o5,%o3
200	b	9f
201	add	%o2, (7*2+1), %o2
202
203L.4.23:
204	! remainder is negative
205	addcc	%o3,%o5,%o3
206	b	9f
207	add	%o2, (7*2-1), %o2
208
209L.3.19:
210	! remainder is negative
211	addcc	%o3,%o5,%o3
212	! depth 4, accumulated bits 5
213	bl	L.4.21
214	srl	%o5,1,%o5
215	! remainder is positive
216	subcc	%o3,%o5,%o3
217	b	9f
218	add	%o2, (5*2+1), %o2
219
220L.4.21:
221	! remainder is negative
222	addcc	%o3,%o5,%o3
223	b	9f
224	add	%o2, (5*2-1), %o2
225
226L.2.17:
227	! remainder is negative
228	addcc	%o3,%o5,%o3
229	! depth 3, accumulated bits 1
230	bl	L.3.17
231	srl	%o5,1,%o5
232	! remainder is positive
233	subcc	%o3,%o5,%o3
234	! depth 4, accumulated bits 3
235	bl	L.4.19
236	srl	%o5,1,%o5
237	! remainder is positive
238	subcc	%o3,%o5,%o3
239	b	9f
240	add	%o2, (3*2+1), %o2
241
242L.4.19:
243	! remainder is negative
244	addcc	%o3,%o5,%o3
245	b	9f
246	add	%o2, (3*2-1), %o2
247

249L.3.17:
250	! remainder is negative
251	addcc	%o3,%o5,%o3
252	! depth 4, accumulated bits 1
253	bl	L.4.17
254	srl	%o5,1,%o5
255	! remainder is positive
256	subcc	%o3,%o5,%o3
257	b	9f
258	add	%o2, (1*2+1), %o2
259
260L.4.17:
261	! remainder is negative
262	addcc	%o3,%o5,%o3
263	b	9f
264	add	%o2, (1*2-1), %o2
265
266L.1.16:
267	! remainder is negative
268	addcc	%o3,%o5,%o3
269	! depth 2, accumulated bits -1
270	bl	L.2.15
271	srl	%o5,1,%o5
272	! remainder is positive
273	subcc	%o3,%o5,%o3
274	! depth 3, accumulated bits -1
275	bl	L.3.15
276	srl	%o5,1,%o5
277	! remainder is positive
278	subcc	%o3,%o5,%o3
279	! depth 4, accumulated bits -1
280	bl	L.4.15
281	srl	%o5,1,%o5
282	! remainder is positive
283	subcc	%o3,%o5,%o3
284	b	9f
285	add	%o2, (-1*2+1), %o2
286
287L.4.15:
288	! remainder is negative
289	addcc	%o3,%o5,%o3
290	b	9f
291	add	%o2, (-1*2-1), %o2
292
293L.3.15:
294	! remainder is negative
295	addcc	%o3,%o5,%o3
296	! depth 4, accumulated bits -3
297	bl	L.4.13
298	srl	%o5,1,%o5
299	! remainder is positive
300	subcc	%o3,%o5,%o3
301	b	9f
302	add	%o2, (-3*2+1), %o2
303
304L.4.13:
305	! remainder is negative
306	addcc	%o3,%o5,%o3
307	b	9f
308	add	%o2, (-3*2-1), %o2
309
310L.2.15:
311	! remainder is negative
312	addcc	%o3,%o5,%o3
313	! depth 3, accumulated bits -3
314	bl	L.3.13
315	srl	%o5,1,%o5
316	! remainder is positive
317	subcc	%o3,%o5,%o3
318	! depth 4, accumulated bits -5
319	bl	L.4.11
320	srl	%o5,1,%o5
321	! remainder is positive
322	subcc	%o3,%o5,%o3
323	b	9f
324	add	%o2, (-5*2+1), %o2
325
326L.4.11:
327	! remainder is negative
328	addcc	%o3,%o5,%o3
329	b	9f
330	add	%o2, (-5*2-1), %o2
331
332L.3.13:
333	! remainder is negative
334	addcc	%o3,%o5,%o3
335	! depth 4, accumulated bits -7
336	bl	L.4.9
337	srl	%o5,1,%o5
338	! remainder is positive
339	subcc	%o3,%o5,%o3
340	b	9f
341	add	%o2, (-7*2+1), %o2
342
343L.4.9:
344	! remainder is negative
345	addcc	%o3,%o5,%o3
346	b	9f
347	add	%o2, (-7*2-1), %o2
348
349	9:
350Lend_regular_divide:
351	subcc	%o4, 1, %o4
352	bge	Ldivloop
353	tst	%o3
354
355	bl,a	Lgot_result
356	! non-restoring fixup here (one instruction only!)
357	sub	%o2, 1, %o2
358
359Lgot_result:
360	! check to see if answer should be < 0
361	tst	%g2
362	bl,a	1f
363	sub	%g0, %o2, %o2
3641:
365	retl
366	mov	%o2, %o0
367
368	.globl	.div_patch
369.div_patch:
	! V8 replacement: sdivcc with overflow fixup; nops presumably pad
	! the Y-register write hazard and the patch site -- TODO confirm.
370	sra	%o0, 0x1f, %o2
371	wr	%o2, 0x0, %y
372	nop
373	nop
374	nop
375	sdivcc	%o0, %o1, %o0
376	bvs,a	1f
377	xnor	%o0, %g0, %o0
3781:	retl
379	nop
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
new file mode 100644
index 000000000000..ed9a763368cd
--- /dev/null
+++ b/arch/sparc/lib/strlen.S
@@ -0,0 +1,81 @@
1/* strlen.S: Sparc optimized strlen code
2 * Hand optimized from GNU libc's strlen
3 * Copyright (C) 1991,1996 Free Software Foundation
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#define LO_MAGIC 0x01010101
9#define HI_MAGIC 0x80808080
10
110:
	/* Alignment prologue for strlen (entry point is the strlen label
	 * below): consume up to 3 leading bytes until %o0 is word-aligned,
	 * returning early (length 0/1/2) if a NUL is hit.  The magic-number
	 * loads are interleaved into the branch delay slots. */
12	ldub	[%o0], %o5
13	cmp	%o5, 0
14	be	1f
15	add	%o0, 1, %o0
16	andcc	%o0, 3, %g0
17	be	4f
18	or	%o4, %lo(HI_MAGIC), %o3
19	ldub	[%o0], %o5
20	cmp	%o5, 0
21	be	2f
22	add	%o0, 1, %o0
23	andcc	%o0, 3, %g0
24	be	5f
25	sethi	%hi(LO_MAGIC), %o4
26	ldub	[%o0], %o5
27	cmp	%o5, 0
28	be	3f
29	add	%o0, 1, %o0
30	b	8f
31	or	%o4, %lo(LO_MAGIC), %o2
321:
33	retl
34	mov	0, %o0
352:
36	retl
37	mov	1, %o0
383:
39	retl
40	mov	2, %o0
41
42	.align 4
43	.global strlen
44strlen:
	/* strlen(%o0) -> %o0.  Word-at-a-time scan: for each word w,
	 * (w - 0x01010101) & 0x80808080 is nonzero iff some byte may be
	 * zero (classic libc trick); candidates are then checked byte by
	 * byte.  %o1 keeps the original pointer for the final subtract.
	 * NOTE(review): clobbers %g5 -- presumably acceptable under this
	 * kernel's global-register usage; confirm. */
45	mov	%o0, %o1
46	andcc	%o0, 3, %g0
47	bne	0b			! unaligned: go to the byte prologue
48	sethi	%hi(HI_MAGIC), %o4
49	or	%o4, %lo(HI_MAGIC), %o3
504:
51	sethi	%hi(LO_MAGIC), %o4
525:
53	or	%o4, %lo(LO_MAGIC), %o2
548:
55	ld	[%o0], %o5
562:
57	sub	%o5, %o2, %o4
58	andcc	%o4, %o3, %g0		! any byte possibly zero in this word?
59	be	8b
60	add	%o0, 4, %o0
61
62	/* Check every byte. */
63	srl	%o5, 24, %g5
64	andcc	%g5, 0xff, %g0
65	be	1f
66	add	%o0, -4, %o4
67	srl	%o5, 16, %g5
68	andcc	%g5, 0xff, %g0
69	be	1f
70	add	%o4, 1, %o4
71	srl	%o5, 8, %g5
72	andcc	%g5, 0xff, %g0
73	be	1f
74	add	%o4, 1, %o4
75	andcc	%o5, 0xff, %g0
76	bne,a	2b			! false positive: resume the word scan
77	ld	[%o0], %o5
78	add	%o4, 1, %o4
791:
80	retl
81	sub	%o4, %o1, %o0		! length = NUL address - start
diff --git a/arch/sparc/lib/strlen_user.S b/arch/sparc/lib/strlen_user.S
new file mode 100644
index 000000000000..8c8a371df3c9
--- /dev/null
+++ b/arch/sparc/lib/strlen_user.S
@@ -0,0 +1,109 @@
1/* strlen_user.S: Sparc optimized strlen_user code
2 *
3 * Return length of string in userspace including terminating 0
4 * or 0 for error
5 *
6 * Copyright (C) 1991,1996 Free Software Foundation
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#define LO_MAGIC 0x01010101
12#define HI_MAGIC 0x80808080
13
1410:
	/* Alignment prologue for __strlen_user/__strnlen_user below:
	 * byte-scan up to 3 bytes until %o0 is word-aligned; a NUL here
	 * returns length 1/2/3 (the count INCLUDES the terminator).
	 * Labels 10/11/12/13 are the user loads covered by __ex_table. */
15	ldub	[%o0], %o5
16	cmp	%o5, 0
17	be	1f
18	add	%o0, 1, %o0
19	andcc	%o0, 3, %g0
20	be	4f
21	or	%o4, %lo(HI_MAGIC), %o3
2211:
23	ldub	[%o0], %o5
24	cmp	%o5, 0
25	be	2f
26	add	%o0, 1, %o0
27	andcc	%o0, 3, %g0
28	be	5f
29	sethi	%hi(LO_MAGIC), %o4
3012:
31	ldub	[%o0], %o5
32	cmp	%o5, 0
33	be	3f
34	add	%o0, 1, %o0
35	b	13f
36	or	%o4, %lo(LO_MAGIC), %o2
371:
38	retl
39	mov	1, %o0
402:
41	retl
42	mov	2, %o0
433:
44	retl
45	mov	3, %o0
46
47	.align 4
48	.global __strlen_user, __strnlen_user
49__strlen_user:
	! Unbounded variant: fall through with a 32K default limit in %o1.
50	sethi	%hi(32768), %o1
51__strnlen_user:
	/* Length (including the NUL) of a user string at %o0, at most %o1
	 * bytes; faults return 0 via the fixup at 9: below.  Same
	 * magic-number word scan as strlen.S; %g1 = limit, %o1 = start.
	 * NOTE(review): clobbers %g2/%g5 -- confirm global-register rules. */
52	mov	%o1, %g1
53	mov	%o0, %o1
54	andcc	%o0, 3, %g0
55	bne	10b
56	sethi	%hi(HI_MAGIC), %o4
57	or	%o4, %lo(HI_MAGIC), %o3
584:
59	sethi	%hi(LO_MAGIC), %o4
605:
61	or	%o4, %lo(LO_MAGIC), %o2
6213:
63	ld	[%o0], %o5
642:
65	sub	%o5, %o2, %o4
66	andcc	%o4, %o3, %g0		! any byte possibly zero in this word?
67	bne	82f
68	add	%o0, 4, %o0
69	sub	%o0, %o1, %g2
7081:	cmp	%g2, %g1		! scanned length vs. limit
71	blu	13b
72	mov	%o0, %o4
73	ba,a	1f			! limit hit: return bytes scanned
74
75	/* Check every byte. */
7682:	srl	%o5, 24, %g5
77	andcc	%g5, 0xff, %g0
78	be	1f
79	add	%o0, -3, %o4		! -3, not -4: count includes the NUL
80	srl	%o5, 16, %g5
81	andcc	%g5, 0xff, %g0
82	be	1f
83	add	%o4, 1, %o4
84	srl	%o5, 8, %g5
85	andcc	%g5, 0xff, %g0
86	be	1f
87	add	%o4, 1, %o4
88	andcc	%o5, 0xff, %g0
89	bne	81b			! false positive: recheck limit, rescan
90	sub	%o0, %o1, %g2
91
92	add	%o4, 1, %o4
931:
94	retl
95	sub	%o4, %o1, %o0
96
97	.section .fixup,#alloc,#execinstr
98	.align	4
999:
	! Fault on any user load: report error as length 0.
100	retl
101	clr	%o0
102
103	.section __ex_table,#alloc
104	.align	4
105
	! Map each faultable user load above to the fixup at 9:.
106	.word	10b, 9b
107	.word	11b, 9b
108	.word	12b, 9b
109	.word	13b, 9b
diff --git a/arch/sparc/lib/strncmp.S b/arch/sparc/lib/strncmp.S
new file mode 100644
index 000000000000..615626805d4b
--- /dev/null
+++ b/arch/sparc/lib/strncmp.S
@@ -0,0 +1,118 @@
1/* $Id: strncmp.S,v 1.2 1996/09/09 02:47:20 davem Exp $
2 * strncmp.S: Hand optimized Sparc assembly of GCC output from GNU libc
3 * generic strncmp routine.
4 */
5
6	.text
7	.align 4
8	.global __strncmp, strncmp
9__strncmp:
10strncmp:
	/* strncmp(%o0, %o1, %o2) -> %o0: byte difference at the first
	 * mismatch/NUL, or 0 if the strings match for %o2 bytes.  The main
	 * loop is unrolled 4x (%o4 = %o2/4 groups), then the 9:/7: tail
	 * handles the remaining %o2&3 bytes one at a time.  %g3 walks the
	 * first string, %g2 holds the current byte of the second. */
11	mov	%o0, %g3
12	mov	0, %o3
13
14	cmp	%o2, 3
15	ble	7f			! short count: go straight to the byte tail
16	mov	0, %g2
17
18	sra	%o2, 2, %o4		! %o4 = number of 4-byte groups
19	ldub	[%g3], %o3
20
210:
22	ldub	[%o1], %g2
23	add	%g3, 1, %g3
24	and	%o3, 0xff, %o0
25
26	cmp	%o0, 0
27	be	8f			! NUL in first string: return difference
28	add	%o1, 1, %o1
29
30	cmp	%o0, %g2
31	be,a	1f			! annulled delay slot: next byte only on match
32	ldub	[%g3], %o3
33
34	retl
35	sub	%o0, %g2, %o0
36
371:
38	ldub	[%o1], %g2
39	add	%g3,1, %g3
40	and	%o3, 0xff, %o0
41
42	cmp	%o0, 0
43	be	8f
44	add	%o1, 1, %o1
45
46	cmp	%o0, %g2
47	be,a	1f
48	ldub	[%g3], %o3
49
50	retl
51	sub	%o0, %g2, %o0
52
531:
54	ldub	[%o1], %g2
55	add	%g3, 1, %g3
56	and	%o3, 0xff, %o0
57
58	cmp	%o0, 0
59	be	8f
60	add	%o1, 1, %o1
61
62	cmp	%o0, %g2
63	be,a	1f
64	ldub	[%g3], %o3
65
66	retl
67	sub	%o0, %g2, %o0
68
691:
70	ldub	[%o1], %g2
71	add	%g3, 1, %g3
72	and	%o3, 0xff, %o0
73
74	cmp	%o0, 0
75	be	8f
76	add	%o1, 1, %o1
77
78	cmp	%o0, %g2
79	be	1f
80	add	%o4, -1, %o4		! one 4-byte group done
81
82	retl
83	sub	%o0, %g2, %o0
84
851:
86
87	cmp	%o4, 0
88	bg,a	0b			! more groups left: loop
89	ldub	[%g3], %o3
90
91	b	7f			! fall into the byte tail
92	and	%o2, 3, %o2
93
949:
	! Byte tail: compare the remaining %o2 & 3 bytes.
95	ldub	[%o1], %g2
96	add	%g3, 1, %g3
97	and	%o3, 0xff, %o0
98
99	cmp	%o0, 0
100	be	8f
101	add	%o1, 1, %o1
102
103	cmp	%o0, %g2
104	be	7f
105	add	%o2, -1, %o2
106
1078:
108	retl
109	sub	%o0, %g2, %o0
110
1117:
112	cmp	%o2, 0
113	bg,a	9b
114	ldub	[%g3], %o3
115
	! Count exhausted: strings equal so far -> %o3 - %g2 is 0 here.
116	and	%g2, 0xff, %o0
117	retl
118	sub	%o3, %o0, %o0
diff --git a/arch/sparc/lib/strncpy_from_user.S b/arch/sparc/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..d77198976a66
--- /dev/null
+++ b/arch/sparc/lib/strncpy_from_user.S
@@ -0,0 +1,47 @@
1/* strncpy_from_user.S: Sparc strncpy from userspace.
2 *
3 * Copyright(C) 1996 David S. Miller
4 */
5
6#include <asm/ptrace.h>
7#include <asm/errno.h>
8
9 .text
10 .align 4
11
12 /* Must return:
13 *
14 * -EFAULT for an exception
15 * count if we hit the buffer limit
16 * bytes copied if we hit a null byte
17 */
18
19 .globl __strncpy_from_user
20__strncpy_from_user:
21 /* %o0=dest, %o1=src, %o2=count */
22 mov %o2, %o3 ! %o3 = original count, used for the return-value math
231:
24 subcc %o2, 1, %o2 ! consume one slot; going negative means buffer limit hit
25 bneg 2f
26 nop
2710: ! user-space load; may fault (see __ex_table entry below)
28 ldub [%o1], %o4
29 add %o0, 1, %o0
30 cmp %o4, 0
31 add %o1, 1, %o1
32 bne 1b
33 stb %o4, [%o0 - 1] ! store in delay slot, so the terminating NUL is copied too
342:
35 add %o2, 1, %o0 ! limit path: %o2 == -1 here, so %o0 = 0 and we return count
36 retl
37 sub %o3, %o0, %o0 ! NUL path: returns bytes copied, excluding the NUL
38
39 .section .fixup,#alloc,#execinstr
40 .align 4
414: ! fault handler, reached via __ex_table when the load at 10: faults
42 retl
43 mov -EFAULT, %o0
44
45 .section __ex_table,#alloc
46 .align 4
47 .word 10b, 4b ! (faulting insn, fixup) address pair
diff --git a/arch/sparc/lib/udiv.S b/arch/sparc/lib/udiv.S
new file mode 100644
index 000000000000..2abfc6b0f3e9
--- /dev/null
+++ b/arch/sparc/lib/udiv.S
@@ -0,0 +1,355 @@
1/* $Id: udiv.S,v 1.4 1996/09/30 02:22:38 davem Exp $
2 * udiv.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6
7/* This file is generated from divrem.m4; DO NOT EDIT! */
8/*
9 * Division and remainder, from Appendix E of the Sparc Version 8
10 * Architecture Manual, with fixes from Gordon Irlam.
11 */
12
13/*
14 * Input: dividend and divisor in %o0 and %o1 respectively.
15 *
16 * m4 parameters:
17 * .udiv name of function to generate
18 * div div=div => %o0 / %o1; div=rem => %o0 % %o1
19 * false false=true => signed; false=false => unsigned
20 *
21 * Algorithm parameters:
22 * N how many bits per iteration we try to get (4)
23 * WORDSIZE total number of bits (32)
24 *
25 * Derived constants:
26 * TOPBITS number of bits in the top decade of a number
27 *
28 * Important variables:
29 * Q the partial quotient under development (initially 0)
30 * R the remainder so far, initially the dividend
31 * ITER number of main division loop iterations required;
32 * equal to ceil(log2(quotient) / N). Note that this
33 * is the log base (2^N) of the quotient.
34 * V the current comparand, initially divisor*2^(ITER*N-1)
35 *
36 * Cost:
37 * Current estimate for non-large dividend is
38 * ceil(log2(quotient) / N) * (10 + 7N/2) + C
39 * A large dividend is one greater than 2^(31-TOPBITS) and takes a
40 * different path, as the upper bits of the quotient must be developed
41 * one bit at a time.
42 */
43
44
45 .globl .udiv
46.udiv: ! unsigned 32-bit divide: returns %o0 / %o1 in %o0 (generated code; comments only added)
47
48 ! Ready to divide. Compute size of quotient; scale comparand.
49 orcc %o1, %g0, %o5
50 bne 1f
51 mov %o0, %o3
52
53 ! Divide by zero trap. If it returns, return 0 (about as
54 ! wrong as possible, but that is what SunOS does...).
55 ta ST_DIV0 ! software trap: kernel delivers the divide-by-zero exception
56 retl
57 clr %o0
58
591:
60 cmp %o3, %o5 ! if %o1 exceeds %o0, done
61 blu Lgot_result ! (and algorithm fails otherwise)
62 clr %o2
63
64 sethi %hi(1 << (32 - 4 - 1)), %g1 ! 2^27: 'large dividend' threshold
65
66 cmp %o3, %g1
67 blu Lnot_really_big
68 clr %o4
69
70 ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
71 ! as our usual N-at-a-shot divide step will cause overflow and havoc.
72 ! The number of bits in the result here is N*ITER+SC, where SC <= N.
73 ! Compute ITER in an unorthodox manner: know we need to shift V into
74 ! the top decade: so do not even bother to compare to R.
75 1:
76 cmp %o5, %g1
77 bgeu 3f
78 mov 1, %g7
79
80 sll %o5, 4, %o5
81
82 b 1b
83 add %o4, 1, %o4
84
85 ! Now compute %g7.
86 2:
87 addcc %o5, %o5, %o5
88 bcc Lnot_too_big
89 add %g7, 1, %g7
90
91 ! We get here if the %o1 overflowed while shifting.
92 ! This means that %o3 has the high-order bit set.
93 ! Restore %o5 and subtract from %o3.
94 sll %g1, 4, %g1 ! high order bit
95 srl %o5, 1, %o5 ! rest of %o5
96 add %o5, %g1, %o5
97
98 b Ldo_single_div
99 sub %g7, 1, %g7
100
101 Lnot_too_big:
102 3:
103 cmp %o5, %o3
104 blu 2b
105 nop
106
107 be Ldo_single_div
108 nop
109 /* NB: these are commented out in the V8-Sparc manual as well */
110 /* (I do not understand this) */
111 ! %o5 > %o3: went too far: back up 1 step
112 ! srl %o5, 1, %o5
113 ! dec %g7
114 ! do single-bit divide steps
115 !
116 ! We have to be careful here. We know that %o3 >= %o5, so we can do the
117 ! first divide step without thinking. BUT, the others are conditional,
118 ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
119 ! order bit set in the first step, just falling into the regular
120 ! division loop will mess up the first time around.
121 ! So we unroll slightly...
122 Ldo_single_div:
123 subcc %g7, 1, %g7
124 bl Lend_regular_divide
125 nop
126
127 sub %o3, %o5, %o3
128 mov 1, %o2
129
130 b Lend_single_divloop
131 nop
132 Lsingle_divloop:
133 sll %o2, 1, %o2
134 bl 1f
135 srl %o5, 1, %o5
136 ! %o3 >= 0
137 sub %o3, %o5, %o3
138 b 2f
139 add %o2, 1, %o2
140 1: ! %o3 < 0
141 add %o3, %o5, %o3
142 sub %o2, 1, %o2
143 2:
144 Lend_single_divloop:
145 subcc %g7, 1, %g7
146 bge Lsingle_divloop
147 tst %o3
148
149 b,a Lend_regular_divide
150
151Lnot_really_big:
1521:
153 sll %o5, 4, %o5
154
155 cmp %o5, %o3
156 bleu 1b
157 addcc %o4, 1, %o4
158
159 be Lgot_result
160 sub %o4, 1, %o4
161
162 tst %o3 ! set up for initial iteration
163Ldivloop: ! radix-16 non-restoring loop: develops 4 quotient bits per pass
164 sll %o2, 4, %o2
165 ! depth 1, accumulated bits 0
166 bl L.1.16
167 srl %o5,1,%o5
168 ! remainder is positive
169 subcc %o3,%o5,%o3
170 ! depth 2, accumulated bits 1
171 bl L.2.17
172 srl %o5,1,%o5
173 ! remainder is positive
174 subcc %o3,%o5,%o3
175 ! depth 3, accumulated bits 3
176 bl L.3.19
177 srl %o5,1,%o5
178 ! remainder is positive
179 subcc %o3,%o5,%o3
180 ! depth 4, accumulated bits 7
181 bl L.4.23
182 srl %o5,1,%o5
183 ! remainder is positive
184 subcc %o3,%o5,%o3
185 b 9f
186 add %o2, (7*2+1), %o2
187
188L.4.23:
189 ! remainder is negative
190 addcc %o3,%o5,%o3
191 b 9f
192 add %o2, (7*2-1), %o2
193
194L.3.19:
195 ! remainder is negative
196 addcc %o3,%o5,%o3
197 ! depth 4, accumulated bits 5
198 bl L.4.21
199 srl %o5,1,%o5
200 ! remainder is positive
201 subcc %o3,%o5,%o3
202 b 9f
203 add %o2, (5*2+1), %o2
204
205L.4.21:
206 ! remainder is negative
207 addcc %o3,%o5,%o3
208 b 9f
209 add %o2, (5*2-1), %o2
210
211L.2.17:
212 ! remainder is negative
213 addcc %o3,%o5,%o3
214 ! depth 3, accumulated bits 1
215 bl L.3.17
216 srl %o5,1,%o5
217 ! remainder is positive
218 subcc %o3,%o5,%o3
219 ! depth 4, accumulated bits 3
220 bl L.4.19
221 srl %o5,1,%o5
222 ! remainder is positive
223 subcc %o3,%o5,%o3
224 b 9f
225 add %o2, (3*2+1), %o2
226
227L.4.19:
228 ! remainder is negative
229 addcc %o3,%o5,%o3
230 b 9f
231 add %o2, (3*2-1), %o2
232
233L.3.17:
234 ! remainder is negative
235 addcc %o3,%o5,%o3
236 ! depth 4, accumulated bits 1
237 bl L.4.17
238 srl %o5,1,%o5
239 ! remainder is positive
240 subcc %o3,%o5,%o3
241 b 9f
242 add %o2, (1*2+1), %o2
243
244L.4.17:
245 ! remainder is negative
246 addcc %o3,%o5,%o3
247 b 9f
248 add %o2, (1*2-1), %o2
249
250L.1.16:
251 ! remainder is negative
252 addcc %o3,%o5,%o3
253 ! depth 2, accumulated bits -1
254 bl L.2.15
255 srl %o5,1,%o5
256 ! remainder is positive
257 subcc %o3,%o5,%o3
258 ! depth 3, accumulated bits -1
259 bl L.3.15
260 srl %o5,1,%o5
261 ! remainder is positive
262 subcc %o3,%o5,%o3
263 ! depth 4, accumulated bits -1
264 bl L.4.15
265 srl %o5,1,%o5
266 ! remainder is positive
267 subcc %o3,%o5,%o3
268 b 9f
269 add %o2, (-1*2+1), %o2
270
271L.4.15:
272 ! remainder is negative
273 addcc %o3,%o5,%o3
274 b 9f
275 add %o2, (-1*2-1), %o2
276
277L.3.15:
278 ! remainder is negative
279 addcc %o3,%o5,%o3
280 ! depth 4, accumulated bits -3
281 bl L.4.13
282 srl %o5,1,%o5
283 ! remainder is positive
284 subcc %o3,%o5,%o3
285 b 9f
286 add %o2, (-3*2+1), %o2
287
288L.4.13:
289 ! remainder is negative
290 addcc %o3,%o5,%o3
291 b 9f
292 add %o2, (-3*2-1), %o2
293
294L.2.15:
295 ! remainder is negative
296 addcc %o3,%o5,%o3
297 ! depth 3, accumulated bits -3
298 bl L.3.13
299 srl %o5,1,%o5
300 ! remainder is positive
301 subcc %o3,%o5,%o3
302 ! depth 4, accumulated bits -5
303 bl L.4.11
304 srl %o5,1,%o5
305 ! remainder is positive
306 subcc %o3,%o5,%o3
307 b 9f
308 add %o2, (-5*2+1), %o2
309
310L.4.11:
311 ! remainder is negative
312 addcc %o3,%o5,%o3
313 b 9f
314 add %o2, (-5*2-1), %o2
315
316L.3.13:
317 ! remainder is negative
318 addcc %o3,%o5,%o3
319 ! depth 4, accumulated bits -7
320 bl L.4.9
321 srl %o5,1,%o5
322 ! remainder is positive
323 subcc %o3,%o5,%o3
324 b 9f
325 add %o2, (-7*2+1), %o2
326
327L.4.9:
328 ! remainder is negative
329 addcc %o3,%o5,%o3
330 b 9f
331 add %o2, (-7*2-1), %o2
332
333 9:
334Lend_regular_divide:
335 subcc %o4, 1, %o4
336 bge Ldivloop
337 tst %o3
338
339 bl,a Lgot_result
340 ! non-restoring fixup here (one instruction only!)
341 sub %o2, 1, %o2
342
343Lgot_result:
344
345 retl
346 mov %o2, %o0
347
348 .globl .udiv_patch
349.udiv_patch: ! patch site: copied over .udiv at boot on CPUs with a hardware udiv
350 wr %g0, 0x0, %y
351 nop
352 nop
353 retl
354 udiv %o0, %o1, %o0
355 nop
diff --git a/arch/sparc/lib/udivdi3.S b/arch/sparc/lib/udivdi3.S
new file mode 100644
index 000000000000..b430f1f0ef62
--- /dev/null
+++ b/arch/sparc/lib/udivdi3.S
@@ -0,0 +1,258 @@
1/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
2
3This file is part of GNU CC.
4
5GNU CC is free software; you can redistribute it and/or modify
6it under the terms of the GNU General Public License as published by
7the Free Software Foundation; either version 2, or (at your option)
8any later version.
9
10GNU CC is distributed in the hope that it will be useful,
11but WITHOUT ANY WARRANTY; without even the implied warranty of
12MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13GNU General Public License for more details.
14
15You should have received a copy of the GNU General Public License
16along with GNU CC; see the file COPYING. If not, write to
17the Free Software Foundation, 59 Temple Place - Suite 330,
18Boston, MA 02111-1307, USA. */
19
20 .text
21 .align 4
22 .globl __udivdi3
23__udivdi3: ! unsigned 64/64 divide: (%i0:%i1) / (%i2:%i3) -> quotient in %i0:%i1 (GCC-emitted code; comments only added)
24 save %sp,-104,%sp
25 mov %i3,%o3
26 cmp %i2,0
27 bne .LL40
28 mov %i1,%i3
29 cmp %o3,%i0
30 bleu .LL41
31 mov %i3,%o1
32 ! Inlined udiv_qrnnd
33 mov 32,%g1
34 subcc %i0,%o3,%g0
351: bcs 5f
36 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
37 sub %i0,%o3,%i0 ! this kills msb of n
38 addx %i0,%i0,%i0 ! so this cannot give carry
39 subcc %g1,1,%g1
402: bne 1b
41 subcc %i0,%o3,%g0
42 bcs 3f
43 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
44 b 3f
45 sub %i0,%o3,%i0 ! this kills msb of n
464: sub %i0,%o3,%i0
475: addxcc %i0,%i0,%i0
48 bcc 2b
49 subcc %g1,1,%g1
50! Got carry from n. Subtract next step to cancel this carry.
51 bne 4b
52 addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
53 sub %i0,%o3,%i0
543: xnor %o1,0,%o1
55 ! End of inline udiv_qrnnd
56 b .LL45
57 mov 0,%o2
58.LL41:
59 cmp %o3,0
60 bne .LL77
61 mov %i0,%o2
62 mov 1,%o0
63 call .udiv,0 ! divisor is zero: evaluate 1/0 to raise the divide-by-zero trap
64 mov 0,%o1
65 mov %o0,%o3
66 mov %i0,%o2
67.LL77:
68 mov 0,%o4
69 ! Inlined udiv_qrnnd
70 mov 32,%g1
71 subcc %o4,%o3,%g0
721: bcs 5f
73 addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
74 sub %o4,%o3,%o4 ! this kills msb of n
75 addx %o4,%o4,%o4 ! so this cannot give carry
76 subcc %g1,1,%g1
772: bne 1b
78 subcc %o4,%o3,%g0
79 bcs 3f
80 addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
81 b 3f
82 sub %o4,%o3,%o4 ! this kills msb of n
834: sub %o4,%o3,%o4
845: addxcc %o4,%o4,%o4
85 bcc 2b
86 subcc %g1,1,%g1
87! Got carry from n. Subtract next step to cancel this carry.
88 bne 4b
89 addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb
90 sub %o4,%o3,%o4
913: xnor %o2,0,%o2
92 ! End of inline udiv_qrnnd
93 mov %o4,%i0
94 mov %i3,%o1
95 ! Inlined udiv_qrnnd
96 mov 32,%g1
97 subcc %i0,%o3,%g0
981: bcs 5f
99 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
100 sub %i0,%o3,%i0 ! this kills msb of n
101 addx %i0,%i0,%i0 ! so this cannot give carry
102 subcc %g1,1,%g1
1032: bne 1b
104 subcc %i0,%o3,%g0
105 bcs 3f
106 addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
107 b 3f
108 sub %i0,%o3,%i0 ! this kills msb of n
1094: sub %i0,%o3,%i0
1105: addxcc %i0,%i0,%i0
111 bcc 2b
112 subcc %g1,1,%g1
113! Got carry from n. Subtract next step to cancel this carry.
114 bne 4b
115 addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
116 sub %i0,%o3,%i0
1173: xnor %o1,0,%o1
118 ! End of inline udiv_qrnnd
119 b .LL78
120 mov %o1,%l1
121.LL40:
122 cmp %i2,%i0
123 bleu .LL46
124 sethi %hi(65535),%o0
125 b .LL73
126 mov 0,%o1
127.LL46:
128 or %o0,%lo(65535),%o0
129 cmp %i2,%o0
130 bgu .LL53
131 mov %i2,%o1
132 cmp %i2,256
133 addx %g0,-1,%o0
134 b .LL59
135 and %o0,8,%o2
136.LL53:
137 sethi %hi(16777215),%o0
138 or %o0,%lo(16777215),%o0
139 cmp %o1,%o0
140 bgu .LL59
141 mov 24,%o2
142 mov 16,%o2
143.LL59:
144 srl %o1,%o2,%o1
145 sethi %hi(__clz_tab),%o0 ! __clz_tab: byte table used to count leading zeros; defined elsewhere in lib/
146 or %o0,%lo(__clz_tab),%o0
147 ldub [%o1+%o0],%o0
148 add %o0,%o2,%o0
149 mov 32,%o1
150 subcc %o1,%o0,%o2
151 bne,a .LL67
152 mov 32,%o0
153 cmp %i0,%i2
154 bgu .LL69
155 cmp %i3,%o3
156 blu .LL73
157 mov 0,%o1
158.LL69:
159 b .LL73
160 mov 1,%o1
161.LL67:
162 sub %o0,%o2,%o0
163 sll %i2,%o2,%i2
164 srl %o3,%o0,%o1
165 or %i2,%o1,%i2
166 sll %o3,%o2,%o3
167 srl %i0,%o0,%o1
168 sll %i0,%o2,%i0
169 srl %i3,%o0,%o0
170 or %i0,%o0,%i0
171 sll %i3,%o2,%i3
172 mov %i0,%o5
173 mov %o1,%o4
174 ! Inlined udiv_qrnnd
175 mov 32,%g1
176 subcc %o4,%i2,%g0
1771: bcs 5f
178 addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb
179 sub %o4,%i2,%o4 ! this kills msb of n
180 addx %o4,%o4,%o4 ! so this cannot give carry
181 subcc %g1,1,%g1
1822: bne 1b
183 subcc %o4,%i2,%g0
184 bcs 3f
185 addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb
186 b 3f
187 sub %o4,%i2,%o4 ! this kills msb of n
1884: sub %o4,%i2,%o4
1895: addxcc %o4,%o4,%o4
190 bcc 2b
191 subcc %g1,1,%g1
192! Got carry from n. Subtract next step to cancel this carry.
193 bne 4b
194 addcc %o5,%o5,%o5 ! shift n1n0 and a 0-bit in lsb
195 sub %o4,%i2,%o4
1963: xnor %o5,0,%o5
197 ! End of inline udiv_qrnnd
198 mov %o4,%i0
199 mov %o5,%o1
200 ! Inlined umul_ppmm
201 wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
202 sra %o3,31,%g2 ! Do not move this insn
203 and %o1,%g2,%g2 ! Do not move this insn
204 andcc %g0,0,%g1 ! Do not move this insn
205 mulscc %g1,%o3,%g1
206 mulscc %g1,%o3,%g1
207 mulscc %g1,%o3,%g1
208 mulscc %g1,%o3,%g1
209 mulscc %g1,%o3,%g1
210 mulscc %g1,%o3,%g1
211 mulscc %g1,%o3,%g1
212 mulscc %g1,%o3,%g1
213 mulscc %g1,%o3,%g1
214 mulscc %g1,%o3,%g1
215 mulscc %g1,%o3,%g1
216 mulscc %g1,%o3,%g1
217 mulscc %g1,%o3,%g1
218 mulscc %g1,%o3,%g1
219 mulscc %g1,%o3,%g1
220 mulscc %g1,%o3,%g1
221 mulscc %g1,%o3,%g1
222 mulscc %g1,%o3,%g1
223 mulscc %g1,%o3,%g1
224 mulscc %g1,%o3,%g1
225 mulscc %g1,%o3,%g1
226 mulscc %g1,%o3,%g1
227 mulscc %g1,%o3,%g1
228 mulscc %g1,%o3,%g1
229 mulscc %g1,%o3,%g1
230 mulscc %g1,%o3,%g1
231 mulscc %g1,%o3,%g1
232 mulscc %g1,%o3,%g1
233 mulscc %g1,%o3,%g1
234 mulscc %g1,%o3,%g1
235 mulscc %g1,%o3,%g1
236 mulscc %g1,%o3,%g1
237 mulscc %g1,0,%g1
238 add %g1,%g2,%o0
239 rd %y,%o2
240 cmp %o0,%i0
241 bgu,a .LL73
242 add %o1,-1,%o1
243 bne,a .LL45
244 mov 0,%o2
245 cmp %o2,%i3
246 bleu .LL45
247 mov 0,%o2
248 add %o1,-1,%o1
249.LL73:
250 mov 0,%o2
251.LL45:
252 mov %o1,%l1
253.LL78:
254 mov %o2,%l0
255 mov %l0,%i0
256 mov %l1,%i1
257 ret
258 restore
diff --git a/arch/sparc/lib/umul.S b/arch/sparc/lib/umul.S
new file mode 100644
index 000000000000..a784720a8a22
--- /dev/null
+++ b/arch/sparc/lib/umul.S
@@ -0,0 +1,169 @@
1/* $Id: umul.S,v 1.4 1996/09/30 02:22:39 davem Exp $
2 * umul.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6
7/*
8 * Unsigned multiply. Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the
9 * upper 32 bits of the 64-bit product).
10 *
11 * This code optimizes short (less than 13-bit) multiplies. Short
12 * multiplies require 25 instruction cycles, and long ones require
13 * 45 instruction cycles.
14 *
15 * On return, overflow has occurred (%o1 is not zero) if and only if
16 * the Z condition code is clear, allowing, e.g., the following:
17 *
18 * call .umul
19 * nop
20 * bnz overflow (or tnz)
21 */
22
23 .globl .umul
24.umul: ! 32x32 -> 64 unsigned multiply: %o0 * %o1; low word in %o0, high word in %o1
25 or %o0, %o1, %o4
26 mov %o0, %y ! multiplier -> Y
27
28 andncc %o4, 0xfff, %g0 ! test bits 12..31 of *both* args
29 be Lmul_shortway ! if zero, can do it the short way
30 andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
31
32 /*
33 * Long multiply. 32 steps, followed by a final shift step.
34 */
35 mulscc %o4, %o1, %o4 ! 1
36 mulscc %o4, %o1, %o4 ! 2
37 mulscc %o4, %o1, %o4 ! 3
38 mulscc %o4, %o1, %o4 ! 4
39 mulscc %o4, %o1, %o4 ! 5
40 mulscc %o4, %o1, %o4 ! 6
41 mulscc %o4, %o1, %o4 ! 7
42 mulscc %o4, %o1, %o4 ! 8
43 mulscc %o4, %o1, %o4 ! 9
44 mulscc %o4, %o1, %o4 ! 10
45 mulscc %o4, %o1, %o4 ! 11
46 mulscc %o4, %o1, %o4 ! 12
47 mulscc %o4, %o1, %o4 ! 13
48 mulscc %o4, %o1, %o4 ! 14
49 mulscc %o4, %o1, %o4 ! 15
50 mulscc %o4, %o1, %o4 ! 16
51 mulscc %o4, %o1, %o4 ! 17
52 mulscc %o4, %o1, %o4 ! 18
53 mulscc %o4, %o1, %o4 ! 19
54 mulscc %o4, %o1, %o4 ! 20
55 mulscc %o4, %o1, %o4 ! 21
56 mulscc %o4, %o1, %o4 ! 22
57 mulscc %o4, %o1, %o4 ! 23
58 mulscc %o4, %o1, %o4 ! 24
59 mulscc %o4, %o1, %o4 ! 25
60 mulscc %o4, %o1, %o4 ! 26
61 mulscc %o4, %o1, %o4 ! 27
62 mulscc %o4, %o1, %o4 ! 28
63 mulscc %o4, %o1, %o4 ! 29
64 mulscc %o4, %o1, %o4 ! 30
65 mulscc %o4, %o1, %o4 ! 31
66 mulscc %o4, %o1, %o4 ! 32
67 mulscc %o4, %g0, %o4 ! final shift
68
69
70 /*
71 * Normally, with the shift-and-add approach, if both numbers are
72 * positive you get the correct result. With 32-bit two's-complement
73 * numbers, -x is represented as
74 *
75 * x 32
76 * ( 2 - ------ ) mod 2 * 2
77 * 32
78 * 2
79 *
80 * (the `mod 2' subtracts 1 from 1.bbbb). To avoid lots of 2^32s,
81 * we can treat this as if the radix point were just to the left
82 * of the sign bit (multiply by 2^32), and get
83 *
84 * -x = (2 - x) mod 2
85 *
86 * Then, ignoring the `mod 2's for convenience:
87 *
88 * x * y = xy
89 * -x * y = 2y - xy
90 * x * -y = 2x - xy
91 * -x * -y = 4 - 2x - 2y + xy
92 *
93 * For signed multiplies, we subtract (x << 32) from the partial
94 * product to fix this problem for negative multipliers (see mul.s).
95 * Because of the way the shift into the partial product is calculated
96 * (N xor V), this term is automatically removed for the multiplicand,
97 * so we don't have to adjust.
98 *
99 * But for unsigned multiplies, the high order bit wasn't a sign bit,
100 * and the correction is wrong. So for unsigned multiplies where the
101 * high order bit is one, we end up with xy - (y << 32). To fix it
102 * we add y << 32.
103 */
104#if 0
105 tst %o1
106 bl,a 1f ! if %o1 < 0 (high order bit = 1),
107 add %o4, %o0, %o4 ! %o4 += %o0 (add y to upper half)
108
1091:
110 rd %y, %o0 ! get lower half of product
111 retl
112 addcc %o4, %g0, %o1 ! put upper half in place and set Z for %o1==0
113#else
114 /* Faster code from tege@sics.se. */
115 sra %o1, 31, %o2 ! make mask from sign bit
116 and %o0, %o2, %o2 ! %o2 = 0 or %o0, depending on sign of %o1
117 rd %y, %o0 ! get lower half of product
118 retl
119 addcc %o4, %o2, %o1 ! add compensation and put upper half in place
120#endif
121
122Lmul_shortway:
123 /*
124 * Short multiply. 12 steps, followed by a final shift step.
125 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
126 * but there is no problem with %o0 being negative (unlike above),
127 * and overflow is impossible (the answer is at most 24 bits long).
128 */
129 mulscc %o4, %o1, %o4 ! 1
130 mulscc %o4, %o1, %o4 ! 2
131 mulscc %o4, %o1, %o4 ! 3
132 mulscc %o4, %o1, %o4 ! 4
133 mulscc %o4, %o1, %o4 ! 5
134 mulscc %o4, %o1, %o4 ! 6
135 mulscc %o4, %o1, %o4 ! 7
136 mulscc %o4, %o1, %o4 ! 8
137 mulscc %o4, %o1, %o4 ! 9
138 mulscc %o4, %o1, %o4 ! 10
139 mulscc %o4, %o1, %o4 ! 11
140 mulscc %o4, %o1, %o4 ! 12
141 mulscc %o4, %g0, %o4 ! final shift
142
143 /*
144 * %o4 has 20 of the bits that should be in the result; %y has
145 * the bottom 12 (as %y's top 12). That is:
146 *
147 * %o4 %y
148 * +----------------+----------------+
149 * | -12- | -20- | -12- | -20- |
150 * +------(---------+------)---------+
151 * -----result-----
152 *
153 * The 12 bits of %o4 left of the `result' area are all zero;
154 * in fact, all top 20 bits of %o4 are zero.
155 */
156
157 rd %y, %o5
158 sll %o4, 12, %o0 ! shift middle bits left 12
159 srl %o5, 20, %o5 ! shift low bits right 20
160 or %o5, %o0, %o0
161 retl
162 addcc %g0, %g0, %o1 ! %o1 = zero, and set Z
163
164 .globl .umul_patch
165.umul_patch: ! patch site: copied over .umul at boot on CPUs with a hardware umul
166 umul %o0, %o1, %o0
167 retl
168 rd %y, %o1
169 nop
diff --git a/arch/sparc/lib/urem.S b/arch/sparc/lib/urem.S
new file mode 100644
index 000000000000..ec7f0c502c56
--- /dev/null
+++ b/arch/sparc/lib/urem.S
@@ -0,0 +1,355 @@
1/* $Id: urem.S,v 1.4 1996/09/30 02:22:42 davem Exp $
2 * urem.S: This routine was taken from glibc-1.09 and is covered
3 * by the GNU Library General Public License Version 2.
4 */
5
6/* This file is generated from divrem.m4; DO NOT EDIT! */
7/*
8 * Division and remainder, from Appendix E of the Sparc Version 8
9 * Architecture Manual, with fixes from Gordon Irlam.
10 */
11
12/*
13 * Input: dividend and divisor in %o0 and %o1 respectively.
14 *
15 * m4 parameters:
16 * .urem name of function to generate
17 * rem rem=div => %o0 / %o1; rem=rem => %o0 % %o1
18 * false false=true => signed; false=false => unsigned
19 *
20 * Algorithm parameters:
21 * N how many bits per iteration we try to get (4)
22 * WORDSIZE total number of bits (32)
23 *
24 * Derived constants:
25 * TOPBITS number of bits in the top decade of a number
26 *
27 * Important variables:
28 * Q the partial quotient under development (initially 0)
29 * R the remainder so far, initially the dividend
30 * ITER number of main division loop iterations required;
31 * equal to ceil(log2(quotient) / N). Note that this
32 * is the log base (2^N) of the quotient.
33 * V the current comparand, initially divisor*2^(ITER*N-1)
34 *
35 * Cost:
36 * Current estimate for non-large dividend is
37 * ceil(log2(quotient) / N) * (10 + 7N/2) + C
38 * A large dividend is one greater than 2^(31-TOPBITS) and takes a
39 * different path, as the upper bits of the quotient must be developed
40 * one bit at a time.
41 */
42
43 .globl .urem
44.urem: ! unsigned 32-bit modulus: returns %o0 % %o1 in %o0 (generated code; comments only added)
45
46 ! Ready to divide. Compute size of quotient; scale comparand.
47 orcc %o1, %g0, %o5
48 bne 1f
49 mov %o0, %o3
50
51 ! Divide by zero trap. If it returns, return 0 (about as
52 ! wrong as possible, but that is what SunOS does...).
53 ta ST_DIV0 ! software trap: kernel delivers the divide-by-zero exception
54 retl
55 clr %o0
56
571:
58 cmp %o3, %o5 ! if %o1 exceeds %o0, done
59 blu Lgot_result ! (and algorithm fails otherwise)
60 clr %o2
61
62 sethi %hi(1 << (32 - 4 - 1)), %g1 ! 2^27: 'large dividend' threshold
63
64 cmp %o3, %g1
65 blu Lnot_really_big
66 clr %o4
67
68 ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
69 ! as our usual N-at-a-shot divide step will cause overflow and havoc.
70 ! The number of bits in the result here is N*ITER+SC, where SC <= N.
71 ! Compute ITER in an unorthodox manner: know we need to shift V into
72 ! the top decade: so do not even bother to compare to R.
73 1:
74 cmp %o5, %g1
75 bgeu 3f
76 mov 1, %g7
77
78 sll %o5, 4, %o5
79
80 b 1b
81 add %o4, 1, %o4
82
83 ! Now compute %g7.
84 2:
85 addcc %o5, %o5, %o5
86 bcc Lnot_too_big
87 add %g7, 1, %g7
88
89 ! We get here if the %o1 overflowed while shifting.
90 ! This means that %o3 has the high-order bit set.
91 ! Restore %o5 and subtract from %o3.
92 sll %g1, 4, %g1 ! high order bit
93 srl %o5, 1, %o5 ! rest of %o5
94 add %o5, %g1, %o5
95
96 b Ldo_single_div
97 sub %g7, 1, %g7
98
99 Lnot_too_big:
100 3:
101 cmp %o5, %o3
102 blu 2b
103 nop
104
105 be Ldo_single_div
106 nop
107 /* NB: these are commented out in the V8-Sparc manual as well */
108 /* (I do not understand this) */
109 ! %o5 > %o3: went too far: back up 1 step
110 ! srl %o5, 1, %o5
111 ! dec %g7
112 ! do single-bit divide steps
113 !
114 ! We have to be careful here. We know that %o3 >= %o5, so we can do the
115 ! first divide step without thinking. BUT, the others are conditional,
116 ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
117 ! order bit set in the first step, just falling into the regular
118 ! division loop will mess up the first time around.
119 ! So we unroll slightly...
120 Ldo_single_div:
121 subcc %g7, 1, %g7
122 bl Lend_regular_divide
123 nop
124
125 sub %o3, %o5, %o3
126 mov 1, %o2
127
128 b Lend_single_divloop
129 nop
130 Lsingle_divloop:
131 sll %o2, 1, %o2
132 bl 1f
133 srl %o5, 1, %o5
134 ! %o3 >= 0
135 sub %o3, %o5, %o3
136 b 2f
137 add %o2, 1, %o2
138 1: ! %o3 < 0
139 add %o3, %o5, %o3
140 sub %o2, 1, %o2
141 2:
142 Lend_single_divloop:
143 subcc %g7, 1, %g7
144 bge Lsingle_divloop
145 tst %o3
146
147 b,a Lend_regular_divide
148
149Lnot_really_big:
1501:
151 sll %o5, 4, %o5
152
153 cmp %o5, %o3
154 bleu 1b
155 addcc %o4, 1, %o4
156
157 be Lgot_result
158 sub %o4, 1, %o4
159
160 tst %o3 ! set up for initial iteration
161Ldivloop: ! radix-16 non-restoring loop: develops 4 quotient bits per pass
162 sll %o2, 4, %o2
163 ! depth 1, accumulated bits 0
164 bl L.1.16
165 srl %o5,1,%o5
166 ! remainder is positive
167 subcc %o3,%o5,%o3
168 ! depth 2, accumulated bits 1
169 bl L.2.17
170 srl %o5,1,%o5
171 ! remainder is positive
172 subcc %o3,%o5,%o3
173 ! depth 3, accumulated bits 3
174 bl L.3.19
175 srl %o5,1,%o5
176 ! remainder is positive
177 subcc %o3,%o5,%o3
178 ! depth 4, accumulated bits 7
179 bl L.4.23
180 srl %o5,1,%o5
181 ! remainder is positive
182 subcc %o3,%o5,%o3
183 b 9f
184 add %o2, (7*2+1), %o2
185
186L.4.23:
187 ! remainder is negative
188 addcc %o3,%o5,%o3
189 b 9f
190 add %o2, (7*2-1), %o2
191
192L.3.19:
193 ! remainder is negative
194 addcc %o3,%o5,%o3
195 ! depth 4, accumulated bits 5
196 bl L.4.21
197 srl %o5,1,%o5
198 ! remainder is positive
199 subcc %o3,%o5,%o3
200 b 9f
201 add %o2, (5*2+1), %o2
202
203L.4.21:
204 ! remainder is negative
205 addcc %o3,%o5,%o3
206 b 9f
207 add %o2, (5*2-1), %o2
208
209L.2.17:
210 ! remainder is negative
211 addcc %o3,%o5,%o3
212 ! depth 3, accumulated bits 1
213 bl L.3.17
214 srl %o5,1,%o5
215 ! remainder is positive
216 subcc %o3,%o5,%o3
217 ! depth 4, accumulated bits 3
218 bl L.4.19
219 srl %o5,1,%o5
220 ! remainder is positive
221 subcc %o3,%o5,%o3
222 b 9f
223 add %o2, (3*2+1), %o2
224
225L.4.19:
226 ! remainder is negative
227 addcc %o3,%o5,%o3
228 b 9f
229 add %o2, (3*2-1), %o2
230
231L.3.17:
232 ! remainder is negative
233 addcc %o3,%o5,%o3
234 ! depth 4, accumulated bits 1
235 bl L.4.17
236 srl %o5,1,%o5
237 ! remainder is positive
238 subcc %o3,%o5,%o3
239 b 9f
240 add %o2, (1*2+1), %o2
241
242L.4.17:
243 ! remainder is negative
244 addcc %o3,%o5,%o3
245 b 9f
246 add %o2, (1*2-1), %o2
247
248L.1.16:
249 ! remainder is negative
250 addcc %o3,%o5,%o3
251 ! depth 2, accumulated bits -1
252 bl L.2.15
253 srl %o5,1,%o5
254 ! remainder is positive
255 subcc %o3,%o5,%o3
256 ! depth 3, accumulated bits -1
257 bl L.3.15
258 srl %o5,1,%o5
259 ! remainder is positive
260 subcc %o3,%o5,%o3
261 ! depth 4, accumulated bits -1
262 bl L.4.15
263 srl %o5,1,%o5
264 ! remainder is positive
265 subcc %o3,%o5,%o3
266 b 9f
267 add %o2, (-1*2+1), %o2
268
269L.4.15:
270 ! remainder is negative
271 addcc %o3,%o5,%o3
272 b 9f
273 add %o2, (-1*2-1), %o2
274
275L.3.15:
276 ! remainder is negative
277 addcc %o3,%o5,%o3
278 ! depth 4, accumulated bits -3
279 bl L.4.13
280 srl %o5,1,%o5
281 ! remainder is positive
282 subcc %o3,%o5,%o3
283 b 9f
284 add %o2, (-3*2+1), %o2
285
286L.4.13:
287 ! remainder is negative
288 addcc %o3,%o5,%o3
289 b 9f
290 add %o2, (-3*2-1), %o2
291
292L.2.15:
293 ! remainder is negative
294 addcc %o3,%o5,%o3
295 ! depth 3, accumulated bits -3
296 bl L.3.13
297 srl %o5,1,%o5
298 ! remainder is positive
299 subcc %o3,%o5,%o3
300 ! depth 4, accumulated bits -5
301 bl L.4.11
302 srl %o5,1,%o5
303 ! remainder is positive
304 subcc %o3,%o5,%o3
305 b 9f
306 add %o2, (-5*2+1), %o2
307
308L.4.11:
309 ! remainder is negative
310 addcc %o3,%o5,%o3
311 b 9f
312 add %o2, (-5*2-1), %o2
313
314L.3.13:
315 ! remainder is negative
316 addcc %o3,%o5,%o3
317 ! depth 4, accumulated bits -7
318 bl L.4.9
319 srl %o5,1,%o5
320 ! remainder is positive
321 subcc %o3,%o5,%o3
322 b 9f
323 add %o2, (-7*2+1), %o2
324
325L.4.9:
326 ! remainder is negative
327 addcc %o3,%o5,%o3
328 b 9f
329 add %o2, (-7*2-1), %o2
330
331 9:
332Lend_regular_divide:
333 subcc %o4, 1, %o4
334 bge Ldivloop
335 tst %o3
336
337 bl,a Lgot_result
338 ! non-restoring fixup here (one instruction only!)
339 add %o3, %o1, %o3
340
341Lgot_result:
342
343 retl
344 mov %o3, %o0
345
346 .globl .urem_patch
347.urem_patch: ! patch site: copied over .urem at boot on CPUs with hardware udiv/umul
348 wr %g0, 0x0, %y
349 nop
350 nop
351 nop
352 udiv %o0, %o1, %o2
353 umul %o2, %o1, %o2
354 retl
355 sub %o0, %o2, %o0
diff --git a/arch/sparc/math-emu/Makefile b/arch/sparc/math-emu/Makefile
new file mode 100644
index 000000000000..f84a9a6162be
--- /dev/null
+++ b/arch/sparc/math-emu/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the sparc32 FPU instruction emulation (builds math.o only).
3#
4
5obj-y := math.o
6
7EXTRA_AFLAGS := -ansi
8EXTRA_CFLAGS = -I. -I$(TOPDIR)/include/math-emu -w
diff --git a/arch/sparc/math-emu/ashldi3.S b/arch/sparc/math-emu/ashldi3.S
new file mode 100644
index 000000000000..eab1d097296a
--- /dev/null
+++ b/arch/sparc/math-emu/ashldi3.S
@@ -0,0 +1,36 @@
1/* $Id: ashldi3.S,v 1.1 1998/04/06 16:09:28 jj Exp $
2 * ashldi3.S: Math-emu code creates all kinds of references to
3 * this little routine on the sparc with gcc.
4 *
5 * Copyright (C) 1998 Jakub Jelinek(jj@ultra.linux.cz)
6 */
7
8#include <asm/cprefix.h>
9
10 .globl C_LABEL(__ashldi3)
11C_LABEL(__ashldi3): ! 64-bit left shift: (%o0:%o1) << %o2; %o0 = high word, %o1 = low word
12 tst %o2
13 be 3f ! shift by zero: return the operand unchanged
14 mov 32, %g2
15
16 sub %g2, %o2, %g2 ! %g2 = 32 - count
17
18 tst %g2
19 bg 1f ! count < 32: take the two-word path
20 srl %o1, %g2, %g3 ! %g3 = low bits that move into the high word
21
22 clr %o5 ! count >= 32: low result is zero...
23 neg %g2
24 ba 2f
25 sll %o1, %g2, %o4 ! ...high result = low << (count - 32)
26
271:
28 sll %o1, %o2, %o5 ! low result = low << count
29 sll %o0, %o2, %g2 ! high word shifted LEFT (fix: was `srl`, which shifted the wrong way and corrupted any nonzero high word)
30 or %g2, %g3, %o4 ! high result = (high << count) | (low >> (32 - count))
312:
32 mov %o4, %o0
33 mov %o5, %o1
343:
35 jmpl %o7 + 8, %g0
36 nop
diff --git a/arch/sparc/math-emu/math.c b/arch/sparc/math-emu/math.c
new file mode 100644
index 000000000000..be2c80932e26
--- /dev/null
+++ b/arch/sparc/math-emu/math.c
@@ -0,0 +1,521 @@
1/*
2 * arch/sparc/math-emu/math.c
3 *
4 * Copyright (C) 1998 Peter Maydell (pmaydell@chiark.greenend.org.uk)
5 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
7 *
8 * This is a good place to start if you're trying to understand the
9 * emulation code, because it's pretty simple. What we do is
10 * essentially analyse the instruction to work out what the operation
11 * is and which registers are involved. We then execute the appropriate
12 * FXXXX function. [The floating point queue introduces a minor wrinkle;
13 * see below...]
14 * The fxxxxx.c files each emulate a single insn. They look relatively
15 * simple because the complexity is hidden away in an unholy tangle
16 * of preprocessor macros.
17 *
18 * The first layer of macros is single.h, double.h, quad.h. Generally
19 * these files define macros for working with floating point numbers
20 * of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles,
21 * for instance. These macros are usually defined as calls to more
22 * generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number
23 * of machine words required to store the given IEEE format is passed
24 * as a parameter. [double.h and co check the number of bits in a word
25 * and define FP_ADD_D & co appropriately].
26 * The generic macros are defined in op-common.h. This is where all
27 * the grotty stuff like handling NaNs is coded. To handle the possible
28 * word sizes macros in op-common.h use macros like _FP_FRAC_SLL_##wc()
29 * where wc is the 'number of machine words' parameter (here 2).
30 * These are defined in the third layer of macros: op-1.h, op-2.h
31 * and op-4.h. These handle operations on floating point numbers composed
32 * of 1,2 and 4 machine words respectively. [For example, on sparc64
33 * doubles are one machine word so macros in double.h eventually use
34 * constructs in op-1.h, but on sparc32 they use op-2.h definitions.]
35 * soft-fp.h is on the same level as op-common.h, and defines some
36 * macros which are independent of both word size and FP format.
37 * Finally, sfp-machine.h is the machine dependent part of the
38 * code: it defines the word size and what type a word is. It also
39 * defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h
40 * provide several possible flavours of multiply algorithm, most
41 * of which require that you supply some form of asm or C primitive to
42 * do the actual multiply. (such asm primitives should be defined
43 * in sfp-machine.h too). udivmodti4.c is the same sort of thing.
44 *
45 * There may be some errors here because I'm working from a
46 * SPARC architecture manual V9, and what I really want is V8...
47 * Also, the insns which can generate exceptions seem to be a
48 * greater subset of the FPops than for V9 (for example, FCMPED
49 * has to be emulated on V8). So I think I'm going to have
50 * to emulate them all just to be on the safe side...
51 *
52 * Emulation routines originate from soft-fp package, which is
53 * part of glibc and has appropriate copyrights in it (allegedly).
54 *
55 * NB: on sparc int == long == 4 bytes, long long == 8 bytes.
56 * Most bits of the kernel seem to go for long rather than int,
57 * so we follow that practice...
58 */
59
60/* TODO:
61 * fpsave() saves the FP queue but fpload() doesn't reload it.
62 * Therefore when we context switch or change FPU ownership
63 * we have to check to see if the queue had anything in it and
64 * emulate it if it did. This is going to be a pain.
65 */
66
67#include <linux/types.h>
68#include <linux/sched.h>
69#include <linux/mm.h>
70#include <asm/uaccess.h>
71
72#include "sfp-util.h"
73#include <math-emu/soft-fp.h>
74#include <math-emu/single.h>
75#include <math-emu/double.h>
76#include <math-emu/quad.h>
77
78#define FLOATFUNC(x) extern int x(void *,void *,void *)
79
80/* The Vn labels indicate what version of the SPARC architecture gas thinks
81 * each insn is. This is from the binutils source :->
82 */
83/* quadword instructions */
84#define FSQRTQ 0x02b /* v8 */
85#define FADDQ 0x043 /* v8 */
86#define FSUBQ 0x047 /* v8 */
87#define FMULQ 0x04b /* v8 */
88#define FDIVQ 0x04f /* v8 */
89#define FDMULQ 0x06e /* v8 */
90#define FQTOS 0x0c7 /* v8 */
91#define FQTOD 0x0cb /* v8 */
92#define FITOQ 0x0cc /* v8 */
93#define FSTOQ 0x0cd /* v8 */
94#define FDTOQ 0x0ce /* v8 */
95#define FQTOI 0x0d3 /* v8 */
96#define FCMPQ 0x053 /* v8 */
97#define FCMPEQ 0x057 /* v8 */
98/* single/double instructions (subnormal): should all work */
99#define FSQRTS 0x029 /* v7 */
100#define FSQRTD 0x02a /* v7 */
101#define FADDS 0x041 /* v6 */
102#define FADDD 0x042 /* v6 */
103#define FSUBS 0x045 /* v6 */
104#define FSUBD 0x046 /* v6 */
105#define FMULS 0x049 /* v6 */
106#define FMULD 0x04a /* v6 */
107#define FDIVS 0x04d /* v6 */
108#define FDIVD 0x04e /* v6 */
109#define FSMULD 0x069 /* v6 */
110#define FDTOS 0x0c6 /* v6 */
111#define FSTOD 0x0c9 /* v6 */
112#define FSTOI 0x0d1 /* v6 */
113#define FDTOI 0x0d2 /* v6 */
114#define FABSS 0x009 /* v6 */
115#define FCMPS 0x051 /* v6 */
116#define FCMPES 0x055 /* v6 */
117#define FCMPD 0x052 /* v6 */
118#define FCMPED 0x056 /* v6 */
119#define FMOVS 0x001 /* v6 */
120#define FNEGS 0x005 /* v6 */
121#define FITOS 0x0c4 /* v6 */
122#define FITOD 0x0c8 /* v6 */
123
124#define FSR_TEM_SHIFT 23UL
125#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
126#define FSR_AEXC_SHIFT 5UL
127#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
128#define FSR_CEXC_SHIFT 0UL
129#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
130
131static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs);
132
/* Unlike the Sparc64 version (which has a struct fpustate), we
 * pass the taskstruct corresponding to the task which currently owns the
 * FPU. This is partly because we don't have the fpustate struct and
 * partly because the task owning the FPU isn't always current (as is
 * the case for the Sparc64 port). This is probably SMP-related...
 * This function returns 1 if all queued insns were emulated successfully.
 * The test for unimplemented FPop in kernel mode has been moved into
 * kernel/traps.c for simplicity.
 */
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
{
	/* regs->pc isn't necessarily the PC at which the offending insn is sitting.
	 * The FPU maintains a queue of FPops which cause traps.
	 * When it hits an instruction that requires that the trapped op succeeded
	 * (usually because it reads a reg. that the trapped op wrote) then it
	 * causes this exception. We need to emulate all the insns on the queue
	 * and then allow the op to proceed.
	 * This code should also handle the case where the trap was precise,
	 * in which case the queue length is zero and regs->pc points at the
	 * single FPop to be emulated. (this case is untested, though :->)
	 * You'll need this case if you want to be able to emulate all FPops
	 * because the FPU either doesn't exist or has been software-disabled.
	 * [The UltraSPARC makes FP a precise trap; this isn't as stupid as it
	 * might sound because the Ultra does funky things with a superscalar
	 * architecture.]
	 */

	/* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */

	int i;
	int retcode = 0;                               /* assume all succeed */
	unsigned long insn;

#ifdef DEBUG_MATHEMU
	printk("In do_mathemu()... pc is %08lx\n", regs->pc);
	printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
	for (i = 0; i < fpt->thread.fpqdepth; i++)
		printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn,
		       (unsigned long)fpt->thread.fpqueue[i].insn_addr);
#endif

	if (fpt->thread.fpqdepth == 0) {   /* no queue, guilty insn is at regs->pc */
#ifdef DEBUG_MATHEMU
		printk("precise trap at %08lx\n", regs->pc);
#endif
		if (!get_user(insn, (u32 __user *) regs->pc)) {
			retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs);
			if (retcode) {
				/* Emulation succeeded: step past the emulated
				 * insn by fixing up PC & nPC. */
				regs->pc = regs->npc;
				regs->npc += 4;
			}
		}
		return retcode;
	}

	/* Normal case: need to empty the queue... */
	for (i = 0; i < fpt->thread.fpqdepth; i++) {
		retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs);
		if (!retcode) /* insn failed, no point doing any more */
			break;
	}
	/* Now empty the queue and clear the queue_not_empty flag */
	/* NOTE(review): 0x3000 masks the queue-related FSR state (bit 13 is
	 * qne per SPARC V8); on full success the current-exception (CEXC)
	 * bits are dropped as well. */
	if (retcode)
		fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK);
	else
		fpt->thread.fsr &= ~0x3000;
	fpt->thread.fpqdepth = 0;

	return retcode;
}
204
205/* All routines returning an exception to raise should detect
206 * such exceptions _before_ rounding to be consistent with
207 * the behavior of the hardware in the implemented cases
208 * (and thus with the recommendations in the V9 architecture
209 * manual).
210 *
211 * We return 0 if a SIGFPE should be sent, 1 otherwise.
212 */
213static inline int record_exception(unsigned long *pfsr, int eflag)
214{
215 unsigned long fsr = *pfsr;
216 int would_trap;
217
218 /* Determine if this exception would have generated a trap. */
219 would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
220
221 /* If trapping, we only want to signal one bit. */
222 if (would_trap != 0) {
223 eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
224 if ((eflag & (eflag - 1)) != 0) {
225 if (eflag & FP_EX_INVALID)
226 eflag = FP_EX_INVALID;
227 else if (eflag & FP_EX_OVERFLOW)
228 eflag = FP_EX_OVERFLOW;
229 else if (eflag & FP_EX_UNDERFLOW)
230 eflag = FP_EX_UNDERFLOW;
231 else if (eflag & FP_EX_DIVZERO)
232 eflag = FP_EX_DIVZERO;
233 else if (eflag & FP_EX_INEXACT)
234 eflag = FP_EX_INEXACT;
235 }
236 }
237
238 /* Set CEXC, here is the rule:
239 *
240 * In general all FPU ops will set one and only one
241 * bit in the CEXC field, this is always the case
242 * when the IEEE exception trap is enabled in TEM.
243 */
244 fsr &= ~(FSR_CEXC_MASK);
245 fsr |= ((long)eflag << FSR_CEXC_SHIFT);
246
247 /* Set the AEXC field, rule is:
248 *
249 * If a trap would not be generated, the
250 * CEXC just generated is OR'd into the
251 * existing value of AEXC.
252 */
253 if (would_trap == 0)
254 fsr |= ((long)eflag << FSR_AEXC_SHIFT);
255
256 /* If trapping, indicate fault trap type IEEE. */
257 if (would_trap != 0)
258 fsr |= (1UL << 14);
259
260 *pfsr = fsr;
261
262 return (would_trap ? 0 : 1);
263}
264
265typedef union {
266 u32 s;
267 u64 d;
268 u64 q[2];
269} *argp;
270
271static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
272{
273 /* Emulate the given insn, updating fsr and fregs appropriately. */
274 int type = 0;
275 /* r is rd, b is rs2 and a is rs1. The *u arg tells
276 whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
277 non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */
278#define TYPE(dummy, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6)
279 int freg;
280 argp rs1 = NULL, rs2 = NULL, rd = NULL;
281 FP_DECL_EX;
282 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
283 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
284 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
285 int IR;
286 long fsr;
287
288#ifdef DEBUG_MATHEMU
289 printk("In do_mathemu(), emulating %08lx\n", insn);
290#endif
291
292 if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
293 switch ((insn >> 5) & 0x1ff) {
294 case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
295 case FADDQ:
296 case FSUBQ:
297 case FMULQ:
298 case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
299 case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
300 case FQTOS: TYPE(3,1,1,3,1,0,0); break;
301 case FQTOD: TYPE(3,2,1,3,1,0,0); break;
302 case FITOQ: TYPE(3,3,1,1,0,0,0); break;
303 case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
304 case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
305 case FQTOI: TYPE(3,1,0,3,1,0,0); break;
306 case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
307 case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
308 case FADDD:
309 case FSUBD:
310 case FMULD:
311 case FDIVD: TYPE(2,2,1,2,1,2,1); break;
312 case FADDS:
313 case FSUBS:
314 case FMULS:
315 case FDIVS: TYPE(2,1,1,1,1,1,1); break;
316 case FSMULD: TYPE(2,2,1,1,1,1,1); break;
317 case FDTOS: TYPE(2,1,1,2,1,0,0); break;
318 case FSTOD: TYPE(2,2,1,1,1,0,0); break;
319 case FSTOI: TYPE(2,1,0,1,1,0,0); break;
320 case FDTOI: TYPE(2,1,0,2,1,0,0); break;
321 case FITOS: TYPE(2,1,1,1,0,0,0); break;
322 case FITOD: TYPE(2,2,1,1,0,0,0); break;
323 case FMOVS:
324 case FABSS:
325 case FNEGS: TYPE(2,1,0,1,0,0,0); break;
326 default:
327#ifdef DEBUG_MATHEMU
328 printk("unknown FPop1: %03lx\n",(insn>>5)&0x1ff);
329#endif
330 break;
331 }
332 } else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
333 switch ((insn >> 5) & 0x1ff) {
334 case FCMPS: TYPE(3,0,0,1,1,1,1); break;
335 case FCMPES: TYPE(3,0,0,1,1,1,1); break;
336 case FCMPD: TYPE(3,0,0,2,1,2,1); break;
337 case FCMPED: TYPE(3,0,0,2,1,2,1); break;
338 case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
339 case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
340 default:
341#ifdef DEBUG_MATHEMU
342 printk("unknown FPop2: %03lx\n",(insn>>5)&0x1ff);
343#endif
344 break;
345 }
346 }
347
348 if (!type) { /* oops, didn't recognise that FPop */
349#ifdef DEBUG_MATHEMU
350 printk("attempt to emulate unrecognised FPop!\n");
351#endif
352 return 0;
353 }
354
355 /* Decode the registers to be used */
356 freg = (*pfsr >> 14) & 0xf;
357
358 *pfsr &= ~0x1c000; /* clear the traptype bits */
359
360 freg = ((insn >> 14) & 0x1f);
361 switch (type & 0x3) { /* is rs1 single, double or quad? */
362 case 3:
363 if (freg & 3) { /* quadwords must have bits 4&5 of the */
364 /* encoded reg. number set to zero. */
365 *pfsr |= (6 << 14);
366 return 0; /* simulate invalid_fp_register exception */
367 }
368 /* fall through */
369 case 2:
370 if (freg & 1) { /* doublewords must have bit 5 zeroed */
371 *pfsr |= (6 << 14);
372 return 0;
373 }
374 }
375 rs1 = (argp)&fregs[freg];
376 switch (type & 0x7) {
377 case 7: FP_UNPACK_QP (QA, rs1); break;
378 case 6: FP_UNPACK_DP (DA, rs1); break;
379 case 5: FP_UNPACK_SP (SA, rs1); break;
380 }
381 freg = (insn & 0x1f);
382 switch ((type >> 3) & 0x3) { /* same again for rs2 */
383 case 3:
384 if (freg & 3) { /* quadwords must have bits 4&5 of the */
385 /* encoded reg. number set to zero. */
386 *pfsr |= (6 << 14);
387 return 0; /* simulate invalid_fp_register exception */
388 }
389 /* fall through */
390 case 2:
391 if (freg & 1) { /* doublewords must have bit 5 zeroed */
392 *pfsr |= (6 << 14);
393 return 0;
394 }
395 }
396 rs2 = (argp)&fregs[freg];
397 switch ((type >> 3) & 0x7) {
398 case 7: FP_UNPACK_QP (QB, rs2); break;
399 case 6: FP_UNPACK_DP (DB, rs2); break;
400 case 5: FP_UNPACK_SP (SB, rs2); break;
401 }
402 freg = ((insn >> 25) & 0x1f);
403 switch ((type >> 6) & 0x3) { /* and finally rd. This one's a bit different */
404 case 0: /* dest is fcc. (this must be FCMPQ or FCMPEQ) */
405 if (freg) { /* V8 has only one set of condition codes, so */
406 /* anything but 0 in the rd field is an error */
407 *pfsr |= (6 << 14); /* (should probably flag as invalid opcode */
408 return 0; /* but SIGFPE will do :-> ) */
409 }
410 break;
411 case 3:
412 if (freg & 3) { /* quadwords must have bits 4&5 of the */
413 /* encoded reg. number set to zero. */
414 *pfsr |= (6 << 14);
415 return 0; /* simulate invalid_fp_register exception */
416 }
417 /* fall through */
418 case 2:
419 if (freg & 1) { /* doublewords must have bit 5 zeroed */
420 *pfsr |= (6 << 14);
421 return 0;
422 }
423 /* fall through */
424 case 1:
425 rd = (void *)&fregs[freg];
426 break;
427 }
428#ifdef DEBUG_MATHEMU
429 printk("executing insn...\n");
430#endif
431 /* do the Right Thing */
432 switch ((insn >> 5) & 0x1ff) {
433 /* + */
434 case FADDS: FP_ADD_S (SR, SA, SB); break;
435 case FADDD: FP_ADD_D (DR, DA, DB); break;
436 case FADDQ: FP_ADD_Q (QR, QA, QB); break;
437 /* - */
438 case FSUBS: FP_SUB_S (SR, SA, SB); break;
439 case FSUBD: FP_SUB_D (DR, DA, DB); break;
440 case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
441 /* * */
442 case FMULS: FP_MUL_S (SR, SA, SB); break;
443 case FSMULD: FP_CONV (D, S, 2, 1, DA, SA);
444 FP_CONV (D, S, 2, 1, DB, SB);
445 case FMULD: FP_MUL_D (DR, DA, DB); break;
446 case FDMULQ: FP_CONV (Q, D, 4, 2, QA, DA);
447 FP_CONV (Q, D, 4, 2, QB, DB);
448 case FMULQ: FP_MUL_Q (QR, QA, QB); break;
449 /* / */
450 case FDIVS: FP_DIV_S (SR, SA, SB); break;
451 case FDIVD: FP_DIV_D (DR, DA, DB); break;
452 case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
453 /* sqrt */
454 case FSQRTS: FP_SQRT_S (SR, SB); break;
455 case FSQRTD: FP_SQRT_D (DR, DB); break;
456 case FSQRTQ: FP_SQRT_Q (QR, QB); break;
457 /* mov */
458 case FMOVS: rd->s = rs2->s; break;
459 case FABSS: rd->s = rs2->s & 0x7fffffff; break;
460 case FNEGS: rd->s = rs2->s ^ 0x80000000; break;
461 /* float to int */
462 case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
463 case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
464 case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
465 /* int to float */
466 case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
467 case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
468 case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
469 /* float to float */
470 case FSTOD: FP_CONV (D, S, 2, 1, DR, SB); break;
471 case FSTOQ: FP_CONV (Q, S, 4, 1, QR, SB); break;
472 case FDTOQ: FP_CONV (Q, D, 4, 2, QR, DB); break;
473 case FDTOS: FP_CONV (S, D, 1, 2, SR, DB); break;
474 case FQTOS: FP_CONV (S, Q, 1, 4, SR, QB); break;
475 case FQTOD: FP_CONV (D, Q, 2, 4, DR, QB); break;
476 /* comparison */
477 case FCMPS:
478 case FCMPES:
479 FP_CMP_S(IR, SB, SA, 3);
480 if (IR == 3 &&
481 (((insn >> 5) & 0x1ff) == FCMPES ||
482 FP_ISSIGNAN_S(SA) ||
483 FP_ISSIGNAN_S(SB)))
484 FP_SET_EXCEPTION (FP_EX_INVALID);
485 break;
486 case FCMPD:
487 case FCMPED:
488 FP_CMP_D(IR, DB, DA, 3);
489 if (IR == 3 &&
490 (((insn >> 5) & 0x1ff) == FCMPED ||
491 FP_ISSIGNAN_D(DA) ||
492 FP_ISSIGNAN_D(DB)))
493 FP_SET_EXCEPTION (FP_EX_INVALID);
494 break;
495 case FCMPQ:
496 case FCMPEQ:
497 FP_CMP_Q(IR, QB, QA, 3);
498 if (IR == 3 &&
499 (((insn >> 5) & 0x1ff) == FCMPEQ ||
500 FP_ISSIGNAN_Q(QA) ||
501 FP_ISSIGNAN_Q(QB)))
502 FP_SET_EXCEPTION (FP_EX_INVALID);
503 }
504 if (!FP_INHIBIT_RESULTS) {
505 switch ((type >> 6) & 0x7) {
506 case 0: fsr = *pfsr;
507 if (IR == -1) IR = 2;
508 /* fcc is always fcc0 */
509 fsr &= ~0xc00; fsr |= (IR << 10); break;
510 *pfsr = fsr;
511 break;
512 case 1: rd->s = IR; break;
513 case 5: FP_PACK_SP (rd, SR); break;
514 case 6: FP_PACK_DP (rd, DR); break;
515 case 7: FP_PACK_QP (rd, QR); break;
516 }
517 }
518 if (_fex == 0)
519 return 1; /* success! */
520 return record_exception(pfsr, _fex);
521}
diff --git a/arch/sparc/math-emu/sfp-util.h b/arch/sparc/math-emu/sfp-util.h
new file mode 100644
index 000000000000..d1b2aff3c259
--- /dev/null
+++ b/arch/sparc/math-emu/sfp-util.h
@@ -0,0 +1,115 @@
/* Multi-word arithmetic primitives for the soft-fp code, with the same
 * interface as GCC's longlong.h: two-word add/subtract with carry
 * propagation, a 32x32->64 multiply and a 64/32 divide, all written as
 * SPARC v7 inline assembly (no hardware umul/udiv assumed).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* (sh:sl) = (ah:al) + (bh:bl): addcc sets the carry, addx consumes it. */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) 				\
  __asm__ ("addcc %r4,%5,%1\n\t" 					\
	   "addx %r2,%3,%0\n"						\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "%rJ" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "%rJ" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   : "cc")
/* (sh:sl) = (ah:al) - (bh:bl): subcc/subx mirror the add pair above. */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
  __asm__ ("subcc %r4,%5,%1\n\t" 					\
	   "subx %r2,%3,%0\n"						\
	   : "=r" ((USItype)(sh)),					\
	     "=&r" ((USItype)(sl))					\
	   : "rJ" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "rJ" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   : "cc")

/* (w1:w0) = u * v, unsigned 32x32->64.  v7 has only the mulscc step
 * instruction, hence the 32 unrolled steps plus a final fixup: mulscc
 * performs a signed multiply, so %g2 pre-computes the correction term
 * for a u with the top bit set (see the "Don't move" notes below).
 */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm\n\t"					\
	"wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n\t" \
	"sra	%3,31,%%g2	! Don't move this insn\n\t"		\
	"and	%2,%%g2,%%g2	! Don't move this insn\n\t"		\
	"andcc	%%g0,0,%%g1	! Don't move this insn\n\t"		\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,0,%%g1\n\t" 					\
	"add	%%g1,%%g2,%0\n\t" 					\
	"rd	%%y,%1\n"						\
	   : "=r" ((USItype)(w1)),					\
	     "=r" ((USItype)(w0))					\
	   : "%rI" ((USItype)(u)),					\
	     "r" ((USItype)(v))						\
	   : "%g1", "%g2", "cc")

/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower! */
/* (q, r) = (n1:n0) / d: classic 32-iteration shift/subtract divide,
 * with %g1 as the loop counter. */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd\n\t"					\
	   "mov	32,%%g1\n\t"						\
	   "subcc	%1,%2,%%g0\n\t"					\
	   "1:	bcs	5f\n\t"						\
	   "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t"	\
	   "sub	%1,%2,%1	! this kills msb of n\n\t"		\
	   "addx	%1,%1,%1	! so this can't give carry\n\t"	\
	   "subcc	%%g1,1,%%g1\n\t"				\
	   "2:	bne	1b\n\t"						\
	   "subcc	%1,%2,%%g0\n\t"					\
	   "bcs	3f\n\t"							\
	   "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t"	\
	   "b		3f\n\t"						\
	   "sub	%1,%2,%1	! this kills msb of n\n\t"		\
	   "4:	sub	%1,%2,%1\n\t"					\
	   "5:	addxcc	%1,%1,%1\n\t"					\
	   "bcc	2b\n\t"							\
	   "subcc	%%g1,1,%%g1\n\t"				\
	   "! Got carry from n. Subtract next step to cancel this carry.\n\t" \
	   "bne	4b\n\t"							\
	   "addcc	%0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t"	\
	   "sub	%1,%2,%1\n\t"						\
	   "3:	xnor	%0,0,%0\n\t"					\
	   "! End of inline udiv_qrnnd\n"				\
	   : "=&r" ((USItype)(q)),					\
	     "=&r" ((USItype)(r))					\
	   : "r" ((USItype)(d)),					\
	     "1" ((USItype)(n1)),					\
	     "0" ((USItype)(n0)) : "%g1", "cc")
#define UDIV_NEEDS_NORMALIZATION 0

/* soft-fp calls abort() on can't-happen paths; in the kernel just make
 * the enclosing emulation routine fail (return 0 -> SIGFPE). */
#define abort()								\
	return 0

/* Map the kernel's endianness macro onto the name longlong.h expects. */
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
new file mode 100644
index 000000000000..16eeba4b991a
--- /dev/null
+++ b/arch/sparc/mm/Makefile
@@ -0,0 +1,23 @@
1# $Id: Makefile,v 1.38 2000/12/15 00:41:22 davem Exp $
2# Makefile for the linux Sparc-specific parts of the memory manager.
3#
4
5EXTRA_AFLAGS := -ansi
6
7obj-y := fault.o init.o loadmmu.o generic.o extable.o btfixup.o
8
9ifeq ($(CONFIG_SUN4),y)
10obj-y += nosrmmu.o
11else
12obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
13endif
14
15ifdef CONFIG_HIGHMEM
16obj-y += highmem.o
17endif
18
19ifdef CONFIG_SMP
20obj-y += nosun4c.o
21else
22obj-y += sun4c.o
23endif
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
new file mode 100644
index 000000000000..f147a44c9780
--- /dev/null
+++ b/arch/sparc/mm/btfixup.c
@@ -0,0 +1,336 @@
1/* $Id: btfixup.c,v 1.10 2000/05/09 17:40:13 davem Exp $
2 * btfixup.c: Boot time code fixup and relocator, so that
3 * we can get rid of most indirect calls to achieve single
4 * image sun4c and srmmu kernel.
5 *
6 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <asm/btfixup.h>
13#include <asm/page.h>
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/oplib.h>
17#include <asm/system.h>
18#include <asm/cacheflush.h>
19
20#define BTFIXUP_OPTIMIZE_NOP
21#define BTFIXUP_OPTIMIZE_OTHER
22
23extern char *srmmu_name;
24static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
25#ifdef CONFIG_SUN4
26static char str_sun4c[] __initdata = "sun4\n";
27#else
28static char str_sun4c[] __initdata = "sun4c\n";
29#endif
30static char str_srmmu[] __initdata = "srmmu[%s]/";
31static char str_iommu[] __initdata = "iommu\n";
32static char str_iounit[] __initdata = "io-unit\n";
33
34static int visited __initdata = 0;
35extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
36extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
37static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
38static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
39static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
40static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
41static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
42static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
43static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
44static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
45static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
46static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
47static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
48static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
49static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
50static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
51
#ifdef BTFIXUP_OPTIMIZE_OTHER
/* Store a patched instruction at a fixup site, coping with sites that
 * btfixup() previously "mangled" (the patched instruction was moved
 * into the preceding call's delay slot).  q1 points at the saved copy
 * of the displaced instruction; fmangled says whether that happened.
 */
static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	if (!fmangled)
		*addr = value;
	else {
		unsigned int *q = (unsigned int *)q1;
		if (*addr == 0x01000000) {
			/* Noped: 0x01000000 is a NOP; the live copy is at *q. */
			*q = value;
		} else if (addr[-1] == *q) {
			/* Moved: keep the delay-slot copy and the saved copy in sync. */
			addr[-1] = value;
			*q = value;
		} else {
			/* Site no longer matches what we recorded: give up loudly. */
			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
			prom_halt();
		}
	}
}
#else
/* Without delay-slot optimization a fixup site is always patched in place. */
static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	*addr = value;
}
#endif
78
/* Walk the boot-time fixup table emitted between ___btfixup_start and
 * ___btfixup_end and patch the kernel text in place, so one image can
 * run on sun4c and srmmu machines without indirect calls.
 * Entry layout (from the code below): a type byte ('f' call, 'b'
 * blackbox, 's' simm13, 'h' sethi, 'a' halfword, 'i' int) in word 0,
 * the fixup value in word 1, a word count in word 2 (word 3 for 'f'),
 * then (address, saved-insn) pairs.  NOTE(review): bit 0 of word 0
 * appears to flag "not yet processed"; see the advance trick below.
 */
void __init btfixup(void)
{
	unsigned int *p, *q;
	int type, count;
	unsigned insn;
	unsigned *addr;
	int fmangled = 0;
	void (*flush_cacheall)(void);

	if (!visited) {
		visited++;
		printk(version);
		if (ARCH_SUN4C_SUN4)
			printk(str_sun4c);
		else {
			printk(str_srmmu, srmmu_name);
			if (sparc_cpu_model == sun4d)
				printk(str_iounit);
			else
				printk(str_iommu);
		}
	}
	for (p = ___btfixup_start; p < ___btfixup_end; ) {
		count = p[2];
		q = p + 3;
		/* Sanity-check the fixup value for each entry type. */
		switch (type = *(unsigned char *)p) {
		case 'f':
			count = p[3];
			q = p + 4;
			if (((p[0] & 1) || p[1])
			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
				prom_printf(wrong_f, p, p[1]);
				prom_halt();
			}
			break;
		case 'b':
			/* Blackbox handlers must live in init text. */
			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
				prom_printf(wrong_b, p, p[1]);
				prom_halt();
			}
			break;
		case 's':
			/* Must fit in a signed 13-bit immediate. */
			if (p[1] + 0x1000 >= 0x2000) {
				prom_printf(wrong_s, p, p[1]);
				prom_halt();
			}
			break;
		case 'h':
			/* Must have the low 10 bits clear (sethi operand). */
			if (p[1] & 0x3ff) {
				prom_printf(wrong_h, p, p[1]);
				prom_halt();
			}
			break;
		case 'a':
			/* Either simm13-representable or sethi-representable. */
			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
				prom_printf(wrong_a, p, p[1]);
				prom_halt();
			}
			break;
		}
		if (p[0] & 1) {
			p[0] &= ~1;	/* mark processed; next pass over this
					 * entry takes the else branch below and
					 * finally advances p */
			while (count) {
				fmangled = 0;
				addr = (unsigned *)*q;
				if (addr < _stext || addr >= _end) {
					prom_printf(wrong, type, p);
					prom_halt();
				}
				insn = *addr;
#ifdef BTFIXUP_OPTIMIZE_OTHER
				/* If this site was mangled earlier, the real
				 * instruction to inspect is the saved copy. */
				if (type != 'f' && q[1]) {
					insn = *(unsigned int *)q[1];
					if (!insn || insn == 1)
						insn = *addr;
					else
						fmangled = 1;
				}
#endif
				switch (type) {
				case 'f': 	/* CALL */
					/* Special case: address in ksymtab */
					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
						*addr = p[1];
						break;
					} else if (!q[1]) {
						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
					bad_f:
							prom_printf(insn_f, p, addr, insn, addr[1]);
							prom_halt();
						}
					} else if (q[1] != 1)
						addr[1] = q[1];
					if (p[2] == BTFIXUPCALL_NORM) {
					norm_f:
						/* Plain PC-relative CALL to p[1]. */
						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
						q[1] = 0;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_NOP
					goto norm_f;
#else
					/* Decide whether the delay slot allows the
					 * call to be nop'd out or inlined. */
					if (!(addr[1] & 0x80000000)) {
						if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */
							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
					} else {
						if ((addr[1] & 0x01800000) == 0x01800000) {
							if ((addr[1] & 0x01f80000) == 0x01e80000) {
								/* RESTORE */
								goto norm_f; /* It is dangerous to patch that */
							}
							goto bad_f;
						}
						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
							/* ADD %O7, XX, %o7: turn the call+add
							 * into a branch over the skipped code. */
							int displac = (addr[1] << 19);

							displac = (displac >> 21) + 2;
							*addr = (0x10800000) + (displac & 0x3fffff);
							q[1] = addr[1];
							addr[1] = p[2];
							break;
						}
						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
						if ((addr[1] & 0x3e000000) == 0x1e000000)
							goto norm_f; /* rd is %o7. We'd better take care. */
					}
					if (p[2] == BTFIXUPCALL_NOP) {
						*addr = 0x01000000;
						q[1] = 1;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_OTHER
					goto norm_f;
#else
					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
						q[1] = addr[1];
						*addr = p[2];
						break;
					}
					if ((addr[1] & 0xc0000000) != 0xc0000000) {
						/* Not a memory operation */
						if ((addr[1] & 0x30000000) == 0x10000000) {
							/* Ok, non-memory op with rd %oX */
							if ((addr[1] & 0x3e000000) == 0x1c000000)
								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
							if ((addr[1] & 0x3e000000) > 0x12000000 ||
							    ((addr[1] & 0x3e000000) == 0x12000000 &&
							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
								/* Nobody uses the result. We can nop it out. */
								*addr = p[2];
								q[1] = addr[1];
								addr[1] = 0x01000000;
								break;
							}
							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
								/* MOV %reg, %Ox */
								if ((addr[1] & 0x3e000000) == 0x10000000 &&
								    (p[2] & 0x7c000) == 0x20000) {
									/* Ok, it is call xx; mov reg, %o0 and call optimizes
									   to doing something on %o0. Patch the patch. */
									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
								if ((addr[1] & 0x3e000000) == 0x12000000 &&
								    p[2] == BTFIXUPCALL_STO1O0) {
									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
							}
						}
					}
					/* Generic case: move the delay-slot insn up
					 * and put the replacement in the slot. */
					*addr = addr[1];
					q[1] = addr[1];
					addr[1] = p[2];
					break;
#endif /* BTFIXUP_OPTIMIZE_OTHER */
#endif /* BTFIXUP_OPTIMIZE_NOP */
				case 'b':	/* BLACKBOX */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_b, p, addr, insn);
						prom_halt();
					} else {
						void (*do_fixup)(unsigned *);

						do_fixup = (void (*)(unsigned *))p[1];
						do_fixup(addr);
					}
					break;
				case 's':	/* SIMM13 */
					/* Has to be or %g0, i, xx */
					if ((insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_s, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
					break;
				case 'h':	/* SETHI */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_h, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					break;
				case 'a':	/* HALF */
					/* Has to be sethi i, xx or or %g0, i, xx */
					if ((insn & 0xc1c00000) != 0x01000000 &&
					    (insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_a, p, addr, insn);
						prom_halt();
					}
					if (p[1] & 0x3ff)
						set_addr(addr, q[1], fmangled,
							(insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
					else
						set_addr(addr, q[1], fmangled,
							(insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
					break;
				case 'i':	/* INT */
					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					else if ((insn & 0x80002000) == 0x80002000 &&
					         (insn & 0x01800000) != 0x01800000) /* %LO */
						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
					else {
						prom_printf(insn_i, p, addr, insn);
						prom_halt();
					}
					break;
				}
				count -= 2;
				q += 2;
			}
		} else
			p = q + count;
	}
	/* The patched text must be made visible to the I-cache. */
#ifdef CONFIG_SMP
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
#else
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
#endif
	if (!flush_cacheall) {
		prom_printf(fca_und);
		prom_halt();
	}
	(*flush_cacheall)();
}
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
new file mode 100644
index 000000000000..c9845c71f426
--- /dev/null
+++ b/arch/sparc/mm/extable.c
@@ -0,0 +1,77 @@
1/*
2 * linux/arch/sparc/mm/extable.c
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <asm/uaccess.h>
8
/* Intentional no-op on sparc32: entries must stay in their emitted
 * order because range entries occupy two consecutive table slots
 * (see search_extable() below); sorting would tear those pairs apart.
 */
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}
13
14/* Caller knows they are in a range if ret->fixup == 0 */
15const struct exception_table_entry *
16search_extable(const struct exception_table_entry *start,
17 const struct exception_table_entry *last,
18 unsigned long value)
19{
20 const struct exception_table_entry *walk;
21
22 /* Single insn entries are encoded as:
23 * word 1: insn address
24 * word 2: fixup code address
25 *
26 * Range entries are encoded as:
27 * word 1: first insn address
28 * word 2: 0
29 * word 3: last insn address + 4 bytes
30 * word 4: fixup code address
31 *
32 * See asm/uaccess.h for more details.
33 */
34
35 /* 1. Try to find an exact match. */
36 for (walk = start; walk <= last; walk++) {
37 if (walk->fixup == 0) {
38 /* A range entry, skip both parts. */
39 walk++;
40 continue;
41 }
42
43 if (walk->insn == value)
44 return walk;
45 }
46
47 /* 2. Try to find a range match. */
48 for (walk = start; walk <= (last - 1); walk++) {
49 if (walk->fixup)
50 continue;
51
52 if (walk[0].insn <= value && walk[1].insn > value)
53 return walk;
54
55 walk++;
56 }
57
58 return NULL;
59}
60
61/* Special extable search, which handles ranges. Returns fixup */
62unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
63{
64 const struct exception_table_entry *entry;
65
66 entry = search_exception_tables(addr);
67 if (!entry)
68 return 0;
69
70 /* Inside range? Fix g2 and return correct fixup */
71 if (!entry->fixup) {
72 *g2 = (addr - entry->insn) / 4;
73 return (entry + 1)->fixup;
74 }
75
76 return entry->fixup;
77}
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
new file mode 100644
index 000000000000..37f4107bae66
--- /dev/null
+++ b/arch/sparc/mm/fault.c
@@ -0,0 +1,596 @@
1/* $Id: fault.c,v 1.122 2001/11/17 07:19:26 davem Exp $
2 * fault.c: Page fault handlers for the Sparc.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <asm/head.h>
10
11#include <linux/string.h>
12#include <linux/types.h>
13#include <linux/sched.h>
14#include <linux/ptrace.h>
15#include <linux/mman.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/signal.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24
25#include <asm/system.h>
26#include <asm/segment.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/memreg.h>
30#include <asm/openprom.h>
31#include <asm/oplib.h>
32#include <asm/smp.h>
33#include <asm/traps.h>
34#include <asm/kdebug.h>
35#include <asm/uaccess.h>
36
37#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
38
39extern int prom_node_root;
40
41/* At boot time we determine these two values necessary for setting
42 * up the segment maps and page table entries (pte's).
43 */
44
45int num_segmaps, num_contexts;
46int invalid_segment;
47
48/* various Virtual Address Cache parameters we find at boot time... */
49
50int vac_size, vac_linesize, vac_do_hw_vac_flushes;
51int vac_entries_per_context, vac_entries_per_segment;
52int vac_entries_per_page;
53
/* Nice, simple, prom library does all the sweating for us. ;)
 *
 * Walk the PROM v0 available-memory list, filling in sp_banks[]
 * (base address + byte count per bank, terminated by a sentinel),
 * and return the total number of bytes found.
 */
int prom_probe_memory (void)
{
	register struct linux_mlist_v0 *mlist;
	register unsigned long bytes, base_paddr, tally;
	register int i;

	i = 0;
	mlist= *prom_meminfo()->v0_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = (unsigned long) mlist->start_adr;

	/* The first list entry seeds bank 0. */
	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		/* Too many banks: warn, clamp to the last slot, stop. */
		if (i > SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	/* Sentinel terminator.  NOTE(review): this writes one slot past
	 * the last data bank -- assumes sp_banks[] is declared with at
	 * least SPARC_PHYS_BANKS+1 entries; verify against its definition. */
	sp_banks[i].base_addr = 0xdeadbeef;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for(i=0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}
100
/* Traverse the memory lists in the prom to see how much physical we
 * have.
 */
unsigned long
probe_memory(void)
{
	/* promlib does all the dirty work; just relay the total. */
	return prom_probe_memory();
}
114
115extern void sun4c_complete_all_stores(void);
116
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
/* Dump the synchronous/asynchronous error registers and the trapping
 * register state, then halt back to the PROM -- this fault is fatal.
 */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	/* Drain pending stores so the error registers are stable. */
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	/* No recovery possible: hand control back to the PROM. */
	prom_halt();
}
134
135static void unhandled_fault(unsigned long, struct task_struct *,
136 struct pt_regs *) __attribute__ ((noreturn));
137
138static void unhandled_fault(unsigned long address, struct task_struct *tsk,
139 struct pt_regs *regs)
140{
141 if((unsigned long) address < PAGE_SIZE) {
142 printk(KERN_ALERT
143 "Unable to handle kernel NULL pointer dereference\n");
144 } else {
145 printk(KERN_ALERT "Unable to handle kernel paging request "
146 "at virtual address %08lx\n", address);
147 }
148 printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
149 (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
150 printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
151 (tsk->mm ? (unsigned long) tsk->mm->pgd :
152 (unsigned long) tsk->active_mm->pgd));
153 die_if_kernel("Oops", regs);
154}
155
/* Classify a fault taken inside a uaccess helper.  @ret_pc is looked
 * up in the exception tables; the returned code (1/2/3) tells the
 * low-level trap code how to recover.  If no fixup applies, the fault
 * is fatal and unhandled_fault() is invoked (does not return).
 */
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		/* NOTE(review): bit 21 set presumably marks a store in the
		 * SPARC format-3 encoding -- verify against the opcode map. */
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	};

	/* No usable fixup: synthesize a minimal pt_regs for the report. */
	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	/* Read %psr; the nops presumably cover the read delay -- confirm. */
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
203
204extern unsigned long safe_compute_effective_address(struct pt_regs *,
205 unsigned int);
206
207static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
208{
209 unsigned int insn;
210
211 if (text_fault)
212 return regs->pc;
213
214 if (regs->psr & PSR_PS) {
215 insn = *(unsigned int *) regs->pc;
216 } else {
217 __get_user(insn, (unsigned int *) regs->pc);
218 }
219
220 return safe_compute_effective_address(regs, insn);
221}
222
/* Main sparc32 page fault handler.
 * @regs:       trapping register state
 * @text_fault: non-zero if the fault was on an instruction fetch
 * @write:      non-zero if the access was a store
 * @address:    faulting virtual address (overridden by PC for text faults)
 *
 * Resolves user faults via the VM layer, kernel faults via the
 * exception tables, and vmalloc-area faults by copying the top-level
 * entry from init_mm's reference page table.
 */
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	siginfo_t info;
	int from_user = !(regs->psr & PSR_PS);

	/* For text faults the faulting address is the PC itself. */
	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (!ARCH_SUN4C_SUN4 && address >= TASK_SIZE)
		goto vmalloc_fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_MINOR:
	default:
		current->min_flt++;
		break;
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if(from_user) {
#if 0
		printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
		       tsk->comm, tsk->pid, address, regs->pc);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code set above to make clear whether
		   this was a SEGV_MAPERR or SEGV_ACCERR fault. */
		info.si_addr = (void __user *)compute_si_addr(regs, text_fault);
		info.si_trapno = 0;
		force_sig_info (SIGSEGV, &info, tsk);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user && (fixup = search_extables_range(regs->pc, &g2))) {
		if (fixup > 10) {	/* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			/* The memset/csum_partial_copy fixup handlers also
			 * expect the fault address and trapping PC in %i4/%i5. */
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			/* Resume at the fixup handler. */
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (from_user)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) compute_si_addr(regs, text_fault);
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		/* If our pmd is populated but the reference one isn't
		 * (or vice versa already populated), the fault is bogus. */
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
419
/* sun4c fast-path fault handler: first tries to service the fault by
 * refreshing accessed/modified bits in an already-valid pte and
 * reloading it into the hardware segment map; falls back to the
 * generic do_sparc_fault() path otherwise.
 */
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long,pte_t);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		/* NOTE(review): this opcode mask presumably matches
		 * read-modify-write instructions (swap/ldstub style) that
		 * need write permission even though the trap reported a
		 * read -- verify against the SPARC opcode map. */
		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* do_sparc_fault() should have oopsed and not returned */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		/* Present and writable: just set accessed/dirty bits and
		 * reload the pte into the hardware MMU. */
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			/* IRQs off while touching the segment map. */
			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		/* Present and readable: same, accessed bit only. */
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
					*ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
505
/* This always deals with user addresses. */
/* Fault in one user page at @address (for a read if @write is 0,
 * a write otherwise) on behalf of the register-window trap handlers
 * below.  Delivers SIGSEGV/SIGBUS itself instead of returning an
 * error, mirroring do_sparc_fault()'s user path.
 */
inline void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;

	info.si_code = SEGV_MAPERR;

#if 0
	printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
	       tsk->pid, write, address);
#endif
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	/* Mapping exists; now check permissions. */
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
#if 0
	printk("Window whee %s [%d]: segfaults at %08lx\n",
	       tsk->comm, tsk->pid, address);
#endif
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	/* info.si_code set above to make clear whether
	   this was a SEGV_MAPERR or SEGV_ACCERR fault. */
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGSEGV, &info, tsk);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
}
570
571void window_overflow_fault(void)
572{
573 unsigned long sp;
574
575 sp = current_thread_info()->rwbuf_stkptrs[0];
576 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
577 force_user_fault(sp + 0x38, 1);
578 force_user_fault(sp, 1);
579}
580
581void window_underflow_fault(unsigned long sp)
582{
583 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
584 force_user_fault(sp + 0x38, 0);
585 force_user_fault(sp, 0);
586}
587
588void window_ret_fault(struct pt_regs *regs)
589{
590 unsigned long sp;
591
592 sp = regs->u_regs[UREG_FP];
593 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
594 force_user_fault(sp + 0x38, 0);
595 force_user_fault(sp, 0);
596}
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
new file mode 100644
index 000000000000..db27eee3bda1
--- /dev/null
+++ b/arch/sparc/mm/generic.c
@@ -0,0 +1,154 @@
1/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/pagemap.h>
12
13#include <asm/pgalloc.h>
14#include <asm/pgtable.h>
15#include <asm/page.h>
16#include <asm/cacheflush.h>
17#include <asm/tlbflush.h>
18
/* Sanity check used by io_remap_pte_range(): BUG if we are about to
 * overwrite a live mapping.  The old 2.4 logic that dropped page/swap
 * references is retained under #if 0 for reference only.
 */
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;
		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}
43
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Clamp the walk to the current PMD; the caller advances
	 * one PMD at a time. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(mm, address, pte);
		/* Install an I/O pte for this physical offset/space. */
		set_pte(pte, mk_pte_io(offset, prot, space));
		/* BUGs if the slot was not actually empty. */
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}
69
/* Walk one PGD entry's worth of PMDs, allocating pte pages as needed
 * and delegating each chunk to io_remap_pte_range().
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Clamp the walk to the current PGD entry. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias offset so that 'address + offset' below yields the
	 * physical offset for each pte chunk. */
	offset -= address;
	do {
		/* NOTE(review): the pte from pte_alloc_map() is not
		 * pte_unmap()'d here -- verify against this tree's
		 * pte_alloc_map() contract. */
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
90
91int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
92{
93 int error = 0;
94 pgd_t * dir;
95 unsigned long beg = from;
96 unsigned long end = from + size;
97 struct mm_struct *mm = vma->vm_mm;
98
99 prot = __pgprot(pg_iobits);
100 offset -= from;
101 dir = pgd_offset(mm, from);
102 flush_cache_range(vma, beg, end);
103
104 spin_lock(&mm->page_table_lock);
105 while (from < end) {
106 pmd_t *pmd = pmd_alloc(current->mm, dir, from);
107 error = -ENOMEM;
108 if (!pmd)
109 break;
110 error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
111 if (error)
112 break;
113 from = (from + PGDIR_SIZE) & PGDIR_MASK;
114 dir++;
115 }
116 spin_unlock(&mm->page_table_lock);
117
118 flush_tlb_range(vma, beg, end);
119 return error;
120}
121
122int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
123 unsigned long pfn, unsigned long size, pgprot_t prot)
124{
125 int error = 0;
126 pgd_t * dir;
127 unsigned long beg = from;
128 unsigned long end = from + size;
129 struct mm_struct *mm = vma->vm_mm;
130 int space = GET_IOSPACE(pfn);
131 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
132
133 prot = __pgprot(pg_iobits);
134 offset -= from;
135 dir = pgd_offset(mm, from);
136 flush_cache_range(vma, beg, end);
137
138 spin_lock(&mm->page_table_lock);
139 while (from < end) {
140 pmd_t *pmd = pmd_alloc(current->mm, dir, from);
141 error = -ENOMEM;
142 if (!pmd)
143 break;
144 error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
145 if (error)
146 break;
147 from = (from + PGDIR_SIZE) & PGDIR_MASK;
148 dir++;
149 }
150 spin_unlock(&mm->page_table_lock);
151
152 flush_tlb_range(vma, beg, end);
153 return error;
154}
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
new file mode 100644
index 000000000000..4d8ed9c65182
--- /dev/null
+++ b/arch/sparc/mm/highmem.c
@@ -0,0 +1,120 @@
1/*
2 * highmem.c: virtual kernel memory mappings for high memory
3 *
4 * Provides kernel-static versions of atomic kmap functions originally
5 * found as inlines in include/asm-sparc/highmem.h. These became
6 * needed as kmap_atomic() and kunmap_atomic() started getting
7 * called from within modules.
8 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
9 *
10 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
11 * modules because they are loaded with btfixup-ped functions.
12 */
13
14/*
15 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
16 * gives a more generic (and caching) interface. But kmap_atomic can
17 * be used in IRQ contexts, so in some (very limited) cases we need it.
18 *
19 * XXX This is an old text. Actually, it's good to use atomic kmaps,
20 * provided you remember that they are atomic and not try to sleep
21 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
22 * shared by CPUs, and so precious, and establishing them requires IPI.
23 * Atomic kmaps are lightweight and we may have NCPUS more of them.
24 */
25#include <linux/mm.h>
26#include <linux/highmem.h>
27#include <asm/pgalloc.h>
28#include <asm/cacheflush.h>
29#include <asm/tlbflush.h>
30#include <asm/fixmap.h>
31
/* Map @page into the per-CPU fixmap slot for @type and return the
 * virtual address.  Bumps the preempt count unconditionally (paired
 * with the decrement in kunmap_atomic()), even for lowmem pages.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	/* Lowmem pages are permanently mapped; no fixmap slot needed. */
	if (!PageHighMem(page))
		return page_address(page);

	/* One fixmap slot per (km_type, cpu) pair. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be free: kunmap_atomic() clears it on release. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
65
/* Release a mapping created by kmap_atomic().  In non-debug builds
 * this only drops the preempt count; under CONFIG_DEBUG_HIGHMEM the
 * fixmap pte is also cleared so stale uses of the slot will Oops.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	/* Lowmem address: came straight from page_address(), nothing
	 * to unmap. */
	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	/* Must be exactly the slot kmap_atomic() handed out. */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	/* Balances the inc_preempt_count() in kmap_atomic(); reached in
	 * both the debug and non-debug configurations. */
	dec_preempt_count();
	preempt_check_resched();
}
103
/* We may be fed a pagetable here by ptep_to_xxx and others. */
/* Translate a kernel virtual address back to its struct page,
 * handling the three address ranges seen here: ordinary lowmem,
 * the SRMMU nocache pool, and kmap_atomic() fixmap slots.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	/* Ordinary lowmem: direct virt-to-page translation. */
	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	/* SRMMU nocache pool: translate via __nocache_pa(). */
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	/* Otherwise it must be a fixmap (kmap_atomic) address. */
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
new file mode 100644
index 000000000000..54b8e764b042
--- /dev/null
+++ b/arch/sparc/mm/hypersparc.S
@@ -0,0 +1,413 @@
1/* $Id: hypersparc.S,v 1.18 2001/12/21 04:56:15 davem Exp $
2 * hypersparc.S: High speed Hypersparc mmu/cache operations.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/ptrace.h>
8#include <asm/psr.h>
9#include <asm/asm_offsets.h>
10#include <asm/asi.h>
11#include <asm/page.h>
12#include <asm/pgtsrmmu.h>
13#include <linux/config.h>
14#include <linux/init.h>
15
	.text
	.align	4

	.globl	hypersparc_flush_cache_all, hypersparc_flush_cache_mm
	.globl	hypersparc_flush_cache_range, hypersparc_flush_cache_page
	.globl	hypersparc_flush_page_to_ram
	.globl	hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
	.globl	hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
	.globl	hypersparc_flush_tlb_range, hypersparc_flush_tlb_page

	/* Flush the whole cache: walk vac_cache_size down to 0 in
	 * vac_line_size steps through ASI_M_FLUSH_CTX, then flush the
	 * entire I-cache in the delay slot of the return.
	 */
hypersparc_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
	sethi	%hi(vac_cache_size), %g4
	ld	[%g4 + %lo(vac_cache_size)], %g5
	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %g2
1:
	subcc	%g5, %g2, %g5			! hyper_flush_unconditional_combined
	bne	1b
	sta	%g0, [%g5] ASI_M_FLUSH_CTX
	retl
	sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE	! hyper_flush_whole_icache
38
	/* We expand the window flush to get maximum performance. */
	/* Flush all user-mode cache lines for %o0 (an mm).  On UP the
	 * whole routine is skipped when the mm has no hardware context
	 * (context == -1).  The loop is unrolled 8x: %o1 holds
	 * vac_line_size and %g1..%o5 hold 2..7 line-size multiples, so
	 * each pass covers 8 lines via ASI_M_FLUSH_USER.
	 */
hypersparc_flush_cache_mm:
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	be	hypersparc_flush_cache_mm_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o1
	sethi	%hi(vac_cache_size), %g2
	ld	[%g2 + %lo(vac_cache_size)], %o0
	add	%o1, %o1, %g1		! line_size * 2
	add	%o1, %g1, %g2		! * 3
	add	%o1, %g2, %g3		! * 4
	add	%o1, %g3, %g4		! * 5
	add	%o1, %g4, %g5		! * 6
	add	%o1, %g5, %o4		! * 7
	add	%o1, %o4, %o5		! * 8 (loop stride)

	/* BLAMMO! */
1:
	subcc	%o0, %o5, %o0				! hyper_flush_cache_user
	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_USER
	sta	%g0, [%o0 + %g5] ASI_M_FLUSH_USER
	bne	1b
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_USER
hypersparc_flush_cache_mm_out:
	retl
	nop
75
	/* The things we do for performance... */
	/* Flush a virtual address range.  If the range exceeds four
	 * times the cache size it is cheaper to flush the whole user
	 * cache (label 1); otherwise switch the MMU into the target
	 * context and probe/flush page by page (labels 0/1/2/3),
	 * restoring the previous context register on the way out.
	 */
hypersparc_flush_cache_range:
	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	be	hypersparc_flush_cache_range_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	sethi	%hi(vac_cache_size), %g2
	ld	[%g2 + %lo(vac_cache_size)], %o3

	/* Here comes the fun part... */
	add	%o2, (PAGE_SIZE - 1), %o2	! page-align end up
	andn	%o1, (PAGE_SIZE - 1), %o1	! page-align start down
	add	%o4, %o4, %o5			! line_size multiples...
	andn	%o2, (PAGE_SIZE - 1), %o2
	add	%o4, %o5, %g1
	sub	%o2, %o1, %g4			! range length
	add	%o4, %g1, %g2
	sll	%o3, 2, %g5			! cache_size * 4 threshold
	add	%o4, %g2, %g3
	cmp	%g4, %g5
	add	%o4, %g3, %g4
	blu	0f				! small range: page at a time
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7			! * 8 (loop stride)

	/* Flush entire user space, believe it or not this is quicker
	 * than page at a time flushings for range > (cache_size<<2).
	 */
1:
	subcc	%o3, %g7, %o3
	sta	%g0, [%o3 + %g0] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %o4] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %o5] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g1] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g2] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g3] ASI_M_FLUSH_USER
	sta	%g0, [%o3 + %g4] ASI_M_FLUSH_USER
	bne	1b
	sta	%g0, [%o3 + %g5] ASI_M_FLUSH_USER
	retl
	nop

	/* Below our threshold, flush one page at a time. */
0:
	ld	[%o0 + AOFF_mm_context], %o0
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %o3	! save current context
	sta	%o0, [%g7] ASI_M_MMUREGS	! switch to target context
	add	%o2, -PAGE_SIZE, %o0
1:
	or	%o0, 0x400, %g7
	lda	[%g7] ASI_M_FLUSH_PROBE, %g7	! is the page mapped?
	orcc	%g7, 0, %g0
	be,a	3f				! unmapped: skip the flush
	mov	%o0, %o2
	add	%o4, %g5, %g7
2:
	sub	%o2, %g7, %o2
	sta	%g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o2, 0xffc, %g0			! done with this page?
	sta	%g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
	bne	2b
	sta	%g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
3:
	cmp	%o2, %o1
	bne	1b
	add	%o2, -PAGE_SIZE, %o0
	mov	SRMMU_FAULT_STATUS, %g5
	lda	[%g5] ASI_M_MMUREGS, %g0	! clear fault status from probes
	mov	SRMMU_CTX_REG, %g7
	sta	%o3, [%g7] ASI_M_MMUREGS	! restore saved context
hypersparc_flush_cache_range_out:
	retl
	nop
161
	/* HyperSparc requires a valid mapping where we are about to flush
	 * in order to check for a physical tag match during the flush.
	 */
	/* Verified, my ass... */
	/* Flush one page: switch to the mm's context, probe to verify
	 * the page is mapped (skip to 2 if not), flush it 8 lines per
	 * loop pass, then clear fault status and restore the previous
	 * context register.
	 */
hypersparc_flush_cache_page:
	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %g2
#ifndef CONFIG_SMP
	cmp	%g2, -1
	be	hypersparc_flush_cache_page_out
#endif
	WINDOW_FLUSH(%g4, %g5)

	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	mov	SRMMU_CTX_REG, %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	lda	[%o3] ASI_M_MMUREGS, %o2	! save current context
	sta	%g2, [%o3] ASI_M_MMUREGS	! switch to page's context
	or	%o1, 0x400, %o5
	lda	[%o5] ASI_M_FLUSH_PROBE, %g1	! page mapped?
	orcc	%g0, %g1, %g0
	be	2f				! no: nothing to flush
	add	%o4, %o4, %o5
	sub	%o1, -PAGE_SIZE, %o1		! start at page end
	add	%o4, %o5, %g1			! line_size multiples 3..8
	add	%o4, %g1, %g2
	add	%o4, %g2, %g3
	add	%o4, %g3, %g4
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7

	/* BLAMMO! */
1:
	sub	%o1, %g7, %o1
	sta	%g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o1, 0xffc, %g0			! wrapped to page start?
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	bne	1b
	sta	%g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
2:
	mov	SRMMU_FAULT_STATUS, %g7
	mov	SRMMU_CTX_REG, %g4
	lda	[%g7] ASI_M_MMUREGS, %g0	! clear fault status from probe
	sta	%o2, [%g4] ASI_M_MMUREGS	! restore saved context
hypersparc_flush_cache_page_out:
	retl
	nop
215
	/* Make two freshly written instruction words at %o1 (a signal
	 * trampoline) visible to the instruction stream.
	 */
hypersparc_flush_sig_insns:
	flush	%o1
	retl
	flush	%o1 + 4
220
	/* HyperSparc is copy-back. */
	/* Write one page's dirty cache lines back to memory.  Probes
	 * first: if the page is not mapped there is nothing to flush
	 * (branch to 2).  Loop flushes 8 lines per pass via
	 * ASI_M_FLUSH_PAGE, then clears fault status from the probe.
	 */
hypersparc_flush_page_to_ram:
	sethi	%hi(vac_line_size), %g1
	ld	[%g1 + %lo(vac_line_size)], %o4
	andn	%o0, (PAGE_SIZE - 1), %o0
	add	%o4, %o4, %o5
	or	%o0, 0x400, %g7
	lda	[%g7] ASI_M_FLUSH_PROBE, %g5	! page mapped?
	add	%o4, %o5, %g1
	orcc	%g5, 0, %g0
	be	2f				! no: skip the flush loop
	add	%o4, %g1, %g2
	add	%o4, %g2, %g3
	sub	%o0, -PAGE_SIZE, %o0		! start at page end
	add	%o4, %g3, %g4
	add	%o4, %g4, %g5
	add	%o4, %g5, %g7			! line_size * 8 (stride)

	/* BLAMMO! */
1:
	sub	%o0, %g7, %o0
	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
	andcc	%o0, 0xffc, %g0			! wrapped to page start?
	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
	bne	1b
	sta	%g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
2:
	mov	SRMMU_FAULT_STATUS, %g1
	retl
	lda	[%g1] ASI_M_MMUREGS, %g0	! clear fault status from probe
256
	/* HyperSparc is IO cache coherent. */
	/* Nothing to do before/after DMA on this CPU. */
hypersparc_flush_page_for_dma:
	retl
	nop
261
	/* It was noted that at boot time a TLB flush all in a delay slot
	 * can deliver an illegal instruction to the processor if the timing
	 * is just right...
	 */
	/* Flush the whole TLB (probe address 0x400 = flush-entire),
	 * deliberately NOT in the delay slot — see note above.
	 */
hypersparc_flush_tlb_all:
	mov	0x400, %g1
	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
	retl
	nop
271
	/* Flush all TLB entries for mm %o0: temporarily load its context
	 * number into the context register, issue a flush-context probe
	 * (0x300), then restore the previous context in the delay slot.
	 * UP only: skipped when the mm has no context (== -1).
	 */
hypersparc_flush_tlb_mm:
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5	! save current context
#ifndef CONFIG_SMP
	cmp	%o1, -1
	be	hypersparc_flush_tlb_mm_out
#endif
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_mm_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! restore saved context
286
	/* Flush TLB entries covering [%o1, %o2) in vma %o0's mm.  Walks
	 * the range one pgdir-sized region at a time (probe type 0x200),
	 * with the mm's context temporarily installed.
	 */
hypersparc_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5	! save current context
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	hypersparc_flush_tlb_range_out
#endif
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4	! pgdir mask
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1		! align start to pgdir boundary
	add	%o1, 0x200, %o1		! region-flush probe type
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
1:
	sub	%o1, %o4, %o1		! advance one pgdir (mask is negative)
	cmp	%o1, %o2
	blu,a	1b
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
hypersparc_flush_tlb_range_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! restore saved context
309
	/* Flush the single TLB entry for page %o1 in vma %o0's mm,
	 * with the mm's context temporarily installed.
	 */
hypersparc_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	hypersparc_flush_tlb_page_out
#endif
	lda	[%g1] ASI_M_MMUREGS, %g5	! save current context
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE	! page-level flush probe
hypersparc_flush_tlb_page_out:
	retl
	sta	%g5, [%g1] ASI_M_MMUREGS	! restore saved context
325
	__INIT

	/* High speed page clear/copy. */
	/* Zero one page using the HyperSparc block-fill ASI: each stda
	 * clears a 32-byte block (offsets 0..224), 256 bytes per pass,
	 * 16 passes = PAGE_SIZE.  This code is copied over bzero_1page
	 * at init time by hypersparc_setup_blockops below.
	 */
hypersparc_bzero_1page:
/* NOTE: This routine has to be shorter than 40insns --jj */
	clr	%g1
	mov	32, %g2
	mov	64, %g3
	mov	96, %g4
	mov	128, %g5
	mov	160, %g7
	mov	192, %o2
	mov	224, %o3
	mov	16, %o1			! 16 * 256 bytes = one page
1:
	stda	%g0, [%o0 + %g0] ASI_M_BFILL
	stda	%g0, [%o0 + %g2] ASI_M_BFILL
	stda	%g0, [%o0 + %g3] ASI_M_BFILL
	stda	%g0, [%o0 + %g4] ASI_M_BFILL
	stda	%g0, [%o0 + %g5] ASI_M_BFILL
	stda	%g0, [%o0 + %g7] ASI_M_BFILL
	stda	%g0, [%o0 + %o2] ASI_M_BFILL
	stda	%g0, [%o0 + %o3] ASI_M_BFILL
	subcc	%o1, 1, %o1
	bne	1b
	add	%o0, 256, %o0

	retl
	nop
355
	/* Copy one page using the HyperSparc block-copy ASI.  %o2 holds
	 * the constant distance between the two buffers; each sta moves
	 * a 32-byte block, 8 blocks (256 bytes) per pass, 16 passes =
	 * PAGE_SIZE.  Copied over __copy_1page at init time by
	 * hypersparc_setup_blockops below.
	 */
hypersparc_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
	sub	%o1, %o0, %o2		! difference
	mov	16, %g1
1:
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	add	%o0, 32, %o0
	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
	subcc	%g1, 1, %g1
	bne	1b
	add	%o0, 32, %o0

	retl
	nop
382
	.globl	hypersparc_setup_blockops
	/* Patch the generic page-clear/copy routines at init time by
	 * copying the machine code of hypersparc_bzero_1page over
	 * bzero_1page and of hypersparc_copy_1page over __copy_1page
	 * (word by word; the labels bound each source region), then
	 * flush the whole I-cache so the new code is fetched.
	 */
hypersparc_setup_blockops:
	sethi	%hi(bzero_1page), %o0
	or	%o0, %lo(bzero_1page), %o0		! dest: generic bzero
	sethi	%hi(hypersparc_bzero_1page), %o1
	or	%o1, %lo(hypersparc_bzero_1page), %o1	! src start
	sethi	%hi(hypersparc_copy_1page), %o2
	or	%o2, %lo(hypersparc_copy_1page), %o2	! src end
	ld	[%o1], %o4
1:
	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	ld	[%o1], %o4
	sethi	%hi(__copy_1page), %o0
	or	%o0, %lo(__copy_1page), %o0		! dest: generic copy
	sethi	%hi(hypersparc_setup_blockops), %o2
	or	%o2, %lo(hypersparc_setup_blockops), %o2	! src end (%o1 continues)
	ld	[%o1], %o4
1:
	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	ld	[%o1], %o4
	sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE	! make patched code visible
	retl
	nop
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
new file mode 100644
index 000000000000..a2dea69b2f07
--- /dev/null
+++ b/arch/sparc/mm/init.c
@@ -0,0 +1,515 @@
1/* $Id: init.c,v 1.103 2001/11/19 19:03:08 davem Exp $
2 * linux/arch/sparc/mm/init.c
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/signal.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/string.h>
17#include <linux/types.h>
18#include <linux/ptrace.h>
19#include <linux/mman.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/initrd.h>
23#include <linux/init.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26
27#include <asm/system.h>
28#include <asm/segment.h>
29#include <asm/vac-ops.h>
30#include <asm/page.h>
31#include <asm/pgtable.h>
32#include <asm/vaddrs.h>
33#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
34#include <asm/tlb.h>
35
36DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
37
38unsigned long *sparc_valid_addr_bitmap;
39
40unsigned long phys_base;
41unsigned long pfn_base;
42
43unsigned long page_kernel;
44
45struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
46unsigned long sparc_unmapped_base;
47
48struct pgtable_cache_struct pgt_quicklists;
49
50/* References to section boundaries */
51extern char __init_begin, __init_end, _start, _end, etext , edata;
52
53/* Initial ramdisk setup */
54extern unsigned int sparc_ramdisk_image;
55extern unsigned int sparc_ramdisk_size;
56
57unsigned long highstart_pfn, highend_pfn;
58
59pte_t *kmap_pte;
60pgprot_t kmap_prot;
61
/* Walk init_mm's page tables to the pte for a fixmap virtual address. */
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/*
 * kmap_init - one-time setup for the kmap_atomic fixmap window.
 *
 * Caches the pte of the first kmap slot in kmap_pte (slots are
 * addressed relative to it) and the srmmu protection bits used for
 * atomic kernel mappings in kmap_prot.
 */
void __init kmap_init(void)
{
	/* cache the first kmap pte */
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
71
/*
 * show_mem - dump a memory usage summary to the console (used by
 * sysrq and OOM paths).  The page-table-cache statistics are compiled
 * out because their counters are not defined here.
 */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", totalram_pages);
	printk("%d free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
	printk("%ld pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld entries in page dir cache\n",pgd_cache_size);
#endif
#endif
}
88
89void __init sparc_context_init(int numctx)
90{
91 int ctx;
92
93 ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
94
95 for(ctx = 0; ctx < numctx; ctx++) {
96 struct ctx_list *clist;
97
98 clist = (ctx_list_pool + ctx);
99 clist->ctx_number = ctx;
100 clist->ctx_mm = NULL;
101 }
102 ctx_free.next = ctx_free.prev = &ctx_free;
103 ctx_used.next = ctx_used.prev = &ctx_used;
104 for(ctx = 0; ctx < numctx; ctx++)
105 add_to_free_ctxlist(ctx_list_pool + ctx);
106}
107
108extern unsigned long cmdline_memory_size;
109unsigned long last_valid_pfn;
110
111unsigned long calc_highpages(void)
112{
113 int i;
114 int nr = 0;
115
116 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
117 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
118 unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
119
120 if (end_pfn <= max_low_pfn)
121 continue;
122
123 if (start_pfn < max_low_pfn)
124 start_pfn = max_low_pfn;
125
126 nr += end_pfn - start_pfn;
127 }
128
129 return nr;
130}
131
132unsigned long calc_max_low_pfn(void)
133{
134 int i;
135 unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
136 unsigned long curr_pfn, last_pfn;
137
138 last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
139 for (i = 1; sp_banks[i].num_bytes != 0; i++) {
140 curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
141
142 if (curr_pfn >= tmp) {
143 if (last_pfn < tmp)
144 tmp = last_pfn;
145 break;
146 }
147
148 last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
149 }
150
151 return tmp;
152}
153
/*
 * bootmem_init - set up the boot-time memory allocator.
 * @pages_avail: out-parameter, filled with the number of page frames
 *               registered as free with the bootmem allocator
 *
 * Scans sp_banks to find the end of physical memory (honoring a
 * "mem=" command-line clamp), decides where highmem begins, places
 * the bootmem bitmap, registers the free banks, and reserves the
 * initrd, the kernel image, and the bitmap itself.
 *
 * Returns the highest page frame number of physical memory.
 */
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				/* "mem=" clamp: trim this bank and
				 * terminate the bank list here.
				 */
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	/* Start with page aligned address of last symbol in kernel
	 * image.
	 */
	start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	max_pfn = end_of_phys_memory >> PAGE_SHIFT;

	max_low_pfn = max_pfn;
	highstart_pfn = highend_pfn = max_pfn;

	/* Memory beyond what srmmu can map linearly becomes highmem. */
	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
		max_low_pfn = calc_max_low_pfn();
		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		    calc_highpages() >> (20 - PAGE_SHIFT));
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image) {
		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
			sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
		                 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			/* If the initrd sits where the bootmap would go,
			 * push the bootmap past it.
			 */
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
			    bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
					 max_low_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	*pages_avail = 0;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long curr_pfn, last_pfn;

		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		if (curr_pfn >= max_low_pfn)
			break;

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = (last_pfn - curr_pfn) << PAGE_SHIFT;
		*pages_avail += last_pfn - curr_pfn;

		free_bootmem(sp_banks[i].base_addr, size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		/* Reserve the initrd image area. */
		size = initrd_end - initrd_start;
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
		initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	reserve_bootmem(phys_base, size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return max_pfn;
}
285
/*
 * check_pgt_cache
 *
 * This is called at the end of unmapping of VMA (zap_page_range),
 * to rescan the page cache for architecture specific things,
 * presumably something like sun4/sun4c PMEGs. Most architectures
 * define check_pgt_cache empty.
 *
 * We simply copy the 2.4 implementation for now.
 */
/* Low/high water marks (in cached page tables) passed to the trimmer. */
int pgt_cache_water[2] = { 25, 50 };

void check_pgt_cache(void)
{
	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
302
303/*
304 * paging_init() sets up the page tables: We call the MMU specific
305 * init routine based upon the Sun model type on the Sparc.
306 *
307 */
308extern void sun4c_paging_init(void);
309extern void srmmu_paging_init(void);
310extern void device_scan(void);
311
312void __init paging_init(void)
313{
314 switch(sparc_cpu_model) {
315 case sun4c:
316 case sun4e:
317 case sun4:
318 sun4c_paging_init();
319 sparc_unmapped_base = 0xe0000000;
320 BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
321 break;
322 case sun4m:
323 case sun4d:
324 srmmu_paging_init();
325 sparc_unmapped_base = 0x50000000;
326 BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
327 break;
328 default:
329 prom_printf("paging_init: Cannot init paging on this Sparc\n");
330 prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
331 prom_printf("paging_init: Halting...\n");
332 prom_halt();
333 };
334
335 /* Initialize the protection map with non-constant, MMU dependent values. */
336 protection_map[0] = PAGE_NONE;
337 protection_map[1] = PAGE_READONLY;
338 protection_map[2] = PAGE_COPY;
339 protection_map[3] = PAGE_COPY;
340 protection_map[4] = PAGE_READONLY;
341 protection_map[5] = PAGE_READONLY;
342 protection_map[6] = PAGE_COPY;
343 protection_map[7] = PAGE_COPY;
344 protection_map[8] = PAGE_NONE;
345 protection_map[9] = PAGE_READONLY;
346 protection_map[10] = PAGE_SHARED;
347 protection_map[11] = PAGE_SHARED;
348 protection_map[12] = PAGE_READONLY;
349 protection_map[13] = PAGE_READONLY;
350 protection_map[14] = PAGE_SHARED;
351 protection_map[15] = PAGE_SHARED;
352 btfixup();
353 device_scan();
354}
355
356struct cache_palias *sparc_aliases;
357
358static void __init taint_real_pages(void)
359{
360 int i;
361
362 for (i = 0; sp_banks[i].num_bytes; i++) {
363 unsigned long start, end;
364
365 start = sp_banks[i].base_addr;
366 end = start + sp_banks[i].num_bytes;
367
368 while (start < end) {
369 set_bit(start >> 20, sparc_valid_addr_bitmap);
370 start += PAGE_SIZE;
371 }
372 }
373}
374
375void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
376{
377 unsigned long tmp;
378
379#ifdef CONFIG_DEBUG_HIGHMEM
380 printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
381#endif
382
383 for (tmp = start_pfn; tmp < end_pfn; tmp++) {
384 struct page *page = pfn_to_page(tmp);
385
386 ClearPageReserved(page);
387 set_bit(PG_highmem, &page->flags);
388 set_page_count(page, 1);
389 __free_page(page);
390 totalhigh_pages++;
391 }
392}
393
/*
 * mem_init - final memory setup: release bootmem to the buddy
 * allocator, publish highmem pages, and print the memory banner.
 *
 * Also allocates and fills sparc_valid_addr_bitmap (one bit per
 * megabyte of real RAM) and sanity-checks that the pkmap window does
 * not run into the fixmap area.
 */
void __init mem_init(void)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0; 
	int reservedpages = 0;
	int i;

	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		prom_printf("BUG: fixmap and pkmap areas overlap\n");
		prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
		       PKMAP_BASE,
		       (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		       FIXADDR_START);
		prom_printf("Please mail sparclinux@vger.kernel.org.\n");
		prom_halt();
	}


	/* Saves us work later. */
	memset((void *)&empty_zero_page, 0, PAGE_SIZE);

	/* Bitmap size: one bit per MB (>> 20), 32 bits per word (>> 5),
	 * rounded up by one word; i << 2 converts words to bytes.
	 */
	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
	i += 1;
	sparc_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);

	if (sparc_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc_valid_addr_bitmap, 0, i << 2);

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages = free_all_bootmem();

	/* Release the highmem portion of every bank. */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;

		if (end_pfn <= highstart_pfn)
			continue;

		if (start_pfn < highstart_pfn)
			start_pfn = highstart_pfn;

		map_high_region(start_pfn, end_pfn);
	}
	
	totalram_pages += totalhigh_pages;

	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	/* Ignore memory holes for the purpose of counting reserved pages */
	for (i=0; i < max_low_pfn; i++)
		if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
		    && PageReserved(pfn_to_page(i)))
			reservedpages++;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codepages << (PAGE_SHIFT-10),
	       reservedpages << (PAGE_SHIFT - 10),
	       datapages << (PAGE_SHIFT-10), 
	       initpages << (PAGE_SHIFT-10),
	       totalhigh_pages << (PAGE_SHIFT-10));
}
473
474void free_initmem (void)
475{
476 unsigned long addr;
477
478 addr = (unsigned long)(&__init_begin);
479 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
480 struct page *p;
481
482 p = virt_to_page(addr);
483
484 ClearPageReserved(p);
485 set_page_count(p, 1);
486 __free_page(p);
487 totalram_pages++;
488 num_physpages++;
489 }
490 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
491}
492
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * free_initrd_mem - release the pages holding the initial ramdisk.
 * @start: first virtual address of the initrd image
 * @end:   one past the last virtual address
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < end)
		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
		num_physpages++;
	}
}
#endif
508
/*
 * sparc_flush_page_to_ram - write a page's cached data back to memory.
 *
 * Highmem pages without a current kernel mapping have a NULL
 * page_address and are skipped.
 */
void sparc_flush_page_to_ram(struct page *page)
{
	unsigned long vaddr = (unsigned long)page_address(page);

	if (!vaddr)
		return;

	__flush_page_to_ram(vaddr);
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
new file mode 100644
index 000000000000..eefffa1dc5de
--- /dev/null
+++ b/arch/sparc/mm/io-unit.c
@@ -0,0 +1,318 @@
1/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
2 * io-unit.c: IO-UNIT specific routines for memory management.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/mm.h>
13#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
14#include <linux/bitops.h>
15
16#include <asm/scatterlist.h>
17#include <asm/pgalloc.h>
18#include <asm/pgtable.h>
19#include <asm/sbus.h>
20#include <asm/io.h>
21#include <asm/io-unit.h>
22#include <asm/mxcc.h>
23#include <asm/cacheflush.h>
24#include <asm/tlbflush.h>
25#include <asm/dma.h>
26
27/* #define IOUNIT_DEBUG */
28#ifdef IOUNIT_DEBUG
29#define IOD(x) printk(x)
30#else
31#define IOD(x) do { } while (0)
32#endif
33
34#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
35#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
36
37void __init
38iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
39{
40 iopte_t *xpt, *xptend;
41 struct iounit_struct *iounit;
42 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
43 struct resource r;
44
45 iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
46
47 memset(iounit, 0, sizeof(*iounit));
48 iounit->limit[0] = IOUNIT_BMAP1_START;
49 iounit->limit[1] = IOUNIT_BMAP2_START;
50 iounit->limit[2] = IOUNIT_BMAPM_START;
51 iounit->limit[3] = IOUNIT_BMAPM_END;
52 iounit->rotor[1] = IOUNIT_BMAP2_START;
53 iounit->rotor[2] = IOUNIT_BMAPM_START;
54
55 xpt = NULL;
56 if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
57 sizeof(iommu_promregs)) != -1) {
58 prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
59 memset(&r, 0, sizeof(r));
60 r.flags = iommu_promregs[2].which_io;
61 r.start = iommu_promregs[2].phys_addr;
62 xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
63 }
64 if(!xpt) panic("Cannot map External Page Table.");
65
66 sbus->iommu = (struct iommu_struct *)iounit;
67 iounit->page_table = xpt;
68
69 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
70 xpt < xptend;)
71 iopte_val(*xpt++) = 0;
72}
73
/* One has to hold iounit->lock to call this */
/*
 * iounit_get_area - allocate DVMA space and map a kernel buffer into it.
 * @iounit: the IO-UNIT to allocate from
 * @vaddr:  kernel virtual address of the buffer
 * @size:   buffer length in bytes
 *
 * Searches the iopte bitmap for a run of npages free slots using a
 * next-fit rotor per pool; the pool try-order is packed as nibbles in
 * 'i' and chosen by the request size.  On success the slots are
 * marked used, the ioptes are written (+0x100 advances the physical
 * page, since MKIOPTE stores phys >> 4), and the resulting DVMA
 * address is returned.  Panics if no space can be found.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	/* Nibble-packed pool preference order, picked by request size. */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}
	
	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
	
	/* j selects the pool; the pool spans limit[j-1]..limit[j]. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		/* Wrap once back to the pool start; after that, fall
		 * through to the next pool nibble (panic when none left).
		 */
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* Verify the whole run is free, not just the first bit. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
122
123static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
124{
125 unsigned long ret, flags;
126 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
127
128 spin_lock_irqsave(&iounit->lock, flags);
129 ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
130 spin_unlock_irqrestore(&iounit->lock, flags);
131 return ret;
132}
133
134static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
135{
136 unsigned long flags;
137 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
138
139 /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
140 spin_lock_irqsave(&iounit->lock, flags);
141 while (sz != 0) {
142 --sz;
143 sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
144 sg[sz].dvma_length = sg[sz].length;
145 }
146 spin_unlock_irqrestore(&iounit->lock, flags);
147}
148
149static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
150{
151 unsigned long flags;
152 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
153
154 spin_lock_irqsave(&iounit->lock, flags);
155 len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
156 vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
157 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
158 for (len += vaddr; vaddr < len; vaddr++)
159 clear_bit(vaddr, iounit->bmap);
160 spin_unlock_irqrestore(&iounit->lock, flags);
161}
162
/*
 * Release the DVMA mappings of an entire scatterlist.
 * Per-entry logic mirrors iounit_release_scsi_one(): compute the page
 * span of each dvma_address/length pair and clear its bitmap bits; the
 * whole list is processed (tail first) under one hold of iounit->lock.
 */
163static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
164{
165 unsigned long flags;
166 unsigned long vaddr, len;
167 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
168
169 spin_lock_irqsave(&iounit->lock, flags);
170 while (sz != 0) {
171 --sz;
172 len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
173 vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
174 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
175 for (len += vaddr; vaddr < len; vaddr++)
176 clear_bit(vaddr, iounit->bmap);
177 }
178 spin_unlock_irqrestore(&iounit->lock, flags);
179}
180
181#ifdef CONFIG_SBUS
/*
 * Establish a consistent ("DMA area") mapping at a caller-chosen DVMA
 * address.  For each page of [addr, addr+len): remap the kernel virtual
 * page at va into init_mm with the non-cacheable-capable dvma_prot
 * protection, and install a matching iopte in EVERY io-unit's page table
 * (the same index i is used for all buses).  Returns 0; *pba reports the
 * bus address, which is simply addr here.
 */
182static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
183{
184 unsigned long page, end;
185 pgprot_t dvma_prot;
186 iopte_t *iopte;
187 struct sbus_bus *sbus;
188
189 *pba = addr;
190
191 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
192 end = PAGE_ALIGN((addr + len));
193 while(addr < end) {
194 page = va;
195 {
196 pgd_t *pgdp;
197 pmd_t *pmdp;
198 pte_t *ptep;
199 long i;
200
201 pgdp = pgd_offset(&init_mm, addr);
202 pmdp = pmd_offset(pgdp, addr);
203 ptep = pte_offset_map(pmdp, addr);
204
205 set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
206
 /* Page-table slot index for this DVMA page. */
207 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
208
 /* Mirror the iopte into every io-unit on the system. */
209 for_each_sbus(sbus) {
210 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
211
212 iopte = (iopte_t *)(iounit->page_table + i);
213 *iopte = MKIOPTE(__pa(page));
214 }
215 }
216 addr += PAGE_SIZE;
217 va += PAGE_SIZE;
218 }
 /* Flush CPU caches/TLB so the new init_mm mapping is visible. */
219 flush_cache_all();
220 flush_tlb_all();
221
222 return 0;
223}
224
/* Tear-down counterpart of iounit_map_dma_area(); never implemented. */
225static void iounit_unmap_dma_area(unsigned long addr, int len)
226{
227 /* XXX Somebody please fill this in */
228}
229
/*
 * Translate a DVMA bus address back to the struct page it maps, by
 * reading the iopte of the first io-unit (all buses share identical
 * tables, per iounit_map_dma_area above).  The iopte stores the
 * physical address shifted right by 4, hence the PAGE_SHIFT-4 shift.
 */
230/* XXX We do not pass sbus device here, bad. */
231static struct page *iounit_translate_dvma(unsigned long addr)
232{
233 struct sbus_bus *sbus = sbus_root; /* They are all the same */
234 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
235 int i;
236 iopte_t *iopte;
237
238 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
239 iopte = (iopte_t *)(iounit->page_table + i);
240 return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
241}
242#endif
243
/*
 * mmu_lockarea/mmu_unlockarea hooks for the IO-unit: no locking is
 * needed here, so lockarea just echoes the address back and unlockarea
 * is a no-op (both are patched to trivial code via btfixup).
 */
244static char *iounit_lockarea(char *vaddr, unsigned long len)
245{
246/* FIXME: Write this */
247 return vaddr;
248}
249
250static void iounit_unlockarea(char *vaddr, unsigned long len)
251{
252/* FIXME: Write this */
253}
254
/*
 * Install the IO-unit implementations of the mmu_* DMA ops into the
 * btfixup tables at boot.  The trivial hooks are patched to inlined
 * "return arg0" / no-op code (BTFIXUPCALL_RETO0 / _NOP) rather than
 * real calls; the rest become normal calls.
 */
255void __init ld_mmu_iounit(void)
256{
257 BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
258 BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
259
260 BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
261 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
262 BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
263 BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
264
265#ifdef CONFIG_SBUS
266 BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
267 BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
268 BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
269#endif
270}
271
/*
 * Reserve a contiguous run of DVMA pages in the io-unit bitmap and
 * return its base bus address (the ioptes are filled in later by
 * iounit_map_dma_page()).
 *
 * The allocator tries three rotor/limit regions in turn: i = 0x0213
 * packs the region indices 3, 1, 2 four bits apart, and `i & 15` /
 * `i >>= 4` walk them.  Within a region, scanning starts at the saved
 * rotor and wraps once back to the region start; if no region can fit
 * `npages` consecutive free bits the kernel panics.  On success the
 * rotor is advanced and the claimed bits are set, all under
 * iounit->lock.
 */
272__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
273{
274 int i, j, k, npages;
275 unsigned long rotor, scan, limit;
276 unsigned long flags;
277 __u32 ret;
278 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
279
280 npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
281 i = 0x0213;
282 spin_lock_irqsave(&iounit->lock, flags);
283next: j = (i & 15);
284 rotor = iounit->rotor[j - 1];
285 limit = iounit->limit[j];
286 scan = rotor;
287nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
288 if (scan + npages > limit) {
289 if (limit != rotor) {
 /* Wrap once: retry from the start of this region up to the rotor. */
290 limit = rotor;
291 scan = iounit->limit[j - 1];
292 goto nexti;
293 }
 /* Region exhausted - move on to the next packed region index. */
294 i >>= 4;
295 if (!(i & 15))
296 panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
297 goto next;
298 }
 /* Verify the next npages-1 bits are also free; restart scan if not. */
299 for (k = 1, scan++; k < npages; k++)
300 if (test_bit(scan++, iounit->bmap))
301 goto nexti;
302 iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
303 scan -= npages;
304 ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
305 for (k = 0; k < npages; k++, scan++)
306 set_bit(scan, iounit->bmap);
307 spin_unlock_irqrestore(&iounit->lock, flags);
308 return ret;
309}
310
/*
 * Point one already-reserved DVMA page (see iounit_map_dma_init) at the
 * kernel address `addr`: write the iopte for vaddr's slot and return the
 * full bus address including addr's sub-page offset.
 */
311__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
312{
313 int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
314 struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
315
316 iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
317 return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
318}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
new file mode 100644
index 000000000000..489bf68d5f05
--- /dev/null
+++ b/arch/sparc/mm/iommu.c
@@ -0,0 +1,475 @@
1/*
2 * iommu.c: IOMMU specific routines for memory management.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
16
17#include <asm/scatterlist.h>
18#include <asm/pgalloc.h>
19#include <asm/pgtable.h>
20#include <asm/sbus.h>
21#include <asm/io.h>
22#include <asm/mxcc.h>
23#include <asm/mbus.h>
24#include <asm/cacheflush.h>
25#include <asm/tlbflush.h>
26#include <asm/bitext.h>
27#include <asm/iommu.h>
28#include <asm/dma.h>
29
30/*
31 * This can be sized dynamically, but we will do this
32 * only when we have a guidance about actual I/O pressures.
33 */
34#define IOMMU_RNGE IOMMU_RNGE_256MB
35#define IOMMU_START 0xF0000000
36#define IOMMU_WINSIZE (256*1024*1024U)
37#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
38#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
39
40/* srmmu.c */
41extern int viking_mxcc_present;
42BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
43#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
44extern int flush_page_for_dma_global;
45static int viking_flush;
46/* viking.S */
47extern void viking_flush_page(unsigned long page);
48extern void viking_mxcc_flush_page(unsigned long page);
49
50/*
51 * Values precomputed according to CPU type.
52 */
53static unsigned int ioperm_noc; /* Consistent mapping iopte flags */
54static pgprot_t dvma_prot; /* Consistent mapping pte flags */
55
56#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
57#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
58
/*
 * Probe and initialise the SRMMU IOMMU attached to an SBUS:
 *  - allocate the iommu_struct and map the IOMMU registers from the
 *    PROM "reg" property (prom_halt() on any failure - this runs at
 *    early boot where recovery is impossible);
 *  - enable the IOMMU with a 256MB DVMA range starting at IOMMU_START;
 *  - allocate a naturally-aligned page table (__get_free_pages order
 *    IOMMU_ORDER provides the required size-alignment), zero it and
 *    point the hardware base register at its physical address >> 4;
 *  - set up the allocation bitmap; on HyperSparc the bit_map colouring
 *    is configured so DVMA and physical page colours match (required
 *    for coherency on its virtually-indexed cache).
 */
59void __init
60iommu_init(int iommund, struct sbus_bus *sbus)
61{
62 unsigned int impl, vers;
63 unsigned long tmp;
64 struct iommu_struct *iommu;
65 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
66 struct resource r;
67 unsigned long *bitmap;
68
69 iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
70 if (!iommu) {
71 prom_printf("Unable to allocate iommu structure\n");
72 prom_halt();
73 }
74 iommu->regs = NULL;
75 if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
76 sizeof(iommu_promregs)) != -1) {
77 memset(&r, 0, sizeof(r));
78 r.flags = iommu_promregs[0].which_io;
79 r.start = iommu_promregs[0].phys_addr;
80 iommu->regs = (struct iommu_regs *)
81 sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
82 }
83 if (!iommu->regs) {
84 prom_printf("Cannot map IOMMU registers\n");
85 prom_halt();
86 }
87 impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
88 vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
89 tmp = iommu->regs->control;
90 tmp &= ~(IOMMU_CTRL_RNGE);
91 tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
92 iommu->regs->control = tmp;
93 iommu_invalidate(iommu->regs);
94 iommu->start = IOMMU_START;
95 iommu->end = 0xffffffff;
96
97 /* Allocate IOMMU page table */
98 /* Stupid alignment constraints give me a headache.
99 We need 256K or 512K or 1M or 2M area aligned to
100 its size and current gfp will fortunately give
101 it to us. */
102 tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
103 if (!tmp) {
104 prom_printf("Unable to allocate iommu table [0x%08x]\n",
105 IOMMU_NPTES*sizeof(iopte_t));
106 prom_halt();
107 }
108 iommu->page_table = (iopte_t *)tmp;
109
110 /* Initialize new table. */
111 memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
112 flush_cache_all();
113 flush_tlb_all();
114 iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
115 iommu_invalidate(iommu->regs);
116
117 bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
118 if (!bitmap) {
119 prom_printf("Unable to allocate iommu bitmap [%d]\n",
120 (int)(IOMMU_NPTES>>3));
121 prom_halt();
122 }
123 bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
124 /* To be coherent on HyperSparc, the page color of DVMA
125 * and physical addresses must match.
126 */
127 if (srmmu_modtype == HyperSparc)
128 iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
129 else
130 iommu->usemap.num_colors = 1;
131
132 printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
133 impl, vers, iommu->page_table,
134 (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
135
136 sbus->iommu = iommu;
137}
138
139/* This begs to be btfixup-ed by srmmu. */
140/* Flush the iotlb entries to ram. */
141/* This could be better if we didn't have to flush whole pages. */
/*
 * Push freshly-written ioptes out of the CPU cache so the IOMMU (which
 * reads page tables from RAM) sees them.  The span of ioptes is rounded
 * out to whole pages and flushed with whichever primitive the CPU needs:
 * MXCC flush, Viking flush, or the generic page-to-ram flush.
 */
142static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
143{
144 unsigned long start;
145 unsigned long end;
146
147 start = (unsigned long)iopte & PAGE_MASK;
148 end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
149 if (viking_mxcc_present) {
150 while(start < end) {
151 viking_mxcc_flush_page(start);
152 start += PAGE_SIZE;
153 }
154 } else if (viking_flush) {
155 while(start < end) {
156 viking_flush_page(start);
157 start += PAGE_SIZE;
158 }
159 } else {
160 while(start < end) {
161 __flush_page_to_ram(start);
162 start += PAGE_SIZE;
163 }
164 }
165}
166
/*
 * Core mapping primitive: allocate `npages` consecutive iopte slots
 * (coloured by the first page's pfn, see iommu_init) and fill them with
 * cacheable/writable/valid ioptes for the given run of struct pages.
 * Each slot's old IOMMU TLB entry is invalidated as it is written, and
 * the new ioptes are flushed to RAM before returning the base bus
 * address.  Panics if the DVMA map is exhausted.
 */
167static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
168{
169 struct iommu_struct *iommu = sbus->iommu;
170 int ioptex;
171 iopte_t *iopte, *iopte0;
172 unsigned int busa, busa0;
173 int i;
174
175 /* page color = pfn of page */
176 ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
177 if (ioptex < 0)
178 panic("iommu out");
179 busa0 = iommu->start + (ioptex << PAGE_SHIFT);
180 iopte0 = &iommu->page_table[ioptex];
181
182 busa = busa0;
183 iopte = iopte0;
184 for (i = 0; i < npages; i++) {
185 iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
186 iommu_invalidate_page(iommu->regs, busa);
187 busa += PAGE_SIZE;
188 iopte++;
189 page++;
190 }
191
192 iommu_flush_iotlb(iopte0, npages);
193
194 return busa0;
195}
196
/*
 * Map a single virtually-contiguous buffer for SCSI DMA: round
 * (vaddr,len) out to whole pages, map them via iommu_get_one() and
 * return the bus address with the sub-page offset restored.
 */
197static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
198 struct sbus_bus *sbus)
199{
200 unsigned long off;
201 int npages;
202 struct page *page;
203 u32 busa;
204
205 off = (unsigned long)vaddr & ~PAGE_MASK;
206 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
207 page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
208 busa = iommu_get_one(page, npages, sbus);
209 return busa + off;
210}
211
/* Variant for I/O-coherent CPUs: no cache flushing required. */
212static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
213{
214 return iommu_get_scsi_one(vaddr, len, sbus);
215}
216
/* Variant for CPUs whose flush_page_for_dma flushes everything:
 * one global flush (argument 0) before mapping. */
217static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
218{
219 flush_page_for_dma(0);
220 return iommu_get_scsi_one(vaddr, len, sbus);
221}
222
/* Variant for CPUs needing per-page flushes: flush every page the
 * buffer touches before mapping. */
223static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
224{
225 unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
226
227 while(page < ((unsigned long)(vaddr + len))) {
228 flush_page_for_dma(page);
229 page += PAGE_SIZE;
230 }
231 return iommu_get_scsi_one(vaddr, len, sbus);
232}
233
/*
 * Scatterlist mapping, three flush-policy variants (selected at boot by
 * ld_mmu_iommu).  Each entry is mapped page-rounded via iommu_get_one()
 * and its dvma_address/dvma_length filled in, preserving the sub-page
 * offset.
 */

/* I/O-coherent CPUs: map entries with no cache flushing. */
234static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
235{
236 int n;
237
238 while (sz != 0) {
239 --sz;
240 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
241 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
242 sg->dvma_length = (__u32) sg->length;
243 sg++;
244 }
245}
246
/* Global-flush CPUs: one flush_page_for_dma(0) covers the whole list. */
247static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
248{
249 int n;
250
251 flush_page_for_dma(0);
252 while (sz != 0) {
253 --sz;
254 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
255 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
256 sg->dvma_length = (__u32) sg->length;
257 sg++;
258 }
259}
260
/* Per-page-flush CPUs: flush each mapped (lowmem) page, skipping
 * duplicates of the immediately preceding page via `oldpage`.  Highmem
 * pages (page_address() == NULL) are assumed not to be cached. */
261static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
262{
263 unsigned long page, oldpage = 0;
264 int n, i;
265
266 while(sz != 0) {
267 --sz;
268
269 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
270
271 /*
272 * We expect unmapped highmem pages to be not in the cache.
273 * XXX Is this a good assumption?
274 * XXX What if someone else unmaps it here and races us?
275 */
276 if ((page = (unsigned long) page_address(sg->page)) != 0) {
277 for (i = 0; i < n; i++) {
278 if (page != oldpage) { /* Already flushed? */
279 flush_page_for_dma(page);
280 oldpage = page;
281 }
282 page += PAGE_SIZE;
283 }
284 }
285
286 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
287 sg->dvma_length = (__u32) sg->length;
288 sg++;
289 }
290}
291
/*
 * Core unmapping primitive, inverse of iommu_get_one(): zero the
 * `npages` ioptes starting at busa's slot, invalidate each page's IOMMU
 * TLB entry, and return the slots to the usemap.  BUGs on a bus address
 * below the DVMA window.
 */
292static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
293{
294 struct iommu_struct *iommu = sbus->iommu;
295 int ioptex;
296 int i;
297
298 if (busa < iommu->start)
299 BUG();
300 ioptex = (busa - iommu->start) >> PAGE_SHIFT;
301 for (i = 0; i < npages; i++) {
302 iopte_val(iommu->page_table[ioptex + i]) = 0;
303 iommu_invalidate_page(iommu->regs, busa);
304 busa += PAGE_SIZE;
305 }
306 bit_map_clear(&iommu->usemap, ioptex, npages);
307}
308
/*
 * Release a single SCSI DMA mapping: page-round (vaddr,len) the same
 * way iommu_get_scsi_one() did and hand the span to iommu_release_one().
 */
309static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
310{
311 unsigned long off;
312 int npages;
313
314 off = vaddr & ~PAGE_MASK;
315 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
316 iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
317}
318
/*
 * Release every entry of a mapped scatterlist.  dvma_address is
 * poisoned with 0x21212121 ("!!!!") afterwards to make use-after-unmap
 * visible.
 */
319static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
320{
321 int n;
322
323 while(sz != 0) {
324 --sz;
325
326 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
327 iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
328 sg->dvma_address = 0x21212121;
329 sg++;
330 }
331}
332
333#ifdef CONFIG_SBUS
/*
 * Establish a consistent DMA mapping: allocate iopte slots (coloured by
 * physical address), remap each kernel page at `va` into init_mm with
 * the possibly non-cacheable dvma_prot, flush the old cacheable alias
 * out of the CPU cache first, and write matching ioptes with the
 * ioperm_noc flags.  All three of va, addr and len must be
 * page-aligned.  The resulting bus address is returned through *pba.
 */
334static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
335 unsigned long addr, int len)
336{
337 unsigned long page, end;
338 struct iommu_struct *iommu = sbus_root->iommu;
339 iopte_t *iopte = iommu->page_table;
340 iopte_t *first;
341 int ioptex;
342
343 if ((va & ~PAGE_MASK) != 0) BUG();
344 if ((addr & ~PAGE_MASK) != 0) BUG();
345 if ((len & ~PAGE_MASK) != 0) BUG();
346
347 /* page color = physical address */
348 ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
349 addr >> PAGE_SHIFT);
350 if (ioptex < 0)
351 panic("iommu out");
352
353 iopte += ioptex;
354 first = iopte;
355 end = addr + len;
356 while(addr < end) {
357 page = va;
358 {
359 pgd_t *pgdp;
360 pmd_t *pmdp;
361 pte_t *ptep;
362
 /* Evict the page's old cacheable contents before remapping. */
363 if (viking_mxcc_present)
364 viking_mxcc_flush_page(page);
365 else if (viking_flush)
366 viking_flush_page(page);
367 else
368 __flush_page_to_ram(page);
369
370 pgdp = pgd_offset(&init_mm, addr);
371 pmdp = pmd_offset(pgdp, addr);
372 ptep = pte_offset_map(pmdp, addr);
373
374 set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
375 }
376 iopte_val(*iopte++) =
377 MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
378 addr += PAGE_SIZE;
379 va += PAGE_SIZE;
380 }
381 /* P3: why do we need this?
382 *
383 * DAVEM: Because there are several aspects, none of which
384 * are handled by a single interface. Some cpus are
385 * completely not I/O DMA coherent, and some have
386 * virtually indexed caches. The driver DMA flushing
387 * methods handle the former case, but here during
388 * IOMMU page table modifications, and usage of non-cacheable
389 * cpu mappings of pages potentially in the cpu caches, we have
390 * to handle the latter case as well.
391 */
392 flush_cache_all();
393 iommu_flush_iotlb(first, len >> PAGE_SHIFT);
394 flush_tlb_all();
395 iommu_invalidate(iommu->regs);
396
397 *pba = iommu->start + (ioptex << PAGE_SHIFT);
398 return 0;
399}
400
/*
 * Undo iommu_map_dma_area(): zero the ioptes of [busa, busa+len),
 * flush the CPU TLB, invalidate the IOMMU and return the slots to the
 * usemap.  busa and len must be page-aligned.
 */
401static void iommu_unmap_dma_area(unsigned long busa, int len)
402{
403 struct iommu_struct *iommu = sbus_root->iommu;
404 iopte_t *iopte = iommu->page_table;
405 unsigned long end;
406 int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
407
408 if ((busa & ~PAGE_MASK) != 0) BUG();
409 if ((len & ~PAGE_MASK) != 0) BUG();
410
411 iopte += ioptex;
412 end = busa + len;
413 while (busa < end) {
414 iopte_val(*iopte++) = 0;
415 busa += PAGE_SIZE;
416 }
417 flush_tlb_all();
418 iommu_invalidate(iommu->regs);
419 bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
420}
421
/*
 * Translate a DVMA bus address back to its struct page by reading the
 * corresponding iopte; ioptes hold the physical address shifted right
 * by 4, hence the PAGE_SHIFT-4 shift to recover the pfn.
 */
422static struct page *iommu_translate_dvma(unsigned long busa)
423{
424 struct iommu_struct *iommu = sbus_root->iommu;
425 iopte_t *iopte = iommu->page_table;
426
427 iopte += ((busa - iommu->start) >> PAGE_SHIFT);
428 return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
429}
430#endif
431
/* mmu_lockarea/mmu_unlockarea hooks: nothing to do for the IOMMU, so
 * lockarea echoes the address back and unlockarea is a no-op (both are
 * patched to trivial code via btfixup in ld_mmu_iommu). */
432static char *iommu_lockarea(char *vaddr, unsigned long len)
433{
434 return vaddr;
435}
436
437static void iommu_unlockarea(char *vaddr, unsigned long len)
438{
439}
440
/*
 * Install the IOMMU implementations of the mmu_* DMA ops via btfixup,
 * choosing the get_scsi variant by the CPU's flush requirements:
 * no flush needed (I/O-coherent), one global flush, or per-page
 * flushes.  Also precomputes dvma_prot/ioperm_noc: Viking-MXCC and
 * HyperSparc keep consistent mappings cacheable, all others map them
 * non-cacheable.
 */
441void __init ld_mmu_iommu(void)
442{
443 viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
444 BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
445 BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
446
447 if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
448 /* IO coherent chip */
449 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
450 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
451 } else if (flush_page_for_dma_global) {
452 /* flush_page_for_dma flushes everything, no matter of what page is it */
453 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
454 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
455 } else {
456 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
457 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
458 }
459 BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
460 BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
461
462#ifdef CONFIG_SBUS
463 BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
464 BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
465 BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
466#endif
467
468 if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
469 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
470 ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
471 } else {
472 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
473 ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
474 }
475}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
new file mode 100644
index 000000000000..e9f9571601ba
--- /dev/null
+++ b/arch/sparc/mm/loadmmu.c
@@ -0,0 +1,46 @@
1/* $Id: loadmmu.c,v 1.56 2000/02/08 20:24:21 davem Exp $
2 * loadmmu.c: This code loads up all the mm function pointers once the
3 * machine type has been determined. It also sets the static
4 * mmu values such as PAGE_NONE, etc.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/init.h>
13
14#include <asm/system.h>
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/a.out.h>
18#include <asm/mmu_context.h>
19#include <asm/oplib.h>
20
21struct ctx_list *ctx_list_pool;
22struct ctx_list ctx_free;
23struct ctx_list ctx_used;
24
25unsigned int pg_iobits;
26
27extern void ld_mmu_sun4c(void);
28extern void ld_mmu_srmmu(void);
29
/*
 * Dispatch to the MMU-specific loader for the detected machine type
 * (sun4/sun4c use the sun4c MMU code; sun4m/sun4d use SRMMU), halting
 * at the PROM on an unknown model, then run btfixup() to patch all the
 * registered mmu_* call sites.
 */
30void __init load_mmu(void)
31{
32 switch(sparc_cpu_model) {
33 case sun4c:
34 case sun4:
35 ld_mmu_sun4c();
36 break;
37 case sun4m:
38 case sun4d:
39 ld_mmu_srmmu();
40 break;
41 default:
42 prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
43 prom_halt();
44 }
45 btfixup();
46}
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
new file mode 100644
index 000000000000..9e215659697e
--- /dev/null
+++ b/arch/sparc/mm/nosrmmu.c
@@ -0,0 +1,59 @@
1/* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $
2 * nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
3 * so that it does not need srmmu and avoid ifdefs.
4 *
5 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/init.h>
11#include <asm/mbus.h>
12#include <asm/sbus.h>
13
14static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";
15
16enum mbus_module srmmu_modtype;
17void *srmmu_nocache_pool;
18
19int vac_cache_size = 0;
20
/*
 * Stub bodies for sun4-only kernels: SRMMU entry points that must link
 * but must never run.  The probe/setup paths halt at the PROM via
 * should_not_happen(); the remaining stubs are silent no-ops or return
 * 0.
 */
21static void __init should_not_happen(void)
22{
23 prom_printf(shouldnothappen);
24 prom_halt();
25}
26
27void __init srmmu_frob_mem_map(unsigned long start_mem)
28{
29 should_not_happen();
30}
31
32unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
33{
34 should_not_happen();
35 return 0;
36}
37
38void __init ld_mmu_srmmu(void)
39{
40 should_not_happen();
41}
42
43void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
44{
45}
46
47void srmmu_unmapioaddr(unsigned long virt_addr)
48{
49}
50
51__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
52{
53 return 0;
54}
55
56__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
57{
58 return 0;
59}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
new file mode 100644
index 000000000000..ea2e2105341d
--- /dev/null
+++ b/arch/sparc/mm/nosun4c.c
@@ -0,0 +1,77 @@
1/* $Id: nosun4c.c,v 1.3 2000/02/14 04:52:36 jj Exp $
2 * nosun4c.c: This file is a bunch of dummies for SMP compiles,
3 * so that it does not need sun4c and avoid ifdefs.
4 *
5 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/init.h>
11#include <asm/pgtable.h>
12
13static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
14
15/* Dummies */
16struct sun4c_mmu_ring {
17 unsigned long xxx1[3];
18 unsigned char xxx2[2];
19 int xxx3;
20};
21struct sun4c_mmu_ring sun4c_kernel_ring;
22struct sun4c_mmu_ring sun4c_kfree_ring;
23unsigned long sun4c_kernel_faults;
24unsigned long *sun4c_memerr_reg;
25
/*
 * Stub bodies for SMP kernels (sun4m/sun4d only): sun4c entry points
 * that must link but must never run.  Probe/init paths halt at the
 * PROM via should_not_happen(); the rest are no-ops or return NULL.
 */
26static void __init should_not_happen(void)
27{
28 prom_printf(shouldnothappen);
29 prom_halt();
30}
31
32unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
33{
34 should_not_happen();
35 return 0;
36}
37
38void __init ld_mmu_sun4c(void)
39{
40 should_not_happen();
41}
42
43void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
44{
45}
46
47void sun4c_unmapioaddr(unsigned long virt_addr)
48{
49}
50
51void sun4c_complete_all_stores(void)
52{
53}
54
55pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
56{
57 return NULL;
58}
59
60pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
61{
62 return NULL;
63}
64
65void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
66{
67}
68
69void __init sun4c_probe_vac(void)
70{
71 should_not_happen();
72}
73
74void __init sun4c_probe_memerr_reg(void)
75{
76 should_not_happen();
77}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
new file mode 100644
index 000000000000..c89a803cbc20
--- /dev/null
+++ b/arch/sparc/mm/srmmu.c
@@ -0,0 +1,2274 @@
1/*
2 * srmmu.c: SRMMU specific routines for memory management.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/pagemap.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/bootmem.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
22
23#include <asm/bitext.h>
24#include <asm/page.h>
25#include <asm/pgalloc.h>
26#include <asm/pgtable.h>
27#include <asm/io.h>
28#include <asm/kdebug.h>
29#include <asm/vaddrs.h>
30#include <asm/traps.h>
31#include <asm/smp.h>
32#include <asm/mbus.h>
33#include <asm/cache.h>
34#include <asm/oplib.h>
35#include <asm/sbus.h>
36#include <asm/asi.h>
37#include <asm/msi.h>
38#include <asm/a.out.h>
39#include <asm/mmu_context.h>
40#include <asm/io-unit.h>
41#include <asm/cacheflush.h>
42#include <asm/tlbflush.h>
43
44/* Now the cpu specific definitions. */
45#include <asm/viking.h>
46#include <asm/mxcc.h>
47#include <asm/ross.h>
48#include <asm/tsunami.h>
49#include <asm/swift.h>
50#include <asm/turbosparc.h>
51
52#include <asm/btfixup.h>
53
54enum mbus_module srmmu_modtype;
55unsigned int hwbug_bitmask;
56int vac_cache_size;
57int vac_line_size;
58
59extern struct resource sparc_iomap;
60
61extern unsigned long last_valid_pfn;
62
63extern unsigned long page_kernel;
64
65pgd_t *srmmu_swapper_pg_dir;
66
67#ifdef CONFIG_SMP
68#define FLUSH_BEGIN(mm)
69#define FLUSH_END
70#else
71#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
72#define FLUSH_END }
73#endif
74
75BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
76#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
77
78int flush_page_for_dma_global = 1;
79
80#ifdef CONFIG_SMP
81BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
82#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
83#endif
84
85char *srmmu_name;
86
87ctxd_t *srmmu_ctx_table_phys;
88ctxd_t *srmmu_context_table;
89
90int viking_mxcc_present;
91static DEFINE_SPINLOCK(srmmu_context_spinlock);
92
93int is_hypersparc;
94
95/*
96 * In general all page table modifications should use the V8 atomic
97 * swap instruction. This insures the mmu and the cpu are in sync
98 * with respect to ref/mod bits in the page tables.
99 */
/* Atomically exchange *addr with value using the V8 swap instruction
 * (see the comment above: keeps CPU and MMU ref/mod updates in sync);
 * returns the previous contents of *addr. */
100static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
101{
102 __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
103 return value;
104}
105
/* Store a pte with the atomic swap, discarding the old value. */
106static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
107{
108 srmmu_swap((unsigned long *)ptep, pte_val(pteval));
109}
110
111/* The very generic SRMMU page table operations. */
/* True when a pte/ptd value refers to device (I/O) space: any of the
 * top four bits set. */
112static inline int srmmu_device_memory(unsigned long x)
113{
114 return ((x & 0xF0000000) != 0);
115}
116
117int srmmu_cache_pagetables;
118
119/* these will be initialized in srmmu_nocache_calcsize() */
120unsigned long srmmu_nocache_size;
121unsigned long srmmu_nocache_end;
122
123/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
124#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
125
126/* The context table is a nocache user with the biggest alignment needs. */
127#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
128
129void *srmmu_nocache_pool;
130void *srmmu_nocache_bitmap;
131static struct bit_map srmmu_nocache_map;
132
/*
 * Extract the pfn from a pte.  SRMMU ptes store the physical address
 * shifted right by 4, hence the PAGE_SHIFT-4 shift.  Device-memory
 * ptes yield ~0UL so pfn_valid() fails and copy_one_pte() copies the
 * pte verbatim (see inline comment).
 */
133static unsigned long srmmu_pte_pfn(pte_t pte)
134{
135 if (srmmu_device_memory(pte_val(pte))) {
136 /* Just return something that will cause
137 * pfn_valid() to return false. This makes
138 * copy_one_pte() to just directly copy to
139 * PTE over.
140 */
141 return ~0UL;
142 }
143 return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
144}
145
/* struct page of the page table a pmd points at; BUGs on device
 * memory, where there is no struct page. */
146static struct page *srmmu_pmd_page(pmd_t pmd)
147{
148
149 if (srmmu_device_memory(pmd_val(pmd)))
150 BUG();
151 return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
152}
153
/*
 * Generic pte/pmd/pgd predicates and bit manipulators.  Table entries
 * store a physical address >> 4 plus SRMMU type/permission bits;
 * "none" means no address bits (low 28 bits clear), "bad"/"present"
 * test the SRMMU_ET_* entry-type field.  The mk*/wrprotect helpers
 * return a modified copy; the *_clear helpers store through the atomic
 * srmmu_set_pte().
 */
/* Nocache virtual address of the table a pgd points at (~0 for device
 * memory). */
154static inline unsigned long srmmu_pgd_page(pgd_t pgd)
155{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
156
157
158static inline int srmmu_pte_none(pte_t pte)
159{ return !(pte_val(pte) & 0xFFFFFFF); }
160
161static inline int srmmu_pte_present(pte_t pte)
162{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
163
164static inline int srmmu_pte_read(pte_t pte)
165{ return !(pte_val(pte) & SRMMU_NOREAD); }
166
167static inline void srmmu_pte_clear(pte_t *ptep)
168{ srmmu_set_pte(ptep, __pte(0)); }
169
170static inline int srmmu_pmd_none(pmd_t pmd)
171{ return !(pmd_val(pmd) & 0xFFFFFFF); }
172
173static inline int srmmu_pmd_bad(pmd_t pmd)
174{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
175
176static inline int srmmu_pmd_present(pmd_t pmd)
177{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
178
/* A Linux pmd spans several hardware ptds (pmdv[]); clear them all. */
179static inline void srmmu_pmd_clear(pmd_t *pmdp) {
180 int i;
181 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
182 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
183}
184
185static inline int srmmu_pgd_none(pgd_t pgd)
186{ return !(pgd_val(pgd) & 0xFFFFFFF); }
187
188static inline int srmmu_pgd_bad(pgd_t pgd)
189{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
190
191static inline int srmmu_pgd_present(pgd_t pgd)
192{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
193
194static inline void srmmu_pgd_clear(pgd_t * pgdp)
195{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
196
197static inline pte_t srmmu_pte_wrprotect(pte_t pte)
198{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
199
200static inline pte_t srmmu_pte_mkclean(pte_t pte)
201{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
202
203static inline pte_t srmmu_pte_mkold(pte_t pte)
204{ return __pte(pte_val(pte) & ~SRMMU_REF);}
205
206static inline pte_t srmmu_pte_mkwrite(pte_t pte)
207{ return __pte(pte_val(pte) | SRMMU_WRITE);}
208
209static inline pte_t srmmu_pte_mkdirty(pte_t pte)
210{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
211
212static inline pte_t srmmu_pte_mkyoung(pte_t pte)
213{ return __pte(pte_val(pte) | SRMMU_REF);}
214
215/*
216 * Conversion functions: convert a page and protection to a page entry,
217 * and a page entry and page directory to the page they refer to.
218 */
/* pte from struct page: pfn goes into the address field (paddr >> 4). */
219static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
220{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
221
/* pte directly from a physical address. */
222static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
223{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
224
/* pte for I/O: the 4-bit bus space selector lands in bits 31:28. */
225static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
226{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
227
228/* XXX should we hyper_flush_whole_icache here - Anton */
/* Point a context-table entry at a pgd (stored as nocache paddr >> 4
 * with the PTD entry type). */
229static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
230{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
231
/* Point a pgd entry at a pmd table, same encoding as above. */
232static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
233{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
234
/*
 * Point a Linux pmd (which covers several hardware ptds, pmdv[]) at a
 * pte table in the nocache pool: each hardware ptd gets a consecutive
 * SRMMU_REAL_PTRS_PER_PTE-sized slice of the table.
 */
235static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
236{
237 unsigned long ptp; /* Physical address, shifted right by 4 */
238 int i;
239
240 ptp = __nocache_pa((unsigned long) ptep) >> 4;
241 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
242 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
243 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
244 }
245}
246
/* As srmmu_pmd_set(), but the pte table is given as a struct page. */
247static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
248{
249 unsigned long ptp; /* Physical address, shifted right by 4 */
250 int i;
251
252 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
253 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
254 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
255 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
256 }
257}
258
/* Replace a pte's protection bits, keeping the SRMMU_CHG_MASK portion
 * (address plus sticky state bits). */
259static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
260{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
261
262/* to find an entry in a top-level page table... */
263extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
264{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
265
266/* Find an entry in the second-level page table.. */
267static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
268{
269 return (pmd_t *) srmmu_pgd_page(*dir) +
270 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
271}
272
273/* Find an entry in the third-level page table.. */
/* The pte table base is taken from pmdv[0], the first of the hardware
 * ptds covering this Linux pmd (see srmmu_pmd_set). */
274static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
275{
276 void *pte;
277
278 pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
279 return (pte_t *) pte +
280 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
281}
282
283static unsigned long srmmu_swp_type(swp_entry_t entry)
284{
285 return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
286}
287
288static unsigned long srmmu_swp_offset(swp_entry_t entry)
289{
290 return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
291}
292
293static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
294{
295 return (swp_entry_t) {
296 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
297 | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
298}
299
300/*
301 * size: bytes to allocate in the nocache area.
302 * align: bytes, number to align at.
303 * Returns the virtual address of the allocated area.
304 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	/* Clamp undersized requests up to one bitmap granule.
	 * NOTE(review): SRMMU_NOCACHE_BITMAP_SHIFT is used here both as a
	 * byte granule and, below, as a shift count — confirm intended. */
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	/* Round an unaligned size up; the +(granule-1) is folded away by
	 * the right shift when converting to a bit count below. */
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned int nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	/* Reserve (size/granule) consecutive bits, suitably aligned, in
	 * the nocache occupancy bitmap. */
	offset = bit_map_string_get(&srmmu_nocache_map,
		       		size >> SRMMU_NOCACHE_BITMAP_SHIFT,
 				align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
	    	    size, (int) srmmu_nocache_size,
		    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	/* Translate the bitmap offset back to a nocache virtual address. */
	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
331
/*
 * Allocate and zero memory from the nocache area.
 * Returns the virtual address of the area, or 0 on failure.
 *
 * Fix: the declaration specifiers were written "unsigned inline long";
 * C99 (6.7.4) and kernel style want the function specifier first:
 * "inline unsigned long".  Behavior is unchanged.
 */
inline unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}
343
/*
 * Return an area previously obtained from __srmmu_get_nocache() to the
 * nocache pool.  VADDR and SIZE must describe exactly what was handed
 * out: inside the pool, a power-of-two size of at least one bitmap
 * granule, and naturally aligned.  Violations are kernel bugs.
 */
void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (size & (size-1)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	/* Convert back to bitmap coordinates and release the bits. */
	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
376
377void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
378
379extern unsigned long probe_memory(void); /* in fault.c */
380
381/*
382 * Reserve nocache dynamically proportionally to the amount of
383 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
384 */
385void srmmu_nocache_calcsize(void)
386{
387 unsigned long sysmemavail = probe_memory() / 1024;
388 int srmmu_nocache_npages;
389
390 srmmu_nocache_npages =
391 sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
392
393 /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
394 // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
395 if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
396 srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
397
398 /* anything above 1280 blows up */
399 if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
400 srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
401
402 srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
403 srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
404}
405
/*
 * Boot-time setup of the nocache pool: allocate the pool and its
 * occupancy bitmap from bootmem, create swapper_pg_dir inside the
 * pool, then map every pool page (cached only if
 * srmmu_cache_pagetables allows).  Pool pointers are accessed through
 * __nocache_fix() because the nocache mappings are not live yet.
 */
void srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	/* One bitmap bit per SRMMU_NOCACHE_BITMAP_SHIFT-sized granule. */
	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	/* bitmap_bits >> 3: bits to bytes. */
	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	/* The kernel pgd must be aligned to its own size. */
	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	/* Insert a PTE for every page of the pool. */
	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
452
453static inline pgd_t *srmmu_get_pgd_fast(void)
454{
455 pgd_t *pgd = NULL;
456
457 pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
458 if (pgd) {
459 pgd_t *init = pgd_offset_k(0);
460 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
461 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
462 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
463 }
464
465 return pgd;
466}
467
468static void srmmu_free_pgd_fast(pgd_t *pgd)
469{
470 srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
471}
472
473static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
474{
475 return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
476}
477
478static void srmmu_pmd_free(pmd_t * pmd)
479{
480 srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
481}
482
483/*
484 * Hardware needs alignment to 256 only, but we align to whole page size
485 * to reduce fragmentation problems due to the buddy principle.
486 * XXX Provide actual fragmentation statistics in /proc.
487 *
488 * Alignments up to the page size are the same for physical and virtual
489 * addresses of the nocache area.
490 */
491static pte_t *
492srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
493{
494 return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
495}
496
497static struct page *
498srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
499{
500 unsigned long pte;
501
502 if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
503 return NULL;
504 return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
505}
506
507static void srmmu_free_pte_fast(pte_t *pte)
508{
509 srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
510}
511
512static void srmmu_pte_free(struct page *pte)
513{
514 unsigned long p;
515
516 p = (unsigned long)page_address(pte); /* Cached address (for test) */
517 if (p == 0)
518 BUG();
519 p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
520 p = (unsigned long) __nocache_va(p); /* Nocached virtual */
521 srmmu_free_nocache(p, PTE_SIZE);
522}
523
524/*
525 */
/*
 * Assign an MMU context to MM.  Take one from the free list if
 * possible; otherwise steal the least recently used context, skipping
 * OLD_MM (the mm we are switching away from), flushing the victim's
 * cache and TLB and marking it NO_CONTEXT.  Called with
 * srmmu_context_spinlock held (see srmmu_switch_mm).
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		/* Free context available: move it to the used list. */
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	/* None free: steal the head of the used list, but never the
	 * context we are switching away from. */
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}
551
552static inline void free_context(int context)
553{
554 struct ctx_list *ctx_old;
555
556 ctx_old = ctx_list_pool + context;
557 remove_from_ctx_list(ctx_old);
558 add_to_free_ctxlist(ctx_old);
559}
560
561
/*
 * Switch this CPU to MM: allocate a context under the context lock if
 * MM has none, hook MM's pgd into the context table, and finally load
 * the hardware context register.  On HyperSparc the whole icache is
 * flushed first.
 */
static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
    struct task_struct *tsk, int cpu)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		/* ctxd update outside the lock: the context is ours now. */
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}
577
578/* Low level IO area allocation on the SRMMU. */
/* Low level IO area allocation on the SRMMU: install a privileged PTE
 * mapping VIRT_ADDR to PHYSADDR in I/O space BUS_TYPE. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
    unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	/* Push any cached lines for this page out before remapping. */
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
}
603
604static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
605 unsigned long xva, unsigned int len)
606{
607 while (len != 0) {
608 len -= PAGE_SIZE;
609 srmmu_mapioaddr(xpa, xva, bus);
610 xva += PAGE_SIZE;
611 xpa += PAGE_SIZE;
612 }
613 flush_tlb_all();
614}
615
616static inline void srmmu_unmapioaddr(unsigned long virt_addr)
617{
618 pgd_t *pgdp;
619 pmd_t *pmdp;
620 pte_t *ptep;
621
622 pgdp = pgd_offset_k(virt_addr);
623 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
624 ptep = srmmu_pte_offset(pmdp, virt_addr);
625
626 /* No need to flush uncacheable page. */
627 srmmu_pte_clear(ptep);
628}
629
630static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
631{
632 while (len != 0) {
633 len -= PAGE_SIZE;
634 srmmu_unmapioaddr(virt_addr);
635 virt_addr += PAGE_SIZE;
636 }
637 flush_tlb_all();
638}
639
640/*
641 * On the SRMMU we do not have the problems with limited tlb entries
642 * for mapping kernel pages, so we just take things from the free page
643 * pool. As a side effect we are putting a little too much pressure
644 * on the gfp() subsystem. This setup also makes the logic of the
645 * iommu mapping code a lot easier as we can transparently handle
646 * mappings on the kernel stack without any special code as we did
647 * need on the sun4c.
648 */
/*
 * Allocate a thread_info (kernel stack) from the page allocator.
 * Returns NULL on allocation failure.  With stack-usage debugging the
 * pages are zeroed so unused stack depth can be measured.
 */
struct thread_info *srmmu_alloc_thread_info(void)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* CONFIG_DEBUG_STACK_USAGE */

	return ret;
}
662
663static void srmmu_free_thread_info(struct thread_info *ti)
664{
665 free_pages((unsigned long)ti, THREAD_INFO_ORDER);
666}
667
668/* tsunami.S */
669extern void tsunami_flush_cache_all(void);
670extern void tsunami_flush_cache_mm(struct mm_struct *mm);
671extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
672extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
673extern void tsunami_flush_page_to_ram(unsigned long page);
674extern void tsunami_flush_page_for_dma(unsigned long page);
675extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
676extern void tsunami_flush_tlb_all(void);
677extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
678extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
679extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
680extern void tsunami_setup_blockops(void);
681
682/*
683 * Workaround, until we find what's going on with Swift. When low on memory,
684 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
685 * out it is already in page tables/ fault again on the same instruction.
686 * I really don't understand it, have checked it and contexts
687 * are right, flush_tlb_all is done as well, and it faults again...
688 * Strange. -jj
689 *
690 * The following code is a deadwood that may be necessary when
691 * we start to make precise page flushes again. --zaitcev
692 */
/* Intentionally empty: the diagnostic body below is compiled out (see
 * the "deadwood" comment above) and kept only as a reference for
 * debugging Swift's repeated-fault problem. */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(pte) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %p\n",
			    address, pte_val(pte), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}
713
714/* swift.S */
715extern void swift_flush_cache_all(void);
716extern void swift_flush_cache_mm(struct mm_struct *mm);
717extern void swift_flush_cache_range(struct vm_area_struct *vma,
718 unsigned long start, unsigned long end);
719extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
720extern void swift_flush_page_to_ram(unsigned long page);
721extern void swift_flush_page_for_dma(unsigned long page);
722extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
723extern void swift_flush_tlb_all(void);
724extern void swift_flush_tlb_mm(struct mm_struct *mm);
725extern void swift_flush_tlb_range(struct vm_area_struct *vma,
726 unsigned long start, unsigned long end);
727extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
728
#if 0 /* P3: deadwood to debug precise flushes on Swift. */
/* NOTE(review): compiled out; the live swift_flush_tlb_page is the
 * assembly version declared extern above.  Kept as a reference for a
 * context-switched, probe-based per-page flush. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			 /* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif
758
759/*
760 * The following are all MBUS based SRMMU modules, and therefore could
761 * be found in a multiprocessor configuration. On the whole, these
762 * chips seems to be much more touchy about DVMA and page tables
763 * with respect to cache coherency.
764 */
765
766/* Cypress flushes. */
/* Walk every data-cache tag; any line that is both valid and modified
 * is forced out by reading an aliasing address. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		/* Read the cache tag for this 0x20-byte line via
		 * ASI_M_DATAC_TAG. */
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}
784
/*
 * Flush all cache lines belonging to MM's context.  Temporarily
 * switches to MM's context, then sweeps 64KB in 0x100-byte chunks of
 * eight ASI_M_FLUSH_CTX stores each; the goto jumps into the loop so
 * the first pass flushes the top chunk without a prior decrement.
 */
static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}
820
/*
 * Flush cache lines for [start, end) in VMA's mm, one hardware-PMD
 * segment at a time via ASI_M_FLUSH_SEG, under the mm's context.
 * Same goto-into-loop unrolled flush pattern as cypress_flush_cache_mm.
 */
static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	/* Round down to a hardware-PMD boundary and walk segments. */
	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}
862
/*
 * Flush one page's cache lines in VMA's mm via ASI_M_FLUSH_PAGE,
 * under the mm's context.  Same unrolled goto-into-loop flush pattern
 * as the mm/range variants above.
 */
static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (line),
					     "i" (ASI_M_FLUSH_PAGE),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}
901
902/* Cypress is copy-back, at least that is how we configure it. */
/* Cypress is copy-back, at least that is how we configure it.
 * Push one page's dirty lines to RAM with ASI_M_FLUSH_PAGE stores,
 * using the same unrolled goto-into-loop pattern as the flushes above.
 * No context switch here: the flush is by (current-context) address. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}
929
930/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
	/* Nothing to do: Cypress is IO cache coherent (see comment above). */
}
934
935/* Cypress has unified L2 VIPT, from which both instructions and data
936 * are stored. It does not have an onboard icache of any sort, therefore
937 * no flush is necessary.
938 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	/* Nothing to do: no separate icache to sync (see comment above). */
}
942
/* Full TLB invalidation for Cypress: just nuke the whole TLB. */
static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}
947
/*
 * Flush all TLB entries of MM's context: save the current context
 * register, switch to MM's context, issue a context-level flush-probe
 * store (address 0x300), then restore the saved context.
 */
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}
962
/*
 * Flush TLB entries for [start, end) in VMA's mm: under MM's context,
 * issue a region-level flush-probe store (address | 0x200) for each
 * SRMMU_PGDIR_SIZE region, counting down in the asm loop, then
 * restore the previous context register.
 */
static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
	"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}
986
/*
 * Flush the TLB entry for one page in VMA's mm: under MM's context,
 * issue a page-level flush-probe store at the page address, then
 * restore the previous context register.
 */
static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}
1003
1004/* viking.S */
1005extern void viking_flush_cache_all(void);
1006extern void viking_flush_cache_mm(struct mm_struct *mm);
1007extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
1008 unsigned long end);
1009extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
1010extern void viking_flush_page_to_ram(unsigned long page);
1011extern void viking_flush_page_for_dma(unsigned long page);
1012extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
1013extern void viking_flush_page(unsigned long page);
1014extern void viking_mxcc_flush_page(unsigned long page);
1015extern void viking_flush_tlb_all(void);
1016extern void viking_flush_tlb_mm(struct mm_struct *mm);
1017extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
1018 unsigned long end);
1019extern void viking_flush_tlb_page(struct vm_area_struct *vma,
1020 unsigned long page);
1021extern void sun4dsmp_flush_tlb_all(void);
1022extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
1023extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
1024 unsigned long end);
1025extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
1026 unsigned long page);
1027
1028/* hypersparc.S */
1029extern void hypersparc_flush_cache_all(void);
1030extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
1031extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
1032extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
1033extern void hypersparc_flush_page_to_ram(unsigned long page);
1034extern void hypersparc_flush_page_for_dma(unsigned long page);
1035extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
1036extern void hypersparc_flush_tlb_all(void);
1037extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
1038extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
1039extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
1040extern void hypersparc_setup_blockops(void);
1041
1042/*
1043 * NOTE: All of this startup code assumes the low 16mb (approx.) of
1044 * kernel mappings are done with one single contiguous chunk of
1045 * ram. On small ram machines (classics mainly) we only get
1046 * around 8mb mapped for us.
1047 */
1048
/* Fatal: an early boot page-table allocation from the nocache pool
 * failed — report via the PROM and halt.
 * NOTE(review): the message names inherit_prom_mappings, but this is
 * also called from the ptable-skeleton allocators below. */
void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}
1054
/*
 * Populate pgd/pmd/pte tables (no leaf PTEs) covering [start, end).
 * Early variant: runs before the nocache mappings are live, so every
 * access to freshly allocated tables goes through __nocache_fix().
 * Halts via early_pgtable_allocfail() if the nocache pool runs dry.
 */
void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		/* Avoid address wrap-around at the top of the space. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
1084
/*
 * Same as srmmu_early_allocate_ptable_skeleton(), but for after the
 * nocache mappings are live: tables are accessed directly, without
 * __nocache_fix().
 */
void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		/* Avoid address wrap-around at the top of the space. */
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
1114
1115/*
1116 * This is much cleaner than poking around physical address space
1117 * looking at the prom's page table directly which is what most
1118 * other OS's do. Yuck... this is much better.
1119 */
/*
 * Copy the PROM's translations for [start, end] into our page tables
 * by probing the MMU (srmmu_hwprobe) rather than walking the PROM's
 * own tables.  Large PROM mappings are preserved at their level:
 * what == 2 copies a pgd-level (region) mapping, what == 1 a
 * hardware-pmd-level mapping, what == 0 a normal page.  All table
 * writes go through __nocache_fix() (nocache mappings not live yet).
 */
void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		/* No PROM translation here: try the next page. */
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}
    
		/* A red snapper, see what it really is. */
		what = 0;
    
		/* Aligned start + identical probe at the far end of the
		 * unit ⇒ the PROM mapped this as one large entry. */
		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}
    
		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}
    
		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}
1191
1192#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
1193
1194/* Create a third-level SRMMU 16MB page mapping. */
1195static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
1196{
1197 pgd_t *pgdp = pgd_offset_k(vaddr);
1198 unsigned long big_pte;
1199
1200 big_pte = KERNEL_PTE(phys_base >> 4);
1201 *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
1202}
1203
1204/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE, in
 * 16MB (pgd-level) steps, clamped to the low-memory window
 * [PAGE_OFFSET, PAGE_OFFSET + SRMMU_MAXMEM).  Returns the first
 * virtual address past what was mapped. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	/* Bank entirely outside the window: nothing to map. */
	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;
	
	/* Clamp the end (the < check also catches wrap-around). */
	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}
1226
/*
 * Report a memory-probe failure via the PROM and halt.
 *
 * Fix: MSG was passed as the format string itself; a message
 * containing '%' would misdrive prom_printf's format parsing.  Pass
 * it through a "%s" format instead (output is unchanged for all
 * %-free messages).
 */
static inline void memprobe_error(char *msg)
{
	prom_printf("%s", msg);
	prom_printf("Halting now...\n");
	prom_halt();
}
1233
/*
 * Map the kernel image and every physical memory bank into the
 * low-memory window with 16MB pgd-level mappings, then patch the
 * user_ptrs_per_pgd constant via the BTFIXUP mechanism.
 */
static inline void map_kernel(void)
{
	int i;

	/* Kernel not loaded at physical 0: map its base explicitly. */
	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}
1248
1249/* Paging initialization on the Sparc Reference MMU. */
1250extern void sparc_context_init(int);
1251
1252void (*poke_srmmu)(void) __initdata = NULL;
1253
1254extern unsigned long bootmem_init(unsigned long *pages_avail);
1255
/*
 * Paging initialization for the SPARC Reference MMU (sun4m/sun4d).
 * The ordering here is load-bearing: bootmem comes up before the
 * nocache pool, PROM mappings are inherited before the context table
 * goes live, and the chip-specific poke_srmmu() hook runs only after
 * the context table pointer is installed and the TLB flushed.
 */
void __init srmmu_paging_init(void)
{
	int i, cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	/* Keep the PROM's own mappings alive; it still services our calls. */
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	/* Point every context at the kernel's swapper page directory. */
	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	/* Flush, switch the hardware to our context table, flush again,
	 * then let the chip-specific hook enable caches etc. */
	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
	flush_tlb_all();
	poke_srmmu();

#ifdef CONFIG_SUN_IO
	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
#endif

	/* Pre-build page tables for the fixmap and pkmap windows. */
	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	/* Cache the pkmap pte array for the highmem kmap code. */
	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	/* Hand the page ranges to the zone allocator: DMA zone covers
	 * low memory, HIGHMEM the rest; holes are what bootmem did not
	 * report as available. */
	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, &contig_page_data, zones_size,
				    pfn_base, zholes_size);
	}
}
1351
1352static void srmmu_mmu_info(struct seq_file *m)
1353{
1354 seq_printf(m,
1355 "MMU type\t: %s\n"
1356 "contexts\t: %d\n"
1357 "nocache total\t: %ld\n"
1358 "nocache used\t: %d\n",
1359 srmmu_name,
1360 num_contexts,
1361 srmmu_nocache_size,
1362 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
1363}
1364
/* Generic SRMMU has no per-PTE MMU cache to prime; hook is a no-op
 * (Swift overrides this with swift_update_mmu_cache in init_swift). */
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}
1368
/*
 * Tear down a dying mm's MMU context: flush its cache and TLB state,
 * repoint its context table entry at the kernel's swapper pgd, and
 * return the context number to the free pool.  The flushes must run
 * while mm->context is still valid.
 */
static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}
1382
/* Init various srmmu chip types. */

/* Terminal fallback when probing matched no known SRMMU part. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}
1389
/*
 * Probe the PROM device tree for the virtually-addressed cache (VAC)
 * geometry.  On SMP the effective cache size is the largest and the
 * effective line size the smallest found across CPUs, so the flush
 * routines stay conservative for every processor.
 *
 * NOTE(review): the walk starts at prom_getsibling(prom_getchild(...)),
 * so the root's first child is never examined -- presumably never a
 * "cpu" node on these machines; confirm before reusing this pattern.
 */
static void __init init_vac_layout(void)
{
	int nd, cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			/* Track worst-case geometry over all online CPUs. */
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			cpu++;
			if (cpu >= NR_CPUS || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
1441
/*
 * Per-chip enable for ROSS HyperSparc: flush the combined cache, then
 * turn the cache on in copy-back mode with cache-wrap disabled, enable
 * the I-cache via the ICCR, and drain any latched fault state.  The
 * fault-register reads must not be optimized away, hence the volatile
 * 'clear' temporary.
 */
static void __init poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	/* Drain latched fault address/status registers. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}
1464
/*
 * Wire up the HyperSparc cache/TLB flush implementations via btfixup,
 * probe the VAC geometry, and install the HyperSparc enable hook and
 * block-copy/clear operations.
 */
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);


	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
1496
1497static void __init poke_cypress(void)
1498{
1499 unsigned long mreg = srmmu_get_mmureg();
1500 unsigned long faddr, tagval;
1501 volatile unsigned long cypress_sucks;
1502 volatile unsigned long clear;
1503
1504 clear = srmmu_get_faddr();
1505 clear = srmmu_get_fstatus();
1506
1507 if (!(mreg & CYPRESS_CENABLE)) {
1508 for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
1509 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
1510 "sta %%g0, [%0] %2\n\t" : :
1511 "r" (faddr), "r" (0x40000),
1512 "i" (ASI_M_DATAC_TAG));
1513 }
1514 } else {
1515 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
1516 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
1517 "=r" (tagval) :
1518 "r" (faddr), "r" (0x40000),
1519 "i" (ASI_M_DATAC_TAG));
1520
1521 /* If modified and valid, kick it. */
1522 if((tagval & 0x60) == 0x60)
1523 cypress_sucks = *(unsigned long *)
1524 (0xf0020000 + faddr);
1525 }
1526 }
1527
1528 /* And one more, for our good neighbor, Mr. Broken Cypress. */
1529 clear = srmmu_get_faddr();
1530 clear = srmmu_get_fstatus();
1531
1532 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
1533 srmmu_set_mmureg(mreg);
1534}
1535
/*
 * Shared Cypress setup: probe VAC geometry, wire the Cypress flush
 * implementations via btfixup, and install the Cypress enable hook.
 * Called by both the 604 (UP) and 605 (MP) init paths.
 */
static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}
1560
1561static void __init init_cypress_604(void)
1562{
1563 srmmu_name = "ROSS Cypress-604(UP)";
1564 srmmu_modtype = Cypress;
1565 init_cypress_common();
1566}
1567
1568static void __init init_cypress_605(unsigned long mrev)
1569{
1570 srmmu_name = "ROSS Cypress-605(MP)";
1571 if(mrev == 0xe) {
1572 srmmu_modtype = Cypress_vE;
1573 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1574 } else {
1575 if(mrev == 0xd) {
1576 srmmu_modtype = Cypress_vD;
1577 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1578 } else {
1579 srmmu_modtype = Cypress;
1580 }
1581 }
1582 init_cypress_common();
1583}
1584
/*
 * Per-chip enable for Fujitsu Swift: flush everything, then enable the
 * I/D caches with branch folding disabled (see the comment below for
 * why folding must stay off).
 */
static void __init poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, if can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
1607
/* MMU-bypass address of the Swift mask-ID register; the inline asm
 * below loads it via ASI_M_BYPASS and shifts the revision byte down. */
#define SWIFT_MASKID_ADDR 0x10003018

/*
 * Probe the Swift revision, record which hardware bugs this stepping
 * has, and wire up the Swift flush implementations via btfixup.
 */
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	};

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	/* Swift needs a real update_mmu_cache hook, unlike generic SRMMU. */
	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
1686
/* Full cache flush: spill user register windows, then clear the
 * combined I/D flash cache. */
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}
1692
/* mm-scoped cache flush, implemented as a full I/D flash clear;
 * FLUSH_BEGIN/FLUSH_END (defined elsewhere in this file) scope it. */
static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}
1700
/* Range cache flush: no selective primitive is used; falls back to a
 * full I/D flash clear for the vma's mm. */
static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}
1708
/* Page cache flush: I-cache flushed only for executable mappings,
 * D-cache always. */
static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}
1718
/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
	/* Only needed when the cache is configured write-back; the
	 * default build (no TURBOSPARC_WRITEBACK) is write-through,
	 * so this compiles to a no-op. */
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	/* Only flush pages that are actually mapped right now. */
	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}
1730
/* No-op on TurboSparc -- presumably safe given I-cache snooping is
 * enabled in poke_turbosparc(); confirm against the chip manual. */
static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}
1734
/* Make a page visible to DMA by flushing the D-cache. */
static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}
1739
/* All TurboSparc TLB flush variants fall back to a whole-TLB flush. */
static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}
1744
/* mm-scoped TLB flush: whole-TLB flush guarded by FLUSH_BEGIN/END. */
static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
1751
/* Range TLB flush: again a whole-TLB flush; the range is ignored. */
static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
1758
/* Single-page TLB flush: whole-TLB flush; the page address is ignored. */
static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
1765
1766
/*
 * Per-chip enable for Fujitsu TurboSparc: caches are disabled while
 * the cache-control register is reprogrammed (write-through vs
 * write-back is a compile-time choice via TURBOSPARC_WRITEBACK), the
 * external cache is enabled when present, then I/D caches are turned
 * back on with I-cache snooping.
 */
static void __init poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	/* Low three bits identify the external (SE) cache config. */
	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}
1806
/*
 * Wire up the TurboSparc flush implementations via btfixup and install
 * the TurboSparc enable hook.
 */
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}
1829
/*
 * Per-chip enable for TI Tsunami: flush both caches first, drop the
 * TSUNAMI_ITD bit, then enable I and D caches.
 */
static void __init poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}
1840
/*
 * Wire up the Tsunami flush implementations via btfixup, install the
 * Tsunami enable hook and block operations.  Note __flush_page_to_ram
 * is patched to a NOP for this chip.
 */
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}
1871
/*
 * Per-chip enable for TI Viking.  Two configurations: with an MXCC
 * external cache controller (enable its caching of page tables) or
 * without (disable table caching; on the second and later CPUs also
 * turn off mixed-command mode).  On SMP the cache-flush entry points
 * are downgraded to their local variants to avoid cross calls, and
 * the btfixup patching is re-run.
 */
static void __init poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);

#ifdef CONFIG_SMP
	/* Avoid unnecessary cross calls. */
	BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
	BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
	BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
	BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
	BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
	BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
	BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	btfixup();
#endif
}
1925
/*
 * Wire up the Viking flush implementations.  The MMODE bit in the MMU
 * control register distinguishes plain Viking (needs the DMA-snoop
 * workaround) from Viking/MXCC (can cache page tables, no snoop bug).
 * sun4d SMP boxes get their own TLB flush entry points.
 */
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if(mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

		/*
		 * We need this to make sure old viking takes no hits
		 * on it's cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}
1985
/* Probe for the srmmu chip version. */
/*
 * Decode the module type/revision fields of the MMU control register
 * and the implementation/version fields of the PSR, then dispatch to
 * the matching init_*() routine.  An unrecognized part halts via
 * srmmu_is_bad().
 */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for HyperSparc or Cypress. */
	if(mod_typ == 1) {
		switch(mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		};
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if(psr_typ == 0 && psr_vers == 4) {
		int cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if(psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}
2080
/* don't laugh, static pagetables */
/* SRMMU keeps no trimmable page-table cache; hook is a no-op
 * (btfixed to a NOP in ld_mmu_srmmu). */
static void srmmu_check_pgt_cache(int low, int high)
{
}
2085
/* Patchable instruction slots in the window spill/fill, trap-setup
 * and return-from-trap assembly ... */
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

/* ... and the SRMMU stack-check routines they get redirected to. */
extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

/* Overwrite the instruction at &insn with a branch to &dest; relies
 * on iaddr/daddr locals declared in the enclosing function. */
#define PATCH_BRANCH(insn, dest) do { \
		iaddr = &(insn); \
		daddr = &(dest); \
		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
	} while(0)
2099
/*
 * Redirect the window trap handlers and the three MMU fault trap
 * table entries (text fault, data fault, data access) to their SRMMU
 * implementations.
 */
static void __init patch_window_trap_handlers(void)
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}
2112
#ifdef CONFIG_SMP
/* Local cross-calls. */
/* Broadcast the DMA page flush to the other CPUs, then run it locally. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}

#endif
2122
2123static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
2124{
2125 return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
2126}
2127
2128static unsigned long srmmu_pte_to_pgoff(pte_t pte)
2129{
2130 return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
2131}
2132
/* Load up routines and constants for sun4m and sun4d mmu */
/*
 * Entry point for SRMMU support: installs all page-table constants and
 * function pointers through btfixup, probes the chip type, patches the
 * trap handlers, swaps the flush entry points for SMP cross-call
 * variants, and brings up the IOMMU/IO-unit and SMP support.
 */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	/* Page-table geometry constants. */
	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

	/* Protection-bit encodings for the generic mm layer. */
	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;

	/* Functions */
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
	BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_SETHI(none_mask, 0xF0000000);

	/* PTE/PMD/PGD predicates and clear operations. */
	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
	BTFIXUPSET_CALL(pte_read, srmmu_pte_read, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

	/* PTE construction and table-entry setters. */
	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	/* Page-table allocation/free. */
	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	/* PTE attribute bits and the accessors built from them. */
	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	/* Swap-entry encoding. */
	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);

	/* Chip probe installs the chip-specific flush routines above;
	 * then redirect the window traps at the SRMMU handlers. */
	get_srmmu_type();
	patch_window_trap_handlers();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	/* Preserve the chip-selected routines as the local_* variants,
	 * then route the public entry points through the cross-call
	 * smp_* wrappers (sun4d keeps its own TLB entry points). */

	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else
		sun4m_init_smp();
#endif
}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
new file mode 100644
index 000000000000..1d560390e282
--- /dev/null
+++ b/arch/sparc/mm/sun4c.c
@@ -0,0 +1,2276 @@
1/* $Id: sun4c.c,v 1.212 2001/12/21 04:56:15 davem Exp $
2 * sun4c.c: Doing in software what should be done in hardware.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
7 * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org)
8 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#define NR_TASK_BUCKETS 512
12
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/highmem.h>
19#include <linux/fs.h>
20#include <linux/seq_file.h>
21
22#include <asm/scatterlist.h>
23#include <asm/page.h>
24#include <asm/pgalloc.h>
25#include <asm/pgtable.h>
26#include <asm/vaddrs.h>
27#include <asm/idprom.h>
28#include <asm/machines.h>
29#include <asm/memreg.h>
30#include <asm/processor.h>
31#include <asm/auxio.h>
32#include <asm/io.h>
33#include <asm/oplib.h>
34#include <asm/openprom.h>
35#include <asm/mmu_context.h>
36#include <asm/sun4paddr.h>
37#include <asm/highmem.h>
38#include <asm/btfixup.h>
39#include <asm/cacheflush.h>
40#include <asm/tlbflush.h>
41
42/* Because of our dynamic kernel TLB miss strategy, and how
43 * our DVMA mapping allocation works, you _MUST_:
44 *
45 * 1) Disable interrupts _and_ not touch any dynamic kernel
46 * memory while messing with kernel MMU state. By
47 * dynamic memory I mean any object which is not in
48 * the kernel image itself or a thread_union (both of
49 * which are locked into the MMU).
50 * 2) Disable interrupts while messing with user MMU state.
51 */
52
53extern int num_segmaps, num_contexts;
54
55extern unsigned long page_kernel;
56
57#ifdef CONFIG_SUN4
58#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
59#else
60/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
61 * So let's save some cycles and just use that everywhere except for that bootup
62 * sanity check.
63 */
64#define SUN4C_VAC_SIZE 65536
65#endif
66
67#define SUN4C_KERNEL_BUCKETS 32
68
69/* Flushing the cache. */
70struct sun4c_vac_props sun4c_vacinfo;
71unsigned long sun4c_kernel_faults;
72
/* Invalidate every sun4c cache line tag.
 *
 * Walks the tag space (AC_CACHETAGS .. AC_CACHETAGS + SUN4C_VAC_SIZE)
 * one line at a time, storing %g0 through ASI_CONTROL to clear each
 * tag (including its valid bit).  This is only legal while the VAC is
 * disabled, hence the panic guard; it is __init because it is only
 * needed once at boot, from sun4c_probe_vac().
 */
static void __init sun4c_flush_all(void)
{
	unsigned long begin, end;

	if (sun4c_vacinfo.on)
		panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");

	/* Clear 'valid' bit in all cache line tags */
	begin = AC_CACHETAGS;
	end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
	while (begin < end) {
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				     "r" (begin), "i" (ASI_CONTROL));
		begin += sun4c_vacinfo.linesize;
	}
}
90
/* Flush the whole VAC for the current context using the hardware
 * flush ASI.  Counts down from SUN4C_VAC_SIZE in 4096-byte steps;
 * each iteration's "sta %g0" (in the branch delay slot) hits
 * ASI_HWFLUSHCONTEXT at the decremented offset, so the final store
 * lands at offset 0.
 */
static void sun4c_flush_context_hw(void)
{
	unsigned long end = SUN4C_VAC_SIZE;

	__asm__ __volatile__(
		"1:	addcc	%0, -4096, %0\n\t"
		"	bne	1b\n\t"
		"	 sta	%%g0, [%0] %2"
	: "=&r" (end)
	: "0" (end), "i" (ASI_HWFLUSHCONTEXT)
	: "cc");
}
103
/* Must be called minimally with IRQs disabled. */
/* Hardware-assisted flush of the segment containing 'addr'.
 * Skipped entirely when the segmap slot holds invalid_segment (nothing
 * mapped there, so nothing can be cached).  Same countdown/delay-slot
 * structure as sun4c_flush_context_hw, but stores go to
 * ASI_HWFLUSHSEG at addr + offset.
 */
static void sun4c_flush_segment_hw(unsigned long addr)
{
	if (sun4c_get_segmap(addr) != invalid_segment) {
		unsigned long vac_size = SUN4C_VAC_SIZE;

		__asm__ __volatile__(
			"1:	addcc	%0, -4096, %0\n\t"
			"	bne	1b\n\t"
			"	 sta	%%g0, [%2 + %0] %3"
			: "=&r" (vac_size)
			: "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
			: "cc");
	}
}
119
120/* File local boot time fixups. */
121BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
122BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
123BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
124
125#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
126#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
127#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
128
/* Must be called minimally with interrupts disabled. */
/* Hardware-assisted flush of one page.  The "(int)pte < 0" test checks
 * the top bit of the PTE (the valid bit on sun4c) -- only valid
 * mappings need a single ASI_HWFLUSHPAGE store.
 */
static void sun4c_flush_page_hw(unsigned long addr)
{
	addr &= PAGE_MASK;
	if ((int)sun4c_get_pte(addr) < 0)
		__asm__ __volatile__("sta %%g0, [%0] %1"
				     : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
}
137
/* Don't inline the software version as it eats too many cache lines if expanded. */
/* Software VAC flush of the current context, 8-way unrolled.
 * The prologue builds line-size multiples 2x..7x in %g1..%o4 and the
 * loop stride (8 * linesize) in %o5; each pass issues eight
 * ASI_FLUSHCTX stores at consecutive line offsets, counting 'nbytes'
 * down from SUN4C_VAC_SIZE.  The eighth store sits in the delay slot
 * of the "bg" and uses %1 (the pre-decrement base) on purpose.
 */
static void sun4c_flush_context_sw(void)
{
	unsigned long nbytes = SUN4C_VAC_SIZE;
	unsigned long lsize = sun4c_vacinfo.linesize;

	__asm__ __volatile__(
		"add	%2, %2, %%g1\n\t"
		"add	%2, %%g1, %%g2\n\t"
		"add	%2, %%g2, %%g3\n\t"
		"add	%2, %%g3, %%g4\n\t"
		"add	%2, %%g4, %%g5\n\t"
		"add	%2, %%g5, %%o4\n\t"
		"add	%2, %%o4, %%o5\n"
		"1:\n\t"
		"subcc	%0, %%o5, %0\n\t"
		"sta	%%g0, [%0] %3\n\t"
		"sta	%%g0, [%0 + %2] %3\n\t"
		"sta	%%g0, [%0 + %%g1] %3\n\t"
		"sta	%%g0, [%0 + %%g2] %3\n\t"
		"sta	%%g0, [%0 + %%g3] %3\n\t"
		"sta	%%g0, [%0 + %%g4] %3\n\t"
		"sta	%%g0, [%0 + %%g5] %3\n\t"
		"bg	1b\n\t"
		" sta	%%g0, [%1 + %%o4] %3\n"
	: "=&r" (nbytes)
	: "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
	: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
}
167
/* Don't inline the software version as it eats too many cache lines if expanded. */
/* Software flush of the segment containing 'addr' via ASI_FLUSHSEG.
 * Only runs when the segmap slot is actually mapped.  Same 8-way
 * unrolled pattern as sun4c_flush_context_sw, but here 'addr' advances
 * upward by 8*linesize per pass (in the branch delay slot) while
 * 'nbytes' counts down from SUN4C_VAC_SIZE.
 */
static void sun4c_flush_segment_sw(unsigned long addr)
{
	if (sun4c_get_segmap(addr) != invalid_segment) {
		unsigned long nbytes = SUN4C_VAC_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		__asm__ __volatile__(
			"add	%2, %2, %%g1\n\t"
			"add	%2, %%g1, %%g2\n\t"
			"add	%2, %%g2, %%g3\n\t"
			"add	%2, %%g3, %%g4\n\t"
			"add	%2, %%g4, %%g5\n\t"
			"add	%2, %%g5, %%o4\n\t"
			"add	%2, %%o4, %%o5\n"
			"1:\n\t"
			"subcc	%1, %%o5, %1\n\t"
			"sta	%%g0, [%0] %6\n\t"
			"sta	%%g0, [%0 + %2] %6\n\t"
			"sta	%%g0, [%0 + %%g1] %6\n\t"
			"sta	%%g0, [%0 + %%g2] %6\n\t"
			"sta	%%g0, [%0 + %%g3] %6\n\t"
			"sta	%%g0, [%0 + %%g4] %6\n\t"
			"sta	%%g0, [%0 + %%g5] %6\n\t"
			"sta	%%g0, [%0 + %%o4] %6\n\t"
			"bg	1b\n\t"
			" add	%0, %%o5, %0\n"
		: "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
		: "0" (addr), "1" (nbytes), "2" (lsize),
		  "i" (ASI_FLUSHSEG)
		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
201
/* Don't inline the software version as it eats too many cache lines if expanded. */
/* Software flush of one page via ASI_FLUSHPG.  Only pages that are
 * VALID and not NOCACHE can have lines in the VAC, hence the PTE bit
 * test.  Same 8-way unrolled store loop as the segment flush, but the
 * countdown 'left' starts at PAGE_SIZE instead of the full VAC size.
 */
static void sun4c_flush_page_sw(unsigned long addr)
{
	addr &= PAGE_MASK;
	if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
	    _SUN4C_PAGE_VALID) {
		unsigned long left = PAGE_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		__asm__ __volatile__(
			"add	%2, %2, %%g1\n\t"
			"add	%2, %%g1, %%g2\n\t"
			"add	%2, %%g2, %%g3\n\t"
			"add	%2, %%g3, %%g4\n\t"
			"add	%2, %%g4, %%g5\n\t"
			"add	%2, %%g5, %%o4\n\t"
			"add	%2, %%o4, %%o5\n"
			"1:\n\t"
			"subcc	%1, %%o5, %1\n\t"
			"sta	%%g0, [%0] %6\n\t"
			"sta	%%g0, [%0 + %2] %6\n\t"
			"sta	%%g0, [%0 + %%g1] %6\n\t"
			"sta	%%g0, [%0 + %%g2] %6\n\t"
			"sta	%%g0, [%0 + %%g3] %6\n\t"
			"sta	%%g0, [%0 + %%g4] %6\n\t"
			"sta	%%g0, [%0 + %%g5] %6\n\t"
			"sta	%%g0, [%0 + %%o4] %6\n\t"
			"bg	1b\n\t"
			" add	%0, %%o5, %0\n"
		: "=&r" (addr), "=&r" (left), "=&r" (lsize)
		: "0" (addr), "1" (left), "2" (lsize),
		  "i" (ASI_FLUSHPG)
		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
237
/* The sun4c's do have an on chip store buffer. And the way you
 * clear them out isn't so obvious. The only way I can think of
 * to accomplish this is to read the current context register,
 * store the same value there, then read an external hardware
 * register.
 */
void sun4c_complete_all_stores(void)
{
	/* 'volatile' keeps the compiler from discarding these reads,
	 * which exist purely for their hardware side effects.
	 */
	volatile int _unused;

	_unused = sun4c_get_context();
	sun4c_set_context(_unused);
#ifdef CONFIG_SUN_AUXIO
	/* The auxio register read is the "external hardware register"
	 * access that drains the store buffer.
	 */
	_unused = get_auxio();
#endif
}
254
255/* Bootup utility functions. */
256static inline void sun4c_init_clean_segmap(unsigned char pseg)
257{
258 unsigned long vaddr;
259
260 sun4c_put_segmap(0, pseg);
261 for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
262 sun4c_put_pte(vaddr, 0);
263 sun4c_put_segmap(0, invalid_segment);
264}
265
/* Invalidate all user-visible segmap slots in every context, leaving
 * only the kernel image (KERNBASE..kernel_end) and the kadb/PROM
 * window (KADB_DEBUGGER_BEGVM..LINUX_OPPROM_ENDVM) mapped.  Restores
 * the original context when done.
 */
static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char savectx, ctx;

	savectx = sun4c_get_context();
	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
	for (ctx = 0; ctx < num_contexts; ctx++) {
		sun4c_set_context(ctx);
		for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		/* This last loop starts above the PROM and deliberately
		 * relies on vaddr wrapping to 0 to terminate ("vaddr" as
		 * the condition), covering the top of the address space.
		 */
		for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
	}
	sun4c_set_context(savectx);
}
286
/* Determine the VAC geometry (size, line size, write policy, hardware
 * flush capability) for this machine, sanity-check it, invalidate all
 * tags and finally enable the cache.  On sun4 the parameters are hard
 * coded per model; on sun4c they come from PROM properties, with
 * hardcoded fallbacks for the SS1/SS1+ whose PROM lacks them.
 * Halts via the PROM on any unsupported configuration.
 */
void __init sun4c_probe_vac(void)
{
	sun4c_disable_vac();

	if (ARCH_SUN4) {
		switch (idprom->id_machtype) {

		case (SM_SUN4|SM_4_110):
			sun4c_vacinfo.type = VAC_NONE;
			sun4c_vacinfo.num_bytes = 0;
			sun4c_vacinfo.linesize = 0;
			sun4c_vacinfo.do_hwflushes = 0;
			prom_printf("No VAC. Get some bucks and buy a real computer.");
			prom_halt();
			break;

		case (SM_SUN4|SM_4_260):
			sun4c_vacinfo.type = VAC_WRITE_BACK;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 16;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		case (SM_SUN4|SM_4_330):
			sun4c_vacinfo.type = VAC_WRITE_THROUGH;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 16;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		case (SM_SUN4|SM_4_470):
			sun4c_vacinfo.type = VAC_WRITE_BACK;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 32;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		default:
			prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype);
			prom_halt();
		};
	} else {
		sun4c_vacinfo.type = VAC_WRITE_THROUGH;

		if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
		    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
			/* PROM on SS1 lacks this info, to be super safe we
			 * hard code it here since this arch is cast in stone.
			 */
			sun4c_vacinfo.num_bytes = 65536;
			sun4c_vacinfo.linesize = 16;
		} else {
			sun4c_vacinfo.num_bytes =
			 prom_getintdefault(prom_root_node, "vac-size", 65536);
			sun4c_vacinfo.linesize =
			 prom_getintdefault(prom_root_node, "vac-linesize", 16);
		}
		sun4c_vacinfo.do_hwflushes =
		 prom_getintdefault(prom_root_node, "vac-hwflush", 0);

		/* Some PROMs spell the property with an underscore;
		 * try that as a fallback.
		 */
		if (sun4c_vacinfo.do_hwflushes == 0)
			sun4c_vacinfo.do_hwflushes =
			 prom_getintdefault(prom_root_node, "vac_hwflush", 0);

		/* The rest of the code assumes a 64K sun4c VAC
		 * (see the SUN4C_VAC_SIZE comment above).
		 */
		if (sun4c_vacinfo.num_bytes != 65536) {
			prom_printf("WEIRD Sun4C VAC cache size, "
				    "tell sparclinux@vger.kernel.org");
			prom_halt();
		}
	}

	sun4c_vacinfo.num_lines =
		(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
	switch (sun4c_vacinfo.linesize) {
	case 16:
		sun4c_vacinfo.log2lsize = 4;
		break;
	case 32:
		sun4c_vacinfo.log2lsize = 5;
		break;
	default:
		prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
			    sun4c_vacinfo.linesize);
		prom_halt();
	};

	sun4c_flush_all();
	sun4c_enable_vac();
}
376
377/* Patch instructions for the low level kernel fault handler. */
378extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
379extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
380extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
381extern unsigned long num_context_patch1, num_context_patch1_16;
382extern unsigned long num_context_patch2_16;
383extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
384extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
385extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
386
387#define PATCH_INSN(src, dst) do { \
388 daddr = &(dst); \
389 iaddr = &(src); \
390 *daddr = *iaddr; \
391 } while (0)
392
/* Self-modify the low-level fault handler (see the extern patch-site
 * symbols above): copy alternative instruction words over the default
 * ones depending on the probed number of segmaps, number of contexts,
 * hardware-flush capability and VAC line size.  The defaults in the
 * assembly correspond to 128 segmaps / 8 contexts / 16-byte lines, so
 * those cases need no patching.  Halts on unhandled geometries.
 */
static void __init patch_kernel_fault_handler(void)
{
	unsigned long *iaddr, *daddr;

	switch (num_segmaps) {
		case 128:
			/* Default, nothing to do. */
			break;
		case 256:
			PATCH_INSN(invalid_segment_patch1_ff,
				   invalid_segment_patch1);
			PATCH_INSN(invalid_segment_patch2_ff,
				   invalid_segment_patch2);
			break;
		case 512:
			PATCH_INSN(invalid_segment_patch1_1ff,
				   invalid_segment_patch1);
			PATCH_INSN(invalid_segment_patch2_1ff,
				   invalid_segment_patch2);
			break;
		default:
			prom_printf("Unhandled number of segmaps: %d\n",
				    num_segmaps);
			prom_halt();
	};
	switch (num_contexts) {
		case 8:
			/* Default, nothing to do. */
			break;
		case 16:
			PATCH_INSN(num_context_patch1_16,
				   num_context_patch1);
			break;
		default:
			prom_printf("Unhandled number of contexts: %d\n",
				    num_contexts);
			prom_halt();
	};

	if (sun4c_vacinfo.do_hwflushes != 0) {
		PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
		PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
	} else {
		switch (sun4c_vacinfo.linesize) {
		case 16:
			/* Default, nothing to do. */
			break;
		case 32:
			PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
			break;
		default:
			prom_printf("Impossible VAC linesize %d, halting...\n",
				    sun4c_vacinfo.linesize);
			prom_halt();
		};
	}
}
450
/* Determine num_segmaps/num_contexts for this machine (hardcoded per
 * sun4 model, PROM properties on sun4c with SS1/SS1+ fallbacks), then
 * patch the fault handler to match.
 */
static void __init sun4c_probe_mmu(void)
{
	if (ARCH_SUN4) {
		switch (idprom->id_machtype) {
		case (SM_SUN4|SM_4_110):
			prom_printf("No support for 4100 yet\n");
			prom_halt();
			/* NOTE(review): the two stores below are dead code --
			 * prom_halt() does not return.
			 */
			num_segmaps = 256;
			num_contexts = 8;
			break;

		case (SM_SUN4|SM_4_260):
			/* should be 512 segmaps. when it get fixed */
			num_segmaps = 256;
			num_contexts = 16;
			break;

		case (SM_SUN4|SM_4_330):
			num_segmaps = 256;
			num_contexts = 16;
			break;

		case (SM_SUN4|SM_4_470):
			/* should be 1024 segmaps. when it get fixed */
			num_segmaps = 256;
			num_contexts = 64;
			break;
		default:
			prom_printf("Invalid SUN4 model\n");
			prom_halt();
		};
	} else {
		if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
		    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
			/* Hardcode these just to be safe, PROM on SS1 does
			 * not have this info available in the root node.
			 */
			num_segmaps = 128;
			num_contexts = 8;
		} else {
			num_segmaps =
			    prom_getintdefault(prom_root_node, "mmu-npmg", 128);
			num_contexts =
			    prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
		}
	}
	patch_kernel_fault_handler();
}
499
500volatile unsigned long *sun4c_memerr_reg = NULL;
501
502void __init sun4c_probe_memerr_reg(void)
503{
504 int node;
505 struct linux_prom_registers regs[1];
506
507 if (ARCH_SUN4) {
508 sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE);
509 } else {
510 node = prom_getchild(prom_root_node);
511 node = prom_searchsiblings(prom_root_node, "memory-error");
512 if (!node)
513 return;
514 if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0)
515 return;
516 /* hmm I think regs[0].which_io is zero here anyways */
517 sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
518 }
519}
520
521static inline void sun4c_init_ss2_cache_bug(void)
522{
523 extern unsigned long start;
524
525 if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
526 (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
527 (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
528 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
529 /* Whee.. */
530 printk("SS2 cache bug detected, uncaching trap table page\n");
531 sun4c_flush_page((unsigned int) &start);
532 sun4c_put_pte(((unsigned long) &start),
533 (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
534 }
535}
536
537/* Addr is always aligned on a page boundary for us already. */
538static int sun4c_map_dma_area(dma_addr_t *pba, unsigned long va,
539 unsigned long addr, int len)
540{
541 unsigned long page, end;
542
543 *pba = addr;
544
545 end = PAGE_ALIGN((addr + len));
546 while (addr < end) {
547 page = va;
548 sun4c_flush_page(page);
549 page -= PAGE_OFFSET;
550 page >>= PAGE_SHIFT;
551 page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
552 _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
553 sun4c_put_pte(addr, page);
554 addr += PAGE_SIZE;
555 va += PAGE_SIZE;
556 }
557
558 return 0;
559}
560
561static struct page *sun4c_translate_dvma(unsigned long busa)
562{
563 /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
564 unsigned long pte = sun4c_get_pte(busa);
565 return pfn_to_page(pte & SUN4C_PFN_MASK);
566}
567
/* Tear down a DVMA mapping made by sun4c_map_dma_area().
 * Intentionally a no-op stub: the PTEs are simply left in place.
 */
static void sun4c_unmap_dma_area(unsigned long busa, int len)
{
	/* Fortunately for us, bus_addr == uncached_virt in sun4c. */
	/* XXX Implement this */
}
573
574/* TLB management. */
575
576/* Don't change this struct without changing entry.S. This is used
577 * in the in-window kernel fault handler, and you don't want to mess
578 * with that. (See sun4c_fault in entry.S).
579 */
struct sun4c_mmu_entry {
	struct sun4c_mmu_entry *next;	/* ring linkage (free/used rings) */
	struct sun4c_mmu_entry *prev;
	unsigned long vaddr;		/* virtual address this pseg maps */
	unsigned char pseg;		/* physical segmap number */
	unsigned char locked;		/* 1 = never stolen/recycled */

	/* For user mappings only, and completely hidden from kernel
	 * TLB miss code.
	 */
	unsigned char ctx;		/* owning MMU context */
	struct sun4c_mmu_entry *lru_next;	/* user LRU ring linkage */
	struct sun4c_mmu_entry *lru_prev;
};
594
595static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
596
597static void __init sun4c_init_mmu_entry_pool(void)
598{
599 int i;
600
601 for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
602 mmu_entry_pool[i].pseg = i;
603 mmu_entry_pool[i].next = NULL;
604 mmu_entry_pool[i].prev = NULL;
605 mmu_entry_pool[i].vaddr = 0;
606 mmu_entry_pool[i].locked = 0;
607 mmu_entry_pool[i].ctx = 0;
608 mmu_entry_pool[i].lru_next = NULL;
609 mmu_entry_pool[i].lru_prev = NULL;
610 }
611 mmu_entry_pool[invalid_segment].locked = 1;
612}
613
614static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
615 unsigned long bits_off)
616{
617 unsigned long start, end;
618
619 end = vaddr + SUN4C_REAL_PGDIR_SIZE;
620 for (start = vaddr; start < end; start += PAGE_SIZE)
621 if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
622 sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
623 ~bits_off);
624}
625
/* Lock down the segments that back the kernel image and the
 * kadb/PROM window: mark their pool entries locked, replicate the
 * segmap into every context via the PROM, and fix PTE protections
 * (kernel pages additionally lose NOCACHE).
 * NOTE: the #ifdef CONFIG_SUN4 braces wrap the PROM-window loop in an
 * if() only on sun4 -- sun4/110 and 260 have no kadb.  Mind the brace
 * pairing across the #ifdefs when editing.
 */
static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char pseg, ctx;
#ifdef CONFIG_SUN4
	/* sun4/110 and 260 have no kadb. */
	if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
	    (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
#endif
	for (vaddr = KADB_DEBUGGER_BEGVM;
	     vaddr < LINUX_OPPROM_ENDVM;
	     vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		if (pseg != invalid_segment) {
			mmu_entry_pool[pseg].locked = 1;
			for (ctx = 0; ctx < num_contexts; ctx++)
				prom_putsegment(ctx, vaddr, pseg);
			fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
		}
	}
#ifdef CONFIG_SUN4
	}
#endif
	for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		mmu_entry_pool[pseg].locked = 1;
		for (ctx = 0; ctx < num_contexts; ctx++)
			prom_putsegment(ctx, vaddr, pseg);
		fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
	}
}
657
658static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
659{
660 int i, ctx;
661
662 while (start < end) {
663 for (i = 0; i < invalid_segment; i++)
664 if (!mmu_entry_pool[i].locked)
665 break;
666 mmu_entry_pool[i].locked = 1;
667 sun4c_init_clean_segmap(i);
668 for (ctx = 0; ctx < num_contexts; ctx++)
669 prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
670 start += SUN4C_REAL_PGDIR_SIZE;
671 }
672}
673
674/* Don't change this struct without changing entry.S. This is used
675 * in the in-window kernel fault handler, and you don't want to mess
676 * with that. (See sun4c_fault in entry.S).
677 */
struct sun4c_mmu_ring {
	struct sun4c_mmu_entry ringhd;	/* sentinel head; never holds data */
	int num_entries;		/* entries on this ring (head excluded) */
};
682
683static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
684static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
685static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */
686struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
687struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
688
689static inline void sun4c_init_rings(void)
690{
691 int i;
692
693 for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
694 sun4c_context_ring[i].ringhd.next =
695 sun4c_context_ring[i].ringhd.prev =
696 &sun4c_context_ring[i].ringhd;
697 sun4c_context_ring[i].num_entries = 0;
698 }
699 sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
700 &sun4c_ufree_ring.ringhd;
701 sun4c_ufree_ring.num_entries = 0;
702 sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
703 &sun4c_ulru_ring.ringhd;
704 sun4c_ulru_ring.num_entries = 0;
705 sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
706 &sun4c_kernel_ring.ringhd;
707 sun4c_kernel_ring.num_entries = 0;
708 sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
709 &sun4c_kfree_ring.ringhd;
710 sun4c_kfree_ring.num_entries = 0;
711}
712
713static void add_ring(struct sun4c_mmu_ring *ring,
714 struct sun4c_mmu_entry *entry)
715{
716 struct sun4c_mmu_entry *head = &ring->ringhd;
717
718 entry->prev = head;
719 (entry->next = head->next)->prev = entry;
720 head->next = entry;
721 ring->num_entries++;
722}
723
724static __inline__ void add_lru(struct sun4c_mmu_entry *entry)
725{
726 struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
727 struct sun4c_mmu_entry *head = &ring->ringhd;
728
729 entry->lru_next = head;
730 (entry->lru_prev = head->lru_prev)->lru_next = entry;
731 head->lru_prev = entry;
732}
733
734static void add_ring_ordered(struct sun4c_mmu_ring *ring,
735 struct sun4c_mmu_entry *entry)
736{
737 struct sun4c_mmu_entry *head = &ring->ringhd;
738 unsigned long addr = entry->vaddr;
739
740 while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
741 head = head->next;
742
743 entry->prev = head;
744 (entry->next = head->next)->prev = entry;
745 head->next = entry;
746 ring->num_entries++;
747
748 add_lru(entry);
749}
750
751static __inline__ void remove_ring(struct sun4c_mmu_ring *ring,
752 struct sun4c_mmu_entry *entry)
753{
754 struct sun4c_mmu_entry *next = entry->next;
755
756 (next->prev = entry->prev)->next = next;
757 ring->num_entries--;
758}
759
760static void remove_lru(struct sun4c_mmu_entry *entry)
761{
762 struct sun4c_mmu_entry *next = entry->lru_next;
763
764 (next->lru_prev = entry->lru_prev)->lru_next = next;
765}
766
767static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
768{
769 remove_ring(sun4c_context_ring+ctx, entry);
770 remove_lru(entry);
771 add_ring(&sun4c_ufree_ring, entry);
772}
773
774static void free_kernel_entry(struct sun4c_mmu_entry *entry,
775 struct sun4c_mmu_ring *ring)
776{
777 remove_ring(ring, entry);
778 add_ring(&sun4c_kfree_ring, entry);
779}
780
781static void __init sun4c_init_fill_kernel_ring(int howmany)
782{
783 int i;
784
785 while (howmany) {
786 for (i = 0; i < invalid_segment; i++)
787 if (!mmu_entry_pool[i].locked)
788 break;
789 mmu_entry_pool[i].locked = 1;
790 sun4c_init_clean_segmap(i);
791 add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
792 howmany--;
793 }
794}
795
796static void __init sun4c_init_fill_user_ring(void)
797{
798 int i;
799
800 for (i = 0; i < invalid_segment; i++) {
801 if (mmu_entry_pool[i].locked)
802 continue;
803 sun4c_init_clean_segmap(i);
804 add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
805 }
806}
807
808static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
809{
810 int savectx, ctx;
811
812 savectx = sun4c_get_context();
813 for (ctx = 0; ctx < num_contexts; ctx++) {
814 sun4c_set_context(ctx);
815 sun4c_put_segmap(kentry->vaddr, invalid_segment);
816 }
817 sun4c_set_context(savectx);
818}
819
820static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
821{
822 int savectx, ctx;
823
824 savectx = sun4c_get_context();
825 for (ctx = 0; ctx < num_contexts; ctx++) {
826 sun4c_set_context(ctx);
827 sun4c_put_segmap(kentry->vaddr, kentry->pseg);
828 }
829 sun4c_set_context(savectx);
830}
831
832#define sun4c_user_unmap(__entry) \
833 sun4c_put_segmap((__entry)->vaddr, invalid_segment)
834
/* Evict every segment entry belonging to context 'ctx': switch to
 * that context, flush the whole VAC once, then unmap each entry and
 * return it to the user free ring.  Runs with IRQs disabled; the
 * ordering (windows flushed, context set, cache flushed, THEN segmaps
 * invalidated) is load-bearing -- do not reorder.
 */
static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
	struct sun4c_mmu_entry *head = &crp->ringhd;
	unsigned long flags;

	local_irq_save(flags);
	if (head->next != head) {
		struct sun4c_mmu_entry *entry = head->next;
		int savectx = sun4c_get_context();

		flush_user_windows();
		sun4c_set_context(ctx);
		sun4c_flush_context();
		do {
			struct sun4c_mmu_entry *next = entry->next;

			sun4c_user_unmap(entry);
			free_user_entry(ctx, entry);

			entry = next;
		} while (entry != head);
		sun4c_set_context(savectx);
	}
	local_irq_restore(flags);
}
860
861static int sun4c_user_taken_entries; /* This is how much we have. */
862static int max_user_taken_entries; /* This limits us and prevents deadlock. */
863
/* Choose a segment entry for a new kernel mapping.  Prefer the kernel
 * free ring; if it is empty, evict the oldest in-use kernel entry
 * (tail of sun4c_kernel_ring): flush its cache lines, unmap it from
 * all contexts and recycle it through the free ring.  Note the
 * returned entry is still ON the kfree ring -- callers unlink it.
 */
static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
{
	struct sun4c_mmu_entry *this_entry;

	/* If some are free, return first one. */
	if (sun4c_kfree_ring.num_entries) {
		this_entry = sun4c_kfree_ring.ringhd.next;
		return this_entry;
	}

	/* Else free one up. */
	this_entry = sun4c_kernel_ring.ringhd.prev;
	sun4c_flush_segment(this_entry->vaddr);
	sun4c_kernel_unmap(this_entry);
	free_kernel_entry(this_entry, &sun4c_kernel_ring);
	this_entry = sun4c_kfree_ring.ringhd.next;

	return this_entry;
}
883
/* Using this method to free up mmu entries eliminates a lot of
 * potential races since we have a kernel that incurs tlb
 * replacement faults. There may be performance penalties.
 *
 * NOTE: Must be called with interrupts disabled.
 */
/* Choose a segment entry for a new user mapping, already unlinked
 * from whatever ring it was on.  Order of preference:
 *   1) the user free ring,
 *   2) an entry previously lent to the kernel (via kernel strategy),
 *   3) eviction of the least-recently-used user entry, which requires
 *      switching to its context to flush and unmap it.
 */
static struct sun4c_mmu_entry *sun4c_user_strategy(void)
{
	struct sun4c_mmu_entry *entry;
	unsigned char ctx;
	int savectx;

	/* If some are free, return first one. */
	if (sun4c_ufree_ring.num_entries) {
		entry = sun4c_ufree_ring.ringhd.next;
		goto unlink_out;
	}

	if (sun4c_user_taken_entries) {
		entry = sun4c_kernel_strategy();
		sun4c_user_taken_entries--;
		goto kunlink_out;
	}

	/* Grab from the beginning of the LRU list. */
	entry = sun4c_ulru_ring.ringhd.lru_next;
	ctx = entry->ctx;

	savectx = sun4c_get_context();
	flush_user_windows();
	sun4c_set_context(ctx);
	sun4c_flush_segment(entry->vaddr);
	sun4c_user_unmap(entry);
	remove_ring(sun4c_context_ring + ctx, entry);
	remove_lru(entry);
	sun4c_set_context(savectx);

	return entry;

unlink_out:
	remove_ring(&sun4c_ufree_ring, entry);
	return entry;
kunlink_out:
	remove_ring(&sun4c_kfree_ring, entry);
	return entry;
}
930
931/* NOTE: Must be called with interrupts disabled. */
932void sun4c_grow_kernel_ring(void)
933{
934 struct sun4c_mmu_entry *entry;
935
936 /* Prevent deadlock condition. */
937 if (sun4c_user_taken_entries >= max_user_taken_entries)
938 return;
939
940 if (sun4c_ufree_ring.num_entries) {
941 entry = sun4c_ufree_ring.ringhd.next;
942 remove_ring(&sun4c_ufree_ring, entry);
943 add_ring(&sun4c_kfree_ring, entry);
944 sun4c_user_taken_entries++;
945 }
946}
947
948/* 2 page buckets for task struct and kernel stack allocation.
949 *
950 * TASK_STACK_BEGIN
951 * bucket[0]
952 * bucket[1]
953 * [ ... ]
954 * bucket[NR_TASK_BUCKETS-1]
955 * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
956 *
957 * Each slot looks like:
958 *
959 * page 1 -- task struct + beginning of kernel stack
960 * page 2 -- rest of kernel stack
961 */
962
963union task_union *sun4c_bucket[NR_TASK_BUCKETS];
964
965static int sun4c_lowbucket_avail;
966
967#define BUCKET_EMPTY ((union task_union *) 0)
968#define BUCKET_SHIFT (PAGE_SHIFT + 1) /* log2(sizeof(struct task_bucket)) */
969#define BUCKET_SIZE (1 << BUCKET_SHIFT)
970#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
971#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
972#define BUCKET_PTE(page) \
973 ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
974#define BUCKET_PTE_PAGE(pte) \
975 (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
976
/* Steal a segment from user space and lock it to the kernel at the
 * segment-aligned address containing 'addr'.  The stolen entry does
 * not go back on any ring, so it is permanently out of circulation
 * until free_locked_segment(); max_user_taken_entries shrinks to
 * match.  IRQs are disabled for the whole MMU manipulation.
 */
static void get_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *stolen;
	unsigned long flags;

	local_irq_save(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	stolen = sun4c_user_strategy();
	max_user_taken_entries--;
	stolen->vaddr = addr;
	flush_user_windows();
	sun4c_kernel_map(stolen);
	local_irq_restore(flags);
}
991
/* Undo get_locked_segment(): look up the pool entry by reading the
 * hardware segmap at 'addr', flush and unmap the segment from all
 * contexts, and return the entry to the user free ring, restoring
 * max_user_taken_entries.
 */
static void free_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;
	unsigned char pseg;

	local_irq_save(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	pseg = sun4c_get_segmap(addr);
	entry = &mmu_entry_pool[pseg];

	flush_user_windows();
	sun4c_flush_segment(addr);
	sun4c_kernel_unmap(entry);
	add_ring(&sun4c_ufree_ring, entry);
	max_user_taken_entries++;
	local_irq_restore(flags);
}
1010
1011static inline void garbage_collect(int entry)
1012{
1013 int start, end;
1014
1015 /* 32 buckets per segment... */
1016 entry &= ~31;
1017 start = entry;
1018 for (end = (start + 32); start < end; start++)
1019 if (sun4c_bucket[start] != BUCKET_EMPTY)
1020 return;
1021
1022 /* Entire segment empty, release it. */
1023 free_locked_segment(BUCKET_ADDR(entry));
1024}
1025
/* Allocate a thread_info/kernel-stack pair and alias it into a fixed
 * "bucket" slot in the locked segment area, so the in-window fault
 * handler never faults on it.  Returns the bucket virtual address, or
 * NULL if pages or bucket slots run out.
 */
static struct thread_info *sun4c_alloc_thread_info(void)
{
	unsigned long addr, pages;
	int entry;

	pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
	if (!pages)
		return NULL;

	/* Find the lowest free bucket; sun4c_lowbucket_avail caches a
	 * lower bound so the scan rarely starts from zero.
	 */
	for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
		if (sun4c_bucket[entry] == BUCKET_EMPTY)
			break;
	if (entry == NR_TASK_BUCKETS) {
		free_pages(pages, THREAD_INFO_ORDER);
		return NULL;
	}
	if (entry >= sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry + 1;

	addr = BUCKET_ADDR(entry);
	sun4c_bucket[entry] = (union task_union *) addr;
	if(sun4c_get_segmap(addr) == invalid_segment)
		get_locked_segment(addr);

	/* We are changing the virtual color of the page(s)
	 * so we must flush the cache to guarantee consistency.
	 */
	sun4c_flush_page(pages);
#ifndef CONFIG_SUN4
	/* On sun4c a bucket spans two pages; sun4 maps only one. */
	sun4c_flush_page(pages + PAGE_SIZE);
#endif

	sun4c_put_pte(addr, BUCKET_PTE(pages));
#ifndef CONFIG_SUN4
	sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
#endif

#ifdef CONFIG_DEBUG_STACK_USAGE
	memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return (struct thread_info *) addr;
}
1069
/* Release a thread_info allocated by sun4c_alloc_thread_info():
 * recover the backing pages from the bucket PTE, flush and tear down
 * the alias mapping, mark the bucket empty (lowering the scan hint if
 * needed), free the pages, and possibly give the whole segment back.
 */
static void sun4c_free_thread_info(struct thread_info *ti)
{
	unsigned long tiaddr = (unsigned long) ti;
	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr));
	int entry = BUCKET_NUM(tiaddr);

	/* We are deleting a mapping, so the flush here is mandatory. */
	sun4c_flush_page(tiaddr);
#ifndef CONFIG_SUN4
	sun4c_flush_page(tiaddr + PAGE_SIZE);
#endif
	sun4c_put_pte(tiaddr, 0);
#ifndef CONFIG_SUN4
	sun4c_put_pte(tiaddr + PAGE_SIZE, 0);
#endif
	sun4c_bucket[entry] = BUCKET_EMPTY;
	if (entry < sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry;

	free_pages(pages, THREAD_INFO_ORDER);
	garbage_collect(entry);
}
1092
1093static void __init sun4c_init_buckets(void)
1094{
1095 int entry;
1096
1097 if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) {
1098 extern void thread_info_size_is_bolixed_pete(void);
1099 thread_info_size_is_bolixed_pete();
1100 }
1101
1102 for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
1103 sun4c_bucket[entry] = BUCKET_EMPTY;
1104 sun4c_lowbucket_avail = 0;
1105}
1106
/* DVMA/I/O buffer window state: the window runs from
 * sun4c_iobuffer_start to sun4c_iobuffer_end; segments are locked in
 * lazily up to sun4c_iobuffer_high.  sun4c_iobuffer_map is a bitmap
 * with one bit per page of the window (iobuffer_map_size bits).
 */
static unsigned long sun4c_iobuffer_start;
static unsigned long sun4c_iobuffer_end;
static unsigned long sun4c_iobuffer_high;
static unsigned long *sun4c_iobuffer_map;
static int iobuffer_map_size;
1112
/*
 * Alias our pages so they do not cause a trap.
 * Also one page may be aliased into several I/O areas and we may
 * finish these I/O separately.
 *
 * First-fit a run of free pages in the I/O window bitmap, lock in
 * any segments needed to cover it, and install non-cacheable aliases
 * of vaddr's pages there.  Returns the alias address with vaddr's
 * in-page offset preserved; panics if the window is exhausted.
 */
static char *sun4c_lockarea(char *vaddr, unsigned long size)
{
	unsigned long base, scan;
	unsigned long npages;
	unsigned long vpage;
	unsigned long pte;
	unsigned long apage;
	unsigned long high;
	unsigned long flags;

	/* Pages needed to cover [vaddr, vaddr+size), including the
	 * partial page at either end.
	 */
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	scan = 0;
	local_irq_save(flags);
	for (;;) {
		/* Look for npages consecutive clear bits. */
		scan = find_next_zero_bit(sun4c_iobuffer_map,
					  iobuffer_map_size, scan);
		if ((base = scan) + npages > iobuffer_map_size) goto abend;
		for (;;) {
			if (scan >= base + npages) goto found;
			if (test_bit(scan, sun4c_iobuffer_map)) break;
			scan++;
		}
	}

found:
	/* Lock in enough segments to reach the end of the run. */
	high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
	high = SUN4C_REAL_PGDIR_ALIGN(high);
	while (high > sun4c_iobuffer_high) {
		get_locked_segment(sun4c_iobuffer_high);
		sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
	}

	/* Install a non-cacheable alias pte for each page. */
	vpage = ((unsigned long) vaddr) & PAGE_MASK;
	for (scan = base; scan < base+npages; scan++) {
		pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
		pte |= pgprot_val(SUN4C_PAGE_KERNEL);
		pte |= _SUN4C_PAGE_NOCACHE;
		set_bit(scan, sun4c_iobuffer_map);
		apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;

		/* Flush original mapping so we see the right things later. */
		sun4c_flush_page(vpage);

		sun4c_put_pte(apage, pte);
		vpage += PAGE_SIZE;
	}
	local_irq_restore(flags);
	return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
			 (((unsigned long) vaddr) & ~PAGE_MASK));

abend:
	local_irq_restore(flags);
	printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
	panic("Out of iobuffer table");
	return NULL;
}
1176
/* Tear down the aliases installed by sun4c_lockarea() and, when the
 * top of the window has drained, release locked segments again.
 */
static void sun4c_unlockarea(char *vaddr, unsigned long size)
{
	unsigned long vpage, npages;
	unsigned long flags;
	int scan, high;

	vpage = (unsigned long)vaddr & PAGE_MASK;
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	local_irq_save(flags);
	while (npages != 0) {
		--npages;

		/* This mapping is marked non-cachable, no flush necessary. */
		sun4c_put_pte(vpage, 0);
		clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
			  sun4c_iobuffer_map);
		vpage += PAGE_SIZE;
	}

	/* garbage collect: step down one 32-bit bitmap word at a time
	 * past fully-free words, then unlock every whole segment above
	 * the highest page still in use.
	 */
	scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
	while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
		scan -= 32;
	scan += 32;
	high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
	high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
	while (high < sun4c_iobuffer_high) {
		sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
		free_locked_segment(sun4c_iobuffer_high);
	}
	local_irq_restore(flags);
}
1211
/* Note the scsi code at init time passes to here buffers
 * which sit on the kernel stack, those are already locked
 * by implication and fool the page locking code above
 * if passed to by mistake.
 */
static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long page;

	page = ((unsigned long)bufptr) & PAGE_MASK;
	if (!virt_addr_valid(page)) {
		/* Not in the linear map (e.g. a stack bucket); just
		 * make the cache consistent and use it directly.
		 */
		sun4c_flush_page(page);
		return (__u32)bufptr; /* already locked */
	}
	return (__u32)sun4c_lockarea(bufptr, len);
}
1228
1229static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
1230{
1231 while (sz != 0) {
1232 --sz;
1233 sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
1234 sg[sz].dvma_length = sg[sz].length;
1235 }
1236}
1237
/* Undo sun4c_get_scsi_one(): only addresses inside the I/O window
 * were actually aliased and need unlocking.
 */
static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
{
	if (bufptr < sun4c_iobuffer_start)
		return; /* On kernel stack or similar, see above */
	sun4c_unlockarea((char *)bufptr, len);
}
1244
1245static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
1246{
1247 while (sz != 0) {
1248 --sz;
1249 sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
1250 }
1251}
1252
/* Size of one kernel-stack bucket entry in the locked area. */
#define TASK_ENTRY_SIZE BUCKET_SIZE /* see above */
/* Round x up to the next multiple of sizeof(long). */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/* vma registered with init_mm to cover the kernel-stack buckets. */
struct vm_area_struct sun4c_kstack_vma;
1257
/* Lay out the locked-mapping window: kernel-stack buckets first, the
 * DVMA/I/O buffer area (with its allocation bitmap) after it, and a
 * vma for the bucket range registered with init_mm.
 */
static void __init sun4c_init_lock_areas(void)
{
	unsigned long sun4c_taskstack_start;
	unsigned long sun4c_taskstack_end;
	int bitmap_size;

	sun4c_init_buckets();
	sun4c_taskstack_start = SUN4C_LOCK_VADDR;
	sun4c_taskstack_end = (sun4c_taskstack_start +
			       (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
	if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
		prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
		prom_halt();
	}

	sun4c_iobuffer_start = sun4c_iobuffer_high =
				SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
	sun4c_iobuffer_end = SUN4C_LOCK_END;
	/* One bit per page of the window, rounded up to whole longs. */
	bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
	bitmap_size = (bitmap_size + 7) >> 3;
	bitmap_size = LONG_ALIGN(bitmap_size);
	iobuffer_map_size = bitmap_size << 3;
	sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
	memset((void *) sun4c_iobuffer_map, 0, bitmap_size);

	sun4c_kstack_vma.vm_mm = &init_mm;
	sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
	sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
	sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
	sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	insert_vm_struct(&init_mm, &sun4c_kstack_vma);
}
1290
/* Cache flushing on the sun4c. */
/* Flush the whole VAC by reading through a VAC-sized window of
 * kernel address space, 16 loads per asm block, stepping by the
 * cache line size (32 or 16 bytes).
 */
static void sun4c_flush_cache_all(void)
{
	unsigned long begin, end;

	flush_user_windows();
	begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
	end = (begin + SUN4C_VAC_SIZE);

	if (sun4c_vacinfo.linesize == 32) {
		while (begin < end) {
			__asm__ __volatile__(
			"ld	[%0 + 0x00], %%g0\n\t"
			"ld	[%0 + 0x20], %%g0\n\t"
			"ld	[%0 + 0x40], %%g0\n\t"
			"ld	[%0 + 0x60], %%g0\n\t"
			"ld	[%0 + 0x80], %%g0\n\t"
			"ld	[%0 + 0xa0], %%g0\n\t"
			"ld	[%0 + 0xc0], %%g0\n\t"
			"ld	[%0 + 0xe0], %%g0\n\t"
			"ld	[%0 + 0x100], %%g0\n\t"
			"ld	[%0 + 0x120], %%g0\n\t"
			"ld	[%0 + 0x140], %%g0\n\t"
			"ld	[%0 + 0x160], %%g0\n\t"
			"ld	[%0 + 0x180], %%g0\n\t"
			"ld	[%0 + 0x1a0], %%g0\n\t"
			"ld	[%0 + 0x1c0], %%g0\n\t"
			"ld	[%0 + 0x1e0], %%g0\n"
			: : "r" (begin));
			begin += 512;
		}
	} else {
		while (begin < end) {
			__asm__ __volatile__(
			"ld	[%0 + 0x00], %%g0\n\t"
			"ld	[%0 + 0x10], %%g0\n\t"
			"ld	[%0 + 0x20], %%g0\n\t"
			"ld	[%0 + 0x30], %%g0\n\t"
			"ld	[%0 + 0x40], %%g0\n\t"
			"ld	[%0 + 0x50], %%g0\n\t"
			"ld	[%0 + 0x60], %%g0\n\t"
			"ld	[%0 + 0x70], %%g0\n\t"
			"ld	[%0 + 0x80], %%g0\n\t"
			"ld	[%0 + 0x90], %%g0\n\t"
			"ld	[%0 + 0xa0], %%g0\n\t"
			"ld	[%0 + 0xb0], %%g0\n\t"
			"ld	[%0 + 0xc0], %%g0\n\t"
			"ld	[%0 + 0xd0], %%g0\n\t"
			"ld	[%0 + 0xe0], %%g0\n\t"
			"ld	[%0 + 0xf0], %%g0\n"
			: : "r" (begin));
			begin += 256;
		}
	}
}
1346
/* Flush everything cached for mm's context: switch to the context,
 * do a context flush, then unmap and free each of its user psegs.
 */
static void sun4c_flush_cache_mm(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		flush_user_windows();

		if (sun4c_context_ring[new_ctx].num_entries) {
			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
			unsigned long flags;

			local_irq_save(flags);
			if (head->next != head) {
				struct sun4c_mmu_entry *entry = head->next;
				int savectx = sun4c_get_context();

				sun4c_set_context(new_ctx);
				sun4c_flush_context();
				/* Walk the ring, tearing down every entry. */
				do {
					struct sun4c_mmu_entry *next = entry->next;

					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);

					entry = next;
				} while (entry != head);
				sun4c_set_context(savectx);
			}
			local_irq_restore(flags);
		}
	}
}
1379
/* Flush a virtual range of mm's context: psegs overlapping the range
 * are either page-flushed (overlap of at most 8 pages) or segment
 * flushed and torn down entirely.
 */
static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		flush_user_windows();

		local_irq_save(flags);
		/* All user segmap chains are ordered on entry->vaddr. */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();
			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if (end < realend)
					realend = end;
				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					/* Small overlap: flush page by page,
					 * keep the pseg mapped.
					 */
					unsigned long page = entry->vaddr;
					while (page < realend) {
						sun4c_flush_page(page);
						page += PAGE_SIZE;
					}
				} else {
					/* Large overlap: flush the whole
					 * segment and release the pseg.
					 */
					sun4c_flush_segment(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		local_irq_restore(flags);
	}
}
1439
/* Flush one page's cache lines within the page's context. */
static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if (new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();
		unsigned long flags;

		flush_user_windows();
		local_irq_save(flags);
		sun4c_set_context(new_ctx);
		sun4c_flush_page(page);
		sun4c_set_context(octx);
		local_irq_restore(flags);
	}
}
1460
/* Flush a single kernel page from the VAC with interrupts disabled. */
static void sun4c_flush_page_to_ram(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	sun4c_flush_page(page);
	local_irq_restore(flags);
}
1469
/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	/* intentionally empty */
}
1476
/* TLB flushing on the sun4c. These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */

/* Drop every kernel pseg: flush each segment, invalidate its segmap
 * in all hardware contexts, and return the entry to the free ring.
 */
static void sun4c_flush_tlb_all(void)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	unsigned long flags;
	int savectx, ctx;

	local_irq_save(flags);
	this_entry = sun4c_kernel_ring.ringhd.next;
	savectx = sun4c_get_context();
	flush_user_windows();
	while (sun4c_kernel_ring.num_entries) {
		next_entry = this_entry->next;
		sun4c_flush_segment(this_entry->vaddr);
		/* Kernel segmaps are replicated in every context. */
		for (ctx = 0; ctx < num_contexts; ctx++) {
			sun4c_set_context(ctx);
			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
		}
		free_kernel_entry(this_entry, &sun4c_kernel_ring);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
	local_irq_restore(flags);
}
1505
/* Invalidate all translations of mm's context: context flush, then
 * unmap and free every user pseg on its ring.
 */
static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		local_irq_save(flags);
		if (head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			sun4c_flush_context();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while (entry != head);
			sun4c_set_context(savectx);
		}
		local_irq_restore(flags);
	}
}
1534
/* Invalidate all translations in [start, end) of mm's context by
 * flushing and releasing every pseg overlapping the range.
 */
static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		local_irq_save(flags);
		/* See commentary in sun4c_flush_cache_range(). */
		for (entry = head->next;
		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		     entry = entry->next)
			;

		if ((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while ((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		local_irq_restore(flags);
	}
}
1570
/* Flush and invalidate one page's translation in its context. */
static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();
		unsigned long flags;

		local_irq_save(flags);
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		/* Flush before zapping the pte: the mapping is still
		 * cacheable until we remove it.
		 */
		sun4c_flush_page(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
		local_irq_restore(flags);
	}
}
1589
1590static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
1591{
1592 unsigned long page_entry;
1593
1594 page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
1595 page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
1596 sun4c_put_pte(virt_addr, page_entry);
1597}
1598
1599static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
1600 unsigned long xva, unsigned int len)
1601{
1602 while (len != 0) {
1603 len -= PAGE_SIZE;
1604 sun4c_mapioaddr(xpa, xva);
1605 xva += PAGE_SIZE;
1606 xpa += PAGE_SIZE;
1607 }
1608}
1609
1610static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
1611{
1612 while (len != 0) {
1613 len -= PAGE_SIZE;
1614 sun4c_put_pte(virt_addr, 0);
1615 virt_addr += PAGE_SIZE;
1616 }
1617}
1618
/* Give mm a hardware context.  Take one from the free list when
 * possible; otherwise steal the least recently used context (but
 * never old_mm's, which the caller is switching away from) and
 * demap everything its previous owner had mapped.
 */
static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	/* Steal the LRU context, skipping old_mm's. */
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	/* The previous owner must fault a context back in later. */
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
	sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
			    ctxp->ctx_number);
}
1642
/* Switch the current MM context. */
static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
	struct ctx_list *ctx;
	int dirty = 0;

	if (mm->context == NO_CONTEXT) {
		/* First activation (or context was stolen): get one. */
		dirty = 1;
		sun4c_alloc_context(old_mm, mm);
	} else {
		/* Update the LRU ring of contexts. */
		ctx = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx);
		add_to_used_ctxlist(ctx);
	}
	/* Only touch the hardware context register when changing. */
	if (dirty || old_mm != mm)
		sun4c_set_context(mm->context);
}
1661
/* Tear down mm's context on exit: demap its psegs and return the
 * context number to the free list.
 */
static void sun4c_destroy_context(struct mm_struct *mm)
{
	struct ctx_list *ctx_old;

	if (mm->context != NO_CONTEXT) {
		sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
		ctx_old = ctx_list_pool + mm->context;
		remove_from_ctx_list(ctx_old);
		add_to_free_ctxlist(ctx_old);
		mm->context = NO_CONTEXT;
	}
}
1674
/* Report sun4c MMU statistics via seq_file (VAC geometry, context
 * and pseg counts, and current pseg ring occupancy).
 */
static void sun4c_mmu_info(struct seq_file *m)
{
	int used_user_entries, i;

	used_user_entries = 0;
	for (i = 0; i < num_contexts; i++)
		used_user_entries += sun4c_context_ring[i].num_entries;

	seq_printf(m,
		   "vacsize\t\t: %d bytes\n"
		   "vachwflush\t: %s\n"
		   "vaclinesize\t: %d bytes\n"
		   "mmuctxs\t\t: %d\n"
		   "mmupsegs\t: %d\n"
		   "kernelpsegs\t: %d\n"
		   "kfreepsegs\t: %d\n"
		   "usedpsegs\t: %d\n"
		   "ufreepsegs\t: %d\n"
		   "user_taken\t: %d\n"
		   "max_taken\t: %d\n",
		   sun4c_vacinfo.num_bytes,
		   (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
		   sun4c_vacinfo.linesize,
		   num_contexts,
		   (invalid_segment + 1),
		   sun4c_kernel_ring.num_entries,
		   sun4c_kfree_ring.num_entries,
		   used_user_entries,
		   sun4c_ufree_ring.num_entries,
		   sun4c_user_taken_entries,
		   max_user_taken_entries);
}
1707
/* Nothing below here should touch the mmu hardware nor the mmu_entry
 * data structures.
 */

/* First the functions which the mid-level code uses to directly
 * manipulate the software page tables. Some defines since we are
 * emulating the i386 page directory layout.
 */
#define PGD_PRESENT 0x001
#define PGD_RW 0x002
#define PGD_USER 0x004
#define PGD_ACCESSED 0x020
#define PGD_DIRTY 0x040
/* Bit pattern of a pgd entry that points at a valid pte table. */
#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
1722
/* Store a pte into the software page tables; hardware ptes are only
 * installed on demand (see sun4c_update_mmu_cache).
 */
static void sun4c_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

/* The pmd level is folded into the pgd on sun4c: nothing to set. */
static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}

/* Point the one-entry pmd at a pte table. */
static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
}

static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
{
	if (page_address(ptep) == NULL) BUG();	/* No highmem on sun4c */
	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
}

/* Privileged (kernel/IO) ptes count as present even without the
 * ordinary present bit.
 */
static int sun4c_pte_present(pte_t pte)
{
	return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
}
static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }

static int sun4c_pte_read(pte_t pte)
{
	return (pte_val(pte) & _SUN4C_PAGE_READ);
}

/* A pmd is bad when its low bits are not exactly PGD_TABLE or its
 * pte-table pointer is not a valid kernel address.
 */
static int sun4c_pmd_bad(pmd_t pmd)
{
	return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
		(!virt_addr_valid(pmd_val(pmd))));
}

static int sun4c_pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & PGD_PRESENT) != 0);
}

#if 0 /* if PMD takes one word */
static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); }
#else /* if pmd_t is a longish aggregate */
static void sun4c_pmd_clear(pmd_t *pmdp) {
	memset((void *)pmdp, 0, sizeof(pmd_t));
}
#endif

/* The pgd level is degenerate here: never none, never bad. */
static int sun4c_pgd_none(pgd_t pgd) { return 0; }
static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
static int sun4c_pgd_present(pgd_t pgd) { return 1; }
static void sun4c_pgd_clear(pgd_t * pgdp) { }
1777
1778/*
1779 * The following only work if pte_present() is true.
1780 * Undefined behaviour if not..
1781 */
1782static pte_t sun4c_pte_mkwrite(pte_t pte)
1783{
1784 pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
1785 if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
1786 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
1787 return pte;
1788}
1789
1790static pte_t sun4c_pte_mkdirty(pte_t pte)
1791{
1792 pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
1793 if (pte_val(pte) & _SUN4C_PAGE_WRITE)
1794 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
1795 return pte;
1796}
1797
1798static pte_t sun4c_pte_mkyoung(pte_t pte)
1799{
1800 pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
1801 if (pte_val(pte) & _SUN4C_PAGE_READ)
1802 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
1803 return pte;
1804}
1805
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte(page_to_pfn(page) | pgprot_val(pgprot));
}

static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
{
	return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
}

/* I/O ptes: pfn taken relative to PAGE_OFFSET; "space" is unused. */
static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}

static unsigned long sun4c_pte_pfn(pte_t pte)
{
	return pte_val(pte) & SUN4C_PFN_MASK;
}

/* Nonlinear file-mapping support: encode/decode the page offset in
 * a non-present pte marked with _SUN4C_PAGE_FILE.
 */
static pte_t sun4c_pgoff_to_pte(unsigned long pgoff)
{
	return __pte(pgoff | _SUN4C_PAGE_FILE);
}

static unsigned long sun4c_pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1);
}
1839
1840
/* Virtual address of the pte table a pmd entry points at. */
static __inline__ unsigned long sun4c_pmd_page_v(pmd_t pmd)
{
	return (pmd_val(pmd) & PAGE_MASK);
}

static struct page *sun4c_pmd_page(pmd_t pmd)
{
	return virt_to_page(sun4c_pmd_page_v(pmd));
}

static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }

/* to find an entry in a page-table-directory */
static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
{
	/* pmd is folded into the pgd entry itself */
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
/* (not static, unlike the other helpers here) */
pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	return (pte_t *) sun4c_pmd_page_v(*dir) +
	       ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}
1871
/* Swap-entry encoding: low bits hold the swap type, the offset sits
 * above it (see the SUN4C_SWP_* masks and shifts).
 */
static unsigned long sun4c_swp_type(swp_entry_t entry)
{
	return (entry.val & SUN4C_SWP_TYPE_MASK);
}

static unsigned long sun4c_swp_offset(swp_entry_t entry)
{
	return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
}

static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		  (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
		| (type & SUN4C_SWP_TYPE_MASK) };
}
1888
/* Slow-path frees: return the page to the page allocator instead of
 * the quicklists.
 */
static void sun4c_free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void sun4c_free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
1898
1899static pgd_t *sun4c_get_pgd_fast(void)
1900{
1901 unsigned long *ret;
1902
1903 if ((ret = pgd_quicklist) != NULL) {
1904 pgd_quicklist = (unsigned long *)(*ret);
1905 ret[0] = ret[1];
1906 pgtable_cache_size--;
1907 } else {
1908 pgd_t *init;
1909
1910 ret = (unsigned long *)__get_free_page(GFP_KERNEL);
1911 memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
1912 init = sun4c_pgd_offset(&init_mm, 0);
1913 memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
1914 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
1915 }
1916 return (pgd_t *)ret;
1917}
1918
/* Push a pgd onto the quicklist for fast reuse; the old list head
 * is stored in the pgd's first word.
 */
static void sun4c_free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
1925
1926
/* Try to pop a pte table from the quicklist; returns NULL when the
 * list is empty so the caller falls back to the page allocator.
 */
static __inline__ pte_t *
sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
1939
/* Allocate a zeroed pte table, preferring the quicklist.
 * May return NULL on allocation failure.
 */
static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
		return pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		memset(pte, 0, PAGE_SIZE);
	return pte;
}
1952
/* struct-page variant of pte table allocation. */
static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
	if (pte == NULL)
		return NULL;
	return virt_to_page(pte);
}
1960
/* Push a pte table onto the quicklist (mirrors the pgd handling). */
static __inline__ void sun4c_free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static void sun4c_pte_free(struct page *pte)
{
	sun4c_free_pte_fast(page_address(pte));
}
1972
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	BUG();	/* must never be called on a folded pmd level */
	return NULL;
}

static void sun4c_free_pmd_fast(pmd_t * pmd) { }
1984
/* Trim the pgd/pte quicklists back below 'low' entries once they
 * have grown past 'high'.
 */
static void sun4c_check_pgt_cache(int low, int high)
{
	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				sun4c_free_pgd_slow(sun4c_get_pgd_fast());
			if (pte_quicklist)
				sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0));
		} while (pgtable_cache_size > low);
	}
}
1996
/* An experiment, turn off by default for now... -DaveM */
/* NOTE(review): despite the comment above, the define IS enabled here. */
#define SUN4C_PRELOAD_PSEG

/* Called after a software pte update: load the hardware pte and,
 * when the address has no resident pseg, allocate one, link it into
 * the context's ordered ring and (with SUN4C_PRELOAD_PSEG) preload
 * every present pte of the whole segment.
 */
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int pseg;

	local_irq_save(flags);
	address &= PAGE_MASK;
	if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
		/* No pseg resident for this segment yet: grab one. */
		struct sun4c_mmu_entry *entry = sun4c_user_strategy();
		struct mm_struct *mm = vma->vm_mm;
		unsigned long start, end;

		entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
		entry->ctx = mm->context;
		add_ring_ordered(sun4c_context_ring + mm->context, entry);
		sun4c_put_segmap(entry->vaddr, entry->pseg);
		end = start + SUN4C_REAL_PGDIR_SIZE;
		while (start < end) {
#ifdef SUN4C_PRELOAD_PSEG
			pgd_t *pgdp = sun4c_pgd_offset(mm, start);
			pte_t *ptep;

			if (!pgdp)
				goto no_mapping;
			ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start);
			if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
				goto no_mapping;
			sun4c_put_pte(start, pte_val(*ptep));
			goto next;

		no_mapping:
#endif
			/* Invalidate stale ptes the reused pseg may hold. */
			sun4c_put_pte(start, 0);
#ifdef SUN4C_PRELOAD_PSEG
		next:
#endif
			start += PAGE_SIZE;
		}
#ifndef SUN4C_PRELOAD_PSEG
		sun4c_put_pte(address, pte_val(pte));
#endif
		local_irq_restore(flags);
		return;
	} else {
		/* Pseg already resident: just refresh its LRU position. */
		struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];

		remove_lru(entry);
		add_lru(entry);
	}

	sun4c_put_pte(address, pte_val(pte));
	local_irq_restore(flags);
}
2053
2054extern void sparc_context_init(int);
2055extern unsigned long end;
2056extern unsigned long bootmem_init(unsigned long *pages_avail);
2057extern unsigned long last_valid_pfn;
2058
/* One-time sun4c MMU bring-up: size memory via bootmem, probe the
 * MMU, build the pseg entry pool and rings, map kernel and PROM,
 * set up the locked areas, seed swapper_pg_dir's vmalloc pgds and
 * hand the zone layout to the page allocator.
 */
void __init sun4c_paging_init(void)
{
	int i, cnt;
	unsigned long kernel_end, vaddr;
	extern struct resource sparc_iomap;
	unsigned long end_pfn, pages_avail;

	/* Leave slack of four real pgdirs past the kernel image. */
	kernel_end = (unsigned long) &end;
	kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);
	end_pfn = last_valid_pfn;

	sun4c_probe_mmu();
	/* The highest-numbered pseg is sacrificed as the invalid marker. */
	invalid_segment = (num_segmaps - 1);
	sun4c_init_mmu_entry_pool();
	sun4c_init_rings();
	sun4c_init_map_kernelprom(kernel_end);
	sun4c_init_clean_mmu(kernel_end);
	sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
	sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
	sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
	sun4c_init_lock_areas();
	sun4c_init_fill_user_ring();

	sun4c_set_context(0);
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	memset(pg0, 0, PAGE_SIZE);
	memset(pg1, 0, PAGE_SIZE);
	memset(pg2, 0, PAGE_SIZE);
	memset(pg3, 0, PAGE_SIZE);

	/* Save work later. */
	/* Pre-wire four pgd slots of the vmalloc area to the static
	 * pte pages pg0..pg3.
	 */
	vaddr = VMALLOC_START;
	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
	vaddr += SUN4C_PGDIR_SIZE;
	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
	vaddr += SUN4C_PGDIR_SIZE;
	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
	vaddr += SUN4C_PGDIR_SIZE;
	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
	sun4c_init_ss2_cache_bug();
	sparc_context_init(num_contexts);

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, &contig_page_data, zones_size,
				    pfn_base, zholes_size);
	}

	cnt = 0;
	for (i = 0; i < num_segmaps; i++)
		if (mmu_entry_pool[i].locked)
			cnt++;

	/* Reserve 40 psegs for the kernel plus the invalid segment;
	 * the rest may be taken by user contexts.
	 */
	max_user_taken_entries = num_segmaps - cnt - 40 - 1;

	printk("SUN4C: %d mmu entries for the kernel\n", cnt);
}
2136
/* Load up routines and constants for sun4c mmu.
 *
 * This is a btfixup patch table: each BTFIXUPSET_* entry records the
 * sun4c implementation (or an inline constant/nop) that the boot-time
 * fixup pass splices into the generic call sites.  Nothing here runs
 * the MMU code itself; it only selects what will run.
 */
void __init ld_mmu_sun4c(void)
{
	extern void ___xchg32_sun4c(void);
	
	printk("Loading sun4c MMU routines\n");

	/* First the constants */
	BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);

	/* Page protection encodings for the sun4c PTE format. */
	BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
	BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED));
	BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
	page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
	/* PTE bits used when mapping I/O space: present, R/W, IO, uncached. */
	pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
		    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;

	/* Functions */
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);

	/* Pick hardware-assisted or software VAC flush variants depending
	 * on what this sun4c model supports.
	 */
	if (sun4c_vacinfo.do_hwflushes) {
		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);

	/* Patched to a no-op: nothing to do on sun4c for signal insns. */
	BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);

	/* Patched to an inline "st %o1, [%o0]". */
	BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);

	/* The 2.4.18 code does not set this on sun4c, how does it work? XXX */
	/* BTFIXUPSET_SETHI(none_mask, 0x00000000); */ /* Defaults to zero? */

	BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
#if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
	BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
	BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
#endif
	BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
	BTFIXUPSET_CALL(pte_read, sun4c_pte_read, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);

	/* sun4c uses two-level tables, so the pgd level is degenerate:
	 * these are patched to return constants / do nothing.
	 */
	BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
	BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);

	/* PTE bit tests/updates patched as immediate halfword constants. */
	BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
	BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
	BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
	BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
	BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);

	/* These should _never_ get called with two level tables. */
	BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0);
}
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
new file mode 100644
index 000000000000..2dcaa5ac1a38
--- /dev/null
+++ b/arch/sparc/mm/swift.S
@@ -0,0 +1,256 @@
/* $Id: swift.S,v 1.9 2002/01/08 11:11:59 davem Exp $
 * swift.S: MicroSparc-II mmu/cache operations.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#include <asm/asm_offsets.h>

	.text
	.align	4

#if 1	/* XXX screw this, I can't get the VAC flushes working
	 * XXX reliably... -DaveM
	 */
	.globl	swift_flush_cache_all, swift_flush_cache_mm
	.globl	swift_flush_cache_range, swift_flush_cache_page
	.globl	swift_flush_page_for_dma
	.globl	swift_flush_page_to_ram

	/* With the selective VAC flushes disabled (see XXX above), every
	 * cache-flush entry point collapses into one big-hammer routine
	 * that invalidates all D- and I-cache tags by writing zero to
	 * each tag slot via the diagnostic ASIs (loop steps 0x10 at a
	 * time from 0x2000 down to 0).
	 */
swift_flush_cache_all:
swift_flush_cache_mm:
swift_flush_cache_range:
swift_flush_cache_page:
swift_flush_page_for_dma:
swift_flush_page_to_ram:
	sethi	%hi(0x2000), %o0
1:	subcc	%o0, 0x10, %o0
	add	%o0, %o0, %o1		/* %o1 = 2 * %o0, I-cache tag index */
	sta	%g0, [%o0] ASI_M_DATAC_TAG
	bne	1b
	 sta	%g0, [%o1] ASI_M_TXTC_TAG
	retl
	 nop
#else

	.globl	swift_flush_cache_all
swift_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)

	/* Just clear out all the tags. */
	sethi	%hi(16 * 1024), %o0
1:	subcc	%o0, 16, %o0
	sta	%g0, [%o0] ASI_M_TXTC_TAG
	bne	1b
	 sta	%g0, [%o0] ASI_M_DATAC_TAG
	retl
	 nop

	.globl	swift_flush_cache_mm
swift_flush_cache_mm:
	/* mm with context -1 has never been loaded; nothing to flush. */
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	swift_flush_cache_mm_out
	 WINDOW_FLUSH(%g4, %g5)
	/* Disable traps (clear PSR_ET), then temporarily switch the MMU
	 * to the target mm's context so the context-flush ASI hits it.
	 */
	rd	%psr, %g1
	andn	%g1, PSR_ET, %g3
	wr	%g3, 0x0, %psr
	nop
	nop
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %g5
	sta	%g2, [%g7] ASI_M_MMUREGS

#if 1
	sethi	%hi(0x2000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o0] ASI_M_FLUSH_CTX
	bne	1b
	 nop
#else
	clr	%o0
	or	%g0, 2048, %g7
	or	%g0, 2048, %o1
	add	%o1, 2048, %o2
	add	%o2, 2048, %o3
	mov	16, %o4
	add	%o4, 2048, %o5
	add	%o5, 2048, %g2
	add	%g2, 2048, %g3
1:	sta	%g0, [%o0      ] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o2] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o3] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_CTX
	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_CTX
	subcc	%g7, 32, %g7
	bne	1b
	 add	%o0, 32, %o0
#endif

	/* Restore the previous context and re-enable traps. */
	mov	SRMMU_CTX_REG, %g7
	sta	%g5, [%g7] ASI_M_MMUREGS
	wr	%g1, 0x0, %psr
	nop
	nop
swift_flush_cache_mm_out:
	retl
	 nop

	.globl	swift_flush_cache_range
swift_flush_cache_range:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
	/* Ranges larger than one page fall back to flushing the whole mm;
	 * a single page jumps into the page-flush path below.
	 */
	sub	%o2, %o1, %o2
	sethi	%hi(4096), %o3
	cmp	%o2, %o3
	bgu	swift_flush_cache_mm
	 nop
	b	70f
	 nop

	.globl	swift_flush_cache_page
swift_flush_cache_page:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
70:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	swift_flush_cache_page_out
	 WINDOW_FLUSH(%g4, %g5)
	rd	%psr, %g1
	andn	%g1, PSR_ET, %g3
	wr	%g3, 0x0, %psr
	nop
	nop
	mov	SRMMU_CTX_REG, %g7
	lda	[%g7] ASI_M_MMUREGS, %g5
	sta	%g2, [%g7] ASI_M_MMUREGS

	andn	%o1, (PAGE_SIZE - 1), %o1
#if 1
	sethi	%hi(0x1000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	bne	1b
	 nop
#else
	or	%g0, 512, %g7
	or	%g0, 512, %o0
	add	%o0, 512, %o2
	add	%o2, 512, %o3
	add	%o3, 512, %o4
	add	%o4, 512, %o5
	add	%o5, 512, %g3
	add	%g3, 512, %g4
1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	subcc	%g7, 16, %g7
	bne	1b
	 add	%o1, 16, %o1
#endif

	mov	SRMMU_CTX_REG, %g7
	sta	%g5, [%g7] ASI_M_MMUREGS
	wr	%g1, 0x0, %psr
	nop
	nop
swift_flush_cache_page_out:
	retl
	 nop

	/* Swift is write-thru, however it is not
	 * I/O nor TLB-walk coherent.  Also it has
	 * caches which are virtually indexed and tagged.
	 */
	.globl	swift_flush_page_for_dma
	.globl	swift_flush_page_to_ram
swift_flush_page_for_dma:
swift_flush_page_to_ram:
	andn	%o0, (PAGE_SIZE - 1), %o1
#if 1
	sethi	%hi(0x1000), %o0
1:	subcc	%o0, 0x10, %o0
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	bne	1b
	 nop
#else
	or	%g0, 512, %g7
	or	%g0, 512, %o0
	add	%o0, 512, %o2
	add	%o2, 512, %o3
	add	%o3, 512, %o4
	add	%o4, 512, %o5
	add	%o5, 512, %g3
	add	%g3, 512, %g4
1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
	subcc	%g7, 16, %g7
	bne	1b
	 add	%o1, 16, %o1
#endif
	retl
	 nop
#endif

	/* Flush the two instruction words at a just-written signal frame
	 * out of the I-cache (%o1 = address of the instructions).
	 */
	.globl	swift_flush_sig_insns
swift_flush_sig_insns:
	flush	%o1
	retl
	 flush	%o1 + 4

	/* TLB flushes: range and mm fold into a full TLB flush (probe
	 * address 0x400 flushes the entire TLB); mms with context -1 are
	 * skipped since they were never loaded.
	 */
	.globl	swift_flush_tlb_mm
	.globl	swift_flush_tlb_range
	.globl	swift_flush_tlb_all
swift_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
swift_flush_tlb_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	swift_flush_tlb_all_out
swift_flush_tlb_all:
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
swift_flush_tlb_all_out:
	retl
	 nop

	.globl	swift_flush_tlb_page
swift_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	cmp	%o3, -1
	be	swift_flush_tlb_page_out
	 nop
#if 1
	/* NOTE(review): the enabled path discards the page-aligned %o1
	 * above and does a full TLB flush (probe 0x400) instead of the
	 * per-page flush in the disabled branch — presumably deliberate,
	 * matching the VAC reliability comment at the top of this file.
	 */
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
#else
	lda	[%g1] ASI_M_MMUREGS, %g5
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PAGE	/* rem. virt. cache. prot. */
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
#endif
swift_flush_tlb_page_out:
	retl
	 nop
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
new file mode 100644
index 000000000000..8acd1787fde2
--- /dev/null
+++ b/arch/sparc/mm/tsunami.S
@@ -0,0 +1,133 @@
/* $Id: tsunami.S,v 1.7 2001/12/21 04:56:15 davem Exp $
 * tsunami.S: High speed MicroSparc-I mmu/cache operations.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/asm_offsets.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>

	.text
	.align	4

	.globl	tsunami_flush_cache_all, tsunami_flush_cache_mm
	.globl	tsunami_flush_cache_range, tsunami_flush_cache_page
	.globl	tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
	.globl	tsunami_flush_sig_insns
	.globl	tsunami_flush_tlb_all, tsunami_flush_tlb_mm
	.globl	tsunami_flush_tlb_range, tsunami_flush_tlb_page

	/* Sliiick...
	 * All cache-flush variants funnel into one I+D cache clear via
	 * the FLCLEAR ASIs; mm/range/page entries first bail out if the
	 * mm's context is -1 (never loaded).  Note flush_page_to_ram is
	 * a pure no-op (jumps straight to the retl).
	 */
tsunami_flush_cache_page:
tsunami_flush_cache_range:
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
tsunami_flush_cache_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_cache_out
tsunami_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
tsunami_flush_page_for_dma:
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
tsunami_flush_cache_out:
tsunami_flush_page_to_ram:
	retl
	 nop

	/* Flush the two instruction words of a signal frame (%o1). */
tsunami_flush_sig_insns:
	flush	%o1
	retl
	 flush	%o1 + 4

	/* More slick stuff...
	 * range/mm TLB flushes collapse into a full TLB flush (probe
	 * address 0x400), skipped when the mm's context is -1.
	 */
tsunami_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
tsunami_flush_tlb_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_tlb_out
tsunami_flush_tlb_all:
	mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_out:
	retl
	 nop

	/* This one can be done in a fine grained manner...
	 * Temporarily switch to the page's context, probe-flush just
	 * that page, then restore the old context in the delay slot.
	 */
tsunami_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	cmp	%o3, -1
	be	tsunami_flush_tlb_page_out
	 lda	[%g1] ASI_M_MMUREGS, %g5
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_page_out:
	retl
	 sta	%g5, [%g1] ASI_M_MMUREGS

	/* Copy 32 bytes (src+offset -> dst+offset) with ldd/std pairs,
	 * highest doubleword first.
	 */
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
	ldd	[src + offset + 0x18], t0; \
	std	t0, [dst + offset + 0x18]; \
	ldd	[src + offset + 0x10], t2; \
	std	t2, [dst + offset + 0x10]; \
	ldd	[src + offset + 0x08], t0; \
	std	t0, [dst + offset + 0x08]; \
	ldd	[src + offset + 0x00], t2; \
	std	t2, [dst + offset + 0x00];

	/* Copy one page from %o1 to %o0 in 0x100-byte chunks.  Falls
	 * through into tsunami_setup_blockops' address range on purpose:
	 * setup below copies these very instructions over __copy_1page.
	 */
	.globl	tsunami_copy_1page
tsunami_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
	or	%g0, (PAGE_SIZE >> 8), %g1
1:
	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
	subcc	%g1, 1, %g1
	add	%o0, 0x100, %o0
	bne	1b
	 add	%o1, 0x100, %o1

	/* Patch __copy_1page at runtime: copy the instruction words of
	 * tsunami_copy_1page over __copy_1page, then clear the I/D
	 * caches so the patched code is fetched fresh.
	 */
	.globl	tsunami_setup_blockops
tsunami_setup_blockops:
	sethi	%hi(__copy_1page), %o0
	or	%o0, %lo(__copy_1page), %o0
	sethi	%hi(tsunami_copy_1page), %o1
	or	%o1, %lo(tsunami_copy_1page), %o1
	sethi	%hi(tsunami_setup_blockops), %o2
	or	%o2, %lo(tsunami_setup_blockops), %o2
	ld	[%o1], %o4
1:	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	 ld	[%o1], %o4
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
	retl
	 nop
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
new file mode 100644
index 000000000000..f58712d26bf5
--- /dev/null
+++ b/arch/sparc/mm/viking.S
@@ -0,0 +1,284 @@
/* $Id: viking.S,v 1.19 2001/12/21 04:56:15 davem Exp $
 * viking.S: High speed Viking cache/mmu operations
 *
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
 * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asm_offsets.h>
#include <asm/asi.h>
#include <asm/mxcc.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
#include <asm/btfixup.h>

#ifdef CONFIG_SMP
	.data
	.align	4
	/* Byte spinlock (taken with ldstub) serializing the sun4d SMP
	 * TLB flush routines at the bottom of this file.
	 */
sun4dsmp_flush_tlb_spin:
	.word	0
#endif

	.text
	.align	4

	.globl	viking_flush_cache_all, viking_flush_cache_mm
	.globl	viking_flush_cache_range, viking_flush_cache_page
	.globl	viking_flush_page, viking_mxcc_flush_page
	.globl	viking_flush_page_for_dma, viking_flush_page_to_ram
	.globl	viking_flush_sig_insns
	.globl	viking_flush_tlb_all, viking_flush_tlb_mm
	.globl	viking_flush_tlb_range, viking_flush_tlb_page

	/* Walk every D-cache set (0-127) and block (0-3), and for each
	 * valid tag matching the physical page in %o0, do dummy loads
	 * through the cacheable mirror addresses to displace the lines.
	 */
viking_flush_page:
	sethi	%hi(PAGE_OFFSET), %g2
	sub	%o0, %g2, %g3
	srl	%g3, 12, %g1		! ppage >> 12

	clr	%o1			! set counter, 0 - 127
	sethi	%hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3
	sethi	%hi(0x80000000), %o4
	sethi	%hi(VIKING_PTAG_VALID), %o5
	sethi	%hi(2*PAGE_SIZE), %o0
	sethi	%hi(PAGE_SIZE), %g7
	clr	%o2			! block counter, 0 - 3
5:
	sll	%o1, 5, %g4
	or	%g4, %o4, %g4		! 0x80000000 | (set << 5)

	sll	%o2, 26, %g5		! block << 26
6:
	or	%g5, %g4, %g5
	ldda	[%g5] ASI_M_DATAC_TAG, %g2
	cmp	%g3, %g1		! ptag == ppage?
	bne	7f
	 inc	%o2

	andcc	%g2, %o5, %g0		! ptag VALID?
	be	7f
	 add	%g4, %o3, %g2		! (PAGE_OFFSET + PAGE_SIZE) | (set << 5)
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	ld	[%g2 + %g7], %g3
	add	%g2, %o0, %g2
	ld	[%g2], %g3
	b	8f
	 ld	[%g2 + %g7], %g3

7:
	cmp	%o2, 3
	ble	6b
	 sll	%o2, 26, %g5		! block << 26

8:	inc	%o1
	cmp	%o1, 0x7f
	ble	5b
	 clr	%o2

9:	retl
	 nop

	/* Flush one page out of the MXCC external cache by streaming it
	 * through the MXCC source/destination stream registers,
	 * MXCC_STREAM_SIZE bytes per iteration, walking back from the
	 * end of the page.
	 */
viking_mxcc_flush_page:
	sethi	%hi(PAGE_OFFSET), %g2
	sub	%o0, %g2, %g3
	sub	%g3, -PAGE_SIZE, %g3	! ppage + PAGE_SIZE
	sethi	%hi(MXCC_SRCSTREAM), %o3	! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM)
	mov	0x10, %g2		! set cacheable bit
	or	%o3, %lo(MXCC_SRCSTREAM), %o2
	or	%o3, %lo(MXCC_DESSTREAM), %o3
	sub	%g3, MXCC_STREAM_SIZE, %g3
6:
	stda	%g2, [%o2] ASI_M_MXCC
	stda	%g2, [%o3] ASI_M_MXCC
	andncc	%g3, PAGE_MASK, %g0
	bne	6b
	 sub	%g3, MXCC_STREAM_SIZE, %g3

9:	retl
	 nop

	/* On UP, skip the flush for mms that never had a context (-1);
	 * on SMP the checks are compiled out and everything falls into
	 * the window-flush-only path.
	 */
viking_flush_cache_page:
viking_flush_cache_range:
#ifndef CONFIG_SMP
	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
#endif
viking_flush_cache_mm:
#ifndef CONFIG_SMP
	ld	[%o0 + AOFF_mm_context], %g1
	cmp	%g1, -1
	bne	viking_flush_cache_all
	 nop
	b,a	viking_flush_cache_out
#endif
viking_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
viking_flush_cache_out:
	retl
	 nop

	/* Probe address 0x400 flushes the whole TLB. */
viking_flush_tlb_all:
	mov	0x400, %g1
	retl
	 sta	%g0, [%g1] ASI_M_FLUSH_PROBE

	/* Switch to the mm's context, probe-flush it (0x300 = flush
	 * context), and restore the old context in the delay slot.
	 */
viking_flush_tlb_mm:
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o1, -1
	be	1f
#endif
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
	retl
	 sta	%g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
1:	retl
	 nop
#endif

	/* Flush a range one pgdir-sized region at a time (0x200 = flush
	 * region probe), stepping %o1 by the region size until %o2.
	 */
viking_flush_tlb_range:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	2f
#endif
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1
	add	%o1, 0x200, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
1:	sub	%o1, %o4, %o1		! advance by one pgdir (%o4 is a negated mask)
	cmp	%o1, %o2
	blu,a	1b
	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	retl
	 sta	%g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
2:	retl
	 nop
#endif

viking_flush_tlb_page:
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp	%o3, -1
	be	1f
#endif
	and	%o1, PAGE_MASK, %o1
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	retl
	 sta	%g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
1:	retl
	 nop
#endif

	/* Viking needs none of these; they are no-ops here. */
viking_flush_page_to_ram:
viking_flush_page_for_dma:
viking_flush_sig_insns:
	retl
	 nop

#ifdef CONFIG_SMP
	/* sun4d SMP variants: identical TLB flush logic, but serialized
	 * by spinning on sun4dsmp_flush_tlb_spin with ldstub; the lock
	 * byte is cleared with stb in the return delay slot.
	 */
	.globl	sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
	.globl	sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
sun4dsmp_flush_tlb_all:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f
	 mov	0x400, %g1
	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
	retl
	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst	%g5
	bne,a	2b
	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_mm:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f
	 mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o1
	lda	[%g1] ASI_M_MMUREGS, %g5
	mov	0x300, %g2
	sta	%o1, [%g1] ASI_M_MMUREGS
	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst	%g5
	bne,a	2b
	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_range:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	3f
	 mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta	%o3, [%g1] ASI_M_MMUREGS
	and	%o1, %o4, %o1
	add	%o1, 0x200, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
2:	sub	%o1, %o4, %o1
	cmp	%o1, %o2
	blu,a	2b
	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
3:	tst	%g5
	bne,a	3b
	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b

sun4dsmp_flush_tlb_page:
	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst	%g5
	bne	2f
	 mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
	ld	[%o0 + AOFF_mm_context], %o3
	lda	[%g1] ASI_M_MMUREGS, %g5
	and	%o1, PAGE_MASK, %o1
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	sta	%g5, [%g1] ASI_M_MMUREGS
	retl
	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst	%g5
	bne,a	2b
	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a	1b
	 nop
#endif
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
new file mode 100644
index 000000000000..2b217ee40703
--- /dev/null
+++ b/arch/sparc/prom/Makefile
@@ -0,0 +1,9 @@
# $Id: Makefile,v 1.8 2000/12/15 00:41:22 davem Exp $
# Makefile for the Sun Boot PROM interface library under
# Linux.
#

# Core PROM interface objects, always built into the library.
lib-y := bootstr.o devmap.o devops.o init.o memory.o misc.o mp.o \
	 palloc.o ranges.o segment.o console.o printf.o tree.o

# sun4 machines additionally need the sun4 PROM glue.
lib-$(CONFIG_SUN4) += sun4prom.o
diff --git a/arch/sparc/prom/bootstr.c b/arch/sparc/prom/bootstr.c
new file mode 100644
index 000000000000..cfdeac2788d1
--- /dev/null
+++ b/arch/sparc/prom/bootstr.c
@@ -0,0 +1,63 @@
1/* $Id: bootstr.c,v 1.20 2000/02/08 20:24:23 davem Exp $
2 * bootstr.c: Boot string/argument acquisition from the PROM.
3 *
4 * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/string.h>
8#include <asm/oplib.h>
9#include <asm/sun4prom.h>
10#include <linux/init.h>
11
12#define BARG_LEN 256
13static char barg_buf[BARG_LEN] = { 0 };
14static char fetched __initdata = 0;
15
16extern linux_sun4_romvec *sun4_romvec;
17
18char * __init
19prom_getbootargs(void)
20{
21 int iter;
22 char *cp, *arg;
23
24 /* This check saves us from a panic when bootfd patches args. */
25 if (fetched) {
26 return barg_buf;
27 }
28
29 switch(prom_vers) {
30 case PROM_V0:
31 case PROM_SUN4:
32 cp = barg_buf;
33 /* Start from 1 and go over fd(0,0,0)kernel */
34 for(iter = 1; iter < 8; iter++) {
35 arg = (*(romvec->pv_v0bootargs))->argv[iter];
36 if(arg == 0) break;
37 while(*arg != 0) {
38 /* Leave place for space and null. */
39 if(cp >= barg_buf + BARG_LEN-2){
40 /* We might issue a warning here. */
41 break;
42 }
43 *cp++ = *arg++;
44 }
45 *cp++ = ' ';
46 }
47 *cp = 0;
48 break;
49 case PROM_V2:
50 case PROM_V3:
51 /*
52 * V3 PROM cannot supply as with more than 128 bytes
53 * of an argument. But a smart bootstrap loader can.
54 */
55 strlcpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf));
56 break;
57 default:
58 break;
59 }
60
61 fetched = 1;
62 return barg_buf;
63}
diff --git a/arch/sparc/prom/console.c b/arch/sparc/prom/console.c
new file mode 100644
index 000000000000..4e6e41d3291d
--- /dev/null
+++ b/arch/sparc/prom/console.c
@@ -0,0 +1,220 @@
1/* $Id: console.c,v 1.25 2001/10/30 04:54:22 davem Exp $
2 * console.c: Routines that deal with sending and receiving IO
3 * to/from the current console device using the PROM.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1998 Pete Zaitcev <zaitcev@yahoo.com>
7 */
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <asm/openprom.h>
13#include <asm/sun4prom.h>
14#include <asm/oplib.h>
15#include <asm/system.h>
16#include <linux/string.h>
17
18extern void restore_current(void);
19
20static char con_name_jmc[] = "/obio/su@"; /* "/obio/su@0,3002f8"; */
21#define CON_SIZE_JMC (sizeof(con_name_jmc))
22
23/* Non blocking get character from console input device, returns -1
24 * if no input was taken. This can be used for polling.
25 */
26int
27prom_nbgetchar(void)
28{
29 static char inc;
30 int i = -1;
31 unsigned long flags;
32
33 spin_lock_irqsave(&prom_lock, flags);
34 switch(prom_vers) {
35 case PROM_V0:
36 case PROM_SUN4:
37 i = (*(romvec->pv_nbgetchar))();
38 break;
39 case PROM_V2:
40 case PROM_V3:
41 if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
42 i = inc;
43 } else {
44 i = -1;
45 }
46 break;
47 default:
48 i = -1;
49 break;
50 };
51 restore_current();
52 spin_unlock_irqrestore(&prom_lock, flags);
53 return i; /* Ugh, we could spin forever on unsupported proms ;( */
54}
55
56/* Non blocking put character to console device, returns -1 if
57 * unsuccessful.
58 */
59int
60prom_nbputchar(char c)
61{
62 static char outc;
63 unsigned long flags;
64 int i = -1;
65
66 spin_lock_irqsave(&prom_lock, flags);
67 switch(prom_vers) {
68 case PROM_V0:
69 case PROM_SUN4:
70 i = (*(romvec->pv_nbputchar))(c);
71 break;
72 case PROM_V2:
73 case PROM_V3:
74 outc = c;
75 if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
76 i = 0;
77 else
78 i = -1;
79 break;
80 default:
81 i = -1;
82 break;
83 };
84 restore_current();
85 spin_unlock_irqrestore(&prom_lock, flags);
86 return i; /* Ugh, we could spin forever on unsupported proms ;( */
87}
88
/* Blocking version of get character routine above: spin on the
 * non-blocking poll until a character arrives.
 */
char
prom_getchar(void)
{
	int ch;

	do {
		ch = prom_nbgetchar();
	} while (ch == -1);
	return (char) ch;
}
97
/* Blocking version of put character routine above: retry the
 * non-blocking write until it succeeds.
 */
void
prom_putchar(char c)
{
	do {
		/* spin until the PROM accepts the character */
	} while (prom_nbputchar(c) == -1);
}
105
106/* Query for input device type */
107enum prom_input_device
108prom_query_input_device(void)
109{
110 unsigned long flags;
111 int st_p;
112 char propb[64];
113 char *p;
114 int propl;
115
116 switch(prom_vers) {
117 case PROM_V0:
118 case PROM_V2:
119 case PROM_SUN4:
120 default:
121 switch(*romvec->pv_stdin) {
122 case PROMDEV_KBD: return PROMDEV_IKBD;
123 case PROMDEV_TTYA: return PROMDEV_ITTYA;
124 case PROMDEV_TTYB: return PROMDEV_ITTYB;
125 default:
126 return PROMDEV_I_UNK;
127 };
128 case PROM_V3:
129 spin_lock_irqsave(&prom_lock, flags);
130 st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
131 restore_current();
132 spin_unlock_irqrestore(&prom_lock, flags);
133 if(prom_node_has_property(st_p, "keyboard"))
134 return PROMDEV_IKBD;
135 if (prom_getproperty(st_p, "name", propb, sizeof(propb)) != -1) {
136 if(strncmp(propb, "keyboard", sizeof("serial")) == 0)
137 return PROMDEV_IKBD;
138 }
139 if (prom_getproperty(st_p, "device_type", propb, sizeof(propb)) != -1) {
140 if(strncmp(propb, "serial", sizeof("serial")))
141 return PROMDEV_I_UNK;
142 }
143 propl = prom_getproperty(prom_root_node, "stdin-path", propb, sizeof(propb));
144 if(propl > 2) {
145 p = propb;
146 while(*p) p++; p -= 2;
147 if(p[0] == ':') {
148 if(p[1] == 'a')
149 return PROMDEV_ITTYA;
150 else if(p[1] == 'b')
151 return PROMDEV_ITTYB;
152 }
153 }
154 return PROMDEV_I_UNK;
155 }
156}
157
/* Query for output device type */

/* Classify the PROM console output device.
 *
 * V0/sun4 read the romvec directly; V2/V3 resolve the stdout instance
 * to a node and check its "device_type" ("display" => screen).  On V3
 * the "stdout-path" property's ":a"/":b" suffix (or the JavaStation
 * con_name_jmc prefix) distinguishes the serial lines.
 *
 * NOTE(review): both the V0/sun4 case and the V2 fallback below read
 * *romvec->pv_stdin, not pv_stdout, even though this queries the
 * OUTPUT device.  On these machines console in/out may always agree,
 * but this looks suspicious — confirm against the romvec layout
 * before relying on it.
 */
enum prom_output_device
prom_query_output_device(void)
{
	unsigned long flags;
	int st_p;
	char propb[64];
	char *p;
	int propl;

	switch(prom_vers) {
	case PROM_V0:
	case PROM_SUN4:
		switch(*romvec->pv_stdin) {
		case PROMDEV_SCREEN:	return PROMDEV_OSCREEN;
		case PROMDEV_TTYA:	return PROMDEV_OTTYA;
		case PROMDEV_TTYB:	return PROMDEV_OTTYB;
		};
		break;
	case PROM_V2:
	case PROM_V3:
		spin_lock_irqsave(&prom_lock, flags);
		st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
		restore_current();
		spin_unlock_irqrestore(&prom_lock, flags);
		propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
		if (propl == sizeof("display") &&
			strncmp("display", propb, sizeof("display")) == 0)
		{
			return PROMDEV_OSCREEN;
		}
		if(prom_vers == PROM_V3) {
			if(propl >= 0 &&
			   strncmp("serial", propb, sizeof("serial")) != 0)
				return PROMDEV_O_UNK;
			/* JavaStation: stdout-path names the su port directly. */
			propl = prom_getproperty(prom_root_node, "stdout-path",
						 propb, sizeof(propb));
			if(propl == CON_SIZE_JMC &&
			   strncmp(propb, con_name_jmc, CON_SIZE_JMC) == 0)
				return PROMDEV_OTTYA;
			if(propl > 2) {
				/* Check the ":a"/":b" suffix of the path. */
				p = propb;
				while(*p) p++; p-= 2;
				if(p[0]==':') {
					if(p[1] == 'a')
						return PROMDEV_OTTYA;
					else if(p[1] == 'b')
						return PROMDEV_OTTYB;
				}
			}
		} else {
			switch(*romvec->pv_stdin) {
			case PROMDEV_TTYA: return PROMDEV_OTTYA;
			case PROMDEV_TTYB: return PROMDEV_OTTYB;
			};
		}
		break;
	default:
		;
	};
	return PROMDEV_O_UNK;
}
diff --git a/arch/sparc/prom/devmap.c b/arch/sparc/prom/devmap.c
new file mode 100644
index 000000000000..eb12073578ad
--- /dev/null
+++ b/arch/sparc/prom/devmap.c
@@ -0,0 +1,54 @@
1/* $Id: devmap.c,v 1.7 2000/08/26 02:38:03 anton Exp $
2 * promdevmap.c: Map device/IO areas to virtual addresses.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/types.h>
8#include <linux/kernel.h>
9#include <linux/sched.h>
10
11#include <asm/openprom.h>
12#include <asm/oplib.h>
13
14extern void restore_current(void);
15
16/* Just like the routines in palloc.c, these should not be used
17 * by the kernel at all. Bootloader facility mainly. And again,
18 * this is only available on V2 proms and above.
19 */
20
21/* Map physical device address 'paddr' in IO space 'ios' of size
22 * 'num_bytes' to a virtual address, with 'vhint' being a hint to
23 * the prom as to where you would prefer the mapping. We return
24 * where the prom actually mapped it.
25 */
26char *
27prom_mapio(char *vhint, int ios, unsigned int paddr, unsigned int num_bytes)
28{
29 unsigned long flags;
30 char *ret;
31
32 spin_lock_irqsave(&prom_lock, flags);
33 if((num_bytes == 0) || (paddr == 0)) ret = (char *) 0x0;
34 else
35 ret = (*(romvec->pv_v2devops.v2_dumb_mmap))(vhint, ios, paddr,
36 num_bytes);
37 restore_current();
38 spin_unlock_irqrestore(&prom_lock, flags);
39 return ret;
40}
41
42/* Unmap an IO/device area that was mapped using the above routine. */
43void
44prom_unmapio(char *vaddr, unsigned int num_bytes)
45{
46 unsigned long flags;
47
48 if(num_bytes == 0x0) return;
49 spin_lock_irqsave(&prom_lock, flags);
50 (*(romvec->pv_v2devops.v2_dumb_munmap))(vaddr, num_bytes);
51 restore_current();
52 spin_unlock_irqrestore(&prom_lock, flags);
53 return;
54}
diff --git a/arch/sparc/prom/devops.c b/arch/sparc/prom/devops.c
new file mode 100644
index 000000000000..61919b54f6cc
--- /dev/null
+++ b/arch/sparc/prom/devops.c
@@ -0,0 +1,89 @@
1/* $Id: devops.c,v 1.13 2000/08/26 02:38:03 anton Exp $
2 * devops.c: Device operations using the PROM.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6#include <linux/types.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9
10#include <asm/openprom.h>
11#include <asm/oplib.h>
12
13extern void restore_current(void);
14
15/* Open the device described by the string 'dstr'. Returns the handle
16 * to that device used for subsequent operations on that device.
17 * Returns -1 on failure.
18 */
19int
20prom_devopen(char *dstr)
21{
22 int handle;
23 unsigned long flags;
24 spin_lock_irqsave(&prom_lock, flags);
25 switch(prom_vers) {
26 case PROM_V0:
27 handle = (*(romvec->pv_v0devops.v0_devopen))(dstr);
28 if(handle == 0) handle = -1;
29 break;
30 case PROM_V2:
31 case PROM_V3:
32 handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr);
33 break;
34 default:
35 handle = -1;
36 break;
37 };
38 restore_current();
39 spin_unlock_irqrestore(&prom_lock, flags);
40
41 return handle;
42}
43
44/* Close the device described by device handle 'dhandle'. */
45int
46prom_devclose(int dhandle)
47{
48 unsigned long flags;
49 spin_lock_irqsave(&prom_lock, flags);
50 switch(prom_vers) {
51 case PROM_V0:
52 (*(romvec->pv_v0devops.v0_devclose))(dhandle);
53 break;
54 case PROM_V2:
55 case PROM_V3:
56 (*(romvec->pv_v2devops.v2_dev_close))(dhandle);
57 break;
58 default:
59 break;
60 };
61 restore_current();
62 spin_unlock_irqrestore(&prom_lock, flags);
63 return 0;
64}
65
66/* Seek to specified location described by 'seekhi' and 'seeklo'
67 * for device 'dhandle'.
68 */
69void
70prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
71{
72 unsigned long flags;
73 spin_lock_irqsave(&prom_lock, flags);
74 switch(prom_vers) {
75 case PROM_V0:
76 (*(romvec->pv_v0devops.v0_seekdev))(dhandle, seekhi, seeklo);
77 break;
78 case PROM_V2:
79 case PROM_V3:
80 (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo);
81 break;
82 default:
83 break;
84 };
85 restore_current();
86 spin_unlock_irqrestore(&prom_lock, flags);
87
88 return;
89}
diff --git a/arch/sparc/prom/init.c b/arch/sparc/prom/init.c
new file mode 100644
index 000000000000..b83409c81916
--- /dev/null
+++ b/arch/sparc/prom/init.c
@@ -0,0 +1,95 @@
1/* $Id: init.c,v 1.14 2000/01/29 01:09:12 anton Exp $
2 * init.c: Initialize internal variables used by the PROM
3 * library functions.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12
13#include <asm/openprom.h>
14#include <asm/oplib.h>
15#include <asm/sun4prom.h>
16
17struct linux_romvec *romvec;
18enum prom_major_version prom_vers;
19unsigned int prom_rev, prom_prev;
20linux_sun4_romvec *sun4_romvec;
21
22/* The root node of the prom device tree. */
23int prom_root_node;
24
25int prom_stdin, prom_stdout;
26
27/* Pointer to the device tree operations structure. */
28struct linux_nodeops *prom_nodeops;
29
30/* You must call prom_init() before you attempt to use any of the
31 * routines in the prom library. It returns 0 on success, 1 on
32 * failure. It gets passed the pointer to the PROM vector.
33 */
34
35extern void prom_meminit(void);
36extern void prom_ranges_init(void);
37
void __init prom_init(struct linux_romvec *rp)
{
#ifdef CONFIG_SUN4
	/* Sun4 has no real OBP; substitute the faked romvec built by
	 * sun4_prom_init(), ignoring whatever the caller passed. */
	extern struct linux_romvec *sun4_prom_init(void);
	rp = sun4_prom_init();
#endif
	romvec = rp;

	/* Classify the PROM; an unrecognized version is fatal. */
	switch(romvec->pv_romvers) {
	case 0:
		prom_vers = PROM_V0;
		break;
	case 2:
		prom_vers = PROM_V2;
		break;
	case 3:
		prom_vers = PROM_V3;
		break;
	case 40:
		prom_vers = PROM_SUN4;
		break;
	default:
		prom_printf("PROMLIB: Bad PROM version %d\n",
			    romvec->pv_romvers);
		prom_halt();
		break;
	};

	/* Cache revision info and the device-tree operations vector. */
	prom_rev = romvec->pv_plugin_revision;
	prom_prev = romvec->pv_printrev;
	prom_nodeops = romvec->pv_nodeops;

	/* prom_getsibling(0) yields the root of the device tree;
	 * a missing root or nodeops vector means we cannot run. */
	prom_root_node = prom_getsibling(0);
	if((prom_root_node == 0) || (prom_root_node == -1))
		prom_halt();

	if((((unsigned long) prom_nodeops) == 0) ||
	   (((unsigned long) prom_nodeops) == -1))
		prom_halt();

	/* V2+ PROMs export stdin/stdout instance handles. */
	if(prom_vers == PROM_V2 || prom_vers == PROM_V3) {
		prom_stdout = *romvec->pv_v2bootargs.fd_stdout;
		prom_stdin = *romvec->pv_v2bootargs.fd_stdin;
	}

	/* Build the internal memory lists and obio/sbus range tables. */
	prom_meminit();

	prom_ranges_init();

#ifndef CONFIG_SUN4
	/* SUN4 prints this in sun4_prom_init */
	printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
	       romvec->pv_romvers, prom_rev);
#endif

	/* Initialization successful. */
	return;
}
diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c
new file mode 100644
index 000000000000..46aa51afec14
--- /dev/null
+++ b/arch/sparc/prom/memory.c
@@ -0,0 +1,216 @@
1/* $Id: memory.c,v 1.15 2000/01/29 01:09:12 anton Exp $
2 * memory.c: Prom routine for acquiring various bits of information
3 * about RAM on the machine, both virtual and physical.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12
13#include <asm/openprom.h>
14#include <asm/sun4prom.h>
15#include <asm/oplib.h>
16
17/* This routine, for consistency, returns the ram parameters in the
18 * V0 prom memory descriptor format. I choose this format because I
19 * think it was the easiest to work with. I feel the religious
20 * arguments now... ;) Also, I return the linked lists sorted to
21 * prevent paging_init() upset stomach as I have not yet written
22 * the pepto-bismol kernel module yet.
23 */
24
25struct linux_prom_registers prom_reg_memlist[64];
26struct linux_prom_registers prom_reg_tmp[64];
27
28struct linux_mlist_v0 prom_phys_total[64];
29struct linux_mlist_v0 prom_prom_taken[64];
30struct linux_mlist_v0 prom_phys_avail[64];
31
32struct linux_mlist_v0 *prom_ptot_ptr = prom_phys_total;
33struct linux_mlist_v0 *prom_ptak_ptr = prom_prom_taken;
34struct linux_mlist_v0 *prom_pavl_ptr = prom_phys_avail;
35
36struct linux_mem_v0 prom_memlist;
37
38
/* Internal Prom library routine to sort a linux_mlist_v0 memory
 * list. Used below in initialization.
 *
 * In-place selection sort by ascending start_adr. The array is
 * terminated by an entry whose theres_more field is 0; only the
 * start_adr/num_bytes payloads move, the theres_more links always
 * point at the next array slot and are left untouched.
 */
static void __init
prom_sortmemlist(struct linux_mlist_v0 *thislist)
{
	int swapi = 0;
	/* NOTE(review): tmpsize is an int while num_bytes looks like a
	 * pointer-sized field elsewhere in this file — a >2GB region
	 * would be truncated here; verify against linux_mlist_v0. */
	int i, mitr, tmpsize;
	char *tmpaddr;
	char *lowest;

	for(i=0; thislist[i].theres_more != 0; i++) {
		/* Find the lowest start address among entries i..end. */
		lowest = thislist[i].start_adr;
		for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if(thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if(lowest == thislist[i].start_adr) continue;	/* already in place */
		/* Not a plain swap: save entry swapi, shift i..swapi-1 up
		 * one slot, then drop the saved entry at position i. */
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for(mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}

	return;
}
70
/* Initialize the memory lists based upon the prom version.
 *
 * Populates the file-scope prom_phys_total / prom_prom_taken /
 * prom_phys_avail arrays (V0 descriptor format, sorted ascending)
 * and links them into prom_memlist for prom_meminfo().
 */
void __init prom_meminit(void)
{
	int node = 0;
	unsigned int iter, num_regs;
	struct linux_mlist_v0 *mptr;	/* ptr for traversal */

	switch(prom_vers) {
	case PROM_V0:
		/* Nice, kind of easier to do in this case. */
		/* The PROM already hands us V0 lists; copy each linked
		 * list into our flat arrays, re-linking theres_more to
		 * the following array slot and NUL-terminating the last.
		 * NOTE(review): an empty PROM list would leave iter==0 and
		 * index [iter-1] below — presumably never happens on real
		 * hardware. */
		/* First, the total physical descriptors. */
		for(mptr = (*(romvec->pv_v0mem.v0_totphys)), iter=0;
		    mptr; mptr=mptr->theres_more, iter++) {
			prom_phys_total[iter].start_adr = mptr->start_adr;
			prom_phys_total[iter].num_bytes = mptr->num_bytes;
			prom_phys_total[iter].theres_more = &prom_phys_total[iter+1];
		}
		prom_phys_total[iter-1].theres_more = 0x0;
		/* Second, the total prom taken descriptors. */
		for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0;
		    mptr; mptr=mptr->theres_more, iter++) {
			prom_prom_taken[iter].start_adr = mptr->start_adr;
			prom_prom_taken[iter].num_bytes = mptr->num_bytes;
			prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1];
		}
		prom_prom_taken[iter-1].theres_more = 0x0;
		/* Last, the available physical descriptors. */
		for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0;
		    mptr; mptr=mptr->theres_more, iter++) {
			prom_phys_avail[iter].start_adr = mptr->start_adr;
			prom_phys_avail[iter].num_bytes = mptr->num_bytes;
			prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1];
		}
		prom_phys_avail[iter-1].theres_more = 0x0;
		/* Sort all the lists. */
		prom_sortmemlist(prom_phys_total);
		prom_sortmemlist(prom_prom_taken);
		prom_sortmemlist(prom_phys_avail);
		break;
	case PROM_V2:
	case PROM_V3:
		/* Grrr, have to traverse the prom device tree ;( */
		/* "available" under /memory -> prom_phys_avail. */
		node = prom_getchild(prom_root_node);
		node = prom_searchsiblings(node, "memory");
		num_regs = prom_getproperty(node, "available",
					    (char *) prom_reg_memlist,
					    sizeof(prom_reg_memlist));
		num_regs = (num_regs/sizeof(struct linux_prom_registers));
		for(iter=0; iter<num_regs; iter++) {
			prom_phys_avail[iter].start_adr =
				(char *) prom_reg_memlist[iter].phys_addr;
			prom_phys_avail[iter].num_bytes =
				(unsigned long) prom_reg_memlist[iter].reg_size;
			prom_phys_avail[iter].theres_more =
				&prom_phys_avail[iter+1];
		}
		prom_phys_avail[iter-1].theres_more = 0x0;

		/* "reg" under /memory -> prom_phys_total. */
		num_regs = prom_getproperty(node, "reg",
					    (char *) prom_reg_memlist,
					    sizeof(prom_reg_memlist));
		num_regs = (num_regs/sizeof(struct linux_prom_registers));
		for(iter=0; iter<num_regs; iter++) {
			prom_phys_total[iter].start_adr =
				(char *) prom_reg_memlist[iter].phys_addr;
			prom_phys_total[iter].num_bytes =
				(unsigned long) prom_reg_memlist[iter].reg_size;
			prom_phys_total[iter].theres_more =
				&prom_phys_total[iter+1];
		}
		prom_phys_total[iter-1].theres_more = 0x0;

		/* "available" under /virtual-memory describes free virtual
		 * space; the gaps between sorted free areas are what the
		 * PROM itself occupies. */
		node = prom_getchild(prom_root_node);
		node = prom_searchsiblings(node, "virtual-memory");
		num_regs = prom_getproperty(node, "available",
					    (char *) prom_reg_memlist,
					    sizeof(prom_reg_memlist));
		num_regs = (num_regs/sizeof(struct linux_prom_registers));

		/* Convert available virtual areas to taken virtual
		 * areas. First sort, then convert.
		 */
		for(iter=0; iter<num_regs; iter++) {
			prom_prom_taken[iter].start_adr =
				(char *) prom_reg_memlist[iter].phys_addr;
			prom_prom_taken[iter].num_bytes =
				(unsigned long) prom_reg_memlist[iter].reg_size;
			prom_prom_taken[iter].theres_more =
				&prom_prom_taken[iter+1];
		}
		prom_prom_taken[iter-1].theres_more = 0x0;

		prom_sortmemlist(prom_prom_taken);

		/* Finally, convert. */
		/* Each taken area becomes [end of free area i, start of
		 * free area i+1); the last one extends to 4GB. */
		for(iter=0; iter<num_regs; iter++) {
			prom_prom_taken[iter].start_adr =
				prom_prom_taken[iter].start_adr +
					prom_prom_taken[iter].num_bytes;
			prom_prom_taken[iter].num_bytes =
				prom_prom_taken[iter+1].start_adr -
					prom_prom_taken[iter].start_adr;
		}
		prom_prom_taken[iter-1].num_bytes =
			0xffffffff - (unsigned long) prom_prom_taken[iter-1].start_adr;

		/* Sort the other two lists. */
		prom_sortmemlist(prom_phys_total);
		prom_sortmemlist(prom_phys_avail);
		break;

	case PROM_SUN4:
#ifdef CONFIG_SUN4
		/* how simple :) */
		/* One flat bank; sizes come straight from the sun4 romvec. */
		prom_phys_total[0].start_adr = 0x0;
		prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize);
		prom_phys_total[0].theres_more = 0x0;
		prom_prom_taken[0].start_adr = 0x0;
		prom_prom_taken[0].num_bytes = 0x0;
		prom_prom_taken[0].theres_more = 0x0;
		prom_phys_avail[0].start_adr = 0x0;
		prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail);
		prom_phys_avail[0].theres_more = 0x0;
#endif
		break;

	default:
		break;
	};

	/* Link all the lists into the top-level descriptor. */
	prom_memlist.v0_totphys=&prom_ptot_ptr;
	prom_memlist.v0_prommap=&prom_ptak_ptr;
	prom_memlist.v0_available=&prom_pavl_ptr;

	return;
}
208
/* This returns a pointer to our libraries internal v0 format
 * memory descriptor.
 *
 * The descriptor is filled in by prom_meminit(); callers receive a
 * pointer to the single file-scope instance, not a copy.
 */
struct linux_mem_v0 *
prom_meminfo(void)
{
	return &prom_memlist;
}
diff --git a/arch/sparc/prom/misc.c b/arch/sparc/prom/misc.c
new file mode 100644
index 000000000000..c840c2062342
--- /dev/null
+++ b/arch/sparc/prom/misc.c
@@ -0,0 +1,139 @@
1/* $Id: misc.c,v 1.18 2000/08/26 02:38:03 anton Exp $
2 * misc.c: Miscellaneous prom functions that don't belong
3 * anywhere else.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/config.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14#include <asm/auxio.h>
15#include <asm/system.h>
16
17extern void restore_current(void);
18
19DEFINE_SPINLOCK(prom_lock);
20
21/* Reset and reboot the machine with the command 'bcommand'. */
22void
23prom_reboot(char *bcommand)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&prom_lock, flags);
27 (*(romvec->pv_reboot))(bcommand);
28 /* Never get here. */
29 restore_current();
30 spin_unlock_irqrestore(&prom_lock, flags);
31}
32
33/* Forth evaluate the expression contained in 'fstring'. */
34void
35prom_feval(char *fstring)
36{
37 unsigned long flags;
38 if(!fstring || fstring[0] == 0)
39 return;
40 spin_lock_irqsave(&prom_lock, flags);
41 if(prom_vers == PROM_V0)
42 (*(romvec->pv_fortheval.v0_eval))(strlen(fstring), fstring);
43 else
44 (*(romvec->pv_fortheval.v2_eval))(fstring);
45 restore_current();
46 spin_unlock_irqrestore(&prom_lock, flags);
47}
48
49/* We want to do this more nicely some day. */
50extern void (*prom_palette)(int);
51
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 *
 * Hands the console/palette and the timer tick over to the PROM
 * for the duration, then restores them when the user resumes.
 */
void
prom_cmdline(void)
{
	extern void install_obp_ticker(void);
	extern void install_linux_ticker(void);
	unsigned long flags;

	/* Let the PROM have its palette back on framebuffer consoles. */
	if(!serial_console && prom_palette)
		prom_palette (1);
	spin_lock_irqsave(&prom_lock, flags);
	/* Swap in the OBP tick handler while the PROM owns the machine
	 * (presumably so it can service its own timer — TODO confirm). */
	install_obp_ticker();
	(*(romvec->pv_abort))();	/* returns when the user types 'go' */
	restore_current();
	install_linux_ticker();
	spin_unlock_irqrestore(&prom_lock, flags);
#ifdef CONFIG_SUN_AUXIO
	set_auxio(AUXIO_LED, 0);
#endif
	if(!serial_console && prom_palette)
		prom_palette (0);
}
76
77/* Drop into the prom, but completely terminate the program.
78 * No chance of continuing.
79 */
80void
81prom_halt(void)
82{
83 unsigned long flags;
84again:
85 spin_lock_irqsave(&prom_lock, flags);
86 (*(romvec->pv_halt))();
87 /* Never get here. */
88 restore_current();
89 spin_unlock_irqrestore(&prom_lock, flags);
90 goto again; /* PROM is out to get me -DaveM */
91}
92
93typedef void (*sfunc_t)(void);
94
95/* Set prom sync handler to call function 'funcp'. */
96void
97prom_setsync(sfunc_t funcp)
98{
99 if(!funcp) return;
100 *romvec->pv_synchook = funcp;
101}
102
/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
 * format type. 'num_bytes' is the number of bytes that your idbuf
 * has space for. Returns 0xff on error.
 */
unsigned char
prom_get_idprom(char *idbuf, int num_bytes)
{
	int len;

	len = prom_getproplen(prom_root_node, "idprom");
	/* Property missing or larger than the caller's buffer. */
	if((len>num_bytes) || (len==-1)) return 0xff;
	/* NOTE(review): prom_getproperty() is treated as "0 == success"
	 * here, yet its use in prom_meminit() suggests it returns the
	 * property length on success — in which case a successful read
	 * falls through to the 0xff return and callers must rely on the
	 * filled idbuf alone. Verify before relying on the return value. */
	if(!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
		return idbuf[0];

	return 0xff;
}
119
/* Get the major prom version number, straight from the romvec
 * (0, 2, 3, or 40 for sun4 — see prom_init()). */
int
prom_version(void)
{
	return romvec->pv_romvers;
}
126
/* Get the prom plugin-revision, cached from pv_plugin_revision
 * by prom_init(). */
int
prom_getrev(void)
{
	return prom_rev;
}
133
/* Get the prom firmware print revision, cached from pv_printrev
 * by prom_init(). */
int
prom_getprev(void)
{
	return prom_prev;
}
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
new file mode 100644
index 000000000000..92fe3739fdb8
--- /dev/null
+++ b/arch/sparc/prom/mp.c
@@ -0,0 +1,121 @@
1/* $Id: mp.c,v 1.12 2000/08/26 02:38:03 anton Exp $
2 * mp.c: OpenBoot Prom Multiprocessor support routines. Don't call
3 * these on a UP or else you will halt and catch fire. ;)
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14
15extern void restore_current(void);
16
17/* Start cpu with prom-tree node 'cpunode' using context described
18 * by 'ctable_reg' in context 'ctx' at program counter 'pc'.
19 *
20 * XXX Have to look into what the return values mean. XXX
21 */
22int
23prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, char *pc)
24{
25 int ret;
26 unsigned long flags;
27
28 spin_lock_irqsave(&prom_lock, flags);
29 switch(prom_vers) {
30 case PROM_V0:
31 case PROM_V2:
32 default:
33 ret = -1;
34 break;
35 case PROM_V3:
36 ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
37 break;
38 };
39 restore_current();
40 spin_unlock_irqrestore(&prom_lock, flags);
41
42 return ret;
43}
44
45/* Stop CPU with device prom-tree node 'cpunode'.
46 * XXX Again, what does the return value really mean? XXX
47 */
48int
49prom_stopcpu(int cpunode)
50{
51 int ret;
52 unsigned long flags;
53
54 spin_lock_irqsave(&prom_lock, flags);
55 switch(prom_vers) {
56 case PROM_V0:
57 case PROM_V2:
58 default:
59 ret = -1;
60 break;
61 case PROM_V3:
62 ret = (*(romvec->v3_cpustop))(cpunode);
63 break;
64 };
65 restore_current();
66 spin_unlock_irqrestore(&prom_lock, flags);
67
68 return ret;
69}
70
71/* Make CPU with device prom-tree node 'cpunode' idle.
72 * XXX Return value, anyone? XXX
73 */
74int
75prom_idlecpu(int cpunode)
76{
77 int ret;
78 unsigned long flags;
79
80 spin_lock_irqsave(&prom_lock, flags);
81 switch(prom_vers) {
82 case PROM_V0:
83 case PROM_V2:
84 default:
85 ret = -1;
86 break;
87 case PROM_V3:
88 ret = (*(romvec->v3_cpuidle))(cpunode);
89 break;
90 };
91 restore_current();
92 spin_unlock_irqrestore(&prom_lock, flags);
93
94 return ret;
95}
96
97/* Resume the execution of CPU with nodeid 'cpunode'.
98 * XXX Come on, somebody has to know... XXX
99 */
100int
101prom_restartcpu(int cpunode)
102{
103 int ret;
104 unsigned long flags;
105
106 spin_lock_irqsave(&prom_lock, flags);
107 switch(prom_vers) {
108 case PROM_V0:
109 case PROM_V2:
110 default:
111 ret = -1;
112 break;
113 case PROM_V3:
114 ret = (*(romvec->v3_cpuresume))(cpunode);
115 break;
116 };
117 restore_current();
118 spin_unlock_irqrestore(&prom_lock, flags);
119
120 return ret;
121}
diff --git a/arch/sparc/prom/palloc.c b/arch/sparc/prom/palloc.c
new file mode 100644
index 000000000000..84ce8bc54473
--- /dev/null
+++ b/arch/sparc/prom/palloc.c
@@ -0,0 +1,44 @@
1/* $Id: palloc.c,v 1.4 1996/04/25 06:09:48 davem Exp $
2 * palloc.c: Memory allocation from the Sun PROM.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/openprom.h>
8#include <asm/oplib.h>
9
10/* You should not call these routines after memory management
11 * has been initialized in the kernel, if fact you should not
12 * use these if at all possible in the kernel. They are mainly
13 * to be used for a bootloader for temporary allocations which
14 * it will free before jumping into the kernel it has loaded.
15 *
16 * Also, these routines don't work on V0 proms, only V2 and later.
17 */
18
19/* Allocate a chunk of memory of size 'num_bytes' giving a suggestion
20 * of virtual_hint as the preferred virtual base address of this chunk.
21 * There are no guarantees that you will get the allocation, or that
22 * the prom will abide by your "hint". So check your return value.
23 */
24char *
25prom_alloc(char *virtual_hint, unsigned int num_bytes)
26{
27 if(prom_vers == PROM_V0) return (char *) 0x0;
28 if(num_bytes == 0x0) return (char *) 0x0;
29 return (*(romvec->pv_v2devops.v2_dumb_mem_alloc))(virtual_hint, num_bytes);
30}
31
32/* Free a previously allocated chunk back to the prom at virtual address
33 * 'vaddr' of size 'num_bytes'. NOTE: This vaddr is not the hint you
34 * used for the allocation, but the virtual address the prom actually
35 * returned to you. They may be have been the same, they may have not,
36 * doesn't matter.
37 */
38void
39prom_free(char *vaddr, unsigned int num_bytes)
40{
41 if((prom_vers == PROM_V0) || (num_bytes == 0x0)) return;
42 (*(romvec->pv_v2devops.v2_dumb_mem_free))(vaddr, num_bytes);
43 return;
44}
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
new file mode 100644
index 000000000000..dc8b598bedbb
--- /dev/null
+++ b/arch/sparc/prom/printf.c
@@ -0,0 +1,46 @@
1/*
2 * printf.c: Internal prom library printf facility.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
6 *
7 * We used to warn all over the code: DO NOT USE prom_printf(),
8 * and yet people do. Anton's banking code was outputing banks
9 * with prom_printf for most of the 2.4 lifetime. Since an effective
10 * stick is not available, we deployed a carrot: an early printk
11 * through PROM by means of -p boot option. This ought to fix it.
12 * USE printk; if you need, deploy -p.
13 */
14
15#include <linux/kernel.h>
16
17#include <asm/openprom.h>
18#include <asm/oplib.h>
19
20static char ppbuf[1024];
21
/* Write 'n' bytes of 'buf' to the PROM console, expanding each
 * '\n' to "\r\n" on the way out.
 */
void
prom_write(const char *buf, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		char c = buf[i];

		if (c == '\n')
			prom_putchar('\r');
		prom_putchar(c);
	}
}
34
/* printf-style output through the PROM console via prom_write().
 * Formats into the file-scope ppbuf[] (1024 bytes, truncating) —
 * NOT reentrant: concurrent callers share that one buffer.
 * See the header comment above: prefer printk; use -p if needed.
 */
void
prom_printf(char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args);
	va_end(args);

	prom_write(ppbuf, i);
}
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
new file mode 100644
index 000000000000..a2920323c900
--- /dev/null
+++ b/arch/sparc/prom/ranges.c
@@ -0,0 +1,118 @@
1/* $Id: ranges.c,v 1.15 2001/12/19 00:29:51 davem Exp $
2 * ranges.c: Handle ranges in newer proms for obio/sbus.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/init.h>
9#include <asm/openprom.h>
10#include <asm/oplib.h>
11#include <asm/types.h>
12#include <asm/sbus.h>
13#include <asm/system.h>
14
15struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
16int num_obio_ranges;
17
18/* Adjust register values based upon the ranges parameters. */
19static void
20prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
21 struct linux_prom_ranges *rangep, int nranges)
22{
23 int regc, rngc;
24
25 for (regc = 0; regc < nregs; regc++) {
26 for (rngc = 0; rngc < nranges; rngc++)
27 if (regp[regc].which_io == rangep[rngc].ot_child_space)
28 break; /* Fount it */
29 if (rngc == nranges) /* oops */
30 prom_printf("adjust_regs: Could not find range with matching bus type...\n");
31 regp[regc].which_io = rangep[rngc].ot_parent_space;
32 regp[regc].phys_addr -= rangep[rngc].ot_child_base;
33 regp[regc].phys_addr += rangep[rngc].ot_parent_base;
34 }
35}
36
37void
38prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
39 struct linux_prom_ranges *ranges2, int nranges2)
40{
41 int rng1c, rng2c;
42
43 for(rng1c=0; rng1c < nranges1; rng1c++) {
44 for(rng2c=0; rng2c < nranges2; rng2c++)
45 if(ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space &&
46 ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base &&
47 ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U)
48 break;
49 if(rng2c == nranges2) /* oops */
50 prom_printf("adjust_ranges: Could not find matching bus type...\n");
51 else if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size)
52 ranges1[rng1c].or_size =
53 ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base;
54 ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
55 ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
56 }
57}
58
59/* Apply probed obio ranges to registers passed, if no ranges return. */
60void
61prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
62{
63 if(num_obio_ranges)
64 prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
65}
66
67void __init prom_ranges_init(void)
68{
69 int node, obio_node;
70 int success;
71
72 num_obio_ranges = 0;
73
74 /* Check for obio and sbus ranges. */
75 node = prom_getchild(prom_root_node);
76 obio_node = prom_searchsiblings(node, "obio");
77
78 if(obio_node) {
79 success = prom_getproperty(obio_node, "ranges",
80 (char *) promlib_obio_ranges,
81 sizeof(promlib_obio_ranges));
82 if(success != -1)
83 num_obio_ranges = (success/sizeof(struct linux_prom_ranges));
84 }
85
86 if(num_obio_ranges)
87 prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
88
89 return;
90}
91
/* Translate 'regs' of node 'node' through the node's "ranges"
 * property; if 'parent' is given, first fold the parent's own
 * "ranges" into the node's ranges so the translation reaches the
 * grandparent's address space. Nodes without a "ranges" property
 * are left untouched.
 */
void
prom_apply_generic_ranges (int node, int parent, struct linux_prom_registers *regs, int nregs)
{
	int success;
	int num_ranges;
	struct linux_prom_ranges ranges[PROMREG_MAX];

	success = prom_getproperty(node, "ranges",
				   (char *) ranges,
				   sizeof (ranges));
	if (success != -1) {
		num_ranges = (success/sizeof(struct linux_prom_ranges));
		if (parent) {
			struct linux_prom_ranges parent_ranges[PROMREG_MAX];
			int num_parent_ranges;

			/* Compose this node's ranges with the parent's. */
			success = prom_getproperty(parent, "ranges",
						   (char *) parent_ranges,
						   sizeof (parent_ranges));
			if (success != -1) {
				num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
				prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
			}
		}
		prom_adjust_regs(regs, nregs, ranges, num_ranges);
	}
}
diff --git a/arch/sparc/prom/segment.c b/arch/sparc/prom/segment.c
new file mode 100644
index 000000000000..09d6460165ab
--- /dev/null
+++ b/arch/sparc/prom/segment.c
@@ -0,0 +1,29 @@
1/* $Id: segment.c,v 1.7 2000/08/26 02:38:03 anton Exp $
2 * segment.c: Prom routine to map segments in other contexts before
3 * a standalone is completely mapped. This is for sun4 and
4 * sun4c architectures only.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 */
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14
15extern void restore_current(void);
16
17/* Set physical segment 'segment' at virtual address 'vaddr' in
18 * context 'ctx'.
19 */
20void
21prom_putsegment(int ctx, unsigned long vaddr, int segment)
22{
23 unsigned long flags;
24 spin_lock_irqsave(&prom_lock, flags);
25 (*(romvec->pv_setctxt))(ctx, (char *) vaddr, segment);
26 restore_current();
27 spin_unlock_irqrestore(&prom_lock, flags);
28 return;
29}
diff --git a/arch/sparc/prom/sun4prom.c b/arch/sparc/prom/sun4prom.c
new file mode 100644
index 000000000000..69ca735f0d4e
--- /dev/null
+++ b/arch/sparc/prom/sun4prom.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright (C) 1996 The Australian National University.
3 * Copyright (C) 1996 Fujitsu Laboratories Limited
4 * Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
5 * Copyright (C) 1997 Sun Weenie (ko@ko.reno.nv.us)
6 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 *
8 * This software may be distributed under the terms of the Gnu
9 * Public License version 2 or later
10 *
11 * fake a really simple Sun prom for the SUN4
12 */
13
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <asm/oplib.h>
17#include <asm/idprom.h>
18#include <asm/machines.h>
19#include <asm/sun4prom.h>
20#include <asm/asi.h>
21#include <asm/contregs.h>
22#include <linux/init.h>
23
24static struct linux_romvec sun4romvec;
25static struct idprom sun4_idprom;
26
27struct property {
28 char *name;
29 char *value;
30 int length;
31};
32
33struct node {
34 int level;
35 struct property *properties;
36};
37
38struct property null_properties = { NULL, NULL, -1 };
39
40struct property root_properties[] = {
41 {"device_type", "cpu", 4},
42 {"idprom", (char *)&sun4_idprom, sizeof(struct idprom)},
43 {NULL, NULL, -1}
44};
45
46struct node nodes[] = {
47 { 0, &null_properties },
48 { 0, root_properties },
49 { -1,&null_properties }
50};
51
52
53static int no_nextnode(int node)
54{
55 if (nodes[node].level == nodes[node+1].level)
56 return node+1;
57 return -1;
58}
59
60static int no_child(int node)
61{
62 if (nodes[node].level == nodes[node+1].level-1)
63 return node+1;
64 return -1;
65}
66
67static struct property *find_property(int node,char *name)
68{
69 struct property *prop = &nodes[node].properties[0];
70 while (prop && prop->name) {
71 if (strcmp(prop->name,name) == 0) return prop;
72 prop++;
73 }
74 return NULL;
75}
76
77static int no_proplen(int node,char *name)
78{
79 struct property *prop = find_property(node,name);
80 if (prop) return prop->length;
81 return -1;
82}
83
84static int no_getprop(int node,char *name,char *value)
85{
86 struct property *prop = find_property(node,name);
87 if (prop) {
88 memcpy(value,prop->value,prop->length);
89 return 1;
90 }
91 return -1;
92}
93
/* Property writes are not supported on the fake sun4 tree;
 * always fails with -1. */
static int no_setprop(int node,char *name,char *value,int len)
{
	return -1;
}
98
99static char *no_nextprop(int node,char *name)
100{
101 struct property *prop = find_property(node,name);
102 if (prop) return prop[1].name;
103 return NULL;
104}
105
106static struct linux_nodeops sun4_nodeops = {
107 no_nextnode,
108 no_child,
109 no_proplen,
110 no_getprop,
111 no_setprop,
112 no_nextprop
113};
114
115static int synch_hook;
116
/* Build a minimal fake OBP romvec (version 40) on top of the old
 * sun4 PROM vector so the rest of the promlib can run unchanged.
 * Returns a pointer to the static sun4romvec.
 */
struct linux_romvec * __init sun4_prom_init(void)
{
	int i;
	unsigned char x;
	char *p;

	/* Copy the IDPROM out of control space (ASI_CONTROL at
	 * AC_IDPROM), one byte per lduba. */
	p = (char *)&sun4_idprom;
	for (i = 0; i < sizeof(sun4_idprom); i++) {
		__asm__ __volatile__ ("lduba [%1] %2, %0" : "=r" (x) :
				      "r" (AC_IDPROM + i), "i" (ASI_CONTROL));
		*p++ = x;
	}

	memset(&sun4romvec,0,sizeof(sun4romvec));

	sun4_romvec = (linux_sun4_romvec *) SUN4_PROM_VECTOR;

	/* Bridge the sun4 monitor entry points into OBP romvec slots;
	 * anything not set here stays zero from the memset above. */
	sun4romvec.pv_romvers = 40;
	sun4romvec.pv_nodeops = &sun4_nodeops;
	sun4romvec.pv_reboot = sun4_romvec->reboot;
	sun4romvec.pv_abort = sun4_romvec->abortentry;
	sun4romvec.pv_halt = sun4_romvec->exittomon;
	sun4romvec.pv_synchook = (void (**)(void))&synch_hook;
	sun4romvec.pv_setctxt = sun4_romvec->setcxsegmap;
	sun4romvec.pv_v0bootargs = sun4_romvec->bootParam;
	sun4romvec.pv_nbgetchar = sun4_romvec->mayget;
	sun4romvec.pv_nbputchar = sun4_romvec->mayput;
	sun4romvec.pv_stdin = sun4_romvec->insource;
	sun4romvec.pv_stdout = sun4_romvec->outsink;

	/*
	 * We turn on the LEDs to let folks without monitors or
	 * terminals know we booted.  Nothing too fancy now.  They
	 * are all on, except for LED 5, which blinks.  When we
	 * have more time, we can teach the penguin to say "By your
	 * command" or "Activating turbo boost, Michael". :-)
	 */
	sun4_romvec->setLEDs(0x0);

	printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n",
	       sun4_romvec->monid,
	       sun4_romvec->romvecversion);

	return &sun4romvec;
}
diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c
new file mode 100644
index 000000000000..2bf03ee8cde5
--- /dev/null
+++ b/arch/sparc/prom/tree.c
@@ -0,0 +1,364 @@
1/* $Id: tree.c,v 1.26 2000/08/26 02:38:03 anton Exp $
2 * tree.c: Basic device tree traversal/scanning for the Linux
3 * prom library.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#define PROMLIB_INTERNAL
9
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/ctype.h>
15
16#include <asm/openprom.h>
17#include <asm/oplib.h>
18
/* NOTE(review): defined elsewhere in the prom library; presumably
 * re-enters the PROM's notion of the current context after a service
 * call, while prom_lock is still held -- confirm at the definition.
 */
extern void restore_current(void);

/* Scratch buffer for sibling-name lookups in prom_searchsiblings();
 * shared file-scope state with no lock of its own.
 */
static char promlib_buf[128];
22
23/* Internal version of prom_getchild that does not alter return values. */
24int __prom_getchild(int node)
25{
26 unsigned long flags;
27 int cnode;
28
29 spin_lock_irqsave(&prom_lock, flags);
30 cnode = prom_nodeops->no_child(node);
31 restore_current();
32 spin_unlock_irqrestore(&prom_lock, flags);
33
34 return cnode;
35}
36
/* Return the first child of 'node', or zero when the node is invalid
 * (-1) or has no direct descendants.
 */
int prom_getchild(int node)
{
	int child;

	if (node == -1)
		return 0;

	child = __prom_getchild(node);
	return (child == 0 || child == -1) ? 0 : child;
}
53
54/* Internal version of prom_getsibling that does not alter return values. */
55int __prom_getsibling(int node)
56{
57 unsigned long flags;
58 int cnode;
59
60 spin_lock_irqsave(&prom_lock, flags);
61 cnode = prom_nodeops->no_nextnode(node);
62 restore_current();
63 spin_unlock_irqrestore(&prom_lock, flags);
64
65 return cnode;
66}
67
/* Return the next sibling of 'node' at the same depth in the tree, or
 * zero when there are no more siblings (or 'node' is invalid).
 */
int prom_getsibling(int node)
{
	int sib;

	if (node == -1)
		return 0;

	sib = __prom_getsibling(node);
	return (sib == 0 || sib == -1) ? 0 : sib;
}
84
85/* Return the length in bytes of property 'prop' at node 'node'.
86 * Return -1 on error.
87 */
88int prom_getproplen(int node, char *prop)
89{
90 int ret;
91 unsigned long flags;
92
93 if((!node) || (!prop))
94 return -1;
95
96 spin_lock_irqsave(&prom_lock, flags);
97 ret = prom_nodeops->no_proplen(node, prop);
98 restore_current();
99 spin_unlock_irqrestore(&prom_lock, flags);
100 return ret;
101}
102
103/* Acquire a property 'prop' at node 'node' and place it in
104 * 'buffer' which has a size of 'bufsize'. If the acquisition
105 * was successful the length will be returned, else -1 is returned.
106 */
107int prom_getproperty(int node, char *prop, char *buffer, int bufsize)
108{
109 int plen, ret;
110 unsigned long flags;
111
112 plen = prom_getproplen(node, prop);
113 if((plen > bufsize) || (plen == 0) || (plen == -1))
114 return -1;
115 /* Ok, things seem all right. */
116 spin_lock_irqsave(&prom_lock, flags);
117 ret = prom_nodeops->no_getprop(node, prop, buffer);
118 restore_current();
119 spin_unlock_irqrestore(&prom_lock, flags);
120 return ret;
121}
122
/* Acquire an integer property and return its value.  Returns -1 on
 * failure (a property whose genuine value is -1 is therefore
 * indistinguishable from an error -- callers wanting that must use
 * prom_getproperty() directly).
 */
int prom_getint(int node, char *prop)
{
	/* Automatic storage: the old 'static int intprop' was shared
	 * mutable state, so two concurrent callers could race between
	 * the property fetch and the return.
	 */
	int intprop;

	if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
		return intprop;

	return -1;
}
135
/* Acquire an integer property; on failure fall back to the caller's
 * default value 'deflt'.
 */
int prom_getintdefault(int node, char *property, int deflt)
{
	int val = prom_getint(node, property);

	return (val == -1) ? deflt : val;
}
148
/* Boolean property test: 1 (TRUE) when the property exists -- i.e. its
 * length can be queried -- 0 (FALSE) otherwise.  The property's value
 * is never examined.
 */
int prom_getbool(int node, char *prop)
{
	return (prom_getproplen(node, prop) == -1) ? 0 : 1;
}
158
/* Fetch a string-valued property into the caller-supplied buffer
 * 'user_buf' (capacity 'ubuf_size').  On any failure the buffer is set
 * to the empty string instead.
 */
void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
{
	if (prom_getproperty(node, prop, user_buf, ubuf_size) == -1)
		user_buf[0] = 0;
}
172
173
/* Does the device at node 'node' have name 'name'?  YES = 1, NO = 0. */
int prom_nodematch(int node, char *name)
{
	/* File-lifetime scratch buffer, as in the rest of this file;
	 * not protected by any lock of its own.
	 */
	static char namebuf[128];

	if (prom_getproperty(node, "name", namebuf, sizeof(namebuf)) == -1)
		return 0;

	return strcmp(namebuf, name) == 0;
}
187
188/* Search siblings at 'node_start' for a node with name
189 * 'nodename'. Return node if successful, zero if not.
190 */
191int prom_searchsiblings(int node_start, char *nodename)
192{
193
194 int thisnode, error;
195
196 for(thisnode = node_start; thisnode;
197 thisnode=prom_getsibling(thisnode)) {
198 error = prom_getproperty(thisnode, "name", promlib_buf,
199 sizeof(promlib_buf));
200 /* Should this ever happen? */
201 if(error == -1) continue;
202 if(strcmp(nodename, promlib_buf)==0) return thisnode;
203 }
204
205 return 0;
206}
207
208/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */
209int prom_getname (int node, char *buffer, int len)
210{
211 int i;
212 struct linux_prom_registers reg[PROMREG_MAX];
213
214 i = prom_getproperty (node, "name", buffer, len);
215 if (i <= 0) return -1;
216 buffer [i] = 0;
217 len -= i;
218 i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
219 if (i <= 0) return 0;
220 if (len < 11) return -1;
221 buffer = strchr (buffer, 0);
222 sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
223 return 0;
224}
225
226/* Interal version of nextprop that does not alter return values. */
227char * __prom_nextprop(int node, char * oprop)
228{
229 unsigned long flags;
230 char *prop;
231
232 spin_lock_irqsave(&prom_lock, flags);
233 prop = prom_nodeops->no_nextprop(node, oprop);
234 restore_current();
235 spin_unlock_irqrestore(&prom_lock, flags);
236
237 return prop;
238}
239
/* Return the name of the first property of 'node'.  The buffer argument
 * is unused on sparc32; it exists only so the signature matches the v9
 * (sparc64) interface.
 */
char * prom_firstprop(int node, char *bufer)
{
	return (node == 0 || node == -1) ? "" : __prom_nextprop(node, "");
}
249
/* Return the property name following 'oprop' at node 'node', or the
 * empty string when the node handle is invalid.  'buffer' is unused on
 * sparc32 (v9 interface compatibility).
 */
char * prom_nextprop(int node, char *oprop, char *buffer)
{
	return (node == 0 || node == -1) ? "" : __prom_nextprop(node, oprop);
}
261
/* Resolve a full PROM path (e.g. "/sbus/esp@0,800000/sd@3,0") to a node
 * handle, starting from prom_root_node.  Returns the deepest node
 * reached, or 0 when a component cannot be matched.
 */
int prom_finddevice(char *name)
{
	char nbuf[128];
	char *s = name, *d;
	int node = prom_root_node, node2;
	unsigned int which_io, phys_addr;
	struct linux_prom_registers reg[PROMREG_MAX];

	/* Each iteration consumes one '/'-separated path component. */
	while (*s++) {
		if (!*s) return node; /* path '.../' is legal */
		node = prom_getchild(node);

		/* Copy the component's bare name (up to '@', '/' or NUL)
		 * into nbuf.  NOTE(review): no bound check against the
		 * 128-byte nbuf -- assumes PROM path components are short.
		 */
		for (d = nbuf; *s != 0 && *s != '@' && *s != '/';)
			*d++ = *s++;
		*d = 0;

		node = prom_searchsiblings(node, nbuf);
		if (!node)
			return 0;

		if (*s == '@') {
			/* Unit address "@x,yyyy": one hex digit of which_io,
			 * then up to 8 hex digits of phys_addr, terminated by
			 * NUL or '/'.  Only honored when it parses cleanly.
			 */
			if (isxdigit(s[1]) && s[2] == ',') {
				which_io = simple_strtoul(s+1, NULL, 16);
				phys_addr = simple_strtoul(s+3, &d, 16);
				if (d != s + 3 && (!*d || *d == '/')
				    && d <= s + 3 + 8) {
					/* Walk same-named siblings until one
					 * whose first "reg" entry matches the
					 * unit address; keep the first match.
					 */
					node2 = node;
					while (node2 && node2 != -1) {
						if (prom_getproperty (node2, "reg", (char *)reg, sizeof (reg)) > 0) {
							if (which_io == reg[0].which_io && phys_addr == reg[0].phys_addr) {
								node = node2;
								break;
							}
						}
						node2 = prom_getsibling(node2);
						if (!node2 || node2 == -1)
							break;
						node2 = prom_searchsiblings(prom_getsibling(node2), nbuf);
					}
				}
			}
			/* Skip the remainder of the unit address. */
			while (*s != 0 && *s != '/') s++;
		}
	}
	return node;
}
308
309int prom_node_has_property(int node, char *prop)
310{
311 char *current_property = "";
312
313 do {
314 current_property = prom_nextprop(node, current_property, NULL);
315 if(!strcmp(current_property, prop))
316 return 1;
317 } while (*current_property);
318 return 0;
319}
320
321/* Set property 'pname' at node 'node' to value 'value' which has a length
322 * of 'size' bytes. Return the number of bytes the prom accepted.
323 */
324int prom_setprop(int node, char *pname, char *value, int size)
325{
326 unsigned long flags;
327 int ret;
328
329 if(size == 0) return 0;
330 if((pname == 0) || (value == 0)) return 0;
331 spin_lock_irqsave(&prom_lock, flags);
332 ret = prom_nodeops->no_setprop(node, pname, value, size);
333 restore_current();
334 spin_unlock_irqrestore(&prom_lock, flags);
335 return ret;
336}
337
338int prom_inst2pkg(int inst)
339{
340 int node;
341 unsigned long flags;
342
343 spin_lock_irqsave(&prom_lock, flags);
344 node = (*romvec->pv_v2devops.v2_inst2pkg)(inst);
345 restore_current();
346 spin_unlock_irqrestore(&prom_lock, flags);
347 if (node == -1) return 0;
348 return node;
349}
350
/* Map a PROM 'path' to its node by opening the device, translating the
 * instance to a package, and closing it again.  Returns 0 on failure.
 * FIXME: Should work for v0 as well.
 */
int prom_pathtoinode(char *path)
{
	int inst, node;

	inst = prom_devopen (path);
	if (inst == -1)
		return 0;

	node = prom_inst2pkg (inst);
	prom_devclose (inst);

	return (node == -1) ? 0 : node;
}