aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc64')
-rw-r--r--arch/sparc64/Kconfig630
-rw-r--r--arch/sparc64/Kconfig.debug54
-rw-r--r--arch/sparc64/Makefile83
-rw-r--r--arch/sparc64/boot/Makefile34
-rw-r--r--arch/sparc64/boot/piggyback.c109
-rw-r--r--arch/sparc64/defconfig1951
-rw-r--r--arch/sparc64/kernel/Makefile44
-rw-r--r--arch/sparc64/kernel/auxio.c152
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c424
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c159
-rw-r--r--arch/sparc64/kernel/central.c457
-rw-r--r--arch/sparc64/kernel/chmc.c458
-rw-r--r--arch/sparc64/kernel/cpu.c124
-rw-r--r--arch/sparc64/kernel/devices.c144
-rw-r--r--arch/sparc64/kernel/dtlb_backend.S181
-rw-r--r--arch/sparc64/kernel/dtlb_base.S113
-rw-r--r--arch/sparc64/kernel/dtlb_prot.S54
-rw-r--r--arch/sparc64/kernel/ebus.c644
-rw-r--r--arch/sparc64/kernel/entry.S1919
-rw-r--r--arch/sparc64/kernel/etrap.S301
-rw-r--r--arch/sparc64/kernel/head.S782
-rw-r--r--arch/sparc64/kernel/idprom.c49
-rw-r--r--arch/sparc64/kernel/init_task.c35
-rw-r--r--arch/sparc64/kernel/ioctl32.c597
-rw-r--r--arch/sparc64/kernel/iommu_common.c231
-rw-r--r--arch/sparc64/kernel/iommu_common.h48
-rw-r--r--arch/sparc64/kernel/irq.c1269
-rw-r--r--arch/sparc64/kernel/isa.c329
-rw-r--r--arch/sparc64/kernel/itlb_base.S83
-rw-r--r--arch/sparc64/kernel/kprobes.c394
-rw-r--r--arch/sparc64/kernel/module.c209
-rw-r--r--arch/sparc64/kernel/pci.c805
-rw-r--r--arch/sparc64/kernel/pci_common.c1040
-rw-r--r--arch/sparc64/kernel/pci_impl.h49
-rw-r--r--arch/sparc64/kernel/pci_iommu.c855
-rw-r--r--arch/sparc64/kernel/pci_psycho.c1560
-rw-r--r--arch/sparc64/kernel/pci_sabre.c1702
-rw-r--r--arch/sparc64/kernel/pci_schizo.c2187
-rw-r--r--arch/sparc64/kernel/power.c150
-rw-r--r--arch/sparc64/kernel/process.c869
-rw-r--r--arch/sparc64/kernel/ptrace.c646
-rw-r--r--arch/sparc64/kernel/rtrap.S362
-rw-r--r--arch/sparc64/kernel/sbus.c1243
-rw-r--r--arch/sparc64/kernel/semaphore.c251
-rw-r--r--arch/sparc64/kernel/setup.c731
-rw-r--r--arch/sparc64/kernel/signal.c688
-rw-r--r--arch/sparc64/kernel/signal32.c1469
-rw-r--r--arch/sparc64/kernel/smp.c1244
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c432
-rw-r--r--arch/sparc64/kernel/starfire.c123
-rw-r--r--arch/sparc64/kernel/sunos_ioctl32.c275
-rw-r--r--arch/sparc64/kernel/sys32.S327
-rw-r--r--arch/sparc64/kernel/sys_sparc.c723
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c1118
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c1343
-rw-r--r--arch/sparc64/kernel/systbls.S251
-rw-r--r--arch/sparc64/kernel/time.c1195
-rw-r--r--arch/sparc64/kernel/trampoline.S368
-rw-r--r--arch/sparc64/kernel/traps.c2118
-rw-r--r--arch/sparc64/kernel/ttable.S280
-rw-r--r--arch/sparc64/kernel/unaligned.c729
-rw-r--r--arch/sparc64/kernel/us2e_cpufreq.c400
-rw-r--r--arch/sparc64/kernel/us3_cpufreq.c255
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S106
-rw-r--r--arch/sparc64/kernel/winfixup.S417
-rw-r--r--arch/sparc64/lib/Makefile20
-rw-r--r--arch/sparc64/lib/PeeCeeI.c237
-rw-r--r--arch/sparc64/lib/U1copy_from_user.S33
-rw-r--r--arch/sparc64/lib/U1copy_to_user.S33
-rw-r--r--arch/sparc64/lib/U1memcpy.S560
-rw-r--r--arch/sparc64/lib/U3copy_from_user.S22
-rw-r--r--arch/sparc64/lib/U3copy_to_user.S33
-rw-r--r--arch/sparc64/lib/U3memcpy.S422
-rw-r--r--arch/sparc64/lib/U3patch.S32
-rw-r--r--arch/sparc64/lib/VISsave.S131
-rw-r--r--arch/sparc64/lib/atomic.S139
-rw-r--r--arch/sparc64/lib/bitops.S145
-rw-r--r--arch/sparc64/lib/bzero.S158
-rw-r--r--arch/sparc64/lib/checksum.S172
-rw-r--r--arch/sparc64/lib/clear_page.S105
-rw-r--r--arch/sparc64/lib/copy_in_user.S119
-rw-r--r--arch/sparc64/lib/copy_page.S242
-rw-r--r--arch/sparc64/lib/csum_copy.S308
-rw-r--r--arch/sparc64/lib/csum_copy_from_user.S21
-rw-r--r--arch/sparc64/lib/csum_copy_to_user.S21
-rw-r--r--arch/sparc64/lib/debuglocks.c376
-rw-r--r--arch/sparc64/lib/dec_and_lock.S78
-rw-r--r--arch/sparc64/lib/delay.c49
-rw-r--r--arch/sparc64/lib/find_bit.c127
-rw-r--r--arch/sparc64/lib/iomap.c48
-rw-r--r--arch/sparc64/lib/ipcsum.S34
-rw-r--r--arch/sparc64/lib/mcount.S61
-rw-r--r--arch/sparc64/lib/memcmp.S28
-rw-r--r--arch/sparc64/lib/memmove.S31
-rw-r--r--arch/sparc64/lib/memscan.S129
-rw-r--r--arch/sparc64/lib/rwsem.S165
-rw-r--r--arch/sparc64/lib/strlen.S80
-rw-r--r--arch/sparc64/lib/strlen_user.S95
-rw-r--r--arch/sparc64/lib/strncmp.S32
-rw-r--r--arch/sparc64/lib/strncpy_from_user.S139
-rw-r--r--arch/sparc64/lib/user_fixup.c71
-rw-r--r--arch/sparc64/lib/xor.S354
-rw-r--r--arch/sparc64/math-emu/Makefile7
-rw-r--r--arch/sparc64/math-emu/math.c493
-rw-r--r--arch/sparc64/math-emu/sfp-util.h120
-rw-r--r--arch/sparc64/mm/Makefile10
-rw-r--r--arch/sparc64/mm/extable.c80
-rw-r--r--arch/sparc64/mm/fault.c527
-rw-r--r--arch/sparc64/mm/generic.c182
-rw-r--r--arch/sparc64/mm/hugetlbpage.c310
-rw-r--r--arch/sparc64/mm/init.c1769
-rw-r--r--arch/sparc64/mm/tlb.c151
-rw-r--r--arch/sparc64/mm/ultra.S583
-rw-r--r--arch/sparc64/oprofile/Kconfig23
-rw-r--r--arch/sparc64/oprofile/Makefile9
-rw-r--r--arch/sparc64/oprofile/init.c23
-rw-r--r--arch/sparc64/prom/Makefile10
-rw-r--r--arch/sparc64/prom/bootstr.c40
-rw-r--r--arch/sparc64/prom/cif.S225
-rw-r--r--arch/sparc64/prom/console.c146
-rw-r--r--arch/sparc64/prom/devops.c41
-rw-r--r--arch/sparc64/prom/init.c101
-rw-r--r--arch/sparc64/prom/map.S72
-rw-r--r--arch/sparc64/prom/memory.c152
-rw-r--r--arch/sparc64/prom/misc.c339
-rw-r--r--arch/sparc64/prom/p1275.c161
-rw-r--r--arch/sparc64/prom/printf.c47
-rw-r--r--arch/sparc64/prom/tree.c377
-rw-r--r--arch/sparc64/solaris/Makefile10
-rw-r--r--arch/sparc64/solaris/conv.h38
-rw-r--r--arch/sparc64/solaris/entry64.S218
-rw-r--r--arch/sparc64/solaris/fs.c739
-rw-r--r--arch/sparc64/solaris/ioctl.c820
-rw-r--r--arch/sparc64/solaris/ipc.c127
-rw-r--r--arch/sparc64/solaris/misc.c784
-rw-r--r--arch/sparc64/solaris/signal.c430
-rw-r--r--arch/sparc64/solaris/signal.h108
-rw-r--r--arch/sparc64/solaris/socket.c415
-rw-r--r--arch/sparc64/solaris/socksys.c211
-rw-r--r--arch/sparc64/solaris/socksys.h208
-rw-r--r--arch/sparc64/solaris/systbl.S314
-rw-r--r--arch/sparc64/solaris/timod.c959
142 files changed, 54601 insertions, 0 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
new file mode 100644
index 000000000000..fb1189641c74
--- /dev/null
+++ b/arch/sparc64/Kconfig
@@ -0,0 +1,630 @@
1# $Id: config.in,v 1.158 2002/01/24 22:14:44 davem Exp $
2# For a description of the syntax of this configuration file,
3# see the Configure script.
4#
5
6mainmenu "Linux/UltraSPARC Kernel Configuration"
7
8config 64BIT
9 def_bool y
10
11config MMU
12 bool
13 default y
14
15config TIME_INTERPOLATION
16 bool
17 default y
18
19choice
20 prompt "Kernel page size"
21 default SPARC64_PAGE_SIZE_8KB
22
23config SPARC64_PAGE_SIZE_8KB
24 bool "8KB"
25 help
26 This lets you select the page size of the kernel.
27
28 8KB and 64KB work quite well, since Sparc ELF sections
29 provide for up to 64KB alignment.
30
31 Therefore, 512KB and 4MB are for expert hackers only.
32
33 If you don't know what to do, choose 8KB.
34
35config SPARC64_PAGE_SIZE_64KB
36 bool "64KB"
37
38config SPARC64_PAGE_SIZE_512KB
39 bool "512KB"
40
41config SPARC64_PAGE_SIZE_4MB
42 bool "4MB"
43
44endchoice
45
46source "init/Kconfig"
47
48config SYSVIPC_COMPAT
49 bool
50 depends on COMPAT && SYSVIPC
51 default y
52
53menu "General machine setup"
54
55config BBC_I2C
56 tristate "UltraSPARC-III bootbus i2c controller driver"
57 depends on PCI
58 help
59 The BBC devices on the UltraSPARC III have two I2C controllers. The
60 first I2C controller connects mainly to configuration PROMs (NVRAM,
61 CPU configuration, DIMM types, etc.). The second I2C controller
62 connects to environmental control devices such as fans and
63 temperature sensors. The second controller also connects to the
64 smartcard reader, if present. Say Y to enable support for these.
65
66config VT
67 bool "Virtual terminal" if EMBEDDED
68 select INPUT
69 default y
70 ---help---
71 If you say Y here, you will get support for terminal devices with
72 display and keyboard devices. These are called "virtual" because you
73 can run several virtual terminals (also called virtual consoles) on
74 one physical terminal. This is rather useful, for example one
75 virtual terminal can collect system messages and warnings, another
76 one can be used for a text-mode user session, and a third could run
77 an X session, all in parallel. Switching between virtual terminals
78 is done with certain key combinations, usually Alt-<function key>.
79
80 The setterm command ("man setterm") can be used to change the
81 properties (such as colors or beeping) of a virtual terminal. The
82 man page console_codes(4) ("man console_codes") contains the special
83 character sequences that can be used to change those properties
84 directly. The fonts used on virtual terminals can be changed with
85 the setfont ("man setfont") command and the key bindings are defined
86 with the loadkeys ("man loadkeys") command.
87
88 You need at least one virtual terminal device in order to make use
89 of your keyboard and monitor. Therefore, only people configuring an
90 embedded system would want to say N here in order to save some
91 memory; the only way to log into such a system is then via a serial
92 or network connection.
93
94 If unsure, say Y, or else you won't be able to do much with your new
95 shiny Linux system :-)
96
97config VT_CONSOLE
98 bool "Support for console on virtual terminal" if EMBEDDED
99 depends on VT
100 default y
101 ---help---
102 The system console is the device which receives all kernel messages
103 and warnings and which allows logins in single user mode. If you
104 answer Y here, a virtual terminal (the device used to interact with
105 a physical terminal) can be used as system console. This is the most
106 common mode of operations, so you should say Y here unless you want
107 the kernel messages be output only to a serial port (in which case
108 you should say Y to "Console on serial port", below).
109
110 If you do say Y here, by default the currently visible virtual
111 terminal (/dev/tty0) will be used as system console. You can change
112 that with a kernel command line option such as "console=tty3" which
113 would use the third virtual terminal as system console. (Try "man
114 bootparam" or see the documentation of your boot loader (lilo or
115 loadlin) about how to pass options to the kernel at boot time.)
116
117 If unsure, say Y.
118
119config HW_CONSOLE
120 bool
121 default y
122
123config SMP
124 bool "Symmetric multi-processing support"
125 ---help---
126 This enables support for systems with more than one CPU. If you have
127 a system with only one CPU, say N. If you have a system with more than
128 one CPU, say Y.
129
130 If you say N here, the kernel will run on single and multiprocessor
131 machines, but will use only one CPU of a multiprocessor machine. If
132 you say Y here, the kernel will run on many, but not all,
133 singleprocessor machines. On a singleprocessor machine, the kernel
134 will run faster if you say N here.
135
136 People using multiprocessor machines who say Y here should also say
137 Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
138 Management" code will be disabled if you say Y here.
139
140 See also the <file:Documentation/smp.txt>,
141 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
142 <http://www.tldp.org/docs.html#howto>.
143
144 If you don't know what to do here, say N.
145
146config PREEMPT
147 bool "Preemptible Kernel"
148 help
149 This option reduces the latency of the kernel when reacting to
150 real-time or interactive events by allowing a low priority process to
151 be preempted even if it is in kernel mode executing a system call.
152 This allows applications to run more reliably even when the system is
153 under load.
154
155 Say Y here if you are building a kernel for a desktop, embedded
156 or real-time system. Say N if you are unsure.
157
158config NR_CPUS
159 int "Maximum number of CPUs (2-64)"
160 range 2 64
161 depends on SMP
162 default "32"
163
164source "drivers/cpufreq/Kconfig"
165
166config US3_FREQ
167 tristate "UltraSPARC-III CPU Frequency driver"
168 depends on CPU_FREQ
169 select CPU_FREQ_TABLE
170 help
171 This adds the CPUFreq driver for UltraSPARC-III processors.
172
173 For details, take a look at <file:Documentation/cpu-freq>.
174
175 If in doubt, say N.
176
177config US2E_FREQ
178 tristate "UltraSPARC-IIe CPU Frequency driver"
179 depends on CPU_FREQ
180 select CPU_FREQ_TABLE
181 help
182 This adds the CPUFreq driver for UltraSPARC-IIe processors.
183
184 For details, take a look at <file:Documentation/cpu-freq>.
185
186 If in doubt, say N.
187
188# Identify this as a Sparc64 build
189config SPARC64
190 bool
191 default y
192 help
193 SPARC is a family of RISC microprocessors designed and marketed by
194 Sun Microsystems, incorporated. This port covers the newer 64-bit
195 UltraSPARC. The UltraLinux project maintains both the SPARC32 and
196 SPARC64 ports; its web page is available at
197 <http://www.ultralinux.org/>.
198
199# Global things across all Sun machines.
200config RWSEM_GENERIC_SPINLOCK
201 bool
202
203config RWSEM_XCHGADD_ALGORITHM
204 bool
205 default y
206
207config GENERIC_CALIBRATE_DELAY
208 bool
209 default y
210
211choice
212 prompt "SPARC64 Huge TLB Page Size"
213 depends on HUGETLB_PAGE
214 default HUGETLB_PAGE_SIZE_4MB
215
216config HUGETLB_PAGE_SIZE_4MB
217 bool "4MB"
218
219config HUGETLB_PAGE_SIZE_512K
220 depends on !SPARC64_PAGE_SIZE_4MB
221 bool "512K"
222
223config HUGETLB_PAGE_SIZE_64K
224 depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512K
225 bool "64K"
226
227endchoice
228
229config GENERIC_ISA_DMA
230 bool
231 default y
232
233config ISA
234 bool
235 help
236 Find out whether you have ISA slots on your motherboard. ISA is the
237 name of a bus system, i.e. the way the CPU talks to the other stuff
238 inside your box. Other bus systems are PCI, EISA, MicroChannel
239 (MCA) or VESA. ISA is an older system, now being displaced by PCI;
240 newer boards don't support it. If you have ISA, say Y, otherwise N.
241
242config ISAPNP
243 bool
244 help
245 Say Y here if you would like support for ISA Plug and Play devices.
246 Some information is in <file:Documentation/isapnp.txt>.
247
248 To compile this driver as a module, choose M here: the
249 module will be called isapnp.
250
251 If unsure, say Y.
252
253config EISA
254 bool
255 ---help---
256 The Extended Industry Standard Architecture (EISA) bus was
257 developed as an open alternative to the IBM MicroChannel bus.
258
259 The EISA bus provided some of the features of the IBM MicroChannel
260 bus while maintaining backward compatibility with cards made for
261 the older ISA bus. The EISA bus saw limited use between 1988 and
262 1995 when it was made obsolete by the PCI bus.
263
264 Say Y here if you are building a kernel for an EISA-based machine.
265
266 Otherwise, say N.
267
268config MCA
269 bool
270 help
271 MicroChannel Architecture is found in some IBM PS/2 machines and
272 laptops. It is a bus system similar to PCI or ISA. See
273 <file:Documentation/mca.txt> (and especially the web page given
274 there) before attempting to build an MCA bus kernel.
275
276config PCMCIA
277 tristate
278 ---help---
279 Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
280 computer. These are credit-card size devices such as network cards,
281 modems or hard drives often used with laptops computers. There are
282 actually two varieties of these cards: the older 16 bit PCMCIA cards
283 and the newer 32 bit CardBus cards. If you want to use CardBus
284 cards, you need to say Y here and also to "CardBus support" below.
285
286 To use your PC-cards, you will need supporting software from David
287 Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
288 for location). Please also read the PCMCIA-HOWTO, available from
289 <http://www.tldp.org/docs.html#howto>.
290
291 To compile this driver as modules, choose M here: the
292 modules will be called pcmcia_core and ds.
293
294config SBUS
295 bool
296 default y
297
298config SBUSCHAR
299 bool
300 default y
301
302config SUN_AUXIO
303 bool
304 default y
305
306config SUN_IO
307 bool
308 default y
309
310config PCI
311 bool "PCI support"
312 help
313 Find out whether you have a PCI motherboard. PCI is the name of a
314 bus system, i.e. the way the CPU talks to the other stuff inside
315 your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
316 VESA. If you have PCI, say Y, otherwise N.
317
318 The PCI-HOWTO, available from
319 <http://www.tldp.org/docs.html#howto>, contains valuable
320 information about which PCI hardware does work under Linux and which
321 doesn't.
322
323config PCI_DOMAINS
324 bool
325 default PCI
326
327config RTC
328 tristate
329 depends on PCI
330 default y
331 ---help---
332 If you say Y here and create a character special file /dev/rtc with
333 major number 10 and minor number 135 using mknod ("man mknod"), you
334 will get access to the real time clock (or hardware clock) built
335 into your computer.
336
337 Every PC has such a clock built in. It can be used to generate
338 signals from as low as 1Hz up to 8192Hz, and can also be used
339 as a 24 hour alarm. It reports status information via the file
340 /proc/driver/rtc and its behaviour is set by various ioctls on
341 /dev/rtc.
342
343 If you run Linux on a multiprocessor machine and said Y to
344 "Symmetric Multi Processing" above, you should say Y here to read
345 and set the RTC in an SMP compatible fashion.
346
347 If you think you have a use for such a device (such as periodic data
348 sampling), then say Y here, and read <file:Documentation/rtc.txt>
349 for details.
350
351 To compile this driver as a module, choose M here: the
352 module will be called rtc.
353
354source "drivers/pci/Kconfig"
355
356config SUN_OPENPROMFS
357 tristate "Openprom tree appears in /proc/openprom"
358 help
359 If you say Y, the OpenPROM device tree will be available as a
360 virtual file system, which you can mount to /proc/openprom by "mount
361 -t openpromfs none /proc/openprom".
362
363 To compile the /proc/openprom support as a module, choose M here: the
364 module will be called openpromfs. If unsure, choose M.
365
366config SPARC32_COMPAT
367 bool "Kernel support for Linux/Sparc 32bit binary compatibility"
368 help
369 This allows you to run 32-bit binaries on your Ultra.
370 Everybody wants this; say Y.
371
372config COMPAT
373 bool
374 depends on SPARC32_COMPAT
375 default y
376
377config UID16
378 bool
379 depends on SPARC32_COMPAT
380 default y
381
382config BINFMT_ELF32
383 tristate "Kernel support for 32-bit ELF binaries"
384 depends on SPARC32_COMPAT
385 help
386 This allows you to run 32-bit Linux/ELF binaries on your Ultra.
387 Everybody wants this; say Y.
388
389config BINFMT_AOUT32
390 bool "Kernel support for 32-bit (ie. SunOS) a.out binaries"
391 depends on SPARC32_COMPAT
392 help
393 This allows you to run 32-bit a.out format binaries on your Ultra.
394 If you want to run SunOS binaries (see SunOS binary emulation below)
395 or other a.out binaries, say Y. If unsure, say N.
396
397source "fs/Kconfig.binfmt"
398
399config SUNOS_EMUL
400 bool "SunOS binary emulation"
401 depends on BINFMT_AOUT32
402 help
403 This allows you to run most SunOS binaries. If you want to do this,
404 say Y here and place appropriate files in /usr/gnemul/sunos. See
405 <http://www.ultralinux.org/faq.html> for more information. If you
406 want to run SunOS binaries on an Ultra you must also say Y to
407 "Kernel support for 32-bit a.out binaries" above.
408
409config SOLARIS_EMUL
410 tristate "Solaris binary emulation (EXPERIMENTAL)"
411 depends on SPARC32_COMPAT && EXPERIMENTAL
412 help
413 This is experimental code which will enable you to run (many)
414 Solaris binaries on your SPARC Linux machine.
415
416 To compile this code as a module, choose M here: the
417 module will be called solaris.
418
419source "drivers/parport/Kconfig"
420
421config PRINTER
422 tristate "Parallel printer support"
423 depends on PARPORT
424 ---help---
425 If you intend to attach a printer to the parallel port of your Linux
426 box (as opposed to using a serial printer; if the connector at the
427 printer has 9 or 25 holes ["female"], then it's serial), say Y.
428 Also read the Printing-HOWTO, available from
429 <http://www.tldp.org/docs.html#howto>.
430
431 It is possible to share one parallel port among several devices
432 (e.g. printer and ZIP drive) and it is safe to compile the
433 corresponding drivers into the kernel.
434 To compile this driver as a module, choose M here and read
435 <file:Documentation/parport.txt>. The module will be called lp.
436
437 If you have several parallel ports, you can specify which ports to
438 use with the "lp" kernel command line option. (Try "man bootparam"
439 or see the documentation of your boot loader (lilo or loadlin) about
440 how to pass options to the kernel at boot time.) The syntax of the
441 "lp" command line option can be found in <file:drivers/char/lp.c>.
442
443 If you have more than 8 printers, you need to increase the LP_NO
444 macro in lp.c and the PARPORT_MAX macro in parport.h.
445
446config ENVCTRL
447 tristate "SUNW, envctrl support"
448 depends on PCI
449 help
450 Kernel support for temperature and fan monitoring on Sun SME
451 machines.
452
453 To compile this driver as a module, choose M here: the
454 module will be called envctrl.
455
456config DISPLAY7SEG
457 tristate "7-Segment Display support"
458 depends on PCI
459 ---help---
460 This is the driver for the 7-segment display and LED present on
461 Sun Microsystems CompactPCI models CP1400 and CP1500.
462
463 To compile this driver as a module, choose M here: the
464 module will be called display7seg.
465
466 If you do not have a CompactPCI model CP1400 or CP1500, or
467 another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
468 you should say N to this option.
469
470config CMDLINE_BOOL
471 bool "Default bootloader kernel arguments"
472
473config CMDLINE
474 string "Initial kernel command string"
475 depends on CMDLINE_BOOL
476 default "console=ttyS0,9600 root=/dev/sda1"
477 help
478 Say Y here if you want to be able to pass default arguments to
479 the kernel. This will be overridden by the bootloader, if you
480 use one (such as SILO). This is most useful if you want to boot
481 a kernel from TFTP, and want default options to be available
482 with having them passed on the command line.
483
484 NOTE: This option WILL override the PROM bootargs setting!
485
486endmenu
487
488source "drivers/base/Kconfig"
489
490source "drivers/video/Kconfig"
491
492source "drivers/serial/Kconfig"
493
494source "drivers/sbus/char/Kconfig"
495
496source "drivers/mtd/Kconfig"
497
498source "drivers/block/Kconfig"
499
500source "drivers/ide/Kconfig"
501
502source "drivers/scsi/Kconfig"
503
504source "drivers/fc4/Kconfig"
505
506source "drivers/md/Kconfig"
507
508if PCI
509source "drivers/message/fusion/Kconfig"
510endif
511
512source "drivers/ieee1394/Kconfig"
513
514source "net/Kconfig"
515
516source "drivers/isdn/Kconfig"
517
518source "drivers/telephony/Kconfig"
519
520# This one must be before the filesystem configs. -DaveM
521
522menu "Unix98 PTY support"
523
524config UNIX98_PTYS
525 bool "Unix98 PTY support"
526 ---help---
527 A pseudo terminal (PTY) is a software device consisting of two
528 halves: a master and a slave. The slave device behaves identical to
529 a physical terminal; the master device is used by a process to
530 read data from and write data to the slave, thereby emulating a
531 terminal. Typical programs for the master side are telnet servers
532 and xterms.
533
534 Linux has traditionally used the BSD-like names /dev/ptyxx for
535 masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
536 has a number of problems. The GNU C library glibc 2.1 and later,
537 however, supports the Unix98 naming standard: in order to acquire a
538 pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
539 terminal is then made available to the process and the pseudo
540 terminal slave can be accessed as /dev/pts/<number>. What was
541 traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
542
543 The entries in /dev/pts/ are created on the fly by a virtual
544 file system; therefore, if you say Y here you should say Y to
545 "/dev/pts file system for Unix98 PTYs" as well.
546
547 If you want to say Y here, you need to have the C library glibc 2.1
548 or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
549 Read the instructions in <file:Documentation/Changes> pertaining to
550 pseudo terminals. It's safe to say N.
551
552config UNIX98_PTY_COUNT
553 int "Maximum number of Unix98 PTYs in use (0-2048)"
554 depends on UNIX98_PTYS
555 default "256"
556 help
557 The maximum number of Unix98 PTYs that can be used at any one time.
558 The default is 256, and should be enough for desktop systems. Server
559 machines which support incoming telnet/rlogin/ssh connections and/or
560 serve several X terminals may want to increase this: every incoming
561 connection and every xterm uses up one PTY.
562
563 When not in use, each additional set of 256 PTYs occupy
564 approximately 8 KB of kernel memory on 32-bit architectures.
565
566endmenu
567
568menu "XFree86 DRI support"
569
570config DRM
571 bool "Direct Rendering Manager (XFree86 DRI support)"
572 help
573 Kernel-level support for the Direct Rendering Infrastructure (DRI)
574 introduced in XFree86 4.0. If you say Y here, you need to select
575 the module that's right for your graphics card from the list below.
576 These modules provide support for synchronization, security, and
577 DMA transfers. Please see <http://dri.sourceforge.net/> for more
578 details. You should also select and configure AGP
579 (/dev/agpgart) support.
580
581config DRM_FFB
582 tristate "Creator/Creator3D"
583 depends on DRM && BROKEN
584 help
585 Choose this option if you have one of Sun's Creator3D-based graphics
586 and frame buffer cards. Product page at
587 <http://www.sun.com/desktop/products/Graphics/creator3d.html>.
588
589config DRM_TDFX
590 tristate "3dfx Banshee/Voodoo3+"
591 depends on DRM
592 help
593 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
594 graphics card. If M is selected, the module will be called tdfx.
595
596config DRM_R128
597 tristate "ATI Rage 128"
598 depends on DRM
599 help
600 Choose this option if you have an ATI Rage 128 graphics card. If M
601 is selected, the module will be called r128. AGP support for
602 this card is strongly suggested (unless you have a PCI version).
603
604endmenu
605
606source "drivers/input/Kconfig"
607
608source "drivers/i2c/Kconfig"
609
610source "fs/Kconfig"
611
612source "drivers/media/Kconfig"
613
614source "sound/Kconfig"
615
616source "drivers/usb/Kconfig"
617
618source "drivers/infiniband/Kconfig"
619
620source "drivers/char/watchdog/Kconfig"
621
622source "arch/sparc64/oprofile/Kconfig"
623
624source "arch/sparc64/Kconfig.debug"
625
626source "security/Kconfig"
627
628source "crypto/Kconfig"
629
630source "lib/Kconfig"
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
new file mode 100644
index 000000000000..cd8d39fb954d
--- /dev/null
+++ b/arch/sparc64/Kconfig.debug
@@ -0,0 +1,54 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config DEBUG_STACK_USAGE
6 bool "Enable stack utilization instrumentation"
7 depends on DEBUG_KERNEL
8 help
9 Enables the display of the minimum amount of free stack which each
10 task has ever had available in the sysrq-T and sysrq-P debug output.
11
12 This option will slow down process creation somewhat.
13
14config KPROBES
15 bool "Kprobes"
16 depends on DEBUG_KERNEL
17 help
18 Kprobes allows you to trap at almost any kernel address and
19 execute a callback function. register_kprobe() establishes
20 a probepoint and specifies the callback. Kprobes is useful
21 for kernel debugging, non-intrusive instrumentation and testing.
22 If in doubt, say "N".
23
24config DEBUG_DCFLUSH
25 bool "D-cache flush debugging"
26 depends on DEBUG_KERNEL
27
28config STACK_DEBUG
29 depends on DEBUG_KERNEL
30 bool "Stack Overflow Detection Support"
31
32config DEBUG_BOOTMEM
33 depends on DEBUG_KERNEL
34 bool "Debug BOOTMEM initialization"
35
36# We have a custom atomic_dec_and_lock() implementation but it's not
37# compatible with spinlock debugging so we need to fall back on
38# the generic version in that case.
39config HAVE_DEC_LOCK
40 bool
41 depends on SMP && !DEBUG_SPINLOCK
42 default y
43
44config MCOUNT
45 bool
46 depends on STACK_DEBUG
47 default y
48
49config FRAME_POINTER
50 bool
51 depends on MCOUNT
52 default y
53
54endmenu
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
new file mode 100644
index 000000000000..43fe382da078
--- /dev/null
+++ b/arch/sparc64/Makefile
@@ -0,0 +1,83 @@
1# $Id: Makefile,v 1.52 2002/02/09 19:49:31 davem Exp $
2# sparc64/Makefile
3#
4# Makefile for the architecture dependent flags and dependencies on the
5# 64-bit Sparc.
6#
7# Copyright (C) 1996,1998 David S. Miller (davem@caip.rutgers.edu)
8# Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
9#
10
11CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -m64
12
13CPPFLAGS_vmlinux.lds += -Usparc
14
15CC := $(shell if $(CC) -m64 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo $(CC); else echo sparc64-linux-gcc; fi )
16
17NEW_GCC := $(call cc-option-yn, -m64 -mcmodel=medlow)
18NEW_GAS := $(shell if $(LD) -V 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
19UNDECLARED_REGS := $(shell if $(CC) -c -x assembler /dev/null -Wa,--help | grep undeclared-regs > /dev/null; then echo y; else echo n; fi; )
20INLINE_LIMIT := $(call cc-option-yn, -m64 -finline-limit=100000)
21
22export NEW_GCC
23
24ifneq ($(NEW_GAS),y)
25AS = sparc64-linux-as
26LD = sparc64-linux-ld
27NM = sparc64-linux-nm
28AR = sparc64-linux-ar
29RANLIB = sparc64-linux-ranlib
30else
31AS := $(AS) -64
32LDFLAGS := -m elf64_sparc
33endif
34
35ifneq ($(UNDECLARED_REGS),y)
36CC_UNDECL =
37else
38CC_UNDECL = -Wa,--undeclared-regs
39AS := $(AS) --undeclared-regs
40endif
41
42ifneq ($(NEW_GCC),y)
43 CFLAGS := $(CFLAGS) -pipe -mno-fpu -mtune=ultrasparc -mmedlow \
44 -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare
45else
46 CFLAGS := $(CFLAGS) -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
47 -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
48 $(CC_UNDECL)
49 AFLAGS += -m64 -mcpu=ultrasparc $(CC_UNDECL)
50endif
51
52ifeq ($(INLINE_LIMIT),y)
53 CFLAGS := $(CFLAGS) -finline-limit=100000
54endif
55
56ifeq ($(CONFIG_MCOUNT),y)
57 CFLAGS := $(CFLAGS) -pg
58endif
59
60head-y := arch/sparc64/kernel/head.o arch/sparc64/kernel/init_task.o
61
62core-y += arch/sparc64/kernel/ arch/sparc64/mm/
63core-$(CONFIG_SOLARIS_EMUL) += arch/sparc64/solaris/
64core-y += arch/sparc64/math-emu/
65libs-y += arch/sparc64/prom/ arch/sparc64/lib/
66
67# FIXME: is drivers- right?
68drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/
69
70boot := arch/sparc64/boot
71
72image tftpboot.img vmlinux.aout: vmlinux
73 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
74
75archclean:
76 $(Q)$(MAKE) $(clean)=$(boot)
77
78define archhelp
79 echo '* vmlinux - Standard sparc64 kernel'
80 echo ' vmlinux.aout - a.out kernel for sparc64'
81 echo ' tftpboot.img - Image prepared for tftp'
82endef
83
diff --git a/arch/sparc64/boot/Makefile b/arch/sparc64/boot/Makefile
new file mode 100644
index 000000000000..6968a6da57da
--- /dev/null
+++ b/arch/sparc64/boot/Makefile
@@ -0,0 +1,34 @@
1# $Id: Makefile,v 1.4 1997/12/15 20:08:56 ecd Exp $
2# Makefile for the Sparc64 boot stuff.
3#
4# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5# Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6
7ROOT_IMG := /usr/src/root.img
8ELFTOAOUT := elftoaout
9
10hostprogs-y := piggyback
11targets := image tftpboot.img vmlinux.aout
12
13quiet_cmd_elftoaout = ELF2AOUT $@
14 cmd_elftoaout = $(ELFTOAOUT) vmlinux -o $@
15quiet_cmd_piggy = PIGGY $@
16 cmd_piggy = $(obj)/piggyback $@ System.map $(ROOT_IMG)
17quiet_cmd_strip = STRIP $@
18 cmd_strip = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start vmlinux -o $@
19
20
21# Actual linking
22$(obj)/image: vmlinux FORCE
23 $(call if_changed,strip)
24 @echo ' kernel: $@ is ready'
25
26$(obj)/tftpboot.img: vmlinux $(obj)/piggyback System.map $(ROOT_IMG) FORCE
27 $(call if_changed,elftoaout)
28 $(call if_changed,piggy)
29 @echo ' kernel: $@ is ready'
30
31$(obj)/vmlinux.aout: vmlinux FORCE
32 $(call if_changed,elftoaout)
33 @echo ' kernel: $@ is ready'
34
diff --git a/arch/sparc64/boot/piggyback.c b/arch/sparc64/boot/piggyback.c
new file mode 100644
index 000000000000..36f907408c60
--- /dev/null
+++ b/arch/sparc64/boot/piggyback.c
@@ -0,0 +1,109 @@
1/* $Id: piggyback.c,v 1.2 2000/09/19 14:34:39 anton Exp $
2 Simple utility to make a single-image install kernel with initial ramdisk
3 for Sparc64 tftpbooting without need to set up nfs.
4
5 Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
20
21#include <stdio.h>
22#include <string.h>
23#include <ctype.h>
24#include <errno.h>
25#include <fcntl.h>
26#include <dirent.h>
27#include <unistd.h>
28#include <stdlib.h>
29#include <sys/types.h>
30#include <sys/stat.h>
31
/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image only
33 usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */
34
/* Report the failing operation `str` via perror() and abort the program. */
void die(char *str)
{
	perror(str);
	exit(1);
}
40
/*
 * piggyback: patch an a.out-converted kernel image so that a gzipped
 * ramdisk image (argv[3]) is appended after the kernel and the a.out
 * header / HdrS fields describe it, letting the PROM tftp-boot a single
 * file.  Usage: piggyback vmlinux System.map tail
 *
 * NOTE(review): the multi-byte header stores below are written through
 * unsigned-int casts in host byte order — this assumes a big-endian
 * build host matching the sparc image; confirm before running elsewhere.
 */
int main(int argc, char **argv)
{
	char buffer[1024], *q, *r;
	unsigned int i, j, k, start = 0, end = 0, offset;
	FILE *map;
	struct stat s;
	int image, tail;
	ssize_t cnt;

	if (argc != 4) {
		fprintf(stderr, "Usage: %s vmlinux System.map tail\n", argv[0]);
		exit(1);
	}
	/* Size of the ramdisk image we will append. */
	if (stat (argv[3], &s) < 0) die (argv[3]);
	map = fopen (argv[2], "r");
	if (!map) die(argv[2]);
	/* System.map lines look like "<16-hex-addr> <type> <symbol>\n";
	 * the symbol name therefore starts at column 19.  Skip lines too
	 * short for that layout so we never compare past the terminator. */
	while (fgets (buffer, 1024, map)) {
		if (strlen (buffer) < 20)
			continue;
		if (!strcmp (buffer + 19, "_start\n"))
			start = strtoul (buffer + 8, NULL, 16);
		else if (!strcmp (buffer + 19, "_end\n"))
			end = strtoul (buffer + 8, NULL, 16);
	}
	fclose (map);
	if (!start || !end) {
		fprintf (stderr, "Could not find _start/_end in %s\n", argv[2]);
		exit(1);
	}
	if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);
	if (read(image,buffer,512) != 512) die(argv[1]);
	if (!memcmp (buffer, "\177ELF", 4)) {
		/* ELF image: derive the file offset of the entry point from
		 * the program header (assumes the 32-bit ELF header layout:
		 * e_entry at 24, e_phoff at 28 — TODO confirm for this tree). */
		unsigned int *p = (unsigned int *)(buffer + *(unsigned int *)(buffer + 28));

		i = p[1] + *(unsigned int *)(buffer + 24) - p[2];
		if (lseek(image,i,0) < 0) die("lseek");
		if (read(image,buffer,512) != 512) die(argv[1]);
		j = 0;
	} else if (*(unsigned int *)buffer == 0x01030107) {
		/* a.out magic: text starts right after the 32-byte header. */
		i = j = 32;
	} else {
		fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
		exit(1);
	}
	k = i;
	/* Locate the "HdrS" boot-header signature; for a.out it may sit at a
	 * fixed spot in the already-read block, otherwise scan the first
	 * 512 bytes of the text segment. */
	if (j == 32 && buffer[40] == 'H' && buffer[41] == 'd' && buffer[42] == 'r' && buffer[43] == 'S') {
		offset = 40 + 10;
	} else {
		i += ((*(unsigned short *)(buffer + j + 2))<<2) - 512;
		if (lseek(image,i,0) < 0) die("lseek");
		if (read(image,buffer,1024) != 1024) die(argv[1]);
		for (q = buffer, r = q + 512; q < r; q += 4) {
			if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
				break;
		}
		if (q == r) {
			fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
			exit(1);
		}
		offset = i + (q - buffer) + 10;
	}
	/* Patch the HdrS fields: root flags, ramdisk load address (kernel
	 * end rounded up to 8K plus the 32-byte a.out header) and size. */
	if (lseek(image, offset, 0) < 0) die ("lseek");
	*(unsigned *)buffer = 0;
	*(unsigned *)(buffer + 4) = 0x01000000;
	*(unsigned *)(buffer + 8) = ((end + 32 + 8191) & ~8191);
	*(unsigned *)(buffer + 12) = s.st_size;
	if (write(image,buffer+2,14) != 14) die (argv[1]);
	/* Grow the a.out text size to cover kernel + padding + ramdisk. */
	if (lseek(image, 4, 0) < 0) die ("lseek");
	*(unsigned *)buffer = ((end + 32 + 8191) & ~8191) - (start & ~0x3fffffUL) + s.st_size;
	*(unsigned *)(buffer + 4) = 0;
	*(unsigned *)(buffer + 8) = 0;
	if (write(image,buffer,12) != 12) die (argv[1]);
	/* Append the ramdisk at the padded end of the kernel image.  Use a
	 * signed count so a read() error (-1) is detected instead of being
	 * converted to a huge unsigned length. */
	if (lseek(image, k - start + ((end + 32 + 8191) & ~8191), 0) < 0) die ("lseek");
	if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
	while ((cnt = read (tail,buffer,1024)) > 0)
		if (write(image,buffer,cnt) != cnt) die (argv[1]);
	if (cnt < 0) die (argv[3]);
	if (close(image) < 0) die("close");
	if (close(tail) < 0) die("close");
	return 0;
}
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
new file mode 100644
index 000000000000..46a6ad60a8f5
--- /dev/null
+++ b/arch/sparc64/defconfig
@@ -0,0 +1,1951 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11
4# Sun Mar 6 20:47:29 2005
5#
6CONFIG_64BIT=y
7CONFIG_MMU=y
8CONFIG_TIME_INTERPOLATION=y
9
10#
11# Code maturity level options
12#
13CONFIG_EXPERIMENTAL=y
14CONFIG_CLEAN_COMPILE=y
15CONFIG_LOCK_KERNEL=y
16
17#
18# General setup
19#
20CONFIG_LOCALVERSION=""
21CONFIG_SWAP=y
22CONFIG_SYSVIPC=y
23CONFIG_POSIX_MQUEUE=y
24# CONFIG_BSD_PROCESS_ACCT is not set
25CONFIG_SYSCTL=y
26# CONFIG_AUDIT is not set
27CONFIG_LOG_BUF_SHIFT=15
28CONFIG_HOTPLUG=y
29CONFIG_KOBJECT_UEVENT=y
30# CONFIG_IKCONFIG is not set
31# CONFIG_EMBEDDED is not set
32CONFIG_KALLSYMS=y
33# CONFIG_KALLSYMS_ALL is not set
34# CONFIG_KALLSYMS_EXTRA_PASS is not set
35CONFIG_FUTEX=y
36CONFIG_EPOLL=y
37# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
38CONFIG_SHMEM=y
39CONFIG_CC_ALIGN_FUNCTIONS=0
40CONFIG_CC_ALIGN_LABELS=0
41CONFIG_CC_ALIGN_LOOPS=0
42CONFIG_CC_ALIGN_JUMPS=0
43# CONFIG_TINY_SHMEM is not set
44
45#
46# Loadable module support
47#
48CONFIG_MODULES=y
49CONFIG_MODULE_UNLOAD=y
50CONFIG_MODULE_FORCE_UNLOAD=y
51CONFIG_OBSOLETE_MODPARM=y
52CONFIG_MODVERSIONS=y
53CONFIG_MODULE_SRCVERSION_ALL=y
54CONFIG_KMOD=y
55CONFIG_STOP_MACHINE=y
56CONFIG_SYSVIPC_COMPAT=y
57
58#
59# General machine setup
60#
61CONFIG_BBC_I2C=m
62CONFIG_VT=y
63CONFIG_VT_CONSOLE=y
64CONFIG_HW_CONSOLE=y
65CONFIG_SMP=y
66# CONFIG_PREEMPT is not set
67CONFIG_NR_CPUS=4
68CONFIG_CPU_FREQ=y
69# CONFIG_CPU_FREQ_DEBUG is not set
70CONFIG_CPU_FREQ_STAT=m
71CONFIG_CPU_FREQ_STAT_DETAILS=y
72CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
73# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
74CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
75CONFIG_CPU_FREQ_GOV_POWERSAVE=m
76CONFIG_CPU_FREQ_GOV_USERSPACE=m
77CONFIG_CPU_FREQ_GOV_ONDEMAND=m
78CONFIG_CPU_FREQ_TABLE=y
79CONFIG_US3_FREQ=m
80CONFIG_US2E_FREQ=m
81CONFIG_SPARC64=y
82CONFIG_RWSEM_XCHGADD_ALGORITHM=y
83CONFIG_GENERIC_CALIBRATE_DELAY=y
84CONFIG_HUGETLB_PAGE_SIZE_4MB=y
85# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
86# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
87CONFIG_GENERIC_ISA_DMA=y
88CONFIG_SBUS=y
89CONFIG_SBUSCHAR=y
90CONFIG_SUN_AUXIO=y
91CONFIG_SUN_IO=y
92CONFIG_PCI=y
93CONFIG_PCI_DOMAINS=y
94CONFIG_RTC=y
95# CONFIG_PCI_LEGACY_PROC is not set
96# CONFIG_PCI_NAMES is not set
97CONFIG_SUN_OPENPROMFS=m
98CONFIG_SPARC32_COMPAT=y
99CONFIG_COMPAT=y
100CONFIG_UID16=y
101CONFIG_BINFMT_ELF32=y
102# CONFIG_BINFMT_AOUT32 is not set
103CONFIG_BINFMT_ELF=y
104CONFIG_BINFMT_MISC=m
105CONFIG_SOLARIS_EMUL=m
106
107#
108# Parallel port support
109#
110CONFIG_PARPORT=m
111CONFIG_PARPORT_PC=m
112CONFIG_PARPORT_PC_FIFO=y
113# CONFIG_PARPORT_PC_SUPERIO is not set
114# CONFIG_PARPORT_SUNBPP is not set
115# CONFIG_PARPORT_OTHER is not set
116CONFIG_PARPORT_1284=y
117CONFIG_PRINTER=m
118CONFIG_ENVCTRL=m
119CONFIG_DISPLAY7SEG=m
120# CONFIG_CMDLINE_BOOL is not set
121
122#
123# Generic Driver Options
124#
125CONFIG_STANDALONE=y
126# CONFIG_PREVENT_FIRMWARE_BUILD is not set
127CONFIG_FW_LOADER=m
128# CONFIG_DEBUG_DRIVER is not set
129
130#
131# Graphics support
132#
133CONFIG_FB=y
134CONFIG_FB_MODE_HELPERS=y
135CONFIG_FB_TILEBLITTING=y
136# CONFIG_FB_CIRRUS is not set
137CONFIG_FB_PM2=y
138# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
139# CONFIG_FB_ASILIANT is not set
140# CONFIG_FB_IMSTT is not set
141# CONFIG_FB_BW2 is not set
142# CONFIG_FB_CG3 is not set
143CONFIG_FB_CG6=y
144# CONFIG_FB_RIVA is not set
145# CONFIG_FB_MATROX is not set
146# CONFIG_FB_RADEON_OLD is not set
147# CONFIG_FB_RADEON is not set
148# CONFIG_FB_ATY128 is not set
149CONFIG_FB_ATY=y
150CONFIG_FB_ATY_CT=y
151# CONFIG_FB_ATY_GENERIC_LCD is not set
152# CONFIG_FB_ATY_XL_INIT is not set
153CONFIG_FB_ATY_GX=y
154# CONFIG_FB_SAVAGE is not set
155# CONFIG_FB_SIS is not set
156# CONFIG_FB_NEOMAGIC is not set
157# CONFIG_FB_KYRO is not set
158# CONFIG_FB_3DFX is not set
159# CONFIG_FB_VOODOO1 is not set
160# CONFIG_FB_TRIDENT is not set
161CONFIG_FB_SBUS=y
162CONFIG_FB_FFB=y
163# CONFIG_FB_TCX is not set
164# CONFIG_FB_CG14 is not set
165# CONFIG_FB_P9100 is not set
166# CONFIG_FB_LEO is not set
167# CONFIG_FB_PCI is not set
168# CONFIG_FB_VIRTUAL is not set
169
170#
171# Console display driver support
172#
173# CONFIG_PROM_CONSOLE is not set
174CONFIG_DUMMY_CONSOLE=y
175CONFIG_FRAMEBUFFER_CONSOLE=y
176CONFIG_FONTS=y
177# CONFIG_FONT_8x8 is not set
178# CONFIG_FONT_8x16 is not set
179# CONFIG_FONT_6x11 is not set
180# CONFIG_FONT_PEARL_8x8 is not set
181# CONFIG_FONT_ACORN_8x8 is not set
182CONFIG_FONT_SUN8x16=y
183# CONFIG_FONT_SUN12x22 is not set
184
185#
186# Logo configuration
187#
188CONFIG_LOGO=y
189# CONFIG_LOGO_LINUX_MONO is not set
190# CONFIG_LOGO_LINUX_VGA16 is not set
191# CONFIG_LOGO_LINUX_CLUT224 is not set
192CONFIG_LOGO_SUN_CLUT224=y
193# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
194
195#
196# Serial drivers
197#
198
199#
200# Non-8250 serial port support
201#
202CONFIG_SERIAL_SUNCORE=y
203CONFIG_SERIAL_SUNZILOG=y
204CONFIG_SERIAL_SUNZILOG_CONSOLE=y
205CONFIG_SERIAL_SUNSU=y
206CONFIG_SERIAL_SUNSU_CONSOLE=y
207CONFIG_SERIAL_SUNSAB=m
208CONFIG_SERIAL_CORE=y
209CONFIG_SERIAL_CORE_CONSOLE=y
210
211#
212# Misc Linux/SPARC drivers
213#
214CONFIG_SUN_OPENPROMIO=m
215CONFIG_SUN_MOSTEK_RTC=y
216CONFIG_OBP_FLASH=m
217# CONFIG_SUN_BPP is not set
218
219#
220# Memory Technology Devices (MTD)
221#
222# CONFIG_MTD is not set
223
224#
225# Block devices
226#
227# CONFIG_BLK_DEV_FD is not set
228# CONFIG_PARIDE is not set
229# CONFIG_BLK_CPQ_DA is not set
230# CONFIG_BLK_CPQ_CISS_DA is not set
231# CONFIG_BLK_DEV_DAC960 is not set
232# CONFIG_BLK_DEV_UMEM is not set
233# CONFIG_BLK_DEV_COW_COMMON is not set
234CONFIG_BLK_DEV_LOOP=m
235CONFIG_BLK_DEV_CRYPTOLOOP=m
236CONFIG_BLK_DEV_NBD=m
237CONFIG_BLK_DEV_SX8=m
238CONFIG_BLK_DEV_UB=m
239# CONFIG_BLK_DEV_RAM is not set
240CONFIG_BLK_DEV_RAM_COUNT=16
241CONFIG_INITRAMFS_SOURCE=""
242CONFIG_CDROM_PKTCDVD=m
243CONFIG_CDROM_PKTCDVD_BUFFERS=8
244CONFIG_CDROM_PKTCDVD_WCACHE=y
245
246#
247# IO Schedulers
248#
249CONFIG_IOSCHED_NOOP=y
250CONFIG_IOSCHED_AS=y
251CONFIG_IOSCHED_DEADLINE=y
252CONFIG_IOSCHED_CFQ=y
253CONFIG_ATA_OVER_ETH=m
254
255#
256# ATA/ATAPI/MFM/RLL support
257#
258CONFIG_IDE=y
259CONFIG_BLK_DEV_IDE=y
260
261#
262# Please see Documentation/ide.txt for help/info on IDE drives
263#
264# CONFIG_BLK_DEV_IDE_SATA is not set
265CONFIG_BLK_DEV_IDEDISK=y
266# CONFIG_IDEDISK_MULTI_MODE is not set
267CONFIG_BLK_DEV_IDECD=y
268CONFIG_BLK_DEV_IDETAPE=m
269# CONFIG_BLK_DEV_IDEFLOPPY is not set
270# CONFIG_BLK_DEV_IDESCSI is not set
271# CONFIG_IDE_TASK_IOCTL is not set
272
273#
274# IDE chipset support/bugfixes
275#
276CONFIG_IDE_GENERIC=y
277CONFIG_BLK_DEV_IDEPCI=y
278# CONFIG_IDEPCI_SHARE_IRQ is not set
279# CONFIG_BLK_DEV_OFFBOARD is not set
280# CONFIG_BLK_DEV_GENERIC is not set
281CONFIG_BLK_DEV_OPTI621=m
282CONFIG_BLK_DEV_IDEDMA_PCI=y
283# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
284CONFIG_IDEDMA_PCI_AUTO=y
285CONFIG_IDEDMA_ONLYDISK=y
286# CONFIG_BLK_DEV_AEC62XX is not set
287CONFIG_BLK_DEV_ALI15X3=y
288# CONFIG_WDC_ALI15X3 is not set
289CONFIG_BLK_DEV_AMD74XX=m
290CONFIG_BLK_DEV_CMD64X=m
291CONFIG_BLK_DEV_TRIFLEX=m
292CONFIG_BLK_DEV_CY82C693=m
293CONFIG_BLK_DEV_CS5520=m
294CONFIG_BLK_DEV_CS5530=m
295CONFIG_BLK_DEV_HPT34X=m
296# CONFIG_HPT34X_AUTODMA is not set
297CONFIG_BLK_DEV_HPT366=m
298CONFIG_BLK_DEV_SC1200=m
299CONFIG_BLK_DEV_PIIX=m
300CONFIG_BLK_DEV_NS87415=m
301CONFIG_BLK_DEV_PDC202XX_OLD=m
302# CONFIG_PDC202XX_BURST is not set
303CONFIG_BLK_DEV_PDC202XX_NEW=m
304# CONFIG_PDC202XX_FORCE is not set
305CONFIG_BLK_DEV_SVWKS=m
306CONFIG_BLK_DEV_SIIMAGE=m
307CONFIG_BLK_DEV_SLC90E66=m
308CONFIG_BLK_DEV_TRM290=m
309CONFIG_BLK_DEV_VIA82CXXX=m
310# CONFIG_IDE_ARM is not set
311CONFIG_BLK_DEV_IDEDMA=y
312# CONFIG_IDEDMA_IVB is not set
313CONFIG_IDEDMA_AUTO=y
314# CONFIG_BLK_DEV_HD is not set
315
316#
317# SCSI device support
318#
319CONFIG_SCSI=y
320CONFIG_SCSI_PROC_FS=y
321
322#
323# SCSI support type (disk, tape, CD-ROM)
324#
325CONFIG_BLK_DEV_SD=y
326CONFIG_CHR_DEV_ST=m
327CONFIG_CHR_DEV_OSST=m
328CONFIG_BLK_DEV_SR=m
329CONFIG_BLK_DEV_SR_VENDOR=y
330CONFIG_CHR_DEV_SG=m
331
332#
333# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
334#
335CONFIG_SCSI_MULTI_LUN=y
336CONFIG_SCSI_CONSTANTS=y
337# CONFIG_SCSI_LOGGING is not set
338
339#
340# SCSI Transport Attributes
341#
342CONFIG_SCSI_SPI_ATTRS=y
343CONFIG_SCSI_FC_ATTRS=m
344CONFIG_SCSI_ISCSI_ATTRS=m
345
346#
347# SCSI low-level drivers
348#
349CONFIG_BLK_DEV_3W_XXXX_RAID=m
350CONFIG_SCSI_3W_9XXX=m
351CONFIG_SCSI_ACARD=m
352CONFIG_SCSI_AACRAID=m
353# CONFIG_SCSI_AIC7XXX is not set
354# CONFIG_SCSI_AIC7XXX_OLD is not set
355CONFIG_SCSI_AIC79XX=m
356CONFIG_AIC79XX_CMDS_PER_DEVICE=32
357CONFIG_AIC79XX_RESET_DELAY_MS=15000
358# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
359# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
360# CONFIG_AIC79XX_DEBUG_ENABLE is not set
361CONFIG_AIC79XX_DEBUG_MASK=0
362# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
363# CONFIG_MEGARAID_NEWGEN is not set
364# CONFIG_MEGARAID_LEGACY is not set
365CONFIG_SCSI_SATA=y
366CONFIG_SCSI_SATA_AHCI=m
367CONFIG_SCSI_SATA_SVW=m
368CONFIG_SCSI_ATA_PIIX=m
369CONFIG_SCSI_SATA_NV=m
370CONFIG_SCSI_SATA_PROMISE=m
371CONFIG_SCSI_SATA_QSTOR=m
372CONFIG_SCSI_SATA_SX4=m
373CONFIG_SCSI_SATA_SIL=m
374CONFIG_SCSI_SATA_SIS=m
375CONFIG_SCSI_SATA_ULI=m
376CONFIG_SCSI_SATA_VIA=m
377CONFIG_SCSI_SATA_VITESSE=m
378CONFIG_SCSI_DMX3191D=m
379CONFIG_SCSI_EATA_PIO=m
380# CONFIG_SCSI_FUTURE_DOMAIN is not set
381CONFIG_SCSI_IPS=m
382CONFIG_SCSI_INITIO=m
383CONFIG_SCSI_INIA100=m
384CONFIG_SCSI_PPA=m
385CONFIG_SCSI_IMM=m
386# CONFIG_SCSI_IZIP_EPP16 is not set
387# CONFIG_SCSI_IZIP_SLOW_CTR is not set
388CONFIG_SCSI_SYM53C8XX_2=y
389CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
390CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
391CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
392# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
393# CONFIG_SCSI_IPR is not set
394CONFIG_SCSI_QLOGIC_ISP=m
395CONFIG_SCSI_QLOGIC_FC=y
396CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
397# CONFIG_SCSI_QLOGIC_1280 is not set
398CONFIG_SCSI_QLOGICPTI=m
399CONFIG_SCSI_QLA2XXX=y
400# CONFIG_SCSI_QLA21XX is not set
401# CONFIG_SCSI_QLA22XX is not set
402# CONFIG_SCSI_QLA2300 is not set
403# CONFIG_SCSI_QLA2322 is not set
404# CONFIG_SCSI_QLA6312 is not set
405CONFIG_SCSI_DC395x=m
406# CONFIG_SCSI_DC390T is not set
407CONFIG_SCSI_DEBUG=m
408CONFIG_SCSI_SUNESP=y
409
410#
411# Fibre Channel support
412#
413CONFIG_FC4=m
414
415#
416# FC4 drivers
417#
418CONFIG_FC4_SOC=m
419CONFIG_FC4_SOCAL=m
420
421#
422# FC4 targets
423#
424CONFIG_SCSI_PLUTO=m
425CONFIG_SCSI_FCAL=m
426
427#
428# Multi-device support (RAID and LVM)
429#
430CONFIG_MD=y
431CONFIG_BLK_DEV_MD=m
432CONFIG_MD_LINEAR=m
433CONFIG_MD_RAID0=m
434CONFIG_MD_RAID1=m
435CONFIG_MD_RAID10=m
436CONFIG_MD_RAID5=m
437CONFIG_MD_RAID6=m
438CONFIG_MD_MULTIPATH=m
439# CONFIG_MD_FAULTY is not set
440CONFIG_BLK_DEV_DM=m
441CONFIG_DM_CRYPT=m
442CONFIG_DM_SNAPSHOT=m
443CONFIG_DM_MIRROR=m
444CONFIG_DM_ZERO=m
445
446#
447# Fusion MPT device support
448#
449CONFIG_FUSION=m
450CONFIG_FUSION_MAX_SGE=40
451CONFIG_FUSION_CTL=m
452CONFIG_FUSION_LAN=m
453
454#
455# IEEE 1394 (FireWire) support
456#
457CONFIG_IEEE1394=m
458
459#
460# Subsystem Options
461#
462# CONFIG_IEEE1394_VERBOSEDEBUG is not set
463CONFIG_IEEE1394_OUI_DB=y
464CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
465CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
466
467#
468# Device Drivers
469#
470CONFIG_IEEE1394_PCILYNX=m
471CONFIG_IEEE1394_OHCI1394=m
472
473#
474# Protocol Drivers
475#
476CONFIG_IEEE1394_VIDEO1394=m
477CONFIG_IEEE1394_SBP2=m
478# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
479CONFIG_IEEE1394_ETH1394=m
480CONFIG_IEEE1394_DV1394=m
481CONFIG_IEEE1394_RAWIO=m
482CONFIG_IEEE1394_CMP=m
483CONFIG_IEEE1394_AMDTP=m
484
485#
486# Networking support
487#
488CONFIG_NET=y
489
490#
491# Networking options
492#
493CONFIG_PACKET=y
494CONFIG_PACKET_MMAP=y
495CONFIG_NETLINK_DEV=y
496CONFIG_UNIX=y
497CONFIG_NET_KEY=m
498CONFIG_INET=y
499CONFIG_IP_MULTICAST=y
500# CONFIG_IP_ADVANCED_ROUTER is not set
501# CONFIG_IP_PNP is not set
502CONFIG_NET_IPIP=m
503CONFIG_NET_IPGRE=m
504CONFIG_NET_IPGRE_BROADCAST=y
505CONFIG_IP_MROUTE=y
506CONFIG_IP_PIMSM_V1=y
507CONFIG_IP_PIMSM_V2=y
508CONFIG_ARPD=y
509CONFIG_SYN_COOKIES=y
510CONFIG_INET_AH=y
511CONFIG_INET_ESP=y
512CONFIG_INET_IPCOMP=y
513CONFIG_INET_TUNNEL=y
514CONFIG_IP_TCPDIAG=y
515# CONFIG_IP_TCPDIAG_IPV6 is not set
516
517#
518# IP: Virtual Server Configuration
519#
520CONFIG_IP_VS=m
521# CONFIG_IP_VS_DEBUG is not set
522CONFIG_IP_VS_TAB_BITS=12
523
524#
525# IPVS transport protocol load balancing support
526#
527CONFIG_IP_VS_PROTO_TCP=y
528CONFIG_IP_VS_PROTO_UDP=y
529CONFIG_IP_VS_PROTO_ESP=y
530CONFIG_IP_VS_PROTO_AH=y
531
532#
533# IPVS scheduler
534#
535CONFIG_IP_VS_RR=m
536CONFIG_IP_VS_WRR=m
537CONFIG_IP_VS_LC=m
538CONFIG_IP_VS_WLC=m
539CONFIG_IP_VS_LBLC=m
540CONFIG_IP_VS_LBLCR=m
541CONFIG_IP_VS_DH=m
542CONFIG_IP_VS_SH=m
543CONFIG_IP_VS_SED=m
544CONFIG_IP_VS_NQ=m
545
546#
547# IPVS application helper
548#
549CONFIG_IP_VS_FTP=m
550CONFIG_IPV6=m
551CONFIG_IPV6_PRIVACY=y
552CONFIG_INET6_AH=m
553CONFIG_INET6_ESP=m
554CONFIG_INET6_IPCOMP=m
555CONFIG_INET6_TUNNEL=m
556CONFIG_IPV6_TUNNEL=m
557CONFIG_NETFILTER=y
558# CONFIG_NETFILTER_DEBUG is not set
559CONFIG_BRIDGE_NETFILTER=y
560
561#
562# IP: Netfilter Configuration
563#
564CONFIG_IP_NF_CONNTRACK=m
565CONFIG_IP_NF_CT_ACCT=y
566CONFIG_IP_NF_CONNTRACK_MARK=y
567CONFIG_IP_NF_CT_PROTO_SCTP=m
568CONFIG_IP_NF_FTP=m
569CONFIG_IP_NF_IRC=m
570CONFIG_IP_NF_TFTP=m
571CONFIG_IP_NF_AMANDA=m
572CONFIG_IP_NF_QUEUE=m
573CONFIG_IP_NF_IPTABLES=m
574CONFIG_IP_NF_MATCH_LIMIT=m
575CONFIG_IP_NF_MATCH_IPRANGE=m
576CONFIG_IP_NF_MATCH_MAC=m
577CONFIG_IP_NF_MATCH_PKTTYPE=m
578CONFIG_IP_NF_MATCH_MARK=m
579CONFIG_IP_NF_MATCH_MULTIPORT=m
580CONFIG_IP_NF_MATCH_TOS=m
581CONFIG_IP_NF_MATCH_RECENT=m
582CONFIG_IP_NF_MATCH_ECN=m
583CONFIG_IP_NF_MATCH_DSCP=m
584CONFIG_IP_NF_MATCH_AH_ESP=m
585CONFIG_IP_NF_MATCH_LENGTH=m
586CONFIG_IP_NF_MATCH_TTL=m
587CONFIG_IP_NF_MATCH_TCPMSS=m
588CONFIG_IP_NF_MATCH_HELPER=m
589CONFIG_IP_NF_MATCH_STATE=m
590CONFIG_IP_NF_MATCH_CONNTRACK=m
591CONFIG_IP_NF_MATCH_OWNER=m
592CONFIG_IP_NF_MATCH_PHYSDEV=m
593CONFIG_IP_NF_MATCH_ADDRTYPE=m
594CONFIG_IP_NF_MATCH_REALM=m
595CONFIG_IP_NF_MATCH_SCTP=m
596CONFIG_IP_NF_MATCH_COMMENT=m
597CONFIG_IP_NF_MATCH_CONNMARK=m
598CONFIG_IP_NF_MATCH_HASHLIMIT=m
599CONFIG_IP_NF_FILTER=m
600CONFIG_IP_NF_TARGET_REJECT=m
601CONFIG_IP_NF_TARGET_LOG=m
602CONFIG_IP_NF_TARGET_ULOG=m
603CONFIG_IP_NF_TARGET_TCPMSS=m
604CONFIG_IP_NF_NAT=m
605CONFIG_IP_NF_NAT_NEEDED=y
606CONFIG_IP_NF_TARGET_MASQUERADE=m
607CONFIG_IP_NF_TARGET_REDIRECT=m
608CONFIG_IP_NF_TARGET_NETMAP=m
609CONFIG_IP_NF_TARGET_SAME=m
610CONFIG_IP_NF_NAT_SNMP_BASIC=m
611CONFIG_IP_NF_NAT_IRC=m
612CONFIG_IP_NF_NAT_FTP=m
613CONFIG_IP_NF_NAT_TFTP=m
614CONFIG_IP_NF_NAT_AMANDA=m
615CONFIG_IP_NF_MANGLE=m
616CONFIG_IP_NF_TARGET_TOS=m
617CONFIG_IP_NF_TARGET_ECN=m
618CONFIG_IP_NF_TARGET_DSCP=m
619CONFIG_IP_NF_TARGET_MARK=m
620CONFIG_IP_NF_TARGET_CLASSIFY=m
621CONFIG_IP_NF_TARGET_CONNMARK=m
622CONFIG_IP_NF_TARGET_CLUSTERIP=m
623CONFIG_IP_NF_RAW=m
624CONFIG_IP_NF_TARGET_NOTRACK=m
625CONFIG_IP_NF_ARPTABLES=m
626CONFIG_IP_NF_ARPFILTER=m
627CONFIG_IP_NF_ARP_MANGLE=m
628
629#
630# IPv6: Netfilter Configuration
631#
632CONFIG_IP6_NF_QUEUE=m
633CONFIG_IP6_NF_IPTABLES=m
634CONFIG_IP6_NF_MATCH_LIMIT=m
635CONFIG_IP6_NF_MATCH_MAC=m
636CONFIG_IP6_NF_MATCH_RT=m
637CONFIG_IP6_NF_MATCH_OPTS=m
638CONFIG_IP6_NF_MATCH_FRAG=m
639CONFIG_IP6_NF_MATCH_HL=m
640CONFIG_IP6_NF_MATCH_MULTIPORT=m
641CONFIG_IP6_NF_MATCH_OWNER=m
642CONFIG_IP6_NF_MATCH_MARK=m
643CONFIG_IP6_NF_MATCH_IPV6HEADER=m
644CONFIG_IP6_NF_MATCH_AHESP=m
645CONFIG_IP6_NF_MATCH_LENGTH=m
646CONFIG_IP6_NF_MATCH_EUI64=m
647CONFIG_IP6_NF_MATCH_PHYSDEV=m
648CONFIG_IP6_NF_FILTER=m
649CONFIG_IP6_NF_TARGET_LOG=m
650CONFIG_IP6_NF_MANGLE=m
651CONFIG_IP6_NF_TARGET_MARK=m
652CONFIG_IP6_NF_RAW=m
653
654#
655# DECnet: Netfilter Configuration
656#
657CONFIG_DECNET_NF_GRABULATOR=m
658
659#
660# Bridge: Netfilter Configuration
661#
662CONFIG_BRIDGE_NF_EBTABLES=m
663CONFIG_BRIDGE_EBT_BROUTE=m
664CONFIG_BRIDGE_EBT_T_FILTER=m
665CONFIG_BRIDGE_EBT_T_NAT=m
666CONFIG_BRIDGE_EBT_802_3=m
667CONFIG_BRIDGE_EBT_AMONG=m
668CONFIG_BRIDGE_EBT_ARP=m
669CONFIG_BRIDGE_EBT_IP=m
670CONFIG_BRIDGE_EBT_LIMIT=m
671CONFIG_BRIDGE_EBT_MARK=m
672CONFIG_BRIDGE_EBT_PKTTYPE=m
673CONFIG_BRIDGE_EBT_STP=m
674CONFIG_BRIDGE_EBT_VLAN=m
675CONFIG_BRIDGE_EBT_ARPREPLY=m
676CONFIG_BRIDGE_EBT_DNAT=m
677CONFIG_BRIDGE_EBT_MARK_T=m
678CONFIG_BRIDGE_EBT_REDIRECT=m
679CONFIG_BRIDGE_EBT_SNAT=m
680CONFIG_BRIDGE_EBT_LOG=m
681CONFIG_BRIDGE_EBT_ULOG=m
682CONFIG_XFRM=y
683CONFIG_XFRM_USER=m
684
685#
686# SCTP Configuration (EXPERIMENTAL)
687#
688CONFIG_IP_SCTP=m
689# CONFIG_SCTP_DBG_MSG is not set
690# CONFIG_SCTP_DBG_OBJCNT is not set
691# CONFIG_SCTP_HMAC_NONE is not set
692# CONFIG_SCTP_HMAC_SHA1 is not set
693CONFIG_SCTP_HMAC_MD5=y
694CONFIG_ATM=y
695CONFIG_ATM_CLIP=y
696# CONFIG_ATM_CLIP_NO_ICMP is not set
697CONFIG_ATM_LANE=m
698CONFIG_ATM_MPOA=m
699CONFIG_ATM_BR2684=m
700CONFIG_ATM_BR2684_IPFILTER=y
701CONFIG_BRIDGE=m
702CONFIG_VLAN_8021Q=m
703CONFIG_DECNET=m
704CONFIG_DECNET_ROUTER=y
705CONFIG_DECNET_ROUTE_FWMARK=y
706CONFIG_LLC=m
707CONFIG_LLC2=m
708CONFIG_IPX=m
709# CONFIG_IPX_INTERN is not set
710CONFIG_ATALK=m
711# CONFIG_DEV_APPLETALK is not set
712CONFIG_X25=m
713CONFIG_LAPB=m
714CONFIG_NET_DIVERT=y
715# CONFIG_ECONET is not set
716# CONFIG_WAN_ROUTER is not set
717
718#
719# QoS and/or fair queueing
720#
721CONFIG_NET_SCHED=y
722# CONFIG_NET_SCH_CLK_JIFFIES is not set
723# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
724CONFIG_NET_SCH_CLK_CPU=y
725CONFIG_NET_SCH_CBQ=m
726CONFIG_NET_SCH_HTB=m
727CONFIG_NET_SCH_HFSC=m
728CONFIG_NET_SCH_ATM=y
729CONFIG_NET_SCH_PRIO=m
730CONFIG_NET_SCH_RED=m
731CONFIG_NET_SCH_SFQ=m
732CONFIG_NET_SCH_TEQL=m
733CONFIG_NET_SCH_TBF=m
734CONFIG_NET_SCH_GRED=m
735CONFIG_NET_SCH_DSMARK=m
736CONFIG_NET_SCH_NETEM=m
737CONFIG_NET_SCH_INGRESS=m
738CONFIG_NET_QOS=y
739CONFIG_NET_ESTIMATOR=y
740CONFIG_NET_CLS=y
741CONFIG_NET_CLS_BASIC=m
742CONFIG_NET_CLS_TCINDEX=m
743CONFIG_NET_CLS_ROUTE4=m
744CONFIG_NET_CLS_ROUTE=y
745CONFIG_NET_CLS_FW=m
746CONFIG_NET_CLS_U32=m
747CONFIG_CLS_U32_PERF=y
748CONFIG_NET_CLS_IND=y
749CONFIG_CLS_U32_MARK=y
750CONFIG_NET_CLS_RSVP=m
751CONFIG_NET_CLS_RSVP6=m
752CONFIG_NET_EMATCH=y
753CONFIG_NET_EMATCH_STACK=32
754CONFIG_NET_EMATCH_CMP=m
755CONFIG_NET_EMATCH_NBYTE=m
756CONFIG_NET_EMATCH_U32=m
757CONFIG_NET_EMATCH_META=m
758CONFIG_NET_CLS_ACT=y
759CONFIG_NET_ACT_POLICE=m
760CONFIG_NET_ACT_GACT=m
761CONFIG_GACT_PROB=y
762CONFIG_NET_ACT_MIRRED=m
763CONFIG_NET_ACT_IPT=m
764CONFIG_NET_ACT_PEDIT=m
765
766#
767# Network testing
768#
769CONFIG_NET_PKTGEN=m
770CONFIG_NETPOLL=y
771# CONFIG_NETPOLL_RX is not set
772# CONFIG_NETPOLL_TRAP is not set
773CONFIG_NET_POLL_CONTROLLER=y
774CONFIG_HAMRADIO=y
775
776#
777# Packet Radio protocols
778#
779CONFIG_AX25=m
780CONFIG_AX25_DAMA_SLAVE=y
781CONFIG_NETROM=m
782CONFIG_ROSE=m
783
784#
785# AX.25 network device drivers
786#
787# CONFIG_BPQETHER is not set
788# CONFIG_BAYCOM_SER_FDX is not set
789# CONFIG_BAYCOM_SER_HDX is not set
790# CONFIG_BAYCOM_PAR is not set
791# CONFIG_YAM is not set
792CONFIG_IRDA=m
793
794#
795# IrDA protocols
796#
797CONFIG_IRLAN=m
798CONFIG_IRNET=m
799CONFIG_IRCOMM=m
800CONFIG_IRDA_ULTRA=y
801
802#
803# IrDA options
804#
805CONFIG_IRDA_CACHE_LAST_LSAP=y
806CONFIG_IRDA_FAST_RR=y
807# CONFIG_IRDA_DEBUG is not set
808
809#
810# Infrared-port device drivers
811#
812
813#
814# SIR device drivers
815#
816# CONFIG_IRTTY_SIR is not set
817
818#
819# Dongle support
820#
821
822#
823# Old SIR device drivers
824#
825
826#
827# Old Serial dongle support
828#
829
830#
831# FIR device drivers
832#
833# CONFIG_USB_IRDA is not set
834CONFIG_SIGMATEL_FIR=m
835# CONFIG_VLSI_FIR is not set
836CONFIG_BT=m
837CONFIG_BT_L2CAP=m
838CONFIG_BT_SCO=m
839CONFIG_BT_RFCOMM=m
840CONFIG_BT_RFCOMM_TTY=y
841CONFIG_BT_BNEP=m
842CONFIG_BT_BNEP_MC_FILTER=y
843CONFIG_BT_BNEP_PROTO_FILTER=y
844CONFIG_BT_CMTP=m
845CONFIG_BT_HIDP=m
846
847#
848# Bluetooth device drivers
849#
850CONFIG_BT_HCIUSB=m
851CONFIG_BT_HCIUSB_SCO=y
852CONFIG_BT_HCIUART=m
853CONFIG_BT_HCIUART_H4=y
854CONFIG_BT_HCIUART_BCSP=y
855CONFIG_BT_HCIUART_BCSP_TXCRC=y
856CONFIG_BT_HCIBCM203X=m
857CONFIG_BT_HCIBPA10X=m
858CONFIG_BT_HCIBFUSB=m
859CONFIG_BT_HCIVHCI=m
860CONFIG_NETDEVICES=y
861CONFIG_DUMMY=m
862CONFIG_BONDING=m
863CONFIG_EQUALIZER=m
864CONFIG_TUN=m
865# CONFIG_ETHERTAP is not set
866
867#
868# ARCnet devices
869#
870# CONFIG_ARCNET is not set
871
872#
873# Ethernet (10 or 100Mbit)
874#
875CONFIG_NET_ETHERNET=y
876CONFIG_MII=m
877CONFIG_SUNLANCE=y
878CONFIG_HAPPYMEAL=y
879CONFIG_SUNBMAC=m
880CONFIG_SUNQE=m
881CONFIG_SUNGEM=y
882CONFIG_NET_VENDOR_3COM=y
883CONFIG_VORTEX=m
884CONFIG_TYPHOON=m
885
886#
887# Tulip family network device support
888#
889CONFIG_NET_TULIP=y
890CONFIG_DE2104X=m
891CONFIG_TULIP=m
892# CONFIG_TULIP_MWI is not set
893# CONFIG_TULIP_MMIO is not set
894CONFIG_TULIP_NAPI=y
895CONFIG_TULIP_NAPI_HW_MITIGATION=y
896CONFIG_DE4X5=m
897CONFIG_WINBOND_840=m
898# CONFIG_DM9102 is not set
899# CONFIG_HP100 is not set
900CONFIG_NET_PCI=y
901CONFIG_PCNET32=m
902# CONFIG_AMD8111_ETH is not set
903CONFIG_ADAPTEC_STARFIRE=m
904CONFIG_ADAPTEC_STARFIRE_NAPI=y
905CONFIG_B44=m
906CONFIG_FORCEDETH=m
907CONFIG_DGRS=m
908CONFIG_EEPRO100=m
909CONFIG_E100=m
910CONFIG_FEALNX=m
911CONFIG_NATSEMI=m
912CONFIG_NE2K_PCI=m
913# CONFIG_8139CP is not set
914CONFIG_8139TOO=m
915# CONFIG_8139TOO_PIO is not set
916# CONFIG_8139TOO_TUNE_TWISTER is not set
917# CONFIG_8139TOO_8129 is not set
918# CONFIG_8139_OLD_RX_RESET is not set
919CONFIG_SIS900=m
920CONFIG_EPIC100=m
921CONFIG_SUNDANCE=m
922CONFIG_SUNDANCE_MMIO=y
923CONFIG_VIA_RHINE=m
924# CONFIG_VIA_RHINE_MMIO is not set
925
926#
927# Ethernet (1000 Mbit)
928#
929CONFIG_ACENIC=m
930# CONFIG_ACENIC_OMIT_TIGON_I is not set
931CONFIG_DL2K=m
932CONFIG_E1000=m
933CONFIG_E1000_NAPI=y
934CONFIG_MYRI_SBUS=m
935CONFIG_NS83820=m
936CONFIG_HAMACHI=m
937CONFIG_YELLOWFIN=m
938CONFIG_R8169=m
939CONFIG_R8169_NAPI=y
940CONFIG_R8169_VLAN=y
941CONFIG_SK98LIN=m
942CONFIG_VIA_VELOCITY=m
943CONFIG_TIGON3=m
944
945#
946# Ethernet (10000 Mbit)
947#
948CONFIG_IXGB=m
949CONFIG_IXGB_NAPI=y
950CONFIG_S2IO=m
951CONFIG_S2IO_NAPI=y
952CONFIG_2BUFF_MODE=y
953
954#
955# Token Ring devices
956#
957# CONFIG_TR is not set
958
959#
960# Wireless LAN (non-hamradio)
961#
962CONFIG_NET_RADIO=y
963
964#
965# Obsolete Wireless cards support (pre-802.11)
966#
967# CONFIG_STRIP is not set
968
969#
970# Wireless 802.11b ISA/PCI cards support
971#
972CONFIG_HERMES=m
973CONFIG_PLX_HERMES=m
974CONFIG_TMD_HERMES=m
975CONFIG_PCI_HERMES=m
976CONFIG_ATMEL=m
977CONFIG_PCI_ATMEL=m
978
979#
980# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
981#
982CONFIG_PRISM54=m
983CONFIG_NET_WIRELESS=y
984
985#
986# Wan interfaces
987#
988# CONFIG_WAN is not set
989
990#
991# ATM drivers
992#
993CONFIG_ATM_TCP=m
994# CONFIG_ATM_LANAI is not set
995# CONFIG_ATM_ENI is not set
996# CONFIG_ATM_FIRESTREAM is not set
997# CONFIG_ATM_ZATM is not set
998# CONFIG_ATM_IDT77252 is not set
999# CONFIG_ATM_AMBASSADOR is not set
1000# CONFIG_ATM_HORIZON is not set
1001CONFIG_ATM_FORE200E_MAYBE=m
1002CONFIG_ATM_FORE200E_PCA=y
1003CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
1004CONFIG_ATM_FORE200E_SBA=y
1005CONFIG_ATM_FORE200E_SBA_DEFAULT_FW=y
1006CONFIG_ATM_FORE200E_USE_TASKLET=y
1007CONFIG_ATM_FORE200E_TX_RETRY=16
1008CONFIG_ATM_FORE200E_DEBUG=0
1009CONFIG_ATM_FORE200E=m
1010CONFIG_ATM_HE=m
1011CONFIG_ATM_HE_USE_SUNI=y
1012CONFIG_FDDI=y
1013# CONFIG_DEFXX is not set
1014CONFIG_SKFP=m
1015CONFIG_HIPPI=y
1016# CONFIG_ROADRUNNER is not set
1017CONFIG_PLIP=m
1018CONFIG_PPP=m
1019CONFIG_PPP_MULTILINK=y
1020CONFIG_PPP_FILTER=y
1021CONFIG_PPP_ASYNC=m
1022CONFIG_PPP_SYNC_TTY=m
1023CONFIG_PPP_DEFLATE=m
1024CONFIG_PPP_BSDCOMP=m
1025CONFIG_PPPOE=m
1026CONFIG_PPPOATM=m
1027CONFIG_SLIP=m
1028CONFIG_SLIP_COMPRESSED=y
1029CONFIG_SLIP_SMART=y
1030# CONFIG_SLIP_MODE_SLIP6 is not set
1031CONFIG_NET_FC=y
1032CONFIG_SHAPER=m
1033CONFIG_NETCONSOLE=m
1034
1035#
1036# ISDN subsystem
1037#
1038CONFIG_ISDN=m
1039
1040#
1041# Old ISDN4Linux
1042#
1043# CONFIG_ISDN_I4L is not set
1044
1045#
1046# CAPI subsystem
1047#
1048CONFIG_ISDN_CAPI=m
1049# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
1050# CONFIG_ISDN_CAPI_MIDDLEWARE is not set
1051CONFIG_ISDN_CAPI_CAPI20=m
1052
1053#
1054# CAPI hardware drivers
1055#
1056
1057#
1058# Active AVM cards
1059#
1060CONFIG_CAPI_AVM=y
1061CONFIG_ISDN_DRV_AVMB1_B1PCI=m
1062CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
1063CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
1064CONFIG_ISDN_DRV_AVMB1_T1PCI=m
1065CONFIG_ISDN_DRV_AVMB1_C4=m
1066
1067#
1068# Active Eicon DIVA Server cards
1069#
1070CONFIG_CAPI_EICON=y
1071CONFIG_ISDN_DIVAS=m
1072CONFIG_ISDN_DIVAS_BRIPCI=y
1073CONFIG_ISDN_DIVAS_PRIPCI=y
1074CONFIG_ISDN_DIVAS_DIVACAPI=m
1075CONFIG_ISDN_DIVAS_USERIDI=m
1076CONFIG_ISDN_DIVAS_MAINT=m
1077
1078#
1079# Telephony Support
1080#
1081CONFIG_PHONE=m
1082CONFIG_PHONE_IXJ=m
1083
1084#
1085# Unix98 PTY support
1086#
1087CONFIG_UNIX98_PTYS=y
1088CONFIG_UNIX98_PTY_COUNT=256
1089
1090#
1091# XFree86 DRI support
1092#
1093CONFIG_DRM=y
1094CONFIG_DRM_TDFX=m
1095# CONFIG_DRM_R128 is not set
1096
1097#
1098# Input device support
1099#
1100CONFIG_INPUT=y
1101
1102#
1103# Userland interfaces
1104#
1105CONFIG_INPUT_MOUSEDEV=y
1106CONFIG_INPUT_MOUSEDEV_PSAUX=y
1107CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1108CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1109# CONFIG_INPUT_JOYDEV is not set
1110# CONFIG_INPUT_TSDEV is not set
1111CONFIG_INPUT_EVDEV=y
1112# CONFIG_INPUT_EVBUG is not set
1113
1114#
1115# Input I/O drivers
1116#
1117CONFIG_GAMEPORT=m
1118CONFIG_SOUND_GAMEPORT=m
1119# CONFIG_GAMEPORT_NS558 is not set
1120# CONFIG_GAMEPORT_L4 is not set
1121# CONFIG_GAMEPORT_EMU10K1 is not set
1122# CONFIG_GAMEPORT_VORTEX is not set
1123# CONFIG_GAMEPORT_FM801 is not set
1124CONFIG_GAMEPORT_CS461X=m
1125CONFIG_SERIO=y
1126CONFIG_SERIO_I8042=y
1127# CONFIG_SERIO_SERPORT is not set
1128# CONFIG_SERIO_CT82C710 is not set
1129# CONFIG_SERIO_PARKBD is not set
1130CONFIG_SERIO_PCIPS2=m
1131CONFIG_SERIO_LIBPS2=y
1132CONFIG_SERIO_RAW=m
1133
1134#
1135# Input Device Drivers
1136#
1137CONFIG_INPUT_KEYBOARD=y
1138CONFIG_KEYBOARD_ATKBD=y
1139CONFIG_KEYBOARD_SUNKBD=y
1140CONFIG_KEYBOARD_LKKBD=m
1141# CONFIG_KEYBOARD_XTKBD is not set
1142# CONFIG_KEYBOARD_NEWTON is not set
1143CONFIG_INPUT_MOUSE=y
1144CONFIG_MOUSE_PS2=y
1145CONFIG_MOUSE_SERIAL=y
1146CONFIG_MOUSE_VSXXXAA=m
1147# CONFIG_INPUT_JOYSTICK is not set
1148# CONFIG_INPUT_TOUCHSCREEN is not set
1149CONFIG_INPUT_MISC=y
1150CONFIG_INPUT_SPARCSPKR=y
1151# CONFIG_INPUT_UINPUT is not set
1152
1153#
1154# I2C support
1155#
1156CONFIG_I2C=y
1157CONFIG_I2C_CHARDEV=m
1158
1159#
1160# I2C Algorithms
1161#
1162CONFIG_I2C_ALGOBIT=y
1163CONFIG_I2C_ALGOPCF=m
1164CONFIG_I2C_ALGOPCA=m
1165
1166#
1167# I2C Hardware Bus support
1168#
1169CONFIG_I2C_ALI1535=m
1170CONFIG_I2C_ALI1563=m
1171CONFIG_I2C_ALI15X3=m
1172CONFIG_I2C_AMD756=m
1173# CONFIG_I2C_AMD756_S4882 is not set
1174CONFIG_I2C_AMD8111=m
1175CONFIG_I2C_I801=m
1176CONFIG_I2C_I810=m
1177CONFIG_I2C_ISA=m
1178CONFIG_I2C_NFORCE2=m
1179CONFIG_I2C_PARPORT=m
1180CONFIG_I2C_PARPORT_LIGHT=m
1181CONFIG_I2C_PIIX4=m
1182CONFIG_I2C_PROSAVAGE=m
1183CONFIG_I2C_SAVAGE4=m
1184CONFIG_SCx200_ACB=m
1185CONFIG_I2C_SIS5595=m
1186CONFIG_I2C_SIS630=m
1187CONFIG_I2C_SIS96X=m
1188CONFIG_I2C_STUB=m
1189CONFIG_I2C_VIA=m
1190CONFIG_I2C_VIAPRO=m
1191CONFIG_I2C_VOODOO3=m
1192CONFIG_I2C_PCA_ISA=m
1193
1194#
1195# Hardware Sensors Chip support
1196#
1197CONFIG_I2C_SENSOR=m
1198CONFIG_SENSORS_ADM1021=m
1199CONFIG_SENSORS_ADM1025=m
1200CONFIG_SENSORS_ADM1026=m
1201CONFIG_SENSORS_ADM1031=m
1202CONFIG_SENSORS_ASB100=m
1203CONFIG_SENSORS_DS1621=m
1204CONFIG_SENSORS_FSCHER=m
1205CONFIG_SENSORS_FSCPOS=m
1206CONFIG_SENSORS_GL518SM=m
1207CONFIG_SENSORS_GL520SM=m
1208CONFIG_SENSORS_IT87=m
1209CONFIG_SENSORS_LM63=m
1210CONFIG_SENSORS_LM75=m
1211CONFIG_SENSORS_LM77=m
1212CONFIG_SENSORS_LM78=m
1213CONFIG_SENSORS_LM80=m
1214CONFIG_SENSORS_LM83=m
1215CONFIG_SENSORS_LM85=m
1216CONFIG_SENSORS_LM87=m
1217CONFIG_SENSORS_LM90=m
1218CONFIG_SENSORS_MAX1619=m
1219CONFIG_SENSORS_PC87360=m
1220CONFIG_SENSORS_SMSC47B397=m
1221CONFIG_SENSORS_SIS5595=m
1222CONFIG_SENSORS_SMSC47M1=m
1223CONFIG_SENSORS_VIA686A=m
1224CONFIG_SENSORS_W83781D=m
1225CONFIG_SENSORS_W83L785TS=m
1226CONFIG_SENSORS_W83627HF=m
1227
1228#
1229# Other I2C Chip support
1230#
1231CONFIG_SENSORS_EEPROM=m
1232CONFIG_SENSORS_PCF8574=m
1233CONFIG_SENSORS_PCF8591=m
1234CONFIG_SENSORS_RTC8564=m
1235# CONFIG_I2C_DEBUG_CORE is not set
1236# CONFIG_I2C_DEBUG_ALGO is not set
1237# CONFIG_I2C_DEBUG_BUS is not set
1238# CONFIG_I2C_DEBUG_CHIP is not set
1239
1240#
1241# File systems
1242#
1243CONFIG_EXT2_FS=y
1244CONFIG_EXT2_FS_XATTR=y
1245CONFIG_EXT2_FS_POSIX_ACL=y
1246CONFIG_EXT2_FS_SECURITY=y
1247CONFIG_EXT3_FS=y
1248CONFIG_EXT3_FS_XATTR=y
1249CONFIG_EXT3_FS_POSIX_ACL=y
1250CONFIG_EXT3_FS_SECURITY=y
1251CONFIG_JBD=y
1252# CONFIG_JBD_DEBUG is not set
1253CONFIG_FS_MBCACHE=y
1254# CONFIG_REISERFS_FS is not set
1255CONFIG_JFS_FS=m
1256CONFIG_JFS_POSIX_ACL=y
1257CONFIG_JFS_SECURITY=y
1258# CONFIG_JFS_DEBUG is not set
1259# CONFIG_JFS_STATISTICS is not set
1260CONFIG_FS_POSIX_ACL=y
1261
1262#
1263# XFS support
1264#
1265CONFIG_XFS_FS=m
1266CONFIG_XFS_EXPORT=y
1267# CONFIG_XFS_RT is not set
1268CONFIG_XFS_QUOTA=y
1269CONFIG_XFS_SECURITY=y
1270CONFIG_XFS_POSIX_ACL=y
1271CONFIG_MINIX_FS=m
1272CONFIG_ROMFS_FS=m
1273# CONFIG_QUOTA is not set
1274CONFIG_QUOTACTL=y
1275CONFIG_DNOTIFY=y
1276CONFIG_AUTOFS_FS=m
1277CONFIG_AUTOFS4_FS=m
1278
1279#
1280# CD-ROM/DVD Filesystems
1281#
1282CONFIG_ISO9660_FS=m
1283CONFIG_JOLIET=y
1284# CONFIG_ZISOFS is not set
1285CONFIG_UDF_FS=m
1286CONFIG_UDF_NLS=y
1287
1288#
1289# DOS/FAT/NT Filesystems
1290#
1291CONFIG_FAT_FS=m
1292CONFIG_MSDOS_FS=m
1293CONFIG_VFAT_FS=m
1294CONFIG_FAT_DEFAULT_CODEPAGE=437
1295CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1296# CONFIG_NTFS_FS is not set
1297
1298#
1299# Pseudo filesystems
1300#
1301CONFIG_PROC_FS=y
1302CONFIG_PROC_KCORE=y
1303CONFIG_SYSFS=y
1304CONFIG_DEVFS_FS=y
1305CONFIG_DEVFS_MOUNT=y
1306# CONFIG_DEVFS_DEBUG is not set
1307CONFIG_DEVPTS_FS_XATTR=y
1308# CONFIG_DEVPTS_FS_SECURITY is not set
1309CONFIG_TMPFS=y
1310CONFIG_TMPFS_XATTR=y
1311CONFIG_TMPFS_SECURITY=y
1312CONFIG_HUGETLBFS=y
1313CONFIG_HUGETLB_PAGE=y
1314CONFIG_RAMFS=y
1315
1316#
1317# Miscellaneous filesystems
1318#
1319CONFIG_ADFS_FS=m
1320# CONFIG_ADFS_FS_RW is not set
1321CONFIG_AFFS_FS=m
1322CONFIG_HFS_FS=m
1323CONFIG_HFSPLUS_FS=m
1324CONFIG_BEFS_FS=m
1325# CONFIG_BEFS_DEBUG is not set
1326CONFIG_BFS_FS=m
1327CONFIG_EFS_FS=m
1328CONFIG_CRAMFS=m
1329CONFIG_VXFS_FS=m
1330CONFIG_HPFS_FS=m
1331CONFIG_QNX4FS_FS=m
1332CONFIG_SYSV_FS=m
1333CONFIG_UFS_FS=m
1334CONFIG_UFS_FS_WRITE=y
1335
1336#
1337# Network File Systems
1338#
1339CONFIG_NFS_FS=m
1340CONFIG_NFS_V3=y
1341CONFIG_NFS_V4=y
1342CONFIG_NFS_DIRECTIO=y
1343CONFIG_NFSD=m
1344CONFIG_NFSD_V3=y
1345CONFIG_NFSD_V4=y
1346CONFIG_NFSD_TCP=y
1347CONFIG_LOCKD=m
1348CONFIG_LOCKD_V4=y
1349CONFIG_EXPORTFS=m
1350CONFIG_SUNRPC=m
1351CONFIG_SUNRPC_GSS=m
1352CONFIG_RPCSEC_GSS_KRB5=m
1353CONFIG_RPCSEC_GSS_SPKM3=m
1354CONFIG_SMB_FS=m
1355# CONFIG_SMB_NLS_DEFAULT is not set
1356CONFIG_CIFS=m
1357# CONFIG_CIFS_STATS is not set
1358# CONFIG_CIFS_XATTR is not set
1359# CONFIG_CIFS_EXPERIMENTAL is not set
1360CONFIG_NCP_FS=m
1361# CONFIG_NCPFS_PACKET_SIGNING is not set
1362# CONFIG_NCPFS_IOCTL_LOCKING is not set
1363# CONFIG_NCPFS_STRONG is not set
1364# CONFIG_NCPFS_NFS_NS is not set
1365# CONFIG_NCPFS_OS2_NS is not set
1366# CONFIG_NCPFS_SMALLDOS is not set
1367# CONFIG_NCPFS_NLS is not set
1368# CONFIG_NCPFS_EXTRAS is not set
1369CONFIG_CODA_FS=m
1370# CONFIG_CODA_FS_OLD_API is not set
1371CONFIG_AFS_FS=m
1372CONFIG_RXRPC=m
1373
1374#
1375# Partition Types
1376#
1377# CONFIG_PARTITION_ADVANCED is not set
1378CONFIG_MSDOS_PARTITION=y
1379CONFIG_SUN_PARTITION=y
1380
1381#
1382# Native Language Support
1383#
1384CONFIG_NLS=y
1385CONFIG_NLS_DEFAULT="iso8859-1"
1386CONFIG_NLS_CODEPAGE_437=m
1387CONFIG_NLS_CODEPAGE_737=m
1388CONFIG_NLS_CODEPAGE_775=m
1389CONFIG_NLS_CODEPAGE_850=m
1390CONFIG_NLS_CODEPAGE_852=m
1391CONFIG_NLS_CODEPAGE_855=m
1392CONFIG_NLS_CODEPAGE_857=m
1393CONFIG_NLS_CODEPAGE_860=m
1394CONFIG_NLS_CODEPAGE_861=m
1395CONFIG_NLS_CODEPAGE_862=m
1396CONFIG_NLS_CODEPAGE_863=m
1397CONFIG_NLS_CODEPAGE_864=m
1398CONFIG_NLS_CODEPAGE_865=m
1399CONFIG_NLS_CODEPAGE_866=m
1400CONFIG_NLS_CODEPAGE_869=m
1401CONFIG_NLS_CODEPAGE_936=m
1402CONFIG_NLS_CODEPAGE_950=m
1403CONFIG_NLS_CODEPAGE_932=m
1404CONFIG_NLS_CODEPAGE_949=m
1405CONFIG_NLS_CODEPAGE_874=m
1406CONFIG_NLS_ISO8859_8=m
1407CONFIG_NLS_CODEPAGE_1250=m
1408CONFIG_NLS_CODEPAGE_1251=m
1409CONFIG_NLS_ASCII=m
1410CONFIG_NLS_ISO8859_1=m
1411CONFIG_NLS_ISO8859_2=m
1412CONFIG_NLS_ISO8859_3=m
1413CONFIG_NLS_ISO8859_4=m
1414CONFIG_NLS_ISO8859_5=m
1415CONFIG_NLS_ISO8859_6=m
1416CONFIG_NLS_ISO8859_7=m
1417CONFIG_NLS_ISO8859_9=m
1418CONFIG_NLS_ISO8859_13=m
1419CONFIG_NLS_ISO8859_14=m
1420CONFIG_NLS_ISO8859_15=m
1421CONFIG_NLS_KOI8_R=m
1422CONFIG_NLS_KOI8_U=m
1423CONFIG_NLS_UTF8=m
1424
1425#
1426# Multimedia devices
1427#
1428CONFIG_VIDEO_DEV=y
1429
1430#
1431# Video For Linux
1432#
1433
1434#
1435# Video Adapters
1436#
1437CONFIG_VIDEO_BT848=m
1438CONFIG_VIDEO_BWQCAM=m
1439CONFIG_VIDEO_CQCAM=m
1440CONFIG_VIDEO_W9966=m
1441CONFIG_VIDEO_CPIA=m
1442CONFIG_VIDEO_CPIA_PP=m
1443CONFIG_VIDEO_CPIA_USB=m
1444CONFIG_VIDEO_SAA5246A=m
1445CONFIG_VIDEO_SAA5249=m
1446CONFIG_TUNER_3036=m
1447# CONFIG_VIDEO_STRADIS is not set
1448# CONFIG_VIDEO_ZORAN is not set
1449# CONFIG_VIDEO_SAA7134 is not set
1450CONFIG_VIDEO_MXB=m
1451CONFIG_VIDEO_DPC=m
1452CONFIG_VIDEO_HEXIUM_ORION=m
1453CONFIG_VIDEO_HEXIUM_GEMINI=m
1454CONFIG_VIDEO_CX88=m
1455CONFIG_VIDEO_OVCAMCHIP=m
1456
1457#
1458# Radio Adapters
1459#
1460CONFIG_RADIO_GEMTEK_PCI=m
1461CONFIG_RADIO_MAXIRADIO=m
1462CONFIG_RADIO_MAESTRO=m
1463
1464#
1465# Digital Video Broadcasting Devices
1466#
1467CONFIG_DVB=y
1468CONFIG_DVB_CORE=m
1469
1470#
1471# Supported SAA7146 based PCI Adapters
1472#
1473CONFIG_DVB_AV7110=m
1474# CONFIG_DVB_AV7110_OSD is not set
1475CONFIG_DVB_BUDGET=m
1476CONFIG_DVB_BUDGET_CI=m
1477CONFIG_DVB_BUDGET_AV=m
1478CONFIG_DVB_BUDGET_PATCH=m
1479
1480#
1481# Supported USB Adapters
1482#
1483# CONFIG_DVB_TTUSB_BUDGET is not set
1484CONFIG_DVB_TTUSB_DEC=m
1485CONFIG_DVB_DIBUSB=m
1486# CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES is not set
1487CONFIG_DVB_DIBCOM_DEBUG=y
1488CONFIG_DVB_CINERGYT2=m
1489# CONFIG_DVB_CINERGYT2_TUNING is not set
1490
1491#
1492# Supported FlexCopII (B2C2) Adapters
1493#
1494CONFIG_DVB_B2C2_SKYSTAR=m
1495CONFIG_DVB_B2C2_USB=m
1496
1497#
1498# Supported BT878 Adapters
1499#
1500CONFIG_DVB_BT8XX=m
1501
1502#
1503# Supported DVB Frontends
1504#
1505
1506#
1507# Customise DVB Frontends
1508#
1509
1510#
1511# DVB-S (satellite) frontends
1512#
1513CONFIG_DVB_STV0299=m
1514CONFIG_DVB_CX24110=m
1515CONFIG_DVB_TDA8083=m
1516CONFIG_DVB_TDA80XX=m
1517CONFIG_DVB_MT312=m
1518CONFIG_DVB_VES1X93=m
1519
1520#
1521# DVB-T (terrestrial) frontends
1522#
1523CONFIG_DVB_SP8870=m
1524CONFIG_DVB_SP887X=m
1525CONFIG_DVB_CX22700=m
1526CONFIG_DVB_CX22702=m
1527CONFIG_DVB_L64781=m
1528CONFIG_DVB_TDA1004X=m
1529CONFIG_DVB_NXT6000=m
1530CONFIG_DVB_MT352=m
1531CONFIG_DVB_DIB3000MB=m
1532CONFIG_DVB_DIB3000MC=m
1533
1534#
1535# DVB-C (cable) frontends
1536#
1537CONFIG_DVB_ATMEL_AT76C651=m
1538CONFIG_DVB_VES1820=m
1539CONFIG_DVB_TDA10021=m
1540CONFIG_DVB_STV0297=m
1541
1542#
1543# ATSC (North American/Korean Terresterial DTV) frontends
1544#
1545CONFIG_DVB_NXT2002=m
1546CONFIG_VIDEO_SAA7146=m
1547CONFIG_VIDEO_SAA7146_VV=m
1548CONFIG_VIDEO_VIDEOBUF=m
1549CONFIG_VIDEO_TUNER=m
1550CONFIG_VIDEO_BUF=m
1551CONFIG_VIDEO_BTCX=m
1552CONFIG_VIDEO_IR=m
1553CONFIG_VIDEO_TVEEPROM=m
1554
1555#
1556# Sound
1557#
1558CONFIG_SOUND=m
1559
1560#
1561# Advanced Linux Sound Architecture
1562#
1563CONFIG_SND=m
1564CONFIG_SND_TIMER=m
1565CONFIG_SND_PCM=m
1566CONFIG_SND_HWDEP=m
1567CONFIG_SND_RAWMIDI=m
1568CONFIG_SND_SEQUENCER=m
1569CONFIG_SND_SEQ_DUMMY=m
1570CONFIG_SND_OSSEMUL=y
1571CONFIG_SND_MIXER_OSS=m
1572CONFIG_SND_PCM_OSS=m
1573CONFIG_SND_SEQUENCER_OSS=y
1574CONFIG_SND_BIT32_EMUL=m
1575# CONFIG_SND_RTCTIMER is not set
1576# CONFIG_SND_VERBOSE_PRINTK is not set
1577# CONFIG_SND_DEBUG is not set
1578
1579#
1580# Generic devices
1581#
1582CONFIG_SND_MPU401_UART=m
1583CONFIG_SND_OPL3_LIB=m
1584CONFIG_SND_VX_LIB=m
1585CONFIG_SND_DUMMY=m
1586CONFIG_SND_VIRMIDI=m
1587# CONFIG_SND_MTPAV is not set
1588# CONFIG_SND_SERIAL_U16550 is not set
1589# CONFIG_SND_MPU401 is not set
1590
1591#
1592# PCI devices
1593#
1594CONFIG_SND_AC97_CODEC=m
1595CONFIG_SND_ALI5451=m
1596CONFIG_SND_ATIIXP=m
1597CONFIG_SND_ATIIXP_MODEM=m
1598CONFIG_SND_AU8810=m
1599CONFIG_SND_AU8820=m
1600CONFIG_SND_AU8830=m
1601CONFIG_SND_AZT3328=m
1602CONFIG_SND_BT87X=m
1603# CONFIG_SND_BT87X_OVERCLOCK is not set
1604CONFIG_SND_CS46XX=m
1605# CONFIG_SND_CS46XX_NEW_DSP is not set
1606CONFIG_SND_CS4281=m
1607CONFIG_SND_EMU10K1=m
1608CONFIG_SND_EMU10K1X=m
1609CONFIG_SND_CA0106=m
1610CONFIG_SND_KORG1212=m
1611CONFIG_SND_MIXART=m
1612CONFIG_SND_NM256=m
1613# CONFIG_SND_RME32 is not set
1614# CONFIG_SND_RME96 is not set
1615# CONFIG_SND_RME9652 is not set
1616# CONFIG_SND_HDSP is not set
1617CONFIG_SND_TRIDENT=m
1618CONFIG_SND_YMFPCI=m
1619CONFIG_SND_ALS4000=m
1620CONFIG_SND_CMIPCI=m
1621CONFIG_SND_ENS1370=m
1622CONFIG_SND_ENS1371=m
1623CONFIG_SND_ES1938=m
1624CONFIG_SND_ES1968=m
1625CONFIG_SND_MAESTRO3=m
1626CONFIG_SND_FM801=m
1627CONFIG_SND_FM801_TEA575X=m
1628CONFIG_SND_ICE1712=m
1629# CONFIG_SND_ICE1724 is not set
1630CONFIG_SND_INTEL8X0=m
1631CONFIG_SND_INTEL8X0M=m
1632CONFIG_SND_SONICVIBES=m
1633# CONFIG_SND_VIA82XX is not set
1634CONFIG_SND_VIA82XX_MODEM=m
1635CONFIG_SND_VX222=m
1636
1637#
1638# USB devices
1639#
1640# CONFIG_SND_USB_AUDIO is not set
1641
1642#
1643# ALSA Sparc devices
1644#
1645CONFIG_SND_SUN_AMD7930=m
1646CONFIG_SND_SUN_CS4231=m
1647
1648#
1649# USB support
1650#
1651CONFIG_USB=y
1652# CONFIG_USB_DEBUG is not set
1653
1654#
1655# Miscellaneous USB options
1656#
1657CONFIG_USB_DEVICEFS=y
1658# CONFIG_USB_BANDWIDTH is not set
1659# CONFIG_USB_DYNAMIC_MINORS is not set
1660# CONFIG_USB_OTG is not set
1661CONFIG_USB_ARCH_HAS_HCD=y
1662CONFIG_USB_ARCH_HAS_OHCI=y
1663
1664#
1665# USB Host Controller Drivers
1666#
1667CONFIG_USB_EHCI_HCD=m
1668# CONFIG_USB_EHCI_SPLIT_ISO is not set
1669# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1670CONFIG_USB_OHCI_HCD=y
1671CONFIG_USB_UHCI_HCD=m
1672CONFIG_USB_SL811_HCD=m
1673
1674#
1675# USB Device Class drivers
1676#
1677# CONFIG_USB_AUDIO is not set
1678
1679#
1680# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
1681#
1682# CONFIG_USB_MIDI is not set
1683CONFIG_USB_ACM=m
1684CONFIG_USB_PRINTER=m
1685
1686#
1687# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
1688#
1689CONFIG_USB_STORAGE=m
1690# CONFIG_USB_STORAGE_DEBUG is not set
1691CONFIG_USB_STORAGE_RW_DETECT=y
1692# CONFIG_USB_STORAGE_DATAFAB is not set
1693CONFIG_USB_STORAGE_FREECOM=y
1694CONFIG_USB_STORAGE_ISD200=y
1695CONFIG_USB_STORAGE_DPCM=y
1696CONFIG_USB_STORAGE_HP8200e=y
1697CONFIG_USB_STORAGE_SDDR09=y
1698CONFIG_USB_STORAGE_SDDR55=y
1699# CONFIG_USB_STORAGE_JUMPSHOT is not set
1700
1701#
1702# USB Input Devices
1703#
1704CONFIG_USB_HID=y
1705CONFIG_USB_HIDINPUT=y
1706# CONFIG_HID_FF is not set
1707CONFIG_USB_HIDDEV=y
1708# CONFIG_USB_AIPTEK is not set
1709CONFIG_USB_WACOM=m
1710CONFIG_USB_KBTAB=m
1711# CONFIG_USB_POWERMATE is not set
1712CONFIG_USB_MTOUCH=m
1713CONFIG_USB_EGALAX=m
1714# CONFIG_USB_XPAD is not set
1715CONFIG_USB_ATI_REMOTE=m
1716
1717#
1718# USB Imaging devices
1719#
1720CONFIG_USB_MDC800=m
1721CONFIG_USB_MICROTEK=m
1722
1723#
1724# USB Multimedia devices
1725#
1726# CONFIG_USB_DABUSB is not set
1727# CONFIG_USB_VICAM is not set
1728# CONFIG_USB_DSBR is not set
1729# CONFIG_USB_IBMCAM is not set
1730# CONFIG_USB_KONICAWC is not set
1731# CONFIG_USB_OV511 is not set
1732# CONFIG_USB_SE401 is not set
1733CONFIG_USB_SN9C102=m
1734# CONFIG_USB_STV680 is not set
1735CONFIG_USB_W9968CF=m
1736
1737#
1738# USB Network Adapters
1739#
1740CONFIG_USB_CATC=m
1741CONFIG_USB_KAWETH=m
1742CONFIG_USB_PEGASUS=m
1743CONFIG_USB_RTL8150=m
1744CONFIG_USB_USBNET=m
1745
1746#
1747# USB Host-to-Host Cables
1748#
1749CONFIG_USB_ALI_M5632=y
1750CONFIG_USB_AN2720=y
1751CONFIG_USB_BELKIN=y
1752CONFIG_USB_GENESYS=y
1753CONFIG_USB_NET1080=y
1754CONFIG_USB_PL2301=y
1755CONFIG_USB_KC2190=y
1756
1757#
1758# Intelligent USB Devices/Gadgets
1759#
1760CONFIG_USB_ARMLINUX=y
1761CONFIG_USB_EPSON2888=y
1762CONFIG_USB_ZAURUS=y
1763CONFIG_USB_CDCETHER=y
1764
1765#
1766# USB Network Adapters
1767#
1768CONFIG_USB_AX8817X=y
1769
1770#
1771# USB port drivers
1772#
1773CONFIG_USB_USS720=m
1774
1775#
1776# USB Serial Converter support
1777#
1778CONFIG_USB_SERIAL=m
1779CONFIG_USB_SERIAL_GENERIC=y
1780CONFIG_USB_SERIAL_BELKIN=m
1781CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
1782CONFIG_USB_SERIAL_CYPRESS_M8=m
1783CONFIG_USB_SERIAL_EMPEG=m
1784CONFIG_USB_SERIAL_FTDI_SIO=m
1785# CONFIG_USB_SERIAL_VISOR is not set
1786CONFIG_USB_SERIAL_IPAQ=m
1787# CONFIG_USB_SERIAL_IR is not set
1788CONFIG_USB_SERIAL_EDGEPORT=m
1789# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
1790CONFIG_USB_SERIAL_GARMIN=m
1791CONFIG_USB_SERIAL_IPW=m
1792CONFIG_USB_SERIAL_KEYSPAN_PDA=m
1793CONFIG_USB_SERIAL_KEYSPAN=m
1794# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
1795# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
1796# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
1797# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
1798# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
1799# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
1800# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
1801# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
1802# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
1803# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
1804# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
1805# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
1806CONFIG_USB_SERIAL_KLSI=m
1807# CONFIG_USB_SERIAL_KOBIL_SCT is not set
1808CONFIG_USB_SERIAL_MCT_U232=m
1809CONFIG_USB_SERIAL_PL2303=m
1810# CONFIG_USB_SERIAL_SAFE is not set
1811CONFIG_USB_SERIAL_TI=m
1812CONFIG_USB_SERIAL_CYBERJACK=m
1813CONFIG_USB_SERIAL_XIRCOM=m
1814CONFIG_USB_SERIAL_OMNINET=m
1815CONFIG_USB_EZUSB=y
1816
1817#
1818# USB Miscellaneous drivers
1819#
1820CONFIG_USB_EMI62=m
1821CONFIG_USB_EMI26=m
1822CONFIG_USB_AUERSWALD=m
1823CONFIG_USB_RIO500=m
1824CONFIG_USB_LEGOTOWER=m
1825CONFIG_USB_LCD=m
1826CONFIG_USB_LED=m
1827CONFIG_USB_CYTHERM=m
1828CONFIG_USB_PHIDGETKIT=m
1829CONFIG_USB_PHIDGETSERVO=m
1830CONFIG_USB_IDMOUSE=m
1831CONFIG_USB_TEST=m
1832
1833#
1834# USB ATM/DSL drivers
1835#
1836CONFIG_USB_ATM=m
1837CONFIG_USB_SPEEDTOUCH=m
1838
1839#
1840# USB Gadget Support
1841#
1842# CONFIG_USB_GADGET is not set
1843
1844#
1845# InfiniBand support
1846#
1847CONFIG_INFINIBAND=m
1848CONFIG_INFINIBAND_MTHCA=m
1849# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
1850CONFIG_INFINIBAND_IPOIB=m
1851# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
1852
1853#
1854# Watchdog Cards
1855#
1856CONFIG_WATCHDOG=y
1857# CONFIG_WATCHDOG_NOWAYOUT is not set
1858
1859#
1860# Watchdog Device Drivers
1861#
1862CONFIG_SOFT_WATCHDOG=m
1863CONFIG_WATCHDOG_CP1XXX=m
1864CONFIG_WATCHDOG_RIO=m
1865
1866#
1867# PCI-based Watchdog Cards
1868#
1869CONFIG_PCIPCWATCHDOG=m
1870CONFIG_WDTPCI=m
1871CONFIG_WDT_501_PCI=y
1872
1873#
1874# USB-based Watchdog Cards
1875#
1876CONFIG_USBPCWATCHDOG=m
1877
1878#
1879# Profiling support
1880#
1881CONFIG_PROFILING=y
1882CONFIG_OPROFILE=m
1883
1884#
1885# Kernel hacking
1886#
1887CONFIG_DEBUG_KERNEL=y
1888CONFIG_MAGIC_SYSRQ=y
1889CONFIG_SCHEDSTATS=y
1890# CONFIG_DEBUG_SLAB is not set
1891# CONFIG_DEBUG_SPINLOCK is not set
1892# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1893# CONFIG_DEBUG_KOBJECT is not set
1894CONFIG_DEBUG_BUGVERBOSE=y
1895# CONFIG_DEBUG_INFO is not set
1896CONFIG_DEBUG_FS=y
1897# CONFIG_DEBUG_STACK_USAGE is not set
1898CONFIG_KPROBES=y
1899# CONFIG_DEBUG_DCFLUSH is not set
1900# CONFIG_STACK_DEBUG is not set
1901# CONFIG_DEBUG_BOOTMEM is not set
1902CONFIG_HAVE_DEC_LOCK=y
1903
1904#
1905# Security options
1906#
1907CONFIG_KEYS=y
1908# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
1909# CONFIG_SECURITY is not set
1910
1911#
1912# Cryptographic options
1913#
1914CONFIG_CRYPTO=y
1915CONFIG_CRYPTO_HMAC=y
1916CONFIG_CRYPTO_NULL=m
1917CONFIG_CRYPTO_MD4=y
1918CONFIG_CRYPTO_MD5=y
1919CONFIG_CRYPTO_SHA1=y
1920CONFIG_CRYPTO_SHA256=m
1921CONFIG_CRYPTO_SHA512=m
1922CONFIG_CRYPTO_WP512=m
1923CONFIG_CRYPTO_TGR192=m
1924CONFIG_CRYPTO_DES=y
1925CONFIG_CRYPTO_BLOWFISH=m
1926CONFIG_CRYPTO_TWOFISH=m
1927CONFIG_CRYPTO_SERPENT=m
1928CONFIG_CRYPTO_AES=m
1929CONFIG_CRYPTO_CAST5=m
1930CONFIG_CRYPTO_CAST6=m
1931CONFIG_CRYPTO_TEA=m
1932CONFIG_CRYPTO_ARC4=m
1933CONFIG_CRYPTO_KHAZAD=m
1934CONFIG_CRYPTO_ANUBIS=m
1935CONFIG_CRYPTO_DEFLATE=y
1936CONFIG_CRYPTO_MICHAEL_MIC=m
1937CONFIG_CRYPTO_CRC32C=m
1938CONFIG_CRYPTO_TEST=m
1939
1940#
1941# Hardware crypto devices
1942#
1943
1944#
1945# Library routines
1946#
1947CONFIG_CRC_CCITT=m
1948CONFIG_CRC32=y
1949CONFIG_LIBCRC32C=m
1950CONFIG_ZLIB_INFLATE=y
1951CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
new file mode 100644
index 000000000000..093281bdf85f
--- /dev/null
+++ b/arch/sparc64/kernel/Makefile
@@ -0,0 +1,44 @@
# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
# Makefile for the linux kernel.
#

EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror

# Built but not part of obj-y: head.o must be first in the link, and the
# linker script is generated per-build.
extra-y		:= head.o init_task.o vmlinux.lds

obj-y		:= process.o setup.o cpu.o idprom.o \
		   traps.o devices.o auxio.o \
		   irq.o ptrace.o time.o sys_sparc.o signal.o \
		   unaligned.o central.o pci.o starfire.o semaphore.o \
		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o

# Optional objects keyed off kernel configuration symbols.
obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
			    pci_psycho.o pci_sabre.o pci_schizo.o
obj-$(CONFIG_SMP)	 += smp.o trampoline.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
obj-$(CONFIG_KPROBES) += kprobes.o

# SunOS and Solaris emulation share the same SunOS compat objects; add
# them if either emulation is configured.
ifdef CONFIG_SUNOS_EMUL
  obj-y += sys_sunos32.o sunos_ioctl32.o
else
  ifdef CONFIG_SOLARIS_EMUL
    obj-y += sys_sunos32.o sunos_ioctl32.o
  endif
endif

# Older compilers spell the small code model differently.
ifneq ($(NEW_GCC),y)
  CMODEL_CFLAG := -mmedlow
else
  CMODEL_CFLAG := -m64 -mcmodel=medlow
endif

# head.S textually includes these .S files, so rebuild when any change.
head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
	etrap.S rtrap.S winfixup.S entry.S

CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
new file mode 100644
index 000000000000..a0716ccc2f4a
--- /dev/null
+++ b/arch/sparc64/kernel/auxio.c
@@ -0,0 +1,152 @@
1/* auxio.c: Probing for the Sparc AUXIO register at boot time.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 *
5 * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/io.h>
15#include <asm/sbus.h>
16#include <asm/ebus.h>
17#include <asm/auxio.h>
18
/* This cannot be static, as it is referenced in entry.S */
void __iomem *auxio_register = NULL;

/* Which flavor of AUXIO register auxio_probe() found. */
enum auxio_type {
	AUXIO_TYPE_NODEV,	/* no auxio device probed yet */
	AUXIO_TYPE_SBUS,	/* SBUS register, sbus_readb/sbus_writeb access */
	AUXIO_TYPE_EBUS		/* EBUS (PCI) register, readl/writel access */
};

static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
/* Serializes the read-modify-write cycles on auxio_register. */
static DEFINE_SPINLOCK(auxio_lock);
30
31static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
32{
33 if (auxio_register) {
34 unsigned char regval;
35 unsigned long flags;
36 unsigned char newval;
37
38 spin_lock_irqsave(&auxio_lock, flags);
39
40 regval = sbus_readb(auxio_register);
41 newval = regval | bits_on;
42 newval &= ~bits_off;
43 newval &= ~AUXIO_AUX1_MASK;
44 sbus_writeb(newval, auxio_register);
45
46 spin_unlock_irqrestore(&auxio_lock, flags);
47 }
48}
49
50static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
51{
52 if (auxio_register) {
53 unsigned char regval;
54 unsigned long flags;
55 unsigned char newval;
56
57 spin_lock_irqsave(&auxio_lock, flags);
58
59 regval = (u8)readl(auxio_register);
60 newval = regval | bits_on;
61 newval &= ~bits_off;
62 writel((u32)newval, auxio_register);
63
64 spin_unlock_irqrestore(&auxio_lock, flags);
65 }
66}
67
68static inline void __auxio_ebus_set_led(int on)
69{
70 (on) ? __auxio_ebus_set(AUXIO_PCIO_LED, 0) :
71 __auxio_ebus_set(0, AUXIO_PCIO_LED) ;
72}
73
74static inline void __auxio_sbus_set_led(int on)
75{
76 (on) ? __auxio_sbus_set(AUXIO_AUX1_LED, 0) :
77 __auxio_sbus_set(0, AUXIO_AUX1_LED) ;
78}
79
80void auxio_set_led(int on)
81{
82 switch(auxio_devtype) {
83 case AUXIO_TYPE_SBUS:
84 __auxio_sbus_set_led(on);
85 break;
86 case AUXIO_TYPE_EBUS:
87 __auxio_ebus_set_led(on);
88 break;
89 default:
90 break;
91 }
92}
93
94static inline void __auxio_sbus_set_lte(int on)
95{
96 (on) ? __auxio_sbus_set(AUXIO_AUX1_LTE, 0) :
97 __auxio_sbus_set(0, AUXIO_AUX1_LTE) ;
98}
99
100void auxio_set_lte(int on)
101{
102 switch(auxio_devtype) {
103 case AUXIO_TYPE_SBUS:
104 __auxio_sbus_set_lte(on);
105 break;
106 case AUXIO_TYPE_EBUS:
107 /* FALL-THROUGH */
108 default:
109 break;
110 }
111}
112
113void __init auxio_probe(void)
114{
115 struct sbus_bus *sbus;
116 struct sbus_dev *sdev = NULL;
117
118 for_each_sbus(sbus) {
119 for_each_sbusdev(sdev, sbus) {
120 if(!strcmp(sdev->prom_name, "auxio"))
121 goto found_sdev;
122 }
123 }
124
125found_sdev:
126 if (sdev) {
127 auxio_devtype = AUXIO_TYPE_SBUS;
128 auxio_register = sbus_ioremap(&sdev->resource[0], 0,
129 sdev->reg_addrs[0].reg_size,
130 "auxiliaryIO");
131 }
132#ifdef CONFIG_PCI
133 else {
134 struct linux_ebus *ebus;
135 struct linux_ebus_device *edev = NULL;
136
137 for_each_ebus(ebus) {
138 for_each_ebusdev(edev, ebus) {
139 if (!strcmp(edev->prom_name, "auxio"))
140 goto ebus_done;
141 }
142 }
143 ebus_done:
144 if (edev) {
145 auxio_devtype = AUXIO_TYPE_EBUS;
146 auxio_register =
147 ioremap(edev->resource[0].start, sizeof(u32));
148 }
149 }
150 auxio_set_led(AUXIO_LED_ON);
151#endif
152}
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
new file mode 100644
index 000000000000..b2854ef221d0
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -0,0 +1,424 @@
1/*
2 * linux/fs/binfmt_aout.c
3 *
4 * Copyright (C) 1991, 1992, 1996 Linus Torvalds
5 *
6 * Hacked a bit by DaveM to make it work with 32-bit SunOS
7 * binaries on the sparc64 port.
8 */
9
10#include <linux/module.h>
11
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/a.out.h>
17#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/string.h>
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/stat.h>
23#include <linux/fcntl.h>
24#include <linux/ptrace.h>
25#include <linux/user.h>
26#include <linux/slab.h>
27#include <linux/binfmts.h>
28#include <linux/personality.h>
29#include <linux/init.h>
30
31#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <asm/pgalloc.h>
34
35static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
36static int load_aout32_library(struct file*);
37static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *file);
38
39extern void dump_thread(struct pt_regs *, struct user *);
40
/* Binary-format handler registration.  Positional initializer order is:
 * { next, module, load_binary, load_shlib, core_dump, min_coredump }.
 */
static struct linux_binfmt aout32_format = {
	NULL, THIS_MODULE, load_aout32_binary, load_aout32_library, aout32_core_dump,
	PAGE_SIZE
};
45
46static void set_brk(unsigned long start, unsigned long end)
47{
48 start = PAGE_ALIGN(start);
49 end = PAGE_ALIGN(end);
50 if (end <= start)
51 return;
52 down_write(&current->mm->mmap_sem);
53 do_brk(start, end - start);
54 up_write(&current->mm->mmap_sem);
55}
56
57/*
58 * These are the only things you should do on a core-file: use only these
59 * macros to write out all the necessary info.
60 */
61
/* Write 'nr' bytes from 'addr' at the file's current position.
 * Returns nonzero only when all 'nr' bytes were written.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
66
/* Emit 'nr' bytes at 'addr' into the core file; on a short write, bail
 * out through the enclosing function's 'end_coredump' label.  Only
 * usable inside a function that defines that label.
 */
#define DUMP_WRITE(addr, nr) \
	if (!dump_write(file, (void *)(addr), (nr))) \
		goto end_coredump;

/* Seek the core file to 'offset'.  Falls back to poking f_pos directly
 * when the file has no llseek op; a failed llseek jumps to the
 * enclosing 'end_coredump' label.
 */
#define DUMP_SEEK(offset) \
if (file->f_op->llseek) { \
	if (file->f_op->llseek(file,(offset),0) != (offset)) \
		goto end_coredump; \
} else file->f_pos = (offset)
76
77/*
78 * Routine writes a core dump image in the current directory.
79 * Currently only a stub-function.
80 *
81 * Note that setuid/setgid files won't make a core-dump if the uid/gid
82 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
83 * field, which also makes sure the core-dumps won't be recursive if the
84 * dumping of the process results in another error..
85 */
86
/* Write an a.out-format core image of the current (SunOS 32-bit)
 * process to 'file'.  Layout: struct user header, then the data area,
 * then the stack, then the raw task_struct.  Returns 1 when a dump was
 * attempted (the DUMP_WRITE macros bail to end_coredump on error, but
 * has_dumped is already set by then).
 */
static int aout32_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
	mm_segment_t fs;
	int has_dumped = 0;
	unsigned long dump_start, dump_size;
	struct user dump;
#	define START_DATA(u)	(u.u_tsize)
#	define START_STACK(u)   ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))

	/* Save the caller's address-space limit; restored at exit. */
	fs = get_fs();
	set_fs(KERNEL_DS);
	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
	dump.signal = signr;
	dump_thread(regs, &dump);

/* If the size of the dump file exceeds the rlimit, then see what would happen
   if we wrote the stack, but not the data area. */
	if ((dump.u_dsize+dump.u_ssize) >
	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
		dump.u_dsize = 0;

/* Make sure we have enough room to write the stack and data areas. */
	if ((dump.u_ssize) >
	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
		dump.u_ssize = 0;

/* make sure we actually have a data and stack area to dump */
	/* access_ok() must run with USER_DS so the ranges are checked
	 * against the user segment, not the kernel one. */
	set_fs(USER_DS);
	if (!access_ok(VERIFY_READ, (void __user *) START_DATA(dump), dump.u_dsize))
		dump.u_dsize = 0;
	if (!access_ok(VERIFY_READ, (void __user *) START_STACK(dump), dump.u_ssize))
		dump.u_ssize = 0;

	set_fs(KERNEL_DS);
/* struct user */
	DUMP_WRITE(&dump,sizeof(dump));
/* now we start writing out the user space info */
	set_fs(USER_DS);
/* Dump the data area */
	if (dump.u_dsize != 0) {
		dump_start = START_DATA(dump);
		dump_size = dump.u_dsize;
		DUMP_WRITE(dump_start,dump_size);
	}
/* Now prepare to dump the stack area */
	if (dump.u_ssize != 0) {
		dump_start = START_STACK(dump);
		dump_size = dump.u_ssize;
		DUMP_WRITE(dump_start,dump_size);
	}
/* Finally dump the task struct. Not be used by gdb, but could be useful */
	set_fs(KERNEL_DS);
	DUMP_WRITE(current,sizeof(*current));
end_coredump:
	/* Restore the caller's address-space limit in every exit path. */
	set_fs(fs);
	return has_dumped;
}
146
147/*
148 * create_aout32_tables() parses the env- and arg-strings in new user
149 * memory and creates the pointer tables from them, and puts their
150 * addresses on the "stack", returning the new stack pointer value.
151 */
152
/*
 * create_aout32_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", returning the new stack pointer value.
 *
 * 'p' points at the packed NUL-terminated argument strings already
 * copied to the top of the new user stack; the tables of 32-bit
 * pointers (argc, argv[], NULL, envp[], NULL) are built just below
 * them.  All stores go through put_user() into user memory.
 */
static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bprm)
{
	u32 __user *argv;
	u32 __user *envp;
	u32 __user *sp;
	int argc = bprm->argc;
	int envc = bprm->envc;

	/* Round p down to a (64-bit) pointer boundary. */
	sp = (u32 __user *)((-(unsigned long)sizeof(char *))&(unsigned long)p);

	/* This imposes the proper stack alignment for a new process. */
	sp = (u32 __user *) (((unsigned long) sp) & ~7);
	/* Keep the final sp 8-byte aligned after pushing an odd/even
	 * total of 32-bit slots. */
	if ((envc+argc+3)&1)
		--sp;

	/* Reserve envc+1 slots for envp[] and argc+1 for argv[]
	 * (the +1 in each is the NULL terminator). */
	sp -= envc+1;
	envp = sp;
	sp -= argc+1;
	argv = sp;
	put_user(argc,--sp);
	current->mm->arg_start = (unsigned long) p;
	/* Walk each argument string: record its user address, then skip
	 * past its NUL terminator. */
	while (argc-->0) {
		char c;
		put_user(((u32)(unsigned long)(p)),argv++);
		do {
			get_user(c,p++);
		} while (c);
	}
	put_user(NULL,argv);	/* NULL-terminate argv[] */
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	/* Same walk for the environment strings. */
	while (envc-->0) {
		char c;
		put_user(((u32)(unsigned long)(p)),envp++);
		do {
			get_user(c,p++);
		} while (c);
	}
	put_user(NULL,envp);	/* NULL-terminate envp[] */
	current->mm->env_end = (unsigned long) p;
	return sp;
}
194
195/*
196 * These are the functions used to load a.out style executables and shared
197 * libraries. There is no binary dependent code anywhere else.
198 */
199
200static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
201{
202 struct exec ex;
203 unsigned long error;
204 unsigned long fd_offset;
205 unsigned long rlim;
206 unsigned long orig_thr_flags;
207 int retval;
208
209 ex = *((struct exec *) bprm->buf); /* exec-header */
210 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
211 N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
212 N_TRSIZE(ex) || N_DRSIZE(ex) ||
213 bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
214 return -ENOEXEC;
215 }
216
217 fd_offset = N_TXTOFF(ex);
218
219 /* Check initial limits. This avoids letting people circumvent
220 * size limits imposed on them by creating programs with large
221 * arrays in the data or bss.
222 */
223 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
224 if (rlim >= RLIM_INFINITY)
225 rlim = ~0;
226 if (ex.a_data + ex.a_bss > rlim)
227 return -ENOMEM;
228
229 /* Flush all traces of the currently running executable */
230 retval = flush_old_exec(bprm);
231 if (retval)
232 return retval;
233
234 /* OK, This is the point of no return */
235 set_personality(PER_SUNOS);
236
237 current->mm->end_code = ex.a_text +
238 (current->mm->start_code = N_TXTADDR(ex));
239 current->mm->end_data = ex.a_data +
240 (current->mm->start_data = N_DATADDR(ex));
241 current->mm->brk = ex.a_bss +
242 (current->mm->start_brk = N_BSSADDR(ex));
243
244 set_mm_counter(current->mm, rss, 0);
245 current->mm->mmap = NULL;
246 compute_creds(bprm);
247 current->flags &= ~PF_FORKNOEXEC;
248 if (N_MAGIC(ex) == NMAGIC) {
249 loff_t pos = fd_offset;
250 /* Fuck me plenty... */
251 down_write(&current->mm->mmap_sem);
252 error = do_brk(N_TXTADDR(ex), ex.a_text);
253 up_write(&current->mm->mmap_sem);
254 bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
255 ex.a_text, &pos);
256 down_write(&current->mm->mmap_sem);
257 error = do_brk(N_DATADDR(ex), ex.a_data);
258 up_write(&current->mm->mmap_sem);
259 bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
260 ex.a_data, &pos);
261 goto beyond_if;
262 }
263
264 if (N_MAGIC(ex) == OMAGIC) {
265 loff_t pos = fd_offset;
266 down_write(&current->mm->mmap_sem);
267 do_brk(N_TXTADDR(ex) & PAGE_MASK,
268 ex.a_text+ex.a_data + PAGE_SIZE - 1);
269 up_write(&current->mm->mmap_sem);
270 bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
271 ex.a_text+ex.a_data, &pos);
272 } else {
273 static unsigned long error_time;
274 if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
275 (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ)
276 {
277 printk(KERN_NOTICE "executable not page aligned\n");
278 error_time = jiffies;
279 }
280
281 if (!bprm->file->f_op->mmap) {
282 loff_t pos = fd_offset;
283 down_write(&current->mm->mmap_sem);
284 do_brk(0, ex.a_text+ex.a_data);
285 up_write(&current->mm->mmap_sem);
286 bprm->file->f_op->read(bprm->file,
287 (char __user *)N_TXTADDR(ex),
288 ex.a_text+ex.a_data, &pos);
289 goto beyond_if;
290 }
291
292 down_write(&current->mm->mmap_sem);
293 error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
294 PROT_READ | PROT_EXEC,
295 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
296 fd_offset);
297 up_write(&current->mm->mmap_sem);
298
299 if (error != N_TXTADDR(ex)) {
300 send_sig(SIGKILL, current, 0);
301 return error;
302 }
303
304 down_write(&current->mm->mmap_sem);
305 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
306 PROT_READ | PROT_WRITE | PROT_EXEC,
307 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
308 fd_offset + ex.a_text);
309 up_write(&current->mm->mmap_sem);
310 if (error != N_DATADDR(ex)) {
311 send_sig(SIGKILL, current, 0);
312 return error;
313 }
314 }
315beyond_if:
316 set_binfmt(&aout32_format);
317
318 set_brk(current->mm->start_brk, current->mm->brk);
319
320 /* Make sure STACK_TOP returns the right thing. */
321 orig_thr_flags = current_thread_info()->flags;
322 current_thread_info()->flags |= _TIF_32BIT;
323
324 retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
325 if (retval < 0) {
326 current_thread_info()->flags = orig_thr_flags;
327
328 /* Someone check-me: is this error path enough? */
329 send_sig(SIGKILL, current, 0);
330 return retval;
331 }
332
333 current->mm->start_stack =
334 (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
335 if (!(orig_thr_flags & _TIF_32BIT)) {
336 unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
337
338 __asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
339 "membar #Sync"
340 : /* no outputs */
341 : "r" (pgd_cache),
342 "r" (TSB_REG), "i" (ASI_DMMU));
343 }
344 start_thread32(regs, ex.a_entry, current->mm->start_stack);
345 if (current->ptrace & PT_PTRACED)
346 send_sig(SIGTRAP, current, 0);
347 return 0;
348}
349
/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
/* Load a classic a.out (ZMAGIC/QMAGIC) shared library for 32-bit
 * compatibility.  Maps the text+data image with do_mmap() and grows
 * the mapping with do_brk() for any bss beyond the file-backed part.
 * Returns 0 on success, otherwise a negative errno (mostly -ENOEXEC).
 */
static int load_aout32_library(struct file *file)
{
	struct inode * inode;
	unsigned long bss, start_addr, len;
	unsigned long error;
	int retval;
	struct exec ex;

	inode = file->f_dentry->d_inode;

	retval = -ENOEXEC;
	/* Read the a.out exec header from the start of the file. */
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
		goto out;

	/* We come in here for the regular a.out style of shared libraries */
	/* Reject anything that is not plain ZMAGIC/QMAGIC with no
	 * relocations, a sane on-disk size, and (for ZMAGIC) a
	 * page-aligned entry point.
	 */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		goto out;
	}

	/* Old-format ZMAGIC libraries put the text at an offset smaller
	 * than the filesystem block size, which cannot be mmap()ed.
	 */
	if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
	    (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
		printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
		goto out;
	}

	if (N_FLAGS(ex))
		goto out;

	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
	   this off to get the starting address for the page */

	start_addr =  ex.a_entry & 0xfffff000;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			N_TXTOFF(ex));
	up_write(&current->mm->mmap_sem);
	retval = error;
	if (error != start_addr)
		goto out;

	/* Anonymous zero-fill pages for whatever bss extends past the
	 * page-aligned end of the file-backed mapping.
	 */
	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(start_addr + len, bss - len);
		up_write(&current->mm->mmap_sem);
		retval = error;
		if (error != start_addr + len)
			goto out;
	}
	retval = 0;
out:
	return retval;
}
412
413static int __init init_aout32_binfmt(void)
414{
415 return register_binfmt(&aout32_format);
416}
417
418static void __exit exit_aout32_binfmt(void)
419{
420 unregister_binfmt(&aout32_format);
421}
422
/* Register the loader at boot (or module insertion) and tear it
 * down again on module removal.
 */
module_init(init_aout32_binfmt);
module_exit(exit_aout32_binfmt);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..a1a12d2aa353
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -0,0 +1,159 @@
1/*
2 * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra.
3 *
4 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
6 */
7
/* Tell fs/binfmt_elf.c what we accept: 32-bit big-endian SPARC objects. */
#define ELF_ARCH		EM_SPARC
#define ELF_CLASS		ELFCLASS32
/* Fixed: the original definition carried a stray trailing ';', which
 * breaks any expression use of ELF_DATA (e.g. comparisons against
 * e_ident[EI_DATA]); only bare assignments happened to compile.
 */
#define ELF_DATA		ELFDATA2MSB
11
/* For the most part we present code dumps in the format
 * Solaris does.
 */
/* One 32-bit slot per saved register; 38 slots total (layout below). */
typedef unsigned int elf_greg_t;
#define ELF_NGREG 38
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
18
/* Format is:
 *	G0 --> G7
 *	O0 --> O7
 *	L0 --> L7
 *	I0 --> I7
 *	PSR, PC, nPC, Y, WIM, TBR
 */
#include <asm/psrcompat.h>
/* Fill a 38-word elf_gregset_t from the trap-time pt_regs for a
 * compat core dump.  The 16 local/in registers live in the register
 * window saved on the user stack, so they are fetched one by one
 * with __get_user() from the stack pointer (masked to 32 bits);
 * fetch faults are silently ignored, leaving those slots as-is.
 * The 64-bit tstate is squashed into a fake 32-bit PSR.
 * NOTE(review): the trailing ';' after while(0) looks like a bug but
 * appears deliberate -- the call site in include/linux/elfcore.h
 * invokes this macro without a terminating semicolon; confirm before
 * "fixing" it.
 */
#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs)	\
do {	unsigned int *dest = &(__elf_regs[0]);		\
	struct pt_regs *src = (__pt_regs);		\
	unsigned int __user *sp;			\
	int i;						\
	for(i = 0; i < 16; i++)				\
		dest[i] = (unsigned int) src->u_regs[i];\
	/* Don't try this at home kids... */		\
	sp = (unsigned int __user *) (src->u_regs[14] &	\
				      0x00000000fffffffc); \
	for(i = 0; i < 16; i++)				\
		__get_user(dest[i+16], &sp[i]);		\
	dest[32] = tstate_to_psr(src->tstate);		\
	dest[33] = (unsigned int) src->tpc;		\
	dest[34] = (unsigned int) src->tnpc;		\
	dest[35] = src->y;				\
	dest[36] = dest[37] = 0; /* XXX */		\
} while(0);
45
/* 32-bit FPU core-dump image in the Solaris layout: 32 single (16
 * double) FP registers, the FSR, and FP-queue state.  The layout is
 * ABI -- do not reorder or resize fields.
 */
typedef struct {
	union {
		unsigned int	pr_regs[32];
		unsigned long	pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t;
58
/* UltraSparc extensions.  Still unused, but will be eventually. */
/* Candidate note layout for V8+/V9 extended state (extended FP,
 * FPRS, 64-bit globals/outs, TSTATE); pr_type/pr_align would tag
 * which union member is valid.  Layout would be ABI once emitted.
 */
typedef struct {
	unsigned int pr_type;
	unsigned int pr_align;
	union {
		struct {
			union {
				unsigned int pr_regs[32];
				unsigned long pr_dregs[16];
				long double pr_qregs[8];
			} pr_xfr;
		} pr_v8p;
		unsigned int pr_xfsr;
		unsigned int pr_fprs;
		unsigned int pr_xg[8];
		unsigned int pr_xo[8];
		unsigned long pr_tstate;
		unsigned int pr_filler[8];
	} pr_un;
} elf_xregset_t;
79
/* Accept both plain V8 and V8+ (sun4u 32-bit) machine types. */
#define elf_check_arch(x) (((x)->e_machine == EM_SPARC) || ((x)->e_machine == EM_SPARC32PLUS))

/* Base for ET_DYN objects, well clear of a 32-bit executable's image. */
#define ELF_ET_DYN_BASE         0x70000000
83
84
85#include <asm/processor.h>
86#include <linux/module.h>
87#include <linux/config.h>
88#include <linux/elfcore.h>
89#include <linux/compat.h>
90
#define elf_prstatus elf_prstatus32
/* 32-bit layout of the NT_PRSTATUS core-dump note.  Field order and
 * sizes are ABI -- they must match what 32-bit debuggers expect.
 */
struct elf_prstatus32
{
	struct elf_siginfo pr_info;	/* Info associated with signal */
	short	pr_cursig;		/* Current signal */
	unsigned int pr_sigpend;	/* Set of pending signals */
	unsigned int pr_sighold;	/* Set of held signals */
	pid_t	pr_pid;
	pid_t	pr_ppid;
	pid_t	pr_pgrp;
	pid_t	pr_sid;
	struct compat_timeval pr_utime;	/* User time */
	struct compat_timeval pr_stime;	/* System time */
	struct compat_timeval pr_cutime;	/* Cumulative user time */
	struct compat_timeval pr_cstime;	/* Cumulative system time */
	elf_gregset_t pr_reg;	/* GP registers */
	int pr_fpvalid;		/* True if math co-processor being used.  */
};
109
#define elf_prpsinfo elf_prpsinfo32
/* 32-bit layout of the NT_PRPSINFO core-dump note; uid/gid are
 * squeezed into legacy 16-bit fields (see NEW_TO_OLD_* below).
 * Layout is ABI -- do not reorder.
 */
struct elf_prpsinfo32
{
	char	pr_state;	/* numeric process state */
	char	pr_sname;	/* char for pr_state */
	char	pr_zomb;	/* zombie */
	char	pr_nice;	/* nice val */
	unsigned int pr_flag;	/* flags */
	u16	pr_uid;
	u16	pr_gid;
	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
	/* Lots missing */
	char	pr_fname[16];	/* filename of executable */
	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
};
125
126#include <linux/highuid.h>
127
#undef NEW_TO_OLD_UID
#undef NEW_TO_OLD_GID
/* Clamp 32-bit IDs into the legacy 16-bit note fields, substituting
 * the overflow id when they do not fit.  Fixed: the expansions are
 * now fully parenthesized so the ternary cannot bind to neighboring
 * operators at a use site (the originals ended at the bare ternary).
 */
#define NEW_TO_OLD_UID(uid) ((uid) > 65535 ? (u16)overflowuid : (u16)(uid))
#define NEW_TO_OLD_GID(gid) ((gid) > 65535 ? (u16)overflowgid : (u16)(gid))
132
133#include <linux/time.h>
134
135#undef cputime_to_timeval
136#define cputime_to_timeval cputime_to_compat_timeval
137static __inline__ void
138cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
139{
140 unsigned long jiffies = cputime_to_jiffies(cputime);
141 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
142 value->tv_sec = jiffies / HZ;
143}
144
/* Aux-vector entries are 32 bits wide for compat processes. */
#define elf_addr_t	u32
#undef start_thread
#define start_thread start_thread32	/* start in a 32-bit register state */
#define init_elf_binfmt init_elf32_binfmt	/* avoid clashing with the native loader */

MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");

/* Undefine these so the included fs/binfmt_elf.c cannot re-emit them. */
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR

/* Compat processes get a 32-bit-sized address space limit. */
#undef TASK_SIZE
#define TASK_SIZE 0xf0000000
158
159#include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
new file mode 100644
index 000000000000..3d184a784968
--- /dev/null
+++ b/arch/sparc64/kernel/central.c
@@ -0,0 +1,457 @@
1/* $Id: central.c,v 1.15 2001/12/19 00:29:51 davem Exp $
2 * central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
3 *
4 * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/string.h>
10#include <linux/timer.h>
11#include <linux/sched.h>
12#include <linux/delay.h>
13#include <linux/init.h>
14#include <linux/bootmem.h>
15
16#include <asm/page.h>
17#include <asm/fhc.h>
18#include <asm/starfire.h>
19
/* Head of the central-bus description and the chain of all probed FHCs. */
struct linux_central *central_bus = NULL;
struct linux_fhc *fhc_list = NULL;

/* The central board's FHC is the one hung off central_bus->child. */
#define IS_CENTRAL_FHC(__fhc)	((__fhc) == central_bus->child)
24
/* Fatal probe error: report the offending central.c line on the PROM
 * console, then drop back into the firmware.  Does not return.
 */
static void central_probe_failure(int line)
{
	prom_printf("CENTRAL: Critical device probe failure at central.c:%d\n", line);
	prom_halt();
}
31
32static void central_ranges_init(int cnode, struct linux_central *central)
33{
34 int success;
35
36 central->num_central_ranges = 0;
37 success = prom_getproperty(central->prom_node, "ranges",
38 (char *) central->central_ranges,
39 sizeof (central->central_ranges));
40 if (success != -1)
41 central->num_central_ranges = (success/sizeof(struct linux_prom_ranges));
42}
43
44static void fhc_ranges_init(int fnode, struct linux_fhc *fhc)
45{
46 int success;
47
48 fhc->num_fhc_ranges = 0;
49 success = prom_getproperty(fhc->prom_node, "ranges",
50 (char *) fhc->fhc_ranges,
51 sizeof (fhc->fhc_ranges));
52 if (success != -1)
53 fhc->num_fhc_ranges = (success/sizeof(struct linux_prom_ranges));
54}
55
/* Range application routines are exported to various drivers,
 * so do not __init this.
 */
/* Rewrite each (space, address) register through the matching entry
 * of a "ranges" property, translating child-space addresses into the
 * parent's space.  A register whose space has no range entry is a
 * fatal probe error.
 */
static void adjust_regs(struct linux_prom_registers *regp, int nregs,
			struct linux_prom_ranges *rangep, int nranges)
{
	int regc, rngc;

	for (regc = 0; regc < nregs; regc++) {
		for (rngc = 0; rngc < nranges; rngc++)
			if (regp[regc].which_io == rangep[rngc].ot_child_space)
				break; /* Found it */
		if (rngc == nranges) /* oops */
			central_probe_failure(__LINE__);
		regp[regc].which_io = rangep[rngc].ot_parent_space;
		regp[regc].phys_addr -= rangep[rngc].ot_child_base;
		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
	}
}
75
76/* Apply probed fhc ranges to registers passed, if no ranges return. */
77void apply_fhc_ranges(struct linux_fhc *fhc,
78 struct linux_prom_registers *regs,
79 int nregs)
80{
81 if (fhc->num_fhc_ranges)
82 adjust_regs(regs, nregs, fhc->fhc_ranges,
83 fhc->num_fhc_ranges);
84}
85
86/* Apply probed central ranges to registers passed, if no ranges return. */
87void apply_central_ranges(struct linux_central *central,
88 struct linux_prom_registers *regs, int nregs)
89{
90 if (central->num_central_ranges)
91 adjust_regs(regs, nregs, central->central_ranges,
92 central->num_central_ranges);
93}
94
95void * __init central_alloc_bootmem(unsigned long size)
96{
97 void *ret;
98
99 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
100 if (ret != NULL)
101 memset(ret, 0, size);
102
103 return ret;
104}
105
106static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
107{
108 unsigned long ret = ((unsigned long) r->which_io) << 32;
109
110 return ret | (unsigned long) r->phys_addr;
111}
112
/* Scan the PROM root for all non-central "fhc" nodes, allocate and
 * chain a linux_fhc for each, record its register blocks, and detect
 * JTAG masters.  Any failure is fatal (central_probe_failure).
 */
static void probe_other_fhcs(void)
{
	struct linux_prom64_registers fpregs[6];
	char namebuf[128];
	int node;

	node = prom_getchild(prom_root_node);
	node = prom_searchsiblings(node, "fhc");
	if (node == 0)
		central_probe_failure(__LINE__);
	while (node) {
		struct linux_fhc *fhc;
		int board;
		u32 tmp;

		fhc = (struct linux_fhc *)
			central_alloc_bootmem(sizeof(struct linux_fhc));
		if (fhc == NULL)
			central_probe_failure(__LINE__);

		/* Link it into the FHC chain. */
		fhc->next = fhc_list;
		fhc_list = fhc;

		/* Toplevel FHCs have no parent. */
		fhc->parent = NULL;

		fhc->prom_node = node;
		prom_getstring(node, "name", namebuf, sizeof(namebuf));
		strcpy(fhc->prom_name, namebuf);
		fhc_ranges_init(node, fhc);

		/* Non-central FHC's have 64-bit OBP format registers. */
		if (prom_getproperty(node, "reg",
				     (char *)&fpregs[0], sizeof(fpregs)) == -1)
			central_probe_failure(__LINE__);

		/* Only central FHC needs special ranges applied. */
		fhc->fhc_regs.pregs = fpregs[0].phys_addr;
		fhc->fhc_regs.ireg = fpregs[1].phys_addr;
		fhc->fhc_regs.ffregs = fpregs[2].phys_addr;
		fhc->fhc_regs.sregs = fpregs[3].phys_addr;
		fhc->fhc_regs.uregs = fpregs[4].phys_addr;
		fhc->fhc_regs.tregs = fpregs[5].phys_addr;

		board = prom_getintdefault(node, "board#", -1);
		fhc->board = board;

		/* The JTAG-master enable bit lives in the JTAG control reg. */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_JCTRL);
		if ((tmp & FHC_JTAG_CTRL_MENAB) != 0)
			fhc->jtag_master = 1;
		else
			fhc->jtag_master = 0;

		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
		printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] %s\n",
		       board,
		       (tmp & FHC_ID_VERS) >> 28,
		       (tmp & FHC_ID_PARTID) >> 12,
		       (tmp & FHC_ID_MANUF) >> 1,
		       (fhc->jtag_master ? "(JTAG Master)" : ""));

		/* This bit must be set in all non-central FHC's in
		 * the system.  When it is clear, this identifies
		 * the central board.
		 */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
		tmp |= FHC_CONTROL_IXIST;
		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);

		/* Look for the next FHC. */
		node = prom_getsibling(node);
		if (node == 0)
			break;
		node = prom_searchsiblings(node, "fhc");
		if (node == 0)
			break;
	}
}
192
/* Locate the "clock-board" node under the central FHC, translate and
 * record its register banks, and derive the enclosure's slot count
 * from the clock status (and optional clock version) register.
 */
static void probe_clock_board(struct linux_central *central,
			      struct linux_fhc *fhc,
			      int cnode, int fnode)
{
	struct linux_prom_registers cregs[3];
	int clknode, nslots, tmp, nregs;

	clknode = prom_searchsiblings(prom_getchild(fnode), "clock-board");
	if (clknode == 0 || clknode == -1)
		central_probe_failure(__LINE__);

	nregs = prom_getproperty(clknode, "reg", (char *)&cregs[0], sizeof(cregs));
	if (nregs == -1)
		central_probe_failure(__LINE__);

	nregs /= sizeof(struct linux_prom_registers);
	/* Translate through both the FHC and the central "ranges". */
	apply_fhc_ranges(fhc, &cregs[0], nregs);
	apply_central_ranges(central, &cregs[0], nregs);
	central->cfreg = prom_reg_to_paddr(&cregs[0]);
	central->clkregs = prom_reg_to_paddr(&cregs[1]);

	/* A third register block -- the clock version register -- only
	 * exists on some enclosures.
	 */
	if (nregs == 2)
		central->clkver = 0UL;
	else
		central->clkver = prom_reg_to_paddr(&cregs[2]);

	tmp = upa_readb(central->clkregs + CLOCK_STAT1);
	tmp &= 0xc0;
	switch(tmp) {
	case 0x40:
		nslots = 16;
		break;
	case 0xc0:
		nslots = 8;
		break;
	case 0x80:
		if (central->clkver != 0UL &&
		    upa_readb(central->clkver) != 0) {
			if ((upa_readb(central->clkver) & 0x80) != 0)
				nslots = 4;
			else
				nslots = 5;
			break;
		}
		/* No usable version register: deliberately fall
		 * through to the default slot count.
		 */
	default:
		nslots = 4;
		break;
	};
	central->slots = nslots;
	printk("CENTRAL: Detected %d slot Enterprise system. cfreg[%02x] cver[%02x]\n",
	       central->slots, upa_readb(central->cfreg),
	       (central->clkver ? upa_readb(central->clkver) : 0x00));
}
246
247static void ZAP(unsigned long iclr, unsigned long imap)
248{
249 u32 imap_tmp;
250
251 upa_writel(0, iclr);
252 upa_readl(iclr);
253 imap_tmp = upa_readl(imap);
254 imap_tmp &= ~(0x80000000);
255 upa_writel(imap_tmp, imap);
256 upa_readl(imap);
257}
258
/* Bring every probed FHC to a known state: invalidate all four of its
 * interrupt mapping registers and normalize the control register.
 */
static void init_all_fhc_hw(void)
{
	struct linux_fhc *fhc;

	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
		u32 tmp;

		/* Clear all of the interrupt mapping registers
		 * just in case OBP left them in a foul state.
		 */
		ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
		    fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
		ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
		    fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
		ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
		    fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
		ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
		    fhc->fhc_regs.tregs + FHC_TREGS_IMAP);

		/* Setup FHC control register. */
		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);

		/* All non-central boards have this bit set. */
		if (! IS_CENTRAL_FHC(fhc))
			tmp |= FHC_CONTROL_IXIST;

		/* For all FHCs, clear the firmware synchronization
		 * line and both low power mode enables.
		 */
		tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF |
			 FHC_CONTROL_SLINE);

		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
		/* Read back to post the write before moving on. */
		upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
	}

}
296
/* Probe the "/central" PROM node (Sunfire-class machines).  When it
 * exists, build the central bus + central FHC software state, map
 * their registers, attach the clock board, then probe the remaining
 * FHCs and initialize their interrupt hardware.  Machines without a
 * /central node get only the Starfire CPU setup, if applicable.
 */
void central_probe(void)
{
	struct linux_prom_registers fpregs[6];
	struct linux_fhc *fhc;
	char namebuf[128];
	int cnode, fnode, err;

	cnode = prom_finddevice("/central");
	if (cnode == 0 || cnode == -1) {
		if (this_is_starfire)
			starfire_cpu_setup();
		return;
	}

	/* Ok we got one, grab some memory for software state. */
	central_bus = (struct linux_central *)
		central_alloc_bootmem(sizeof(struct linux_central));
	if (central_bus == NULL)
		central_probe_failure(__LINE__);

	fhc = (struct linux_fhc *)
		central_alloc_bootmem(sizeof(struct linux_fhc));
	if (fhc == NULL)
		central_probe_failure(__LINE__);

	/* First init central. */
	central_bus->child = fhc;
	central_bus->prom_node = cnode;

	prom_getstring(cnode, "name", namebuf, sizeof(namebuf));
	strcpy(central_bus->prom_name, namebuf);

	central_ranges_init(cnode, central_bus);

	/* And then central's FHC. */
	fhc->next = fhc_list;
	fhc_list = fhc;

	fhc->parent = central_bus;
	fnode = prom_searchsiblings(prom_getchild(cnode), "fhc");
	if (fnode == 0 || fnode == -1)
		central_probe_failure(__LINE__);

	fhc->prom_node = fnode;
	prom_getstring(fnode, "name", namebuf, sizeof(namebuf));
	strcpy(fhc->prom_name, namebuf);

	fhc_ranges_init(fnode, fhc);

	/* Now, map in FHC register set.  Note the 32-bit register
	 * format here, unlike the 64-bit one used in probe_other_fhcs().
	 */
	if (prom_getproperty(fnode, "reg", (char *)&fpregs[0], sizeof(fpregs)) == -1)
		central_probe_failure(__LINE__);

	apply_central_ranges(central_bus, &fpregs[0], 6);

	fhc->fhc_regs.pregs = prom_reg_to_paddr(&fpregs[0]);
	fhc->fhc_regs.ireg = prom_reg_to_paddr(&fpregs[1]);
	fhc->fhc_regs.ffregs = prom_reg_to_paddr(&fpregs[2]);
	fhc->fhc_regs.sregs = prom_reg_to_paddr(&fpregs[3]);
	fhc->fhc_regs.uregs = prom_reg_to_paddr(&fpregs[4]);
	fhc->fhc_regs.tregs = prom_reg_to_paddr(&fpregs[5]);

	/* Obtain board number from board status register, Central's
	 * FHC lacks "board#" property.
	 */
	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);
	fhc->board = (((err >> 16) & 0x01) |
		      ((err >> 12) & 0x0e));

	fhc->jtag_master = 0;

	/* Attach the clock board registers for CENTRAL. */
	probe_clock_board(central_bus, fhc, cnode, fnode);

	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
	printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] (CENTRAL)\n",
	       fhc->board,
	       ((err & FHC_ID_VERS) >> 28),
	       ((err & FHC_ID_PARTID) >> 12),
	       ((err & FHC_ID_MANUF) >> 1));

	probe_other_fhcs();

	init_all_fhc_hw();
}
382
383static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
384{
385 u32 tmp;
386
387 tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
388
389 /* NOTE: reverse logic on this bit */
390 if (on)
391 tmp &= ~(FHC_CONTROL_RLED);
392 else
393 tmp |= FHC_CONTROL_RLED;
394 tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
395
396 upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
397 upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
398}
399
400static __inline__ void central_ledblink(struct linux_central *central, int on)
401{
402 u8 tmp;
403
404 tmp = upa_readb(central->clkregs + CLOCK_CTRL);
405
406 /* NOTE: reverse logic on this bit */
407 if (on)
408 tmp &= ~(CLOCK_CTRL_RLED);
409 else
410 tmp |= CLOCK_CTRL_RLED;
411
412 upa_writeb(tmp, central->clkregs + CLOCK_CTRL);
413 upa_readb(central->clkregs + CLOCK_CTRL);
414}
415
416static struct timer_list sftimer;
417static int led_state;
418
419static void sunfire_timer(unsigned long __ignored)
420{
421 struct linux_fhc *fhc;
422
423 central_ledblink(central_bus, led_state);
424 for (fhc = fhc_list; fhc != NULL; fhc = fhc->next)
425 if (! IS_CENTRAL_FHC(fhc))
426 fhc_ledblink(fhc, led_state);
427 led_state = ! led_state;
428 sftimer.expires = jiffies + (HZ >> 1);
429 add_timer(&sftimer);
430}
431
/* After PCI/SBUS busses have been probed, this is called to perform
 * final initialization of all FireHose Controllers in the system.
 */
void firetruck_init(void)
{
	struct linux_central *central = central_bus;
	u8 ctrl;

	/* No central bus, nothing to do. */
	if (central == NULL)
		return;

	/* OBP leaves it on, turn it off so clock board timer LED
	 * is in sync with FHC ones.
	 */
	ctrl = upa_readb(central->clkregs + CLOCK_CTRL);
	ctrl &= ~(CLOCK_CTRL_RLED);
	upa_writeb(ctrl, central->clkregs + CLOCK_CTRL);

	/* Kick off the half-second LED heartbeat (see sunfire_timer). */
	led_state = 0;
	init_timer(&sftimer);
	sftimer.data = 0;
	sftimer.function = &sunfire_timer;
	sftimer.expires = jiffies + (HZ >> 1);
	add_timer(&sftimer);
}
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
new file mode 100644
index 000000000000..97cf912f0853
--- /dev/null
+++ b/arch/sparc64/kernel/chmc.c
@@ -0,0 +1,458 @@
1/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
2 * memctrlr.c: Driver for UltraSPARC-III memory controller.
3 *
4 * Copyright (C) 2001 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/module.h>
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/list.h>
12#include <linux/string.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <asm/spitfire.h>
18#include <asm/chmctrl.h>
19#include <asm/oplib.h>
20#include <asm/io.h>
21
#define CHMCTRL_NDGRPS	2	/* DIMM groups per controller */
#define CHMCTRL_NDIMMS	4	/* DIMMs per group */

#define DIMMS_PER_MC	(CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)

/* OBP memory-layout property format. */
/* NOTE(review): from its use in chmc_getunumber(), dimm_map appears
 * to pack a 2-bit DIMM selector for each of the 576 line bits (four
 * per byte) and pin_map one pin number per bit -- confirm against
 * the OBP documentation.
 */
struct obp_map {
	unsigned char	dimm_map[144];
	unsigned char	pin_map[576];
};

#define DIMM_LABEL_SZ	8

struct obp_mem_layout {
	/* One max 8-byte string label per DIMM.  Usually
	 * this matches the label on the motherboard where
	 * that DIMM resides.
	 */
	char		dimm_labels[DIMMS_PER_MC][DIMM_LABEL_SZ];

	/* If symmetric use map[0], else it is
	 * asymmetric and map[1] should be used.
	 */
	char		symmetric;

	struct obp_map	map[2];
};
49
#define CHMCTRL_NBANKS	4

/* One decoded DECODE register: which physical addresses this logical
 * bank claims (upper/lower match+mask pairs) plus derived geometry.
 */
struct bank_info {
	struct mctrl_info	*mp;	/* owning controller */
	int			bank_id;	/* globally unique bank number */

	u64			raw_reg;	/* raw DECODE register value */
	int			valid;	/* bank enabled? */
	int			uk;	/* upper address "don't care" mask */
	int			um;	/* upper address match value */
	int			lk;	/* lower address "don't care" mask */
	int			lm;	/* lower address match value */
	int			interleave;	/* derived from lk */
	unsigned long		base;	/* derived from um & ~uk */
	unsigned long		size;	/* derived from uk and interleave */
};
66
/* Per-controller software state, chained on mctrl_list. */
struct mctrl_info {
	struct list_head	list;
	int			portid;	/* safari port id of this controller */
	int			index;	/* probe order, used for naming */

	struct obp_mem_layout	layout_prop;	/* copy of "memory-layout" */
	int			layout_size;	/* 0 when the property is absent */

	void __iomem		*regs;	/* ioremap()ed register block */

	u64			timing_control1;
	u64			timing_control2;
	u64			timing_control3;
	u64			timing_control4;
	u64			memaddr_control;

	struct bank_info	logical_banks[CHMCTRL_NBANKS];
};

static LIST_HEAD(mctrl_list);
87
88/* Does BANK decode PHYS_ADDR? */
89static int bank_match(struct bank_info *bp, unsigned long phys_addr)
90{
91 unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
92 unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
93
94 /* Bank must be enabled to match. */
95 if (bp->valid == 0)
96 return 0;
97
98 /* Would BANK match upper bits? */
99 upper_bits ^= bp->um; /* What bits are different? */
100 upper_bits = ~upper_bits; /* Invert. */
101 upper_bits |= bp->uk; /* What bits don't matter for matching? */
102 upper_bits = ~upper_bits; /* Invert. */
103
104 if (upper_bits)
105 return 0;
106
107 /* Would BANK match lower bits? */
108 lower_bits ^= bp->lm; /* What bits are different? */
109 lower_bits = ~lower_bits; /* Invert. */
110 lower_bits |= bp->lk; /* What bits don't matter for matching? */
111 lower_bits = ~lower_bits; /* Invert. */
112
113 if (lower_bits)
114 return 0;
115
116 /* I always knew you'd be the one. */
117 return 1;
118}
119
120/* Given PHYS_ADDR, search memory controller banks for a match. */
121static struct bank_info *find_bank(unsigned long phys_addr)
122{
123 struct list_head *mctrl_head = &mctrl_list;
124 struct list_head *mctrl_entry = mctrl_head->next;
125
126 for (;;) {
127 struct mctrl_info *mp =
128 list_entry(mctrl_entry, struct mctrl_info, list);
129 int bank_no;
130
131 if (mctrl_entry == mctrl_head)
132 break;
133 mctrl_entry = mctrl_entry->next;
134
135 for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
136 struct bank_info *bp;
137
138 bp = &mp->logical_banks[bank_no];
139 if (bank_match(bp, phys_addr))
140 return bp;
141 }
142 }
143
144 return NULL;
145}
146
147/* This is the main purpose of this driver. */
148#define SYNDROME_MIN -1
149#define SYNDROME_MAX 144
150int chmc_getunumber(int syndrome_code,
151 unsigned long phys_addr,
152 char *buf, int buflen)
153{
154 struct bank_info *bp;
155 struct obp_mem_layout *prop;
156 int bank_in_controller, first_dimm;
157
158 bp = find_bank(phys_addr);
159 if (bp == NULL ||
160 syndrome_code < SYNDROME_MIN ||
161 syndrome_code > SYNDROME_MAX) {
162 buf[0] = '?';
163 buf[1] = '?';
164 buf[2] = '?';
165 buf[3] = '\0';
166 return 0;
167 }
168
169 prop = &bp->mp->layout_prop;
170 bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
171 first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
172 first_dimm *= CHMCTRL_NDIMMS;
173
174 if (syndrome_code != SYNDROME_MIN) {
175 struct obp_map *map;
176 int qword, where_in_line, where, map_index, map_offset;
177 unsigned int map_val;
178
179 /* Yaay, single bit error so we can figure out
180 * the exact dimm.
181 */
182 if (prop->symmetric)
183 map = &prop->map[0];
184 else
185 map = &prop->map[1];
186
187 /* Covert syndrome code into the way the bits are
188 * positioned on the bus.
189 */
190 if (syndrome_code < 144 - 16)
191 syndrome_code += 16;
192 else if (syndrome_code < 144)
193 syndrome_code -= (144 - 7);
194 else if (syndrome_code < (144 + 3))
195 syndrome_code -= (144 + 3 - 4);
196 else
197 syndrome_code -= 144 + 3;
198
199 /* All this magic has to do with how a cache line
200 * comes over the wire on Safari. A 64-bit line
201 * comes over in 4 quadword cycles, each of which
202 * transmit ECC/MTAG info as well as the actual
203 * data. 144 bits per quadword, 576 total.
204 */
205#define LINE_SIZE 64
206#define LINE_ADDR_MSK (LINE_SIZE - 1)
207#define QW_PER_LINE 4
208#define QW_BYTES (LINE_SIZE / QW_PER_LINE)
209#define QW_BITS 144
210#define LAST_BIT (576 - 1)
211
212 qword = (phys_addr & LINE_ADDR_MSK) / QW_BYTES;
213 where_in_line = ((3 - qword) * QW_BITS) + syndrome_code;
214 where = (LAST_BIT - where_in_line);
215 map_index = where >> 2;
216 map_offset = where & 0x3;
217 map_val = map->dimm_map[map_index];
218 map_val = ((map_val >> ((3 - map_offset) << 1)) & (2 - 1));
219
220 sprintf(buf, "%s, pin %3d",
221 prop->dimm_labels[first_dimm + map_val],
222 map->pin_map[where_in_line]);
223 } else {
224 int dimm;
225
226 /* Multi-bit error, we just dump out all the
227 * dimm labels associated with this bank.
228 */
229 for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
230 sprintf(buf, "%s ",
231 prop->dimm_labels[first_dimm + dimm]);
232 buf += strlen(buf);
233 }
234 }
235 return 0;
236}
237
/* Accessing the registers is slightly complicated.  If you want
 * to get at the memory controller which is on the same processor
 * the code is executing, you must use special ASI load/store else
 * you go through the global mapping.
 */
static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
{
	unsigned long ret;
	int this_cpu = get_cpu();	/* pin the CPU while choosing the path */

	if (mp->portid == this_cpu) {
		/* Local controller: privileged ASI access by offset. */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* Remote controller: physical bypass through the
		 * ioremap()ed global mapping.
		 */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (mp->regs + offset),
				       "i" (ASI_PHYS_BYPASS_EC_E));
	}
	put_cpu();

	return ret;
}
262
#if 0 /* currently unused */
/* Counterpart of read_mcreg(): write a controller register, using the
 * special ASI when the controller belongs to the executing CPU.
 *
 * Fixed: the remote (physical bypass) branch used "ldxa" -- a load --
 * where a store is clearly intended; it now uses "stxa" to match the
 * local branch and the read_mcreg() pattern above.
 */
static void write_mcreg(struct mctrl_info *mp, unsigned long offset, u64 val)
{
	if (mp->portid == smp_processor_id()) {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
				         "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
				         "r" (mp->regs + offset),
				         "i" (ASI_PHYS_BYPASS_EC_E));
	}
}
#endif
278
/* Break one DECODE register into its fields and derive the bank's
 * base address, interleave factor and size from the UK/UM/LK/LM
 * match/mask encoding.
 */
static void interpret_one_decode_reg(struct mctrl_info *mp, int which_bank, u64 val)
{
	struct bank_info *p = &mp->logical_banks[which_bank];

	p->mp = mp;
	p->bank_id = (CHMCTRL_NBANKS * mp->portid) + which_bank;
	p->raw_reg = val;
	p->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
	p->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
	p->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
	p->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
	p->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;

	/* Base address: the upper match bits that are not masked off. */
	p->base = (p->um);
	p->base &= ~(p->uk);
	p->base <<= PA_UPPER_BITS_SHIFT;

	/* The count of low "don't care" bits encodes the interleave. */
	switch(p->lk) {
	case 0xf:
	default:
		p->interleave = 1;
		break;

	case 0xe:
		p->interleave = 2;
		break;

	case 0xc:
		p->interleave = 4;
		break;

	case 0x8:
		p->interleave = 8;
		break;

	case 0x0:
		p->interleave = 16;
		break;
	};

	/* UK[10] is reserved, and UK[11] is not set for the SDRAM
	 * bank size definition.
	 */
	p->size = (((unsigned long)p->uk &
		    ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
	p->size /= p->interleave;
}
326
327static void fetch_decode_regs(struct mctrl_info *mp)
328{
329 if (mp->layout_size == 0)
330 return;
331
332 interpret_one_decode_reg(mp, 0,
333 read_mcreg(mp, CHMCTRL_DECODE1));
334 interpret_one_decode_reg(mp, 1,
335 read_mcreg(mp, CHMCTRL_DECODE2));
336 interpret_one_decode_reg(mp, 2,
337 read_mcreg(mp, CHMCTRL_DECODE3));
338 interpret_one_decode_reg(mp, 3,
339 read_mcreg(mp, CHMCTRL_DECODE4));
340}
341
/* Allocate and initialize software state for one memory-controller
 * PROM node: read the optional "memory-layout" property, map the
 * 0x48-byte register block, and snapshot timing/decode registers.
 * Returns 0 on success or -1 (partially built state is torn down).
 */
static int init_one_mctrl(int node, int index)
{
	struct mctrl_info *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	int portid = prom_getintdefault(node, "portid", -1);
	struct linux_prom64_registers p_reg_prop;
	int t;

	if (!mp)
		return -1;
	memset(mp, 0, sizeof(*mp));
	if (portid == -1)
		goto fail;

	mp->portid = portid;
	/* A missing memory-layout property (layout_size == 0) marks a
	 * controller with no DIMMs installed -- still registered, but
	 * reported as INACTIVE below.
	 */
	mp->layout_size = prom_getproplen(node, "memory-layout");
	if (mp->layout_size < 0)
		mp->layout_size = 0;
	if (mp->layout_size > sizeof(mp->layout_prop))
		goto fail;

	if (mp->layout_size > 0)
		prom_getproperty(node, "memory-layout",
				 (char *) &mp->layout_prop,
				 mp->layout_size);

	/* The reg property must describe exactly the 0x48 bytes of
	 * control registers this driver knows about.
	 */
	t = prom_getproperty(node, "reg",
			     (char *) &p_reg_prop,
			     sizeof(p_reg_prop));
	if (t < 0 || p_reg_prop.reg_size != 0x48)
		goto fail;

	mp->regs = ioremap(p_reg_prop.phys_addr, p_reg_prop.reg_size);
	if (mp->regs == NULL)
		goto fail;

	if (mp->layout_size != 0UL) {
		mp->timing_control1 = read_mcreg(mp, CHMCTRL_TCTRL1);
		mp->timing_control2 = read_mcreg(mp, CHMCTRL_TCTRL2);
		mp->timing_control3 = read_mcreg(mp, CHMCTRL_TCTRL3);
		mp->timing_control4 = read_mcreg(mp, CHMCTRL_TCTRL4);
		mp->memaddr_control = read_mcreg(mp, CHMCTRL_MACTRL);
	}

	fetch_decode_regs(mp);

	mp->index = index;

	list_add(&mp->list, &mctrl_list);

	/* Report the device. */
	printk(KERN_INFO "chmc%d: US3 memory controller at %p [%s]\n",
	       mp->index,
	       mp->regs, (mp->layout_size ? "ACTIVE" : "INACTIVE"));

	return 0;

fail:
	if (mp) {
		if (mp->regs != NULL)
			iounmap(mp->regs);
		kfree(mp);
	}
	return -1;
}
406
407static int __init probe_for_string(char *name, int index)
408{
409 int node = prom_getchild(prom_root_node);
410
411 while ((node = prom_searchsiblings(node, name)) != 0) {
412 int ret = init_one_mctrl(node, index);
413
414 if (!ret)
415 index++;
416
417 node = prom_getsibling(node);
418 if (!node)
419 break;
420 }
421
422 return index;
423}
424
425static int __init chmc_init(void)
426{
427 int index;
428
429 /* This driver is only for cheetah platforms. */
430 if (tlb_type != cheetah && tlb_type != cheetah_plus)
431 return -ENODEV;
432
433 index = probe_for_string("memory-controller", 0);
434 index = probe_for_string("mc-us3", index);
435
436 return 0;
437}
438
439static void __exit chmc_cleanup(void)
440{
441 struct list_head *head = &mctrl_list;
442 struct list_head *tmp = head->next;
443
444 for (;;) {
445 struct mctrl_info *p =
446 list_entry(tmp, struct mctrl_info, list);
447 if (tmp == head)
448 break;
449 tmp = tmp->next;
450
451 list_del(&p->list);
452 iounmap(p->regs);
453 kfree(p);
454 }
455}
456
457module_init(chmc_init);
458module_exit(chmc_cleanup);
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
new file mode 100644
index 000000000000..48756958116b
--- /dev/null
+++ b/arch/sparc64/kernel/cpu.c
@@ -0,0 +1,124 @@
1/* cpu.c: Dinky routines to look for the kind of Sparc cpu
2 * we are on.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/sched.h>
11#include <linux/smp.h>
12#include <asm/asi.h>
13#include <asm/system.h>
14#include <asm/fpumacro.h>
15#include <asm/cpudata.h>
16
/* Per-cpu information block, zero-initialized here and filled in as
 * each cpu is probed.
 */
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };

/* One row per known CPU core, matched against the manufacturer and
 * implementation fields decoded from the %ver register.
 */
struct cpu_iu_info {
	short manuf;
	short impl;
	char* cpu_name;   /* should be enough I hope... */
};

/* One row per known FPU, matched on the same manuf/impl pair plus
 * the version field stored from %fsr.
 */
struct cpu_fp_info {
	short manuf;
	short impl;
	char fpu_vers;
	char* fp_name;
};

struct cpu_fp_info linux_sparc_fpu[] = {
	{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
	{ 0x22, 0x10, 0, "UltraSparc I integrated FPU"},
	{ 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
	{ 0x17, 0x12, 0, "UltraSparc IIi integrated FPU"},
	{ 0x17, 0x13, 0, "UltraSparc IIe integrated FPU"},
	{ 0x3e, 0x14, 0, "UltraSparc III integrated FPU"},
	{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
	{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
	{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
};

#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))

struct cpu_iu_info linux_sparc_chips[] = {
	{ 0x17, 0x10, "TI UltraSparc I (SpitFire)"},
	{ 0x22, 0x10, "TI UltraSparc I (SpitFire)"},
	{ 0x17, 0x11, "TI UltraSparc II (BlackBird)"},
	{ 0x17, 0x12, "TI UltraSparc IIi (Sabre)"},
	{ 0x17, 0x13, "TI UltraSparc IIe (Hummingbird)"},
	{ 0x3e, 0x14, "TI UltraSparc III (Cheetah)"},
	{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
	{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
	{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
};

#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))

/* Human-readable names; overwritten by cpu_probe() once the real
 * hardware has been identified.
 */
char *sparc_cpu_type = "cpu-oops";
char *sparc_fpu_type = "fpu-oops";

unsigned int fsr_storage;
64
/* Identify the CPU and FPU we are running on by decoding the %ver
 * privileged register and the version field of %fsr, then point
 * sparc_cpu_type/sparc_fpu_type at the matching table entries.
 */
void __init cpu_probe(void)
{
	unsigned long ver, fpu_vers, manuf, impl, fprs;
	int i;

	/* Storing %fsr needs the FPU enabled: save the current FPRS,
	 * set FEF for the duration, restore afterwards.
	 */
	fprs = fprs_read();
	fprs_write(FPRS_FEF);
	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
			      : "=&r" (ver)
			      : "r" (&fpu_vers));
	fprs_write(fprs);

	/* %ver bits 63:48 carry the manufacturer, 47:32 the
	 * implementation code.
	 */
	manuf = ((ver >> 48) & 0xffff);
	impl = ((ver >> 32) & 0xffff);

	/* Extract the 3-bit fsr.vers field (bits 19:17 of %fsr). */
	fpu_vers = ((fpu_vers >> 17) & 0x7);

retry:
	for (i = 0; i < NSPARCCHIPS; i++) {
		if (linux_sparc_chips[i].manuf == manuf) {
			if (linux_sparc_chips[i].impl == impl) {
				sparc_cpu_type =
					linux_sparc_chips[i].cpu_name;
				break;
			}
		}
	}

	if (i == NSPARCCHIPS) {
		/* Maybe it is a cheetah+ derivative, report it as cheetah+
		 * in that case until we learn the real names.
		 */
		if (manuf == 0x3e &&
		    impl > 0x15) {
			impl = 0x15;
			goto retry;
		} else {
			printk("DEBUG: manuf[%lx] impl[%lx]\n",
			       manuf, impl);
		}
		sparc_cpu_type = "Unknown CPU";
	}

	/* Note: if the retry path above rewrote 'impl', the FPU lookup
	 * below uses the rewritten value as well.
	 */
	for (i = 0; i < NSPARCFPU; i++) {
		if (linux_sparc_fpu[i].manuf == manuf &&
		    linux_sparc_fpu[i].impl == impl) {
			if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
				sparc_fpu_type =
					linux_sparc_fpu[i].fp_name;
				break;
			}
		}
	}

	if (i == NSPARCFPU) {
		printk("DEBUG: manuf[%lx] impl[%lx] fsr.vers[%lx]\n",
		       manuf, impl, fpu_vers);
		sparc_fpu_type = "Unknown FPU";
	}
}
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
new file mode 100644
index 000000000000..d710274e516b
--- /dev/null
+++ b/arch/sparc64/kernel/devices.c
@@ -0,0 +1,144 @@
1/* devices.c: Initial scan of the prom device tree for important
2 * Sparc device nodes which we need to find.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/threads.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12#include <linux/string.h>
13#include <linux/spinlock.h>
14#include <linux/errno.h>
15
16#include <asm/page.h>
17#include <asm/oplib.h>
18#include <asm/system.h>
19#include <asm/smp.h>
20#include <asm/spitfire.h>
21#include <asm/timer.h>
22#include <asm/cpudata.h>
23
24/* Used to synchronize acceses to NatSemi SUPER I/O chip configure
25 * operations in asm/ns87303.h
26 */
27DEFINE_SPINLOCK(ns87303_lock);
28
29extern void cpu_probe(void);
30extern void central_probe(void);
31
32static char *cpu_mid_prop(void)
33{
34 if (tlb_type == spitfire)
35 return "upa-portid";
36 return "portid";
37}
38
39static int check_cpu_node(int nd, int *cur_inst,
40 int (*compare)(int, int, void *), void *compare_arg,
41 int *prom_node, int *mid)
42{
43 char node_str[128];
44
45 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
46 if (strcmp(node_str, "cpu"))
47 return -ENODEV;
48
49 if (!compare(nd, *cur_inst, compare_arg)) {
50 if (prom_node)
51 *prom_node = nd;
52 if (mid)
53 *mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
54 return 0;
55 }
56
57 (*cur_inst)++;
58
59 return -ENODEV;
60}
61
62static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
63 int *prom_node, int *mid)
64{
65 int nd, cur_inst, err;
66
67 nd = prom_root_node;
68 cur_inst = 0;
69
70 err = check_cpu_node(nd, &cur_inst,
71 compare, compare_arg,
72 prom_node, mid);
73 if (err == 0)
74 return 0;
75
76 nd = prom_getchild(nd);
77 while ((nd = prom_getsibling(nd)) != 0) {
78 err = check_cpu_node(nd, &cur_inst,
79 compare, compare_arg,
80 prom_node, mid);
81 if (err == 0)
82 return 0;
83 }
84
85 return -ENODEV;
86}
87
88static int cpu_instance_compare(int nd, int instance, void *_arg)
89{
90 int desired_instance = (int) (long) _arg;
91
92 if (instance == desired_instance)
93 return 0;
94 return -ENODEV;
95}
96
/* Locate the @instance'th (0-based) cpu node in the PROM tree.
 * Returns 0 on success with *prom_node and *mid (when non-NULL)
 * filled in, -ENODEV when there is no such cpu.
 */
int cpu_find_by_instance(int instance, int *prom_node, int *mid)
{
	return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
			     prom_node, mid);
}
102
103static int cpu_mid_compare(int nd, int instance, void *_arg)
104{
105 int desired_mid = (int) (long) _arg;
106 int this_mid;
107
108 this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
109 if (this_mid == desired_mid)
110 return 0;
111 return -ENODEV;
112}
113
/* Locate the cpu node whose module ID equals @mid.  Returns 0 on
 * success with *prom_node filled in, -ENODEV otherwise.
 */
int cpu_find_by_mid(int mid, int *prom_node)
{
	return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
			     prom_node, NULL);
}
119
/* Early boot scan of the PROM device tree: record the boot cpu's
 * clock rate (UP only; SMP gathers this per-cpu elsewhere), then
 * probe the central/FHC hierarchy and identify the cpu type.
 */
void __init device_scan(void)
{
	/* FIX ME FAST... -DaveM */
	ioport_resource.end = 0xffffffffffffffffUL;

	prom_printf("Booting Linux...\n");

#ifndef CONFIG_SMP
	{
		int err, cpu_node;
		err = cpu_find_by_instance(0, &cpu_node, NULL);
		if (err) {
			/* Without at least one cpu node we cannot even
			 * read the clock rate; give up via the PROM.
			 */
			prom_printf("No cpu nodes, cannot continue\n");
			prom_halt();
		}
		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
							    "clock-frequency",
							    0);
	}
#endif

	central_probe();

	cpu_probe();
}
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
new file mode 100644
index 000000000000..b73a3c858770
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -0,0 +1,181 @@
1/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
2 * dtlb_backend.S: Back end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12#if PAGE_SHIFT == 13
13#define SZ_BITS _PAGE_SZ8K
14#elif PAGE_SHIFT == 16
15#define SZ_BITS _PAGE_SZ64K
16#elif PAGE_SHIFT == 19
17#define SZ_BITS _PAGE_SZ512K
18#elif PAGE_SHIFT == 22
19#define SZ_BITS _PAGE_SZ4M
20#endif
21
22#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
23
24#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
25#define VPTE_SHIFT (PAGE_SHIFT - 3)
26
27/* Ways we can get here:
28 *
29 * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
30 * 2) Nucleus loads and stores to/from user/kernel window save areas.
31 * 3) VPTE misses from dtlb_base and itlb_base.
32 *
33 * We need to extract out the PMD and PGDIR indexes from the
34 * linear virtual page table access address. The PTE index
35 * is at the bottom, but we are not concerned with it. Bits
36 * 0 to 2 are clear since each PTE is 8 bytes in size. Each
37 * PMD and PGDIR entry are 4 bytes in size. Thus, this
38 * address looks something like:
39 *
40 * |---------------------------------------------------------------|
41 * | ... | PGDIR index | PMD index | PTE index | |
42 * |---------------------------------------------------------------|
43 * 63 F E D C B A 3 2 0 <- bit nr
44 *
45 * The variable bits above are defined as:
46 * A --> 3 + (PAGE_SHIFT - log2(8))
47 * --> 3 + (PAGE_SHIFT - 3) - 1
48 * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
49 * B --> A + 1
50 * C --> B + (PAGE_SHIFT - log2(4))
51 * --> B + (PAGE_SHIFT - 2) - 1
52 * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
53 * D --> C + 1
54 * E --> D + (PAGE_SHIFT - log2(4))
55 * --> D + (PAGE_SHIFT - 2) - 1
56 * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
57 * F --> E + 1
58 *
59 * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants
60 * cancel out.)
61 *
62 * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
63 * A --> 12
64 * B --> 13
65 * C --> 23
66 * D --> 24
67 * E --> 34
68 * F --> 35
69 *
70 * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
71 * A --> 15
72 * B --> 16
73 * C --> 29
74 * D --> 30
75 * E --> 43
76 * F --> 44
77 *
78 * Because bits both above and below each PGDIR and PMD index need to
79 * be masked out, and the index can be as long as 14 bits (when using a
80 * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
81 * to extract each index out.
82 *
83 * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
84 * we try to avoid using them for the entire operation. We could setup
85 * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
86 *
87 * We need a mask covering bits B --> C and one covering D --> E.
88 * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
89 * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
90 * The second in each set cannot be loaded with a single sethi
91 * instruction, because the upper bits are past bit 32. We would
92 * need to use a sethi + a shift.
93 *
94 * For the time being, we use 2 shifts and a simple "and" mask.
95 * We shift left to clear the bits above the index, we shift down
96 * to clear the bits below the index (sans the log2(4 or 8) bits)
97 * and a mask to clear the log2(4 or 8) bits. We need therefore
98 * define 4 shift counts, all of which are relative to PAGE_SHIFT.
99 *
100 * Although unsupportable for other reasons, this does mean that
101 * 512K and 4MB page sizes would be generaally supported by the
102 * kernel. (ELF binaries would break with > 64K PAGE_SIZE since
103 * the sections are only aligned that strongly).
104 *
105 * The operations performed for extraction are thus:
106 *
107 * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
108 *
109 */
110
/* Bit positions A..F and the derived shift counts used to extract
 * the PMD and PGDIR indexes from a linear VPTE access address; see
 * the derivation in the large comment block above.
 */
#define A (3 + (PAGE_SHIFT - 3) - 1)
#define B (A + 1)
#define C (B + (PAGE_SHIFT - 2) - 1)
#define D (C + 1)
#define E (D + (PAGE_SHIFT - 2) - 1)
#define F (E + 1)

#define PMD_SHIFT_LEFT (64 - D)
#define PMD_SHIFT_RIGHT (64 - (D - B) - 2)
#define PGDIR_SHIFT_LEFT (64 - F)
#define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2)
#define LOW_MASK_BITS 0x3

/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */
	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
	add		%g3, %g3, %g5			! Compute VPTE base
	cmp		%g4, %g5			! VPTE miss?
	bgeu,pt		%xcc, 1f			! Continue here
	 andcc		%g4, TAG_CONTEXT_BITS, %g5	! tl0 miss Nucleus test
	ba,a,pt		%xcc, from_tl1_trap		! Fall to tl0 miss
1:	sllx		%g6, VPTE_SHIFT, %g4		! Position TAG_ACCESS
	or		%g4, %g5, %g4			! Prepare TAG_ACCESS

/* TLB1 ** ICACHE line 2: Quick VPTE miss */
	mov		TSB_REG, %g1			! Grab TSB reg
	ldxa		[%g1] ASI_DMMU, %g5		! Doing PGD caching?
	sllx		%g6, PMD_SHIFT_LEFT, %g1	! Position PMD offset
	be,pn		%xcc, sparc64_vpte_nucleus	! Is it from Nucleus?
	 srlx		%g1, PMD_SHIFT_RIGHT, %g1	! Mask PMD offset bits
	brnz,pt		%g5, sparc64_vpte_continue	! Yep, go like smoke
	 andn		%g1, LOW_MASK_BITS, %g1		! Final PMD mask
	sllx		%g6, PGDIR_SHIFT_LEFT, %g5	! Position PGD offset

/* TLB1 ** ICACHE line 3: Quick VPTE miss */
	srlx		%g5, PGDIR_SHIFT_RIGHT, %g5	! Mask PGD offset bits
	andn		%g5, LOW_MASK_BITS, %g5		! Final PGD mask
	lduwa		[%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
	brz,pn		%g5, vpte_noent			! Valid?
sparc64_kpte_continue:
	sllx		%g5, 11, %g5			! Shift into place
sparc64_vpte_continue:
	lduwa		[%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
	sllx		%g5, 11, %g5			! Shift into place
	brz,pn		%g5, vpte_noent			! Valid?

/* TLB1 ** ICACHE line 4: Quick VPTE miss */
	mov		(VALID_SZ_BITS >> 61), %g1	! upper vpte into %g1
	sllx		%g1, 61, %g1			! finish calc
	or		%g5, VPTE_BITS, %g5		! Prepare VPTE data
	or		%g5, %g1, %g5			! ...
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Load VPTE into TLB
	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
	retry						! Load PTE once again

/* These names are only meaningful inside this trap-table fragment;
 * drop them so later fragments can reuse the letters.
 */
#undef SZ_BITS
#undef VALID_SZ_BITS
#undef VPTE_SHIFT
#undef VPTE_BITS
#undef A
#undef B
#undef C
#undef D
#undef E
#undef F
#undef PMD_SHIFT_LEFT
#undef PMD_SHIFT_RIGHT
#undef PGDIR_SHIFT_LEFT
#undef PGDIR_SHIFT_RIGHT
#undef LOW_MASK_BITS
181
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
new file mode 100644
index 000000000000..ded2fed23fcc
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -0,0 +1,113 @@
1/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
2 * dtlb_base.S: Front end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12/* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
13 * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
14 * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
15 * (0xffe0000000000000) Cheetah (64-bit VA space)
16 * %g7 __pa(current->mm->pgd)
17 *
18 * The VPTE base value is completely magic, but note that
19 * few places in the kernel other than these TLB miss
20 * handlers know anything about the VPTE mechanism or
21 * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
22 * Consider the 44-bit VADDR Ultra-I/II case as an example:
23 *
24 * VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
25 * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
26 *
27 * For Cheetah's 64-bit VADDR space this is:
28 *
29 * VA[0 : (1<<63)] produce VPTE index [%g3 : 0]
30 * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
31 *
32 * If you're paying attention you'll notice that this means half of
33 * the VPTE table is above %g3 and half is below, low VA addresses
34 * map progressively upwards from %g3, and high VA addresses map
35 * progressively upwards towards %g3. This trick was needed to make
36 * the same 8 instruction handler work both for Spitfire/Blackbird's
37 * peculiar VA space hole configuration and the full 64-bit VA space
38 * one of Cheetah at the same time.
39 */
40
41/* Ways we can get here:
42 *
43 * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
44 * 2) Nucleus loads and stores to/from vmalloc() areas.
45 * 3) User loads and stores.
46 * 4) User space accesses by nucleus at tl0
47 */
48
#if PAGE_SHIFT == 13
/*
 * To compute vpte offset, we need to do ((addr >> 13) << 3),
 * which can be optimized to (addr >> 10) if bits 10/11/12 can
 * be guaranteed to be 0 ... mmu_context.h does guarantee this
 * by only using 10 bits in the hwcontext value.
 */
#define CREATE_VPTE_OFFSET1(r1, r2)
#define CREATE_VPTE_OFFSET2(r1, r2) \
	srax	r1, 10, r2
#define CREATE_VPTE_NOP nop
#else
/* General case: shift down by PAGE_SHIFT then scale by the 8-byte
 * PTE size in a second instruction.
 */
#define CREATE_VPTE_OFFSET1(r1, r2) \
	srax	r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
	sllx	r2, 3, r2
#define CREATE_VPTE_NOP
#endif

/* DTLB ** ICACHE line 1: Quick user TLB misses */
	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
from_tl1_trap:
	rdpr		%tl, %g5			! For TL==3 test
	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
	be,pn		%xcc, 3f			! Yep, special processing
	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
	cmp		%g5, 4				! Last trap level?
	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
	 nop						! delay slot

/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
1:	brgez,pn	%g5, longpath			! Invalid, branch out
	 nop						! Delay-slot
9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry						! Trap return
3:	brlz,pt		%g4, 9b				! Kernel virtual map?
	 xor		%g2, %g4, %g5			! Finish bit twiddles
	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc

/* DTLB ** ICACHE line 3: winfixups+real_faults */
longpath:
	rdpr		%pstate, %g5			! Move into alternate globals
	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
	rdpr		%tl, %g4			! See where we came from.
	cmp		%g4, 1				! Is etrap/rtrap window fault?
	mov		TLB_TAG_ACCESS, %g4		! Prepare for fault processing
	ldxa		[%g4] ASI_DMMU, %g5		! Load faulting VA page
	be,pt		%xcc, sparc64_realfault_common	! Jump to normal fault handling
	 mov		FAULT_CODE_DTLB, %g4		! It was read from DTLB

/* DTLB ** ICACHE line 4: Unused... */
	ba,a,pt		%xcc, winfix_trampoline		! Call window fixup code
	nop
	nop
	nop
	nop
	nop
	nop
	CREATE_VPTE_NOP

#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
new file mode 100644
index 000000000000..d848bb7374bb
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -0,0 +1,54 @@
1/* $Id: dtlb_prot.S,v 1.22 2001/04/11 23:40:32 davem Exp $
2 * dtlb_prot.S: DTLB protection trap strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9/* Ways we can get here:
10 *
11 * [TL == 0] 1) User stores to readonly pages.
12 * [TL == 0] 2) Nucleus stores to user readonly pages.
13 * [TL > 0] 3) Nucleus stores to user readonly stack frame.
14 */
15
/* PROT ** ICACHE line 1: User DTLB protection trap */
	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
	membar		#Sync				! Synchronize ASI stores
	rdpr		%pstate, %g5			! Move into alternate globals
	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
	rdpr		%tl, %g1			! Need to do a winfixup?
	cmp		%g1, 1				! Trap level >1?
	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
	nop

/* PROT ** ICACHE line 2: More real fault processing */
	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
	 ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
	nop
	nop
	nop
	nop

/* PROT ** ICACHE line 3: Unused...
 * (padding keeps this trap-table entry exactly four I-cache lines)
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/* PROT ** ICACHE line 4: Unused... */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
new file mode 100644
index 000000000000..6ffbeb701940
--- /dev/null
+++ b/arch/sparc64/kernel/ebus.c
@@ -0,0 +1,644 @@
1/* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $
2 * ebus.c: PCI to EBus bridge device.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8#include <linux/config.h>
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/string.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17
18#include <asm/system.h>
19#include <asm/page.h>
20#include <asm/pbm.h>
21#include <asm/ebus.h>
22#include <asm/oplib.h>
23#include <asm/bpp.h>
24#include <asm/irq.h>
25
26/* EBUS dma library. */
27
28#define EBDMA_CSR 0x00UL /* Control/Status */
29#define EBDMA_ADDR 0x04UL /* DMA Address */
30#define EBDMA_COUNT 0x08UL /* DMA Count */
31
32#define EBDMA_CSR_INT_PEND 0x00000001
33#define EBDMA_CSR_ERR_PEND 0x00000002
34#define EBDMA_CSR_DRAIN 0x00000004
35#define EBDMA_CSR_INT_EN 0x00000010
36#define EBDMA_CSR_RESET 0x00000080
37#define EBDMA_CSR_WRITE 0x00000100
38#define EBDMA_CSR_EN_DMA 0x00000200
39#define EBDMA_CSR_CYC_PEND 0x00000400
40#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
41#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
42#define EBDMA_CSR_EN_CNT 0x00002000
43#define EBDMA_CSR_TC 0x00004000
44#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
45#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
46#define EBDMA_CSR_BURST_SZ_1 0x00080000
47#define EBDMA_CSR_BURST_SZ_4 0x00000000
48#define EBDMA_CSR_BURST_SZ_8 0x00040000
49#define EBDMA_CSR_BURST_SZ_16 0x000c0000
50#define EBDMA_CSR_DIAG_EN 0x00100000
51#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
52#define EBDMA_CSR_TCI_DIS 0x00800000
53#define EBDMA_CSR_EN_NEXT 0x01000000
54#define EBDMA_CSR_DMA_ON 0x02000000
55#define EBDMA_CSR_A_LOADED 0x04000000
56#define EBDMA_CSR_NA_LOADED 0x08000000
57#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
58
59#define EBUS_DMA_RESET_TIMEOUT 10000
60
61static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
62{
63 int i;
64 u32 val = 0;
65
66 writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
67 udelay(1);
68
69 if (no_drain)
70 return;
71
72 for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
73 val = readl(p->regs + EBDMA_CSR);
74
75 if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
76 break;
77 udelay(10);
78 }
79}
80
81static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs)
82{
83 struct ebus_dma_info *p = dev_id;
84 unsigned long flags;
85 u32 csr = 0;
86
87 spin_lock_irqsave(&p->lock, flags);
88 csr = readl(p->regs + EBDMA_CSR);
89 writel(csr, p->regs + EBDMA_CSR);
90 spin_unlock_irqrestore(&p->lock, flags);
91
92 if (csr & EBDMA_CSR_ERR_PEND) {
93 printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
94 p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
95 return IRQ_HANDLED;
96 } else if (csr & EBDMA_CSR_INT_PEND) {
97 p->callback(p,
98 (csr & EBDMA_CSR_TC) ?
99 EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
100 p->client_cookie);
101 return IRQ_HANDLED;
102 }
103
104 return IRQ_NONE;
105
106}
107
108int ebus_dma_register(struct ebus_dma_info *p)
109{
110 u32 csr;
111
112 if (!p->regs)
113 return -EINVAL;
114 if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
115 EBUS_DMA_FLAG_TCI_DISABLE))
116 return -EINVAL;
117 if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
118 return -EINVAL;
119 if (!strlen(p->name))
120 return -EINVAL;
121
122 __ebus_dma_reset(p, 1);
123
124 csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
125
126 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
127 csr |= EBDMA_CSR_TCI_DIS;
128
129 writel(csr, p->regs + EBDMA_CSR);
130
131 return 0;
132}
133EXPORT_SYMBOL(ebus_dma_register);
134
135int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
136{
137 unsigned long flags;
138 u32 csr;
139
140 if (on) {
141 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
142 if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p))
143 return -EBUSY;
144 }
145
146 spin_lock_irqsave(&p->lock, flags);
147 csr = readl(p->regs + EBDMA_CSR);
148 csr |= EBDMA_CSR_INT_EN;
149 writel(csr, p->regs + EBDMA_CSR);
150 spin_unlock_irqrestore(&p->lock, flags);
151 } else {
152 spin_lock_irqsave(&p->lock, flags);
153 csr = readl(p->regs + EBDMA_CSR);
154 csr &= ~EBDMA_CSR_INT_EN;
155 writel(csr, p->regs + EBDMA_CSR);
156 spin_unlock_irqrestore(&p->lock, flags);
157
158 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
159 free_irq(p->irq, p);
160 }
161 }
162
163 return 0;
164}
165EXPORT_SYMBOL(ebus_dma_irq_enable);
166
167void ebus_dma_unregister(struct ebus_dma_info *p)
168{
169 unsigned long flags;
170 u32 csr;
171 int irq_on = 0;
172
173 spin_lock_irqsave(&p->lock, flags);
174 csr = readl(p->regs + EBDMA_CSR);
175 if (csr & EBDMA_CSR_INT_EN) {
176 csr &= ~EBDMA_CSR_INT_EN;
177 writel(csr, p->regs + EBDMA_CSR);
178 irq_on = 1;
179 }
180 spin_unlock_irqrestore(&p->lock, flags);
181
182 if (irq_on)
183 free_irq(p->irq, p);
184}
185EXPORT_SYMBOL(ebus_dma_unregister);
186
187int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
188{
189 unsigned long flags;
190 u32 csr;
191 int err;
192
193 if (len >= (1 << 24))
194 return -EINVAL;
195
196 spin_lock_irqsave(&p->lock, flags);
197 csr = readl(p->regs + EBDMA_CSR);
198 err = -EINVAL;
199 if (!(csr & EBDMA_CSR_EN_DMA))
200 goto out;
201 err = -EBUSY;
202 if (csr & EBDMA_CSR_NA_LOADED)
203 goto out;
204
205 writel(len, p->regs + EBDMA_COUNT);
206 writel(bus_addr, p->regs + EBDMA_ADDR);
207 err = 0;
208
209out:
210 spin_unlock_irqrestore(&p->lock, flags);
211
212 return err;
213}
214EXPORT_SYMBOL(ebus_dma_request);
215
216void ebus_dma_prepare(struct ebus_dma_info *p, int write)
217{
218 unsigned long flags;
219 u32 csr;
220
221 spin_lock_irqsave(&p->lock, flags);
222 __ebus_dma_reset(p, 0);
223
224 csr = (EBDMA_CSR_INT_EN |
225 EBDMA_CSR_EN_CNT |
226 EBDMA_CSR_BURST_SZ_16 |
227 EBDMA_CSR_EN_NEXT);
228
229 if (write)
230 csr |= EBDMA_CSR_WRITE;
231 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
232 csr |= EBDMA_CSR_TCI_DIS;
233
234 writel(csr, p->regs + EBDMA_CSR);
235
236 spin_unlock_irqrestore(&p->lock, flags);
237}
238EXPORT_SYMBOL(ebus_dma_prepare);
239
/* Number of bytes the engine has yet to transfer (COUNT register). */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
245
/* Current DMA bus address (ADDR register). */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
251
252void ebus_dma_enable(struct ebus_dma_info *p, int on)
253{
254 unsigned long flags;
255 u32 orig_csr, csr;
256
257 spin_lock_irqsave(&p->lock, flags);
258 orig_csr = csr = readl(p->regs + EBDMA_CSR);
259 if (on)
260 csr |= EBDMA_CSR_EN_DMA;
261 else
262 csr &= ~EBDMA_CSR_EN_DMA;
263 if ((orig_csr & EBDMA_CSR_EN_DMA) !=
264 (csr & EBDMA_CSR_EN_DMA))
265 writel(csr, p->regs + EBDMA_CSR);
266 spin_unlock_irqrestore(&p->lock, flags);
267}
268EXPORT_SYMBOL(ebus_dma_enable);
269
270struct linux_ebus *ebus_chain = NULL;
271
272#ifdef CONFIG_SUN_AUXIO
273extern void auxio_probe(void);
274#endif
275
276static inline void *ebus_alloc(size_t size)
277{
278 void *mem;
279
280 mem = kmalloc(size, GFP_ATOMIC);
281 if (!mem)
282 panic("ebus_alloc: out of memory");
283 memset((char *)mem, 0, size);
284 return mem;
285}
286
287static void __init ebus_ranges_init(struct linux_ebus *ebus)
288{
289 int success;
290
291 ebus->num_ebus_ranges = 0;
292 success = prom_getproperty(ebus->prom_node, "ranges",
293 (char *)ebus->ebus_ranges,
294 sizeof(ebus->ebus_ranges));
295 if (success != -1)
296 ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
297}
298
299static void __init ebus_intmap_init(struct linux_ebus *ebus)
300{
301 int success;
302
303 ebus->num_ebus_intmap = 0;
304 success = prom_getproperty(ebus->prom_node, "interrupt-map",
305 (char *)ebus->ebus_intmap,
306 sizeof(ebus->ebus_intmap));
307 if (success == -1)
308 return;
309
310 ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
311
312 success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
313 (char *)&ebus->ebus_intmask,
314 sizeof(ebus->ebus_intmask));
315 if (success == -1) {
316 prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
317 prom_halt();
318 }
319}
320
321int __init ebus_intmap_match(struct linux_ebus *ebus,
322 struct linux_prom_registers *reg,
323 int *interrupt)
324{
325 unsigned int hi, lo, irq;
326 int i;
327
328 if (!ebus->num_ebus_intmap)
329 return 0;
330
331 hi = reg->which_io & ebus->ebus_intmask.phys_hi;
332 lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
333 irq = *interrupt & ebus->ebus_intmask.interrupt;
334 for (i = 0; i < ebus->num_ebus_intmap; i++) {
335 if ((ebus->ebus_intmap[i].phys_hi == hi) &&
336 (ebus->ebus_intmap[i].phys_lo == lo) &&
337 (ebus->ebus_intmap[i].interrupt == irq)) {
338 *interrupt = ebus->ebus_intmap[i].cinterrupt;
339 return 0;
340 }
341 }
342 return -1;
343}
344
/* Populate a child device node under an ebus device: record its
 * name, map its "reg" entries onto the parent's resources (or store
 * them raw for known non-standard nodes), and resolve its
 * interrupts, with a fallback for PROMs that omit the property.
 */
void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
			    struct linux_ebus_child *dev, int non_standard_regs)
{
	int regs[PROMREG_MAX];
	int irqs[PROMREG_MAX];
	int i, len;

	dev->prom_node = node;
	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
	printk(" (%s)", dev->prom_name);

	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
	dev->num_addrs = len / sizeof(regs[0]);

	if (non_standard_regs) {
		/* This is to handle reg properties which are not
		 * in the parent relative format. One example are
		 * children of the i2c device on CompactPCI systems.
		 *
		 * So, for such devices we just record the property
		 * raw in the child resources.
		 */
		for (i = 0; i < dev->num_addrs; i++)
			dev->resource[i].start = regs[i];
	} else {
		for (i = 0; i < dev->num_addrs; i++) {
			int rnum = regs[i];
			if (rnum >= dev->parent->num_addrs) {
				prom_printf("UGH: property for %s was %d, need < %d\n",
					    dev->prom_name, len, dev->parent->num_addrs);
				panic(__FUNCTION__);
			}
			/* NOTE(review): 'rnum' is bounds-checked above but the
			 * parent resource is indexed with 'i' below -- this is
			 * only equivalent when the reg property lists entries
			 * in order; confirm whether 'rnum' was intended here.
			 */
			dev->resource[i].start = dev->parent->resource[i].start;
			dev->resource[i].end = dev->parent->resource[i].end;
			dev->resource[i].flags = IORESOURCE_MEM;
			dev->resource[i].name = dev->prom_name;
		}
	}

	/* Default every slot to "no interrupt" before probing. */
	for (i = 0; i < PROMINTR_MAX; i++)
		dev->irqs[i] = PCI_IRQ_NONE;

	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
	if ((len == -1) || (len == 0)) {
		dev->num_irqs = 0;
		/*
		 * Oh, well, some PROMs don't export interrupts
		 * property to children of EBus devices...
		 *
		 * Be smart about PS/2 keyboard and mouse.
		 */
		if (!strcmp(dev->parent->prom_name, "8042")) {
			if (!strcmp(dev->prom_name, "kb_ps2")) {
				dev->num_irqs = 1;
				dev->irqs[0] = dev->parent->irqs[0];
			} else {
				dev->num_irqs = 1;
				dev->irqs[0] = dev->parent->irqs[1];
			}
		}
	} else {
		dev->num_irqs = len / sizeof(irqs[0]);
		for (i = 0; i < dev->num_irqs; i++) {
			struct pci_pbm_info *pbm = dev->bus->parent;
			struct pci_controller_info *p = pbm->parent;

			if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
				dev->irqs[i] = p->irq_build(pbm,
							    dev->bus->self,
							    irqs[i]);
			} else {
				/* If we get a bogus interrupt property, just
				 * record the raw value instead of punting.
				 */
				dev->irqs[i] = irqs[i];
			}
		}
	}
}
424
425static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
426{
427 if (!strcmp(dev->prom_name, "i2c") ||
428 !strcmp(dev->prom_name, "SUNW,lombus"))
429 return 1;
430 return 0;
431}
432
/* Populate one ebus device node: translate its "reg" entries into
 * resources claimed from the bridge's PCI resources, resolve its
 * interrupts through the ebus interrupt map, and recurse over any
 * child nodes via fill_ebus_child().
 */
void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
{
	struct linux_prom_registers regs[PROMREG_MAX];
	struct linux_ebus_child *child;
	int irqs[PROMINTR_MAX];
	int i, n, len;

	dev->prom_node = node;
	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
	printk(" [%s", dev->prom_name);

	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
	if (len == -1) {
		dev->num_addrs = 0;
		goto probe_interrupts;
	}

	/* The property must be a whole number of reg entries. */
	if (len % sizeof(struct linux_prom_registers)) {
		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
			    dev->prom_name, len,
			    (int)sizeof(struct linux_prom_registers));
		prom_halt();
	}
	dev->num_addrs = len / sizeof(struct linux_prom_registers);

	for (i = 0; i < dev->num_addrs; i++) {
		/* XXX Learn how to interpret ebus ranges... -DaveM */
		if (regs[i].which_io >= 0x10)
			n = (regs[i].which_io - 0x10) >> 2;
		else
			n = regs[i].which_io;

		/* Offset the entry into the bridge's n'th PCI resource
		 * and claim the span from it.
		 */
		dev->resource[i].start = dev->bus->self->resource[n].start;
		dev->resource[i].start += (unsigned long)regs[i].phys_addr;
		dev->resource[i].end =
			(dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
		dev->resource[i].flags = IORESOURCE_MEM;
		dev->resource[i].name = dev->prom_name;
		request_resource(&dev->bus->self->resource[n],
				 &dev->resource[i]);
	}

probe_interrupts:
	/* Default every slot to "no interrupt" before probing. */
	for (i = 0; i < PROMINTR_MAX; i++)
		dev->irqs[i] = PCI_IRQ_NONE;

	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
	if ((len == -1) || (len == 0)) {
		dev->num_irqs = 0;
	} else {
		dev->num_irqs = len / sizeof(irqs[0]);
		for (i = 0; i < dev->num_irqs; i++) {
			struct pci_pbm_info *pbm = dev->bus->parent;
			struct pci_controller_info *p = pbm->parent;

			if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
				dev->irqs[i] = p->irq_build(pbm,
							    dev->bus->self,
							    irqs[i]);
			} else {
				/* If we get a bogus interrupt property, just
				 * record the raw value instead of punting.
				 */
				dev->irqs[i] = irqs[i];
			}
		}
	}

	/* Build the singly-linked list of child devices, if any. */
	if ((node = prom_getchild(node))) {
		printk(" ->");
		dev->children = ebus_alloc(sizeof(struct linux_ebus_child));

		child = dev->children;
		child->next = NULL;
		child->parent = dev;
		child->bus = dev->bus;
		fill_ebus_child(node, &regs[0],
				child, child_regs_nonstandard(dev));

		while ((node = prom_getsibling(node)) != 0) {
			child->next = ebus_alloc(sizeof(struct linux_ebus_child));

			child = child->next;
			child->next = NULL;
			child->parent = dev;
			child->bus = dev->bus;
			fill_ebus_child(node, &regs[0],
					child, child_regs_nonstandard(dev));
		}
	}
	printk("]");
}
525
526static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
527{
528 struct pci_dev *pdev = start;
529
530 do {
531 pdev = pci_find_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev);
532 if (pdev &&
533 (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
534 pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
535 break;
536 } while (pdev != NULL);
537
538 if (pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
539 *is_rio_p = 1;
540 else
541 *is_rio_p = 0;
542
543 return pdev;
544}
545
/* Discover every EBus bridge on the PCI bus, build the global
 * ebus_chain list of struct linux_ebus, and probe all devices and
 * children on each bus.  Empty bridges (e.g. the four on
 * SUNW,pci-qfe) are skipped entirely.
 */
void __init ebus_init(void)
{
	struct pci_pbm_info *pbm;
	struct linux_ebus_device *dev;
	struct linux_ebus *ebus;
	struct pci_dev *pdev;
	struct pcidev_cookie *cookie;
	int nd, ebusnd, is_rio;
	int num_ebus = 0;

	pdev = find_next_ebus(NULL, &is_rio);
	if (!pdev) {
		printk("ebus: No EBus's found.\n");
		return;
	}

	/* The per-pci_dev cookie gives us the PROM node of the bridge. */
	cookie = pdev->sysdata;
	ebusnd = cookie->prom_node;

	ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
	ebus->next = NULL;
	ebus->is_rio = is_rio;

	while (ebusnd) {
		/* SUNW,pci-qfe uses four empty ebuses on it.
		   I think we should not consider them here,
		   as they have half of the properties this
		   code expects and once we do PCI hot-plug,
		   we'd have to tweak with the ebus_chain
		   in the runtime after initialization. -jj */
		if (!prom_getchild (ebusnd)) {
			pdev = find_next_ebus(pdev, &is_rio);
			if (!pdev) {
				/* Nothing but empty bridges seen: tear
				 * down the provisional head node.
				 */
				if (ebus == ebus_chain) {
					ebus_chain = NULL;
					printk("ebus: No EBus's found.\n");
					return;
				}
				break;
			}
			ebus->is_rio = is_rio;
			cookie = pdev->sysdata;
			ebusnd = cookie->prom_node;
			continue;
		}
		printk("ebus%d:", num_ebus);

		prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name));
		ebus->index = num_ebus;
		ebus->prom_node = ebusnd;
		ebus->self = pdev;
		ebus->parent = pbm = cookie->pbm;

		/* Cache address ranges and interrupt map for translation. */
		ebus_ranges_init(ebus);
		ebus_intmap_init(ebus);

		nd = prom_getchild(ebusnd);
		if (!nd)
			goto next_ebus;

		/* Build the device list for this bus, one node at a time. */
		ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device));

		dev = ebus->devices;
		dev->next = NULL;
		dev->children = NULL;
		dev->bus = ebus;
		fill_ebus_device(nd, dev);

		while ((nd = prom_getsibling(nd)) != 0) {
			dev->next = ebus_alloc(sizeof(struct linux_ebus_device));

			dev = dev->next;
			dev->next = NULL;
			dev->children = NULL;
			dev->bus = ebus;
			fill_ebus_device(nd, dev);
		}

	next_ebus:
		printk("\n");

		pdev = find_next_ebus(pdev, &is_rio);
		if (!pdev)
			break;

		cookie = pdev->sysdata;
		ebusnd = cookie->prom_node;

		ebus->next = ebus_alloc(sizeof(struct linux_ebus));
		ebus = ebus->next;
		ebus->next = NULL;
		ebus->is_rio = is_rio;
		++num_ebus;
	}

#ifdef CONFIG_SUN_AUXIO
	auxio_probe();
#endif
}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
new file mode 100644
index 000000000000..a47f2d0b1a29
--- /dev/null
+++ b/arch/sparc64/kernel/entry.S
@@ -0,0 +1,1919 @@
1/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#include <linux/config.h>
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/ptrace.h>
17#include <asm/page.h>
18#include <asm/signal.h>
19#include <asm/pgtable.h>
20#include <asm/processor.h>
21#include <asm/visasm.h>
22#include <asm/estate.h>
23#include <asm/auxio.h>
24
25/* #define SYSCALL_TRACING 1 */
26
27#define curptr g6
28
29#define NR_SYSCALLS 284 /* Each OS is different... */
30
31 .text
32 .align 32
33
34 .globl sparc64_vpte_patchme1
35 .globl sparc64_vpte_patchme2
36/*
37 * On a second level vpte miss, check whether the original fault is to the OBP
38 * range (note that this is only possible for instruction miss, data misses to
39 * obp range do not use vpte). If so, go back directly to the faulting address.
40 * This is because we want to read the tpc, otherwise we have no way of knowing
41 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
42 * also ensures no vpte range addresses are dropped into tlb while obp is
43 * executing (see inherit_locked_prom_mappings() rant).
44 */
/* Second-level VPTE miss handler.  Faulting address is in %g4. */
sparc64_vpte_nucleus:
	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
	mov	0xf, %g5
	sllx	%g5, 28, %g5

	/* Is addr >= LOW_OBP_ADDRESS? */
	cmp	%g4, %g5
	blu,pn	%xcc, sparc64_vpte_patchme1
	 mov	0x1, %g5

	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
	sllx	%g5, 32, %g5

	/* Is addr < HI_OBP_ADDRESS? */
	cmp	%g4, %g5
	blu,pn	%xcc, obp_iaddr_patch
	 nop

	/* These two instructions are patched by paging_init(). */
sparc64_vpte_patchme1:
	sethi	%hi(0), %g5
sparc64_vpte_patchme2:
	or	%g5, %lo(0), %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend. */
	ba,pt	%xcc, sparc64_kpte_continue
	 andn	%g1, 0x3, %g1		/* Finish PMD offset adjustment. */

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 * NOTE(review): [%g1 + %g1] presumably targets TAG_ACCESS
	 * at 2 * TLB_SFSR -- confirm against asm/mmu register map.
	 */
	mov	TLB_SFSR, %g1
	stxa	%g4, [%g1 + %g1] ASI_DMMU
	done
82
	/* Instruction-miss path for the OBP address range.  Walk the
	 * 8K-pagesize page table saved during inherit_prom_mappings()
	 * by hand instead of going through the VPTE area.
	 */
	.globl	obp_iaddr_patch
obp_iaddr_patch:
	/* These two instructions patched by inherit_prom_mappings(). */
	sethi	%hi(0), %g5
	or	%g5, %lo(0), %g5

	/* Behave as if we are at TL0. */
	wrpr	%g0, 1, %tl
	rdpr	%tpc, %g4	/* Find original faulting iaddr */
	srlx	%g4, 13, %g4	/* Throw out context bits */
	sllx	%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS. */
	mov	TLB_SFSR, %g1
	stxa	%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset. */
	srlx	%g4, 23, %g6
	and	%g6, 0x7ff, %g6
	sllx	%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn	%g5, longpath
	 sllx	%g5, 11, %g5

	/* Get PTE offset. */
	srlx	%g4, 13, %g6
	and	%g6, 0x3ff, %g6
	sllx	%g6, 3, %g6

	/* Load PTE. */
	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn %g5, longpath
	 nop

	/* TLB load and return from trap. */
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
122
	/* Data-miss analogue of obp_iaddr_patch: walk the saved OBP
	 * 8K page table and load the DTLB directly.  %g4 holds the
	 * faulting address (vpn + ctx0).
	 */
	.globl	obp_daddr_patch
obp_daddr_patch:
	/* These two instructions patched by inherit_prom_mappings(). */
	sethi	%hi(0), %g5
	or	%g5, %lo(0), %g5

	/* Get PMD offset. */
	srlx	%g4, 23, %g6
	and	%g6, 0x7ff, %g6
	sllx	%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn	%g5, longpath
	 sllx	%g5, 11, %g5

	/* Get PTE offset. */
	srlx	%g4, 13, %g6
	and	%g6, 0x3ff, %g6
	sllx	%g6, 3, %g6

	/* Load PTE. */
	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn %g5, longpath
	 nop

	/* TLB load and return from trap. */
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
	retry
152
/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
kvmap:
	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
	mov	0xf, %g5
	sllx	%g5, 28, %g5

	/* Is addr >= LOW_OBP_ADDRESS? */
	cmp	%g4, %g5
	blu,pn	%xcc, vmalloc_addr
	 mov	0x1, %g5

	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
	sllx	%g5, 32, %g5

	/* Is addr < HI_OBP_ADDRESS? */
	cmp	%g4, %g5
	blu,pn	%xcc, obp_daddr_patch
	 nop

vmalloc_addr:
	/* If we get here, a vmalloc addr accessed, load kernel VPTE. */
	ldxa	[%g3 + %g6] ASI_N, %g5
	brgez,pn %g5, longpath
	 nop

	/* PTE is valid, load into TLB and return from trap. */
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
187
	/* This is trivial with the new code... */
	/* FPU-disabled trap: restore lower (%f0-%f30), upper (%f32-%f62),
	 * or all FP registers from thread_info depending on the saved
	 * FPRS_DL/FPRS_DU bits, re-enable the FPU, and retry.  The
	 * fadd/fmul pairs warm up the FP pipeline while loads complete.
	 * The cplus_fptrap_insn_* sethis are patched on Cheetah+ (see
	 * cheetah_plus_patch_fpdis) to build the nucleus context value.
	 */
	.globl		do_fpdis
do_fpdis:
	sethi	%hi(TSTATE_PEF), %g4					! IEU0
	rdpr	%tstate, %g5
	andcc	%g5, %g4, %g0
	be,pt	%xcc, 1f
	 nop
	rd	%fprs, %g5
	andcc	%g5, FPRS_FEF, %g0
	be,pt	%xcc, 1f
	 nop

	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	add	%g0, %g0, %g0
	ba,a,pt	%xcc, rtrap_clr_l6

1:	ldub	[%g6 + TI_FPSAVED], %g5		! Load	Group
	wr	%g0, FPRS_FEF, %fprs		! LSU	Group+4bubbles
	andcc	%g5, FPRS_FEF, %g0		! IEU1	Group
	be,a,pt	%icc, 1f			! CTI
	 clr	%g7				! IEU0
	ldx	[%g6 + TI_GSR], %g7		! Load	Group
1:	andcc	%g5, FPRS_DL, %g0		! IEU1
	bne,pn	%icc, 2f			! CTI
	 fzero	%f0				! FPA
	andcc	%g5, FPRS_DU, %g0		! IEU1	Group
	bne,pn	%icc, 1f			! CTI
	 fzero	%f2				! FPA
	/* Neither half saved: just zero the whole register file. */
	faddd	%f0, %f2, %f4
	fmuld	%f0, %f2, %f6
	faddd	%f0, %f2, %f8
	fmuld	%f0, %f2, %f10
	faddd	%f0, %f2, %f12
	fmuld	%f0, %f2, %f14
	faddd	%f0, %f2, %f16
	fmuld	%f0, %f2, %f18
	faddd	%f0, %f2, %f20
	fmuld	%f0, %f2, %f22
	faddd	%f0, %f2, %f24
	fmuld	%f0, %f2, %f26
	faddd	%f0, %f2, %f28
	fmuld	%f0, %f2, %f30
	faddd	%f0, %f2, %f32
	fmuld	%f0, %f2, %f34
	faddd	%f0, %f2, %f36
	fmuld	%f0, %f2, %f38
	faddd	%f0, %f2, %f40
	fmuld	%f0, %f2, %f42
	faddd	%f0, %f2, %f44
	fmuld	%f0, %f2, %f46
	faddd	%f0, %f2, %f48
	fmuld	%f0, %f2, %f50
	faddd	%f0, %f2, %f52
	fmuld	%f0, %f2, %f54
	faddd	%f0, %f2, %f56
	fmuld	%f0, %f2, %f58
	b,pt	%xcc, fpdis_exit2
	 faddd	%f0, %f2, %f60
	/* Only upper half saved: restore %f32-%f62, zero the rest. */
1:	mov	SECONDARY_CONTEXT, %g3
	add	%g6, TI_FPREGS + 0x80, %g1
	faddd	%f0, %f2, %f4
	fmuld	%f0, %f2, %f6
	ldxa	[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_1:
	sethi	%hi(0), %g2
	stxa	%g2, [%g3] ASI_DMMU
	membar	#Sync
	add	%g6, TI_FPREGS + 0xc0, %g2
	faddd	%f0, %f2, %f8
	fmuld	%f0, %f2, %f10
	ldda	[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g2] ASI_BLK_S, %f48
	faddd	%f0, %f2, %f12
	fmuld	%f0, %f2, %f14
	faddd	%f0, %f2, %f16
	fmuld	%f0, %f2, %f18
	faddd	%f0, %f2, %f20
	fmuld	%f0, %f2, %f22
	faddd	%f0, %f2, %f24
	fmuld	%f0, %f2, %f26
	faddd	%f0, %f2, %f28
	fmuld	%f0, %f2, %f30
	b,pt	%xcc, fpdis_exit
	 membar	#Sync
	/* Lower half saved: restore %f0-%f30; maybe upper too (3f). */
2:	andcc	%g5, FPRS_DU, %g0
	bne,pt	%icc, 3f
	 fzero	%f32
	mov	SECONDARY_CONTEXT, %g3
	fzero	%f34
	ldxa	[%g3] ASI_DMMU, %g5
	add	%g6, TI_FPREGS, %g1
cplus_fptrap_insn_2:
	sethi	%hi(0), %g2
	stxa	%g2, [%g3] ASI_DMMU
	membar	#Sync
	add	%g6, TI_FPREGS + 0x40, %g2
	faddd	%f32, %f34, %f36
	fmuld	%f32, %f34, %f38
	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g2] ASI_BLK_S, %f16
	faddd	%f32, %f34, %f40
	fmuld	%f32, %f34, %f42
	faddd	%f32, %f34, %f44
	fmuld	%f32, %f34, %f46
	faddd	%f32, %f34, %f48
	fmuld	%f32, %f34, %f50
	faddd	%f32, %f34, %f52
	fmuld	%f32, %f34, %f54
	faddd	%f32, %f34, %f56
	fmuld	%f32, %f34, %f58
	faddd	%f32, %f34, %f60
	fmuld	%f32, %f34, %f62
	ba,pt	%xcc, fpdis_exit
	 membar	#Sync
	/* Both halves saved: block-load the full register file. */
3:	mov	SECONDARY_CONTEXT, %g3
	add	%g6, TI_FPREGS, %g1
	ldxa	[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_3:
	sethi	%hi(0), %g2
	stxa	%g2, [%g3] ASI_DMMU
	membar	#Sync
	mov	0x40, %g2
	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g1 + %g2] ASI_BLK_S, %f16
	add	%g1, 0x80, %g1
	ldda	[%g1] ASI_BLK_S, %f32
	ldda	[%g1 + %g2] ASI_BLK_S, %f48
	membar	#Sync
fpdis_exit:
	/* Restore the secondary context saved in %g5. */
	stxa	%g5, [%g3] ASI_DMMU
	membar	#Sync
fpdis_exit2:
	wr	%g7, 0, %gsr
	ldx	[%g6 + TI_XFSR], %fsr
	rdpr	%tstate, %g3
	or	%g3, %g4, %g3		! anal...
	wrpr	%g3, %tstate
	wr	%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry
331
	.align	32
	/* Hand the fp-other trap off to the C handler do_fpother(). */
fp_other_bounce:
	call	do_fpother
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
338
	.globl	do_fpother_check_fitos
	.align	32
	/* fp-other trap entry: if the trap was an unfinished, inexact
	 * user-mode fitos instruction, emulate it as fitod+fdtos via
	 * the jump tables below; otherwise fall into do_fptrap_after_fsr
	 * (%g7 pre-loaded with fp_other_bounce - 4 as the etrap return).
	 */
do_fpother_check_fitos:
	sethi	%hi(fp_other_bounce - 4), %g7
	or	%g7, %lo(fp_other_bounce - 4), %g7

	/* NOTE: Need to preserve %g7 until we fully commit
	 * to the fitos fixup.
	 */
	stx	%fsr, [%g6 + TI_XFSR]
	rdpr	%tstate, %g3
	andcc	%g3, TSTATE_PRIV, %g0
	bne,pn	%xcc, do_fptrap_after_fsr
	 nop
	ldx	[%g6 + TI_XFSR], %g3
	srlx	%g3, 14, %g1
	and	%g1, 7, %g1
	cmp	%g1, 2			! Unfinished FP-OP
	bne,pn	%xcc, do_fptrap_after_fsr
	 sethi	%hi(1 << 23), %g1	! Inexact
	andcc	%g3, %g1, %g0
	bne,pn	%xcc, do_fptrap_after_fsr
	 rdpr	%tpc, %g1
	lduwa	[%g1] ASI_AIUP, %g3	! This cannot ever fail
	/* Match the faulting opcode against the fitos encoding. */
#define FITOS_MASK	0xc1f83fe0
#define FITOS_COMPARE	0x81a01880
	sethi	%hi(FITOS_MASK), %g1
	or	%g1, %lo(FITOS_MASK), %g1
	and	%g3, %g1, %g1
	sethi	%hi(FITOS_COMPARE), %g2
	or	%g2, %lo(FITOS_COMPARE), %g2
	cmp	%g1, %g2
	bne,pn	%xcc, do_fptrap_after_fsr
	 nop
	/* Save %f62 (used as scratch), then dispatch on rs2 field. */
	std	%f62, [%g6 + TI_FPREGS + (62 * 4)]
	sethi	%hi(fitos_table_1), %g1
	and	%g3, 0x1f, %g2
	or	%g1, %lo(fitos_table_1),  %g1
	sllx	%g2, 2, %g2
	jmpl	%g1 + %g2, %g0
	 ba,pt	%xcc, fitos_emul_continue
380
	/* Dispatch table indexed by rs2: convert source int reg %fN
	 * to double in scratch %f62.  Each entry is one instruction;
	 * the caller jumps into the table and the delay-slot branch
	 * continues at fitos_emul_continue.
	 */
fitos_table_1:
	fitod	%f0, %f62
	fitod	%f1, %f62
	fitod	%f2, %f62
	fitod	%f3, %f62
	fitod	%f4, %f62
	fitod	%f5, %f62
	fitod	%f6, %f62
	fitod	%f7, %f62
	fitod	%f8, %f62
	fitod	%f9, %f62
	fitod	%f10, %f62
	fitod	%f11, %f62
	fitod	%f12, %f62
	fitod	%f13, %f62
	fitod	%f14, %f62
	fitod	%f15, %f62
	fitod	%f16, %f62
	fitod	%f17, %f62
	fitod	%f18, %f62
	fitod	%f19, %f62
	fitod	%f20, %f62
	fitod	%f21, %f62
	fitod	%f22, %f62
	fitod	%f23, %f62
	fitod	%f24, %f62
	fitod	%f25, %f62
	fitod	%f26, %f62
	fitod	%f27, %f62
	fitod	%f28, %f62
	fitod	%f29, %f62
	fitod	%f30, %f62
	fitod	%f31, %f62
414
	/* Second dispatch: extract rd from bits 29:25 of the opcode in
	 * %g3 and jump into fitos_table_2 to write the single result.
	 */
fitos_emul_continue:
	sethi	%hi(fitos_table_2), %g1
	srl	%g3, 25, %g2
	or	%g1, %lo(fitos_table_2), %g1
	and	%g2, 0x1f, %g2
	sllx	%g2, 2, %g2
	jmpl	%g1 + %g2, %g0
	 ba,pt	%xcc, fitos_emul_fini
423
	/* Dispatch table indexed by rd: round the double in scratch
	 * %f62 down to single precision in destination %fN.
	 */
fitos_table_2:
	fdtos	%f62, %f0
	fdtos	%f62, %f1
	fdtos	%f62, %f2
	fdtos	%f62, %f3
	fdtos	%f62, %f4
	fdtos	%f62, %f5
	fdtos	%f62, %f6
	fdtos	%f62, %f7
	fdtos	%f62, %f8
	fdtos	%f62, %f9
	fdtos	%f62, %f10
	fdtos	%f62, %f11
	fdtos	%f62, %f12
	fdtos	%f62, %f13
	fdtos	%f62, %f14
	fdtos	%f62, %f15
	fdtos	%f62, %f16
	fdtos	%f62, %f17
	fdtos	%f62, %f18
	fdtos	%f62, %f19
	fdtos	%f62, %f20
	fdtos	%f62, %f21
	fdtos	%f62, %f22
	fdtos	%f62, %f23
	fdtos	%f62, %f24
	fdtos	%f62, %f25
	fdtos	%f62, %f26
	fdtos	%f62, %f27
	fdtos	%f62, %f28
	fdtos	%f62, %f29
	fdtos	%f62, %f30
	fdtos	%f62, %f31
457
	/* Emulation done: restore the saved %f62 scratch and return. */
fitos_emul_fini:
	ldd	[%g6 + TI_FPREGS + (62 * 4)], %f62
	done
461
	.globl	do_fptrap
	.align	32
	/* Generic FP trap: save %fsr, %gsr and the dirty halves of the
	 * FP register file into thread_info (tracked via TI_FPSAVED),
	 * then disable the FPU and enter the kernel via etrap.  The
	 * cplus_fptrap_insn_4 sethi is patched on Cheetah+.
	 */
do_fptrap:
	stx	%fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
	ldub	[%g6 + TI_FPSAVED], %g3
	rd	%fprs, %g1
	or	%g3, %g1, %g3
	stb	%g3, [%g6 + TI_FPSAVED]
	rd	%gsr, %g3
	stx	%g3, [%g6 + TI_GSR]
	mov	SECONDARY_CONTEXT, %g3
	ldxa	[%g3] ASI_DMMU, %g5
cplus_fptrap_insn_4:
	sethi	%hi(0), %g2
	stxa	%g2, [%g3] ASI_DMMU
	membar	#Sync
	add	%g6, TI_FPREGS, %g2
	andcc	%g1, FPRS_DL, %g0
	be,pn	%icc, 4f
	 mov	0x40, %g3
	stda	%f0, [%g2] ASI_BLK_S
	stda	%f16, [%g2 + %g3] ASI_BLK_S
	andcc	%g1, FPRS_DU, %g0
	be,pn	%icc, 5f
4:	 add	%g2, 128, %g2
	stda	%f32, [%g2] ASI_BLK_S
	stda	%f48, [%g2 + %g3] ASI_BLK_S
5:	mov	SECONDARY_CONTEXT, %g1
	membar	#Sync
	stxa	%g5, [%g1] ASI_DMMU
	membar	#Sync
	ba,pt	%xcc, etrap
	 wr	%g0, 0, %fprs
496
	/* Template instruction copied into the cplus_fptrap_insn_*
	 * sites below: loads the Cheetah+ context-0 value.
	 */
cplus_fptrap_1:
	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2

	.globl	cheetah_plus_patch_fpdis
cheetah_plus_patch_fpdis:
	/* We configure the dTLB512_0 for 4MB pages and the
	 * dTLB512_1 for 8K pages when in context zero.
	 */
	sethi	%hi(cplus_fptrap_1), %o0
	lduw	[%o0 + %lo(cplus_fptrap_1)], %o1

	/* Patch each cplus_fptrap_insn_* site and flush the I-cache. */
	set	cplus_fptrap_insn_1, %o2
	stw	%o1, [%o2]
	flush	%o2
	set	cplus_fptrap_insn_2, %o2
	stw	%o1, [%o2]
	flush	%o2
	set	cplus_fptrap_insn_3, %o2
	stw	%o1, [%o2]
	flush	%o2
	set	cplus_fptrap_insn_4, %o2
	stw	%o1, [%o2]
	flush	%o2

	retl
	 nop
523
	/* The registers for cross calls will be:
	 *
	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
	 *         [high 32-bits] MMU Context Argument 0, place in %g5
	 * DATA 1: Address Argument 1, place in %g6
	 * DATA 2: Address Argument 2, place in %g7
	 *
	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
	 *
	 * Current CPU's IRQ worklist table is locked into %g1,
	 * don't touch.
	 */
	.text
	.align	32
	.globl	do_ivec
	/* Interrupt vector trap: either dispatch a cross-call (data 0
	 * is a kernel address) or queue the ivector bucket onto this
	 * cpu's per-PIL irq worklist and raise the softint.
	 */
do_ivec:
	mov	0x40, %g3
	ldxa	[%g3 + %g0] ASI_INTR_R, %g3
	sethi	%hi(KERNBASE), %g4
	cmp	%g3, %g4
	bgeu,pn	%xcc, do_ivec_xcall
	 srlx	%g3, 32, %g5
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	sethi	%hi(ivector_table), %g2
	sllx	%g3, 5, %g3
	or	%g2, %lo(ivector_table), %g2
	add	%g2, %g3, %g3
	ldx	[%g3 + 0x08], %g2	/* irq_info */
	ldub	[%g3 + 0x04], %g4	/* pil */
	brz,pn	%g2, do_ivec_spurious
	 mov	1, %g2

	/* Chain bucket into irq_work(cpu, pil) and set the softint bit. */
	sllx	%g2, %g4, %g2
	sllx	%g4, 2, %g4
	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
	wr	%g2, 0x0, %set_softint
	retry
do_ivec_xcall:
	/* Cross-call: fetch data 1/2, ack, and jump to the handler. */
	mov	0x50, %g1

	ldxa	[%g1 + %g0] ASI_INTR_R, %g1
	srl	%g3, 0, %g3
	mov	0x60, %g7
	ldxa	[%g7 + %g0] ASI_INTR_R, %g7
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync
	ba,pt	%xcc, 1f
	 nop

	.align	32
1:	jmpl	%g3, %g0
	 nop

do_ivec_spurious:
	/* No irq_info registered: log it via catch_disabled_ivec(). */
	stw	%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
	rdpr	%pstate, %g5

	wrpr	%g5, PSTATE_IG | PSTATE_AG, %pstate
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	call	catch_disabled_ivec
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
594
	.globl	save_alternate_globals
	/* Save all three alternate global register sets (AG, IG, MG),
	 * 8 registers each, into the 0xc0-byte buffer at %o0, with
	 * interrupts disabled while switching register sets.
	 */
save_alternate_globals: /* %o0 = save_area */
	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE, %o1
	wrpr	%o1, PSTATE_AG, %pstate
	stx	%g0, [%o0 + 0x00]
	stx	%g1, [%o0 + 0x08]
	stx	%g2, [%o0 + 0x10]
	stx	%g3, [%o0 + 0x18]
	stx	%g4, [%o0 + 0x20]
	stx	%g5, [%o0 + 0x28]
	stx	%g6, [%o0 + 0x30]
	stx	%g7, [%o0 + 0x38]
	wrpr	%o1, PSTATE_IG, %pstate
	stx	%g0, [%o0 + 0x40]
	stx	%g1, [%o0 + 0x48]
	stx	%g2, [%o0 + 0x50]
	stx	%g3, [%o0 + 0x58]
	stx	%g4, [%o0 + 0x60]
	stx	%g5, [%o0 + 0x68]
	stx	%g6, [%o0 + 0x70]
	stx	%g7, [%o0 + 0x78]
	wrpr	%o1, PSTATE_MG, %pstate
	stx	%g0, [%o0 + 0x80]
	stx	%g1, [%o0 + 0x88]
	stx	%g2, [%o0 + 0x90]
	stx	%g3, [%o0 + 0x98]
	stx	%g4, [%o0 + 0xa0]
	stx	%g5, [%o0 + 0xa8]
	stx	%g6, [%o0 + 0xb0]
	stx	%g7, [%o0 + 0xb8]
	wrpr	%o5, 0x0, %pstate
	retl
	 nop
629
	.globl	restore_alternate_globals
	/* Inverse of save_alternate_globals: reload AG, IG and MG
	 * register sets from the buffer at %o0.
	 */
restore_alternate_globals: /* %o0 = save_area */
	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE, %o1
	wrpr	%o1, PSTATE_AG, %pstate
	ldx	[%o0 + 0x00], %g0
	ldx	[%o0 + 0x08], %g1
	ldx	[%o0 + 0x10], %g2
	ldx	[%o0 + 0x18], %g3
	ldx	[%o0 + 0x20], %g4
	ldx	[%o0 + 0x28], %g5
	ldx	[%o0 + 0x30], %g6
	ldx	[%o0 + 0x38], %g7
	wrpr	%o1, PSTATE_IG, %pstate
	ldx	[%o0 + 0x40], %g0
	ldx	[%o0 + 0x48], %g1
	ldx	[%o0 + 0x50], %g2
	ldx	[%o0 + 0x58], %g3
	ldx	[%o0 + 0x60], %g4
	ldx	[%o0 + 0x68], %g5
	ldx	[%o0 + 0x70], %g6
	ldx	[%o0 + 0x78], %g7
	wrpr	%o1, PSTATE_MG, %pstate
	ldx	[%o0 + 0x80], %g0
	ldx	[%o0 + 0x88], %g1
	ldx	[%o0 + 0x90], %g2
	ldx	[%o0 + 0x98], %g3
	ldx	[%o0 + 0xa0], %g4
	ldx	[%o0 + 0xa8], %g5
	ldx	[%o0 + 0xb0], %g6
	ldx	[%o0 + 0xb8], %g7
	wrpr	%o5, 0x0, %pstate
	retl
	 nop
664
	.globl	getcc, setcc
	/* getcc: extract the icc/xcc condition codes from the saved
	 * tstate in the pt_regs at %o0 and store them into saved %g1.
	 */
getcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	srlx	%o1, 32, %o1
	and	%o1, 0xf, %o1
	retl
	 stx	%o1, [%o0 + PT_V9_G1]
	/* setcc: inverse of getcc -- fold saved %g1 back into the
	 * condition-code field of the saved tstate.
	 */
setcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	ldx	[%o0 + PT_V9_G1], %o2
	or	%g0, %ulo(TSTATE_ICC), %o3
	sllx	%o3, 32, %o3
	andn	%o1, %o3, %o1
	sllx	%o2, 32, %o2
	and	%o2, %o3, %o2
	or	%o1, %o2, %o1
	retl
	 stx	%o1, [%o0 + PT_V9_TSTATE]
683
	.globl	utrap, utrap_ill
	/* User trap dispatch: %g1 holds the registered user handler
	 * address (zero means none -> regular etrap).  Redirect the
	 * trap return (%tnpc) into the user handler.
	 */
utrap:	brz,pn	%g1, etrap
	 nop
	save	%sp, -128, %sp
	rdpr	%tstate, %l6
	rdpr	%cwp, %l7
	andn	%l6, TSTATE_CWP, %l6
	wrpr	%l6, %l7, %tstate
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7
	wrpr	%g1, 0, %tnpc
	done
	/* Illegal utrap: report via bad_trap() and return. */
utrap_ill:
	call	bad_trap
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
701
#ifdef CONFIG_BLK_DEV_FD
	.globl	floppy_hardint
	/* Floppy pseudo-DMA hard interrupt: shovel bytes between the
	 * FDC fifo and the pdma buffer entirely at trap level, only
	 * falling into the soft interrupt path (sparc_floppy_irq) on
	 * transfer completion, fifo drain, or overrun.
	 */
floppy_hardint:
	wr	%g0, (1 << 11), %clear_softint
	sethi	%hi(doing_pdma), %g1
	ld	[%g1 + %lo(doing_pdma)], %g2
	brz,pn	%g2, floppy_dosoftint
	 sethi	%hi(fdc_status), %g3
	ldx	[%g3 + %lo(fdc_status)], %g3
	sethi	%hi(pdma_vaddr), %g5
	ldx	[%g5 + %lo(pdma_vaddr)], %g4
	sethi	%hi(pdma_size), %g5
	ldx	[%g5 + %lo(pdma_size)], %g5

	/* Main byte loop: check FDC status bits, then read or write. */
next_byte:
	lduba	[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	andcc	%g7, 0x80, %g0
	be,pn	%icc, floppy_fifo_emptied
	 andcc	%g7, 0x20, %g0
	be,pn	%icc, floppy_overrun
	 andcc	%g7, 0x40, %g0
	be,pn	%icc, floppy_write
	 sub	%g5, 1, %g5

	inc	%g3
	lduba	[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	dec	%g3
	orcc	%g0, %g5, %g0
	stb	%g7, [%g4]
	bne,pn	%xcc, next_byte
	 add	%g4, 1, %g4

	b,pt	%xcc, floppy_tdone
	 nop

floppy_write:
	ldub	[%g4], %g7
	orcc	%g0, %g5, %g0
	inc	%g3
	stba	%g7, [%g3] ASI_PHYS_BYPASS_EC_E
	dec	%g3
	bne,pn	%xcc, next_byte
	 add	%g4, 1, %g4

	/* Transfer done: save pdma state, pulse AUXIO terminal count. */
floppy_tdone:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(auxio_register), %g1
	ldx	[%g1 + %lo(auxio_register)], %g7
	lduba	[%g7] ASI_PHYS_BYPASS_EC_E, %g5
	or	%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn	%g5, AUXIO_AUX1_MASK, %g5 */
	stba	%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	andn	%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn	%g5, AUXIO_AUX1_MASK, %g5 */

	nop; nop;  nop; nop;  nop; nop;
	nop; nop;  nop; nop;  nop; nop;

	stba	%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	sethi	%hi(doing_pdma), %g1
	b,pt	%xcc, floppy_dosoftint
	 st	%g0, [%g1 + %lo(doing_pdma)]

	/* Fifo drained mid-transfer: save state and just ack the IRQ. */
floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(irq_action), %g1
	or	%g1, %lo(irq_action), %g1
	ldx	[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
	ldx	[%g3 + 0x08], %g4		! action->flags>>48==ino
	sethi	%hi(ivector_table), %g3
	srlx	%g4, 48, %g4
	or	%g3, %lo(ivector_table), %g3
	sllx	%g4, 5, %g4
	ldx	[%g3 + %g4], %g4		! &ivector_table[ino]
	ldx	[%g4 + 0x10], %g4		! bucket->iclr
	stwa	%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
	membar	#Sync				! probably not needed...
	retry

floppy_overrun:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(doing_pdma), %g1
	st	%g0, [%g1 + %lo(doing_pdma)]

	/* Fall back to the C interrupt handler at PIL 15. */
floppy_dosoftint:
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7

	mov	11, %o0
	mov	0, %o1
	call	sparc_floppy_irq
	 add	%sp, PTREGS_OFF, %o2

	b,pt	%xcc, rtrap_irq
	 nop

#endif /* CONFIG_BLK_DEV_FD */
811
	/* XXX Here is stuff we still need to write... -DaveM XXX */
	/* NetBSD-compat syscall entry: unimplemented stub, returns
	 * immediately.
	 */
	.globl	netbsd_syscall
netbsd_syscall:
	retl
	 nop
817
	/* These next few routines must be sure to clear the
	 * SFSR FaultValid bit so that the fast tlb data protection
	 * handler does not flush the wrong context and lock up the
	 * box.
	 */
	.globl	__do_data_access_exception
	.globl	__do_data_access_exception_tl1
	/* TL>1 variant: capture SFSR/SFAR, clear FaultValid, then let
	 * winfix_dax sort out the window-fill mess.
	 */
__do_data_access_exception_tl1:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	ba,pt	%xcc, winfix_dax
	 rdpr	%tpc, %g3
	/* TL1 variant: capture SFSR/SFAR and call the C handler
	 * data_access_exception(regs, sfsr, sfar).
	 */
__do_data_access_exception:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	data_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
854
	.globl	__do_instruction_access_exception
	.globl	__do_instruction_access_exception_tl1
	/* Instruction access exception at TL>1: capture SFSR/SFAR,
	 * clear the IMMU FaultValid bit, and call the TL1 C handler.
	 */
__do_instruction_access_exception_tl1:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etraptl1
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	instruction_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6

	/* Same thing at TL1: dispatch to instruction_access_exception(). */
__do_instruction_access_exception:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	instruction_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
894
	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap
	 * so that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap
	 * is only for correctable errors during memory read
	 * accesses by the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events,
	 * as it is the only situation where we can safely record
	 * and log.  For trap level >1 we just clear the CE bit
	 * in the AFSR and return.
	 */

	/* Our trap handling infrastructure allows us to preserve
	 * two 64-bit values during etrap for arguments to
	 * subsequent C code.  Therefore we encode the information
	 * as follows:
	 *
	 * value 1) Full 64-bits of AFAR
	 * value 2) Low 33-bits of AFSR, then bits 33-->42
	 *          are UDBL error status and bits 43-->52
	 *          are UDBH error status
	 */
	.align	64
	.globl	cee_trap
cee_trap:
	ldxa	[%g0] ASI_AFSR, %g1		! Read AFSR
	ldxa	[%g0] ASI_AFAR, %g2		! Read AFAR
	sllx	%g1, 31, %g1			! Clear reserved bits
	srlx	%g1, 31, %g1			! in AFSR

	/* NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status

	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
	membar	#Sync				! Synchronize ASI stores
1:	mov	0x18, %g5			! Addr of UDB-High error status
	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it

	andcc	%g3, (1 << 8), %g4		! Check CE bit
	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
	or	%g1, %g3, %g1			! Or it in
	be,pn	%xcc, 1f			! Branch if CE bit was clear
	 nop
	nop

	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
	membar	#Sync				! Synchronize ASI stores
1:	mov	1, %g5				! AFSR CE bit is
	sllx	%g5, 20, %g5			! bit 20
	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
	membar	#Sync				! Synchronize ASI stores
	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
	srlx	%g2, (64 - 41), %g2		! in latched AFAR

	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
	mov	%g1, %g4			! Move AFSR+UDB* into save reg
	mov	%g2, %g5			! Move AFAR into save reg
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
	mov	%l4, %o0

	mov	%l5, %o1
	call	cee_log
	 add	%sp, PTREGS_OFF, %o2
	ba,a,pt	%xcc, rtrap_irq
985
	/* Capture I/D/E-cache state into per-cpu error scoreboard.
	 *
	 * Expands inline in the Cheetah error handlers below.  It locates
	 * this cpu's slot in the cheetah_error_log table (cpu id comes
	 * from SAFARI or JBUS config depending on chip variant), records
	 * AFSR/AFAR, then snapshots the D-cache, I-cache and E-cache
	 * lines that cover the faulting address.  If the log pointer is
	 * still NULL, or the slot is already in use (first word != -1),
	 * everything is skipped via label 80.
	 *
	 * %g1: (TL>=0) ? 1 : 0
	 * %g2: scratch
	 * %g3: scratch
	 * %g4: AFSR
	 * %g5: AFAR
	 * %g6: current thread ptr
	 * %g7: scratch
	 */
#define CHEETAH_LOG_ERROR						\
	/* Put "TL1" software bit into AFSR. */				\
	and		%g1, 0x1, %g1;					\
	sllx		%g1, 63, %g2;					\
	or		%g4, %g2, %g4;					\
	/* Get log entry pointer for this cpu at this trap level. */	\
	BRANCH_IF_JALAPENO(g2,g3,50f)					\
	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	ba,pt		%xcc, 60f;					\
	 and		%g2, 0x3ff, %g2;				\
50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	and		%g2, 0x1f, %g2;					\
60:	sllx		%g2, 9, %g2;					\
	sethi		%hi(cheetah_error_log), %g3;			\
	ldx		[%g3 + %lo(cheetah_error_log)], %g3;		\
	brz,pn		%g3, 80f;					\
	 nop;								\
	add		%g3, %g2, %g3;					\
	sllx		%g1, 8, %g1;					\
	add		%g3, %g1, %g1;					\
	/* %g1 holds pointer to the top of the logging scoreboard */	\
	ldx		[%g1 + 0x0], %g7;				\
	cmp		%g7, -1;					\
	bne,pn		%xcc, 80f;					\
	 nop;								\
	stx		%g4, [%g1 + 0x0];				\
	stx		%g5, [%g1 + 0x8];				\
	add		%g1, 0x10, %g1;					\
	/* %g1 now points to D-cache logging area */			\
	set		0x3ff8, %g2;	/* DC_addr mask */		\
	and		%g5, %g2, %g2;	/* DC_addr bits of AFAR */	\
	srlx		%g5, 12, %g3;					\
	or		%g3, 1, %g3;	/* PHYS tag + valid */		\
10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7;			\
	cmp		%g3, %g7;	/* TAG match? */		\
	bne,pt		%xcc, 13f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x20];				\
	stx		%g7, [%g1 + 0x28];				\
	/* A membar Sync is required before and after utag access. */	\
	membar		#Sync;						\
	ldxa		[%g2] ASI_DCACHE_UTAG, %g7;			\
	membar		#Sync;						\
	stx		%g7, [%g1 + 0x30];				\
	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7;		\
	stx		%g7, [%g1 + 0x38];				\
	clr		%g3;						\
12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7;		\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 5), %g3;				\
	cmp		%g3, (4 << 5);					\
	bl,pt		%xcc, 12b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 20f;					\
	 add		%g1, 0x20, %g1;					\
13:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;	/* next D-cache way */		\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;		/* all 4 ways probed? */	\
	bl,pt		%xcc, 10b;					\
	 nop;								\
	add		%g1, 0x40, %g1;					\
20:	/* %g1 now points to I-cache logging area */			\
	set		0x1fe0, %g2;	/* IC_addr mask */		\
	and		%g5, %g2, %g2;	/* IC_addr bits of AFAR */	\
	sllx		%g2, 1, %g2;	/* IC_addr[13:6]==VA[12:5] */	\
	srlx		%g5, (13 - 8), %g3; /* Make PTAG */		\
	andn		%g3, 0xff, %g3;	/* Mask off undefined bits */	\
21:	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	andn		%g7, 0xff, %g7;					\
	cmp		%g3, %g7;					\
	bne,pt		%xcc, 23f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x40];				\
	stx		%g7, [%g1 + 0x48];				\
	add		%g2, (1 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x50];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x60];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	stx		%g7, [%g1 + 0x68];				\
	sub		%g2, (3 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_STAG, %g7;				\
	stx		%g7, [%g1 + 0x58];				\
	clr		%g3;						\
	srlx		%g2, 2, %g2;					\
22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7;			\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 3), %g3;				\
	cmp		%g3, (8 << 3);					\
	bl,pt		%xcc, 22b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 30f;					\
	 add		%g1, 0x30, %g1;					\
23:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;	/* next I-cache way */		\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;		/* all 4 ways probed? */	\
	bl,pt		%xcc, 21b;					\
	 nop;								\
	add		%g1, 0x70, %g1;					\
30:	/* %g1 now points to E-cache logging area */			\
	andn		%g5, (32 - 1), %g2;	/* E-cache subblock */	\
	stx		%g2, [%g1 + 0x20];				\
	ldxa		[%g2] ASI_EC_TAG_DATA, %g7;			\
	stx		%g7, [%g1 + 0x28];				\
	ldxa		[%g2] ASI_EC_R, %g0;				\
	clr		%g3;						\
31:	ldxa		[%g3] ASI_EC_DATA, %g7;				\
	stx		%g7, [%g1 + %g3];				\
	add		%g3, 0x8, %g3;					\
	cmp		%g3, 0x20;					\
	bl,pt		%xcc, 31b;					\
	 nop;								\
80:	/* DONE */
1118
	/* These get patched into the trap table at boot time
	 * once we know we have a cheetah processor.
	 *
	 * Each vector disables error-sensitive caches in the DCU and
	 * jumps to the full handler with %g1 = 0 (trap at TL 0) or
	 * %g1 = 1 (trap at TL >= 1).
	 * NOTE(review): these appear sized to fit a fixed trap-table
	 * slot, so instruction count must not change — confirm before
	 * touching.
	 */
	.globl cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
cheetah_fecc_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1	! Disable both I/D caches
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		0, %g1				! %g1 = 0: trap at TL 0
cheetah_fecc_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		1, %g1				! %g1 = 1: trap at TL >= 1
	.globl cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
cheetah_cee_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1		! CE: only I-cache disabled
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		0, %g1
cheetah_cee_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		1, %g1
	.globl cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
cheetah_deferred_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
	andn		%g1, DCU_DC | DCU_IC, %g1;
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
	membar		#Sync;
	sethi		%hi(cheetah_deferred_trap), %g2
	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
	 mov		0, %g1
cheetah_deferred_trap_vector_tl1:
	membar		#Sync;
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
	andn		%g1, DCU_DC | DCU_IC, %g1;
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
	membar		#Sync;
	sethi		%hi(cheetah_deferred_trap), %g2
	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
	 mov		1, %g1
1179
	/* Cheetah+ specific traps. These are for the new I/D cache parity
	 * error traps.  The first argument to cheetah_plus_parity_handler
	 * is encoded as follows:
	 *
	 * Bit0: 0=dcache,1=icache
	 * Bit1: 0=recoverable,1=unrecoverable
	 *
	 * TL0 parity traps funnel through etrap into the C handler;
	 * TL1 traps switch to interrupt globals and go to the do_*_tl1
	 * recovery paths below.
	 */
	.globl cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
cheetah_plus_dcpe_trap_vector:
	membar		#Sync
	sethi		%hi(do_cheetah_plus_data_parity), %g7
	jmpl		%g7 + %lo(do_cheetah_plus_data_parity), %g0
	 nop
	nop
	nop
	nop

do_cheetah_plus_data_parity:
	ba,pt		%xcc, etrap
	 rd		%pc, %g7
	mov		0x0, %o0		! arg: dcache, recoverable
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
	ba,pt		%xcc, rtrap
	 clr		%l6

cheetah_plus_dcpe_trap_vector_tl1:
	membar		#Sync
	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
	sethi		%hi(do_dcpe_tl1), %g3
	jmpl		%g3 + %lo(do_dcpe_tl1), %g0
	 nop
	nop
	nop
	nop

	.globl cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
cheetah_plus_icpe_trap_vector:
	membar		#Sync
	sethi		%hi(do_cheetah_plus_insn_parity), %g7
	jmpl		%g7 + %lo(do_cheetah_plus_insn_parity), %g0
	 nop
	nop
	nop
	nop

do_cheetah_plus_insn_parity:
	ba,pt		%xcc, etrap
	 rd		%pc, %g7
	mov		0x1, %o0		! arg: icache, recoverable
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
	ba,pt		%xcc, rtrap
	 clr		%l6

cheetah_plus_icpe_trap_vector_tl1:
	membar		#Sync
	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
	sethi		%hi(do_icpe_tl1), %g3
	jmpl		%g3 + %lo(do_icpe_tl1), %g0
	 nop
	nop
	nop
	nop
	/* If we take one of these traps when tl >= 1, then we
	 * jump to interrupt globals.  If some trap level above us
	 * was also using interrupt globals, we cannot recover.
	 * We may use all interrupt global registers except %g6.
	 *
	 * Strategy: walk TSTATE of every active trap level looking for
	 * TSTATE_IG; if found, the state is unrecoverable (fatal path),
	 * otherwise scrub the offending cache and resume via
	 * dcpe_icpe_tl1_common.
	 */
	.globl do_dcpe_tl1, do_icpe_tl1
do_dcpe_tl1:
	rdpr		%tl, %g1		! Save original trap level
	mov		1, %g2			! Setup TSTATE checking loop
	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
1:	wrpr		%g2, %tl		! Set trap level to check
	rdpr		%tstate, %g4		! Read TSTATE for this level
	andcc		%g4, %g3, %g0		! Interrupt globals in use?
	bne,a,pn	%xcc, do_dcpe_tl1_fatal	! Yep, irrecoverable
	 wrpr		%g1, %tl		! Restore original trap level
	add		%g2, 1, %g2		! Next trap level
	cmp		%g2, %g1		! Hit them all yet?
	ble,pt		%icc, 1b		! Not yet
	 nop
	wrpr		%g1, %tl		! Restore original trap level
do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	/* Reset D-cache parity by rewriting every utag and zeroing
	 * every data word of every line, highest address downward.
	 */
	sethi		%hi(1 << 16), %g1	! D-cache size
	mov		(1 << 5), %g2		! D-cache line size
	sub		%g1, %g2, %g1		! Move down 1 cacheline
1:	srl		%g1, 14, %g3		! Compute UTAG
	membar		#Sync
	stxa		%g3, [%g1] ASI_DCACHE_UTAG
	membar		#Sync
	sub		%g2, 8, %g3		! 64-bit data word within line
2:	membar		#Sync
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_DATA
	membar		#Sync
	subcc		%g3, 8, %g3		! Next 64-bit data word
	bge,pt		%icc, 2b
	 nop
	subcc		%g1, %g2, %g1		! Next cacheline
	bge,pt		%icc, 1b
	 nop
	ba,pt		%xcc, dcpe_icpe_tl1_common
	 nop

do_dcpe_tl1_fatal:
	sethi		%hi(1f), %g7
	ba,pt		%xcc, etraptl1
1:	or		%g7, %lo(1b), %g7
	mov		0x2, %o0		! arg: dcache, unrecoverable
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
	ba,pt		%xcc, rtrap
	 clr		%l6

do_icpe_tl1:
	rdpr		%tl, %g1		! Save original trap level
	mov		1, %g2			! Setup TSTATE checking loop
	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
1:	wrpr		%g2, %tl		! Set trap level to check
	rdpr		%tstate, %g4		! Read TSTATE for this level
	andcc		%g4, %g3, %g0		! Interrupt globals in use?
	bne,a,pn	%xcc, do_icpe_tl1_fatal	! Yep, irrecoverable
	 wrpr		%g1, %tl		! Restore original trap level
	add		%g2, 1, %g2		! Next trap level
	cmp		%g2, %g1		! Hit them all yet?
	ble,pt		%icc, 1b		! Not yet
	 nop
	wrpr		%g1, %tl		! Restore original trap level
do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	/* Flush I-cache by invalidating every line's tag. */
	sethi		%hi(1 << 15), %g1	! I-cache size
	mov		(1 << 5), %g2		! I-cache line size
	sub		%g1, %g2, %g1
1:	or		%g1, (2 << 3), %g3
	stxa		%g0, [%g3] ASI_IC_TAG
	membar		#Sync
	subcc		%g1, %g2, %g1
	bge,pt		%icc, 1b
	 nop
	ba,pt		%xcc, dcpe_icpe_tl1_common
	 nop

do_icpe_tl1_fatal:
	sethi		%hi(1f), %g7
	ba,pt		%xcc, etraptl1
1:	or		%g7, %lo(1b), %g7
	mov		0x3, %o0		! arg: icache, unrecoverable
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
	ba,pt		%xcc, rtrap
	 clr		%l6

dcpe_icpe_tl1_common:
	/* Flush D-cache, re-enable D/I caches in DCU and finally
	 * retry the trapping instruction.
	 */
	sethi		%hi(1 << 16), %g1	! D-cache size
	mov		(1 << 5), %g2		! D-cache line size
	sub		%g1, %g2, %g1
1:	stxa		%g0, [%g1] ASI_DCACHE_TAG
	membar		#Sync
	subcc		%g1, %g2, %g1
	bge,pt		%icc, 1b
	 nop
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	or		%g1, (DCU_DC | DCU_IC), %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	retry
	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
	 * in the trap table.  That code has done a memory barrier
	 * and has disabled both the I-cache and D-cache in the DCU
	 * control register.  The I-cache is disabled so that we may
	 * capture the corrupted cache line, and the D-cache is disabled
	 * because corrupt data may have been placed there and we don't
	 * want to reference it.
	 *
	 * %g1 is one if this trap occurred at %tl >= 1.
	 *
	 * Next, we turn off error reporting so that we don't recurse.
	 */
	.globl cheetah_fast_ecc
cheetah_fast_ecc:
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR	! Writing AFSR back clears its sticky bits
	membar		#Sync

	CHEETAH_LOG_ERROR

	/* Raise PIL, enter the kernel via etrap_irq, then hand
	 * AFSR/AFAR (preserved as %l4/%l5 by etrap) to C code.
	 */
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		cheetah_fecc_handler
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap_irq
	/* Our caller has disabled I-cache and performed membar Sync. */
	/* Correctable-ECC path: only CEEN is masked off here (the
	 * uncorrectable path must stay armed), then same flow as
	 * cheetah_fast_ecc: log, etrap_irq at PIL 15, call C handler.
	 */
	.globl cheetah_cee
cheetah_cee:
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR
	membar		#Sync

	CHEETAH_LOG_ERROR

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		cheetah_cee_handler
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap_irq
1417
	/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
	/* Deferred (asynchronous) error: mask both NCEEN and CEEN to
	 * avoid recursion, capture state, then hand off to C.
	 */
	.globl cheetah_deferred_trap
cheetah_deferred_trap:
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR
	membar		#Sync

	CHEETAH_LOG_ERROR

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7
	mov		%l4, %o1		! AFSR, preserved by etrap as %l4
	mov		%l5, %o2		! AFAR, preserved by etrap as %l5
	call		cheetah_deferred_handler
	 add		%sp, PTREGS_OFF, %o0
	ba,a,pt		%xcc, rtrap_irq
1443
	/* Privileged-action trap: clear the MMU fault-status valid bit,
	 * enter the kernel, and dispatch to do_privact(regs).
	 */
	.globl __do_privact
__do_privact:
	mov		TLB_SFSR, %g3
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7	! etrap returns to %g7+4
	call		do_privact
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6
1456
	/* Memory-address-not-aligned trap.  If taken at %tl > 1 this is
	 * a window fixup case (winfix_mna); otherwise dispatch to
	 * mem_address_unaligned(regs, sfar, sfsr).
	 */
	.globl do_mna
do_mna:
	rdpr		%tl, %g3
	cmp		%g3, 1			! condition codes consumed by bgu below

	/* Setup %g4/%g5 now as they are used in the
	 * winfixup code.
	 */
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ldxa		[%g3] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	bgu,pn		%icc, winfix_mna	! %tl > 1: window fixup path
	 rdpr		%tpc, %g3

1:	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! SFAR, preserved by etrap
	mov		%l5, %o2		! SFSR, preserved by etrap
	call		mem_address_unaligned
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6
1483
	/* Unaligned 'lddf' (FP double load) trap: gather SFSR/SFAR and
	 * dispatch to handle_lddfmna(regs, sfar, sfsr).
	 */
	.globl do_lddfmna
do_lddfmna:
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! SFAR, preserved by etrap
	mov		%l5, %o2		! SFSR, preserved by etrap
	call		handle_lddfmna
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6
1501
	/* Unaligned 'stdf' (FP double store) trap: identical flow to
	 * do_lddfmna, dispatching to handle_stdfmna(regs, sfar, sfsr).
	 */
	.globl do_stdfmna
do_stdfmna:
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		handle_stdfmna
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6
1519
	/* Breakpoint trap: hand pt_regs to sparc_breakpoint() and return
	 * to userspace.  Caller has already entered the kernel (etrap).
	 */
	.globl breakpoint_trap
breakpoint_trap:
	call		sparc_breakpoint
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
1526
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
	defined(CONFIG_SOLARIS_EMUL_MODULE)
	/* SunOS uses syscall zero as the 'indirect syscall' it looks
	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
	 * This is complete brain damage.
	 *
	 * We shift the args down one slot, look the real handler up in
	 * sunos_sys_table (falling back to sunos_nosys for out-of-range
	 * numbers), and tail-call it: restoring the saved %o7 in the
	 * call's delay slot makes the handler return to our caller.
	 */
	.globl	sunos_indir
sunos_indir:
	srl		%o0, 0, %o0		! zero-extend 32-bit syscall number
	mov		%o7, %l4		! save return address for tail-call
	cmp		%o0, NR_SYSCALLS
	blu,a,pt	%icc, 1f
	 sll		%o0, 0x2, %o0		! table index = num * 4
	sethi		%hi(sunos_nosys), %l6
	b,pt		%xcc, 2f
	 or		%l6, %lo(sunos_nosys), %l6
1:	sethi		%hi(sunos_sys_table), %l7
	or		%l7, %lo(sunos_sys_table), %l7
	lduw		[%l7 + %o0], %l6	! %l6 = handler address
2:	mov		%o1, %o0		! shift args down one slot
	mov		%o2, %o1
	mov		%o3, %o2
	mov		%o4, %o3
	mov		%o5, %o4
	call		%l6
	 mov		%l4, %o7		! tail-call: handler returns to caller

	/* SunOS getpid() returns pid in %o0 and ppid in %o1; the delay
	 * slot stores the getppid() result before sys_getpid runs.
	 */
	.globl	sunos_getpid
sunos_getpid:
	call	sys_getppid
	 nop
	call	sys_getpid
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]

	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
	.globl	sunos_getuid
sunos_getuid:
	call	sys32_geteuid16
	 nop
	call	sys32_getuid16
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]

	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
	.globl	sunos_getgid
sunos_getgid:
	call	sys32_getegid16
	 nop
	call	sys32_getgid16
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
#endif
1583
1584 /* SunOS's execv() call only specifies the argv argument, the
1585 * environment settings are the same as the calling processes.
1586 */
1587 .globl sunos_execv
1588sys_execve:
1589 sethi %hi(sparc_execve), %g1
1590 ba,pt %xcc, execve_merge
1591 or %g1, %lo(sparc_execve), %g1
1592#ifdef CONFIG_COMPAT
1593 .globl sys_execve
1594sunos_execv:
1595 stx %g0, [%sp + PTREGS_OFF + PT_V9_I2]
1596 .globl sys32_execve
1597sys32_execve:
1598 sethi %hi(sparc32_execve), %g1
1599 or %g1, %lo(sparc32_execve), %g1
1600#endif
1601execve_merge:
1602 flushw
1603 jmpl %g1, %g0
1604 add %sp, PTREGS_OFF, %o0
1605
	/* Syscall stubs that need pt_regs or the stack pointer.  The
	 * "add %o7, 1f-.-4, %o7" idiom in several stubs rewrites the
	 * return address so the called C routine returns to local label
	 * 1 below (the syscall-trace check) instead of here.
	 */
	.globl	sys_pipe, sys_sigpause, sys_nis_syscall
	.globl	sys_sigsuspend, sys_rt_sigsuspend
	.globl	sys_rt_sigreturn
	.globl	sys_ptrace
	.globl	sys_sigaltstack
	.align	32
sys_pipe:	ba,pt		%xcc, sparc_pipe
		 add		%sp, PTREGS_OFF, %o0
sys_nis_syscall:ba,pt		%xcc, c_sys_nis_syscall
		 add		%sp, PTREGS_OFF, %o0
sys_memory_ordering:
		ba,pt		%xcc, sparc_memory_ordering
		 add		%sp, PTREGS_OFF, %o1
sys_sigaltstack:ba,pt		%xcc, do_sigaltstack
		 add		%i6, STACK_BIAS, %o2
#ifdef CONFIG_COMPAT
	.globl	sys32_sigstack
sys32_sigstack:	ba,pt		%xcc, do_sys32_sigstack
		 mov		%i6, %o2
	.globl	sys32_sigaltstack
sys32_sigaltstack:
	ba,pt		%xcc, do_sys32_sigaltstack
	 mov		%i6, %o2
#endif
	.align	32
sys_sigsuspend:	add		%sp, PTREGS_OFF, %o0
		call		do_sigsuspend
		 add		%o7, 1f-.-4, %o7	! return to 1: below
		nop
sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
		add		%sp, PTREGS_OFF, %o2
		call		do_rt_sigsuspend
		 add		%o7, 1f-.-4, %o7
		nop
#ifdef CONFIG_COMPAT
	.globl	sys32_rt_sigsuspend
sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
		srl		%o0, 0, %o0
		add		%sp, PTREGS_OFF, %o2
		call		do_rt_sigsuspend32
		 add		%o7, 1f-.-4, %o7
#endif
	/* NOTE: %o0 has a correct value already */
sys_sigpause:	add		%sp, PTREGS_OFF, %o1
		call		do_sigpause
		 add		%o7, 1f-.-4, %o7
		nop
#ifdef CONFIG_COMPAT
	.globl	sys32_sigreturn
sys32_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_sigreturn32
		 add		%o7, 1f-.-4, %o7
		nop
#endif
sys_rt_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_rt_sigreturn
		 add		%o7, 1f-.-4, %o7
		nop
#ifdef CONFIG_COMPAT
	.globl	sys32_rt_sigreturn
sys32_rt_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_rt_sigreturn32
		 add		%o7, 1f-.-4, %o7
		nop
#endif
sys_ptrace:	add		%sp, PTREGS_OFF, %o0
		call		do_ptrace
		 add		%o7, 1f-.-4, %o7
		nop
	.align	32
	/* Common return path for the stubs above: run syscall_trace if
	 * requested, then leave the kernel via rtrap.
	 */
1:	ldx		[%curptr + TI_FLAGS], %l5
	andcc		%l5, _TIF_SYSCALL_TRACE, %g0
	be,pt		%icc, rtrap
	 clr		%l6
	call		syscall_trace
	 nop

	ba,pt		%xcc, rtrap
	 clr		%l6
1688
	/* This is how fork() was meant to be done, 8 instruction entry.
	 *
	 * I questioned the following code briefly, let me clear things
	 * up so you must not reason on it like I did.
	 *
	 * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
	 * need it here because the only piece of window state we copy to
	 * the child is the CWP register.  Even if the parent sleeps,
	 * we are safe because we stuck it into pt_regs of the parent
	 * so it will not change.
	 *
	 * XXX This raises the question, whether we can do the same on
	 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
	 * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
	 * XXX fork_kwim in UREG_G1 (global registers are considered
	 * XXX volatile across a system call in the sparc ABI I think
	 * XXX if it isn't we can use regs->y instead, anyone who depends
	 * XXX upon the Y register being preserved across a fork deserves
	 * XXX to lose).
	 *
	 * In fact we should take advantage of that fact for other things
	 * during system calls...
	 */
	.globl	sys_fork, sys_vfork, sys_clone, sparc_exit
	.globl	ret_from_syscall
	.align	32
sys_vfork:	/* Under Linux, vfork and fork are just special cases of clone. */
		/* NOTE(review): 0x4000 | 0x0100 look like CLONE_VFORK |
		 * CLONE_VM — confirm against <linux/sched.h>.
		 */
		sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
		or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
		ba,pt	%xcc, sys_clone
sys_fork:	 clr	%o1			! shared delay slot: no new stack
		mov	SIGCHLD, %o0
sys_clone:	flushw
		movrz	%o1, %fp, %o1		! child stack defaults to parent's
		mov	0, %o3
		ba,pt	%xcc, sparc_do_fork
		 add	%sp, PTREGS_OFF, %o2
ret_from_syscall:
		/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
		 * %o7 for us.  Check performance counter stuff too.
		 */
		andn	%o7, _TIF_NEWCHILD, %l0
		stx	%l0, [%g6 + TI_FLAGS]
		call	schedule_tail
		 mov	%g7, %o0
		andcc	%l0, _TIF_PERFCTR, %g0
		be,pt	%icc, 1f
		 nop
		ldx	[%g6 + TI_PCR], %o7
		wr	%g0, %o7, %pcr

		/* Blackbird errata workaround.  See commentary in
		 * smp.c:smp_percpu_timer_interrupt() for more
		 * information.
		 */
		ba,pt	%xcc, 99f
		 nop
		.align	64
99:		wr	%g0, %g0, %pic
		rd	%pic, %g0

1:		b,pt	%xcc, ret_sys_call
		 ldx	[%sp + PTREGS_OFF + PT_V9_I0], %o0
sparc_exit:	/* Fold any other-window state away before exiting, with
		 * interrupts disabled across the window-register update.
		 */
		wrpr	%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
		rdpr	%otherwin, %g1
		rdpr	%cansave, %g3
		add	%g3, %g1, %g3
		wrpr	%g3, 0x0, %cansave
		wrpr	%g0, 0x0, %otherwin
		wrpr	%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
		ba,pt	%xcc, sys_exit
		 stb	%g0, [%g6 + TI_WSAVED]
1761
	/* Out-of-range syscall number: route through sys_ni_syscall via
	 * the common dispatch at label 4 in linux_sparc_syscall.
	 */
linux_sparc_ni_syscall:
	sethi		%hi(sys_ni_syscall), %l7
	b,pt		%xcc, 4f
	 or		%l7, %lo(sys_ni_syscall), %l7

	/* Trace-entry shim for 32-bit syscalls: after syscall_trace,
	 * re-load the (possibly modified) args zero-extended to 32 bits
	 * and rejoin the dispatch at label 2.
	 */
linux_syscall_trace32:
	call		syscall_trace
	 nop
	srl		%i0, 0, %o0
	mov		%i4, %o4
	srl		%i1, 0, %o1
	srl		%i2, 0, %o2
	b,pt		%xcc, 2f
	 srl		%i3, 0, %o3

	/* Trace-entry shim for 64-bit syscalls. */
linux_syscall_trace:
	call		syscall_trace
	 nop
	mov		%i0, %o0
	mov		%i1, %o1
	mov		%i2, %o2
	mov		%i3, %o3
	b,pt		%xcc, 2f
	 mov		%i4, %o4
1786
1787
	/* Linux 32-bit and SunOS system calls enter here... */
	/* On entry %g1 holds the syscall number and %l7 the syscall
	 * table base (set up by the trap-entry code — not visible here,
	 * confirm against the trap table).  32-bit entry zero-extends
	 * every argument with srl.  The !-comments record UltraSPARC
	 * pipeline slotting (IEU0/IEU1/Load/CTI groups).
	 */
	.align	32
	.globl	linux_sparc_syscall32
linux_sparc_syscall32:
	/* Direct access to user regs, much faster. */
	cmp		%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
	 srl		%i0, 0, %o0				! IEU0
	sll		%g1, 2, %l4				! IEU0	Group
#ifdef SYSCALL_TRACING
	call		syscall_trace_entry
	 add		%sp, PTREGS_OFF, %o0
	srl		%i0, 0, %o0
#endif
	srl		%i4, 0, %o4				! IEU1
	lduw		[%l7 + %l4], %l7			! Load
	srl		%i1, 0, %o1				! IEU0	Group
	ldx		[%curptr + TI_FLAGS], %l0		! Load

	srl		%i5, 0, %o5				! IEU1
	srl		%i2, 0, %o2				! IEU0	Group
	andcc		%l0, _TIF_SYSCALL_TRACE, %g0		! IEU0	Group
	bne,pn		%icc, linux_syscall_trace32		! CTI
	 mov		%i0, %l5				! IEU1
	call		%l7					! CTI	Group brk forced
	 srl		%i3, 0, %o3				! IEU0
	ba,a,pt		%xcc, 3f

	/* Linux native and SunOS system calls enter here... */
	.align	32
	.globl	linux_sparc_syscall, ret_sys_call
linux_sparc_syscall:
	/* Direct access to user regs, much faster. */
	cmp		%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
	 mov		%i0, %o0				! IEU0
	sll		%g1, 2, %l4				! IEU0	Group
#ifdef SYSCALL_TRACING
	call		syscall_trace_entry
	 add		%sp, PTREGS_OFF, %o0
	mov		%i0, %o0
#endif
	mov		%i1, %o1				! IEU1
	lduw		[%l7 + %l4], %l7			! Load
4:	mov		%i2, %o2				! IEU0	Group
	ldx		[%curptr + TI_FLAGS], %l0		! Load

	mov		%i3, %o3				! IEU1
	mov		%i4, %o4				! IEU0	Group
	andcc		%l0, _TIF_SYSCALL_TRACE, %g0		! IEU1	Group+1 bubble
	bne,pn		%icc, linux_syscall_trace		! CTI	Group
	 mov		%i0, %l5				! IEU0
2:	call		%l7					! CTI	Group brk forced
	 mov		%i5, %o5				! IEU0
	nop
	/* Common syscall return: store the result, then set/clear the
	 * carry bits in TSTATE (error flag for userland), negate errno
	 * on failure, advance TPC/TNPC past the trap instruction, and
	 * exit via rtrap (through linux_syscall_trace2 when tracing).
	 */
3:	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
#ifdef SYSCALL_TRACING
	mov		%o0, %o1
	call		syscall_trace_exit
	 add		%sp, PTREGS_OFF, %o0
	mov		%o1, %o0
#endif
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
	ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
	sra		%o0, 0, %o0
	mov		%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
	sllx		%g2, 32, %g2

	/* Check if force_successful_syscall_return()
	 * was invoked.
	 */
	ldx		[%curptr + TI_FLAGS], %l0
	andcc		%l0, _TIF_SYSCALL_SUCCESS, %g0
	be,pt		%icc, 1f
	 andn		%l0, _TIF_SYSCALL_SUCCESS, %l0
	ba,pt		%xcc, 80f		! forced success: skip errno test
	 stx		%l0, [%curptr + TI_FLAGS]

1:
	/* Values in [-ERESTART_RESTARTBLOCK, -1] are errors. */
	cmp		%o0, -ERESTART_RESTARTBLOCK
	bgeu,pn		%xcc, 1f
	 andcc		%l0, _TIF_SYSCALL_TRACE, %l6
80:
	/* System call success, clear Carry condition code. */
	andn		%g3, %g2, %g3
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, linux_syscall_trace2
	 add		%l1, 0x4, %l2		! npc = npc+4
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt		%xcc, rtrap_clr_l6
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	andcc		%l0, _TIF_SYSCALL_TRACE, %l6
	sub		%g0, %o0, %o0		! %o0 = -errno -> abs(errno)
	or		%g3, %g2, %g3
	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
	mov		1, %l6
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, linux_syscall_trace2
	 add		%l1, 0x4, %l2		! npc = npc+4
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]

	b,pt		%xcc, rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
linux_syscall_trace2:
	call		syscall_trace
	 nop
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt		%xcc, rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1904
	/* Flush all user register windows to the stack: do 'save' once
	 * per %otherwin window (counting in %g2), then 'restore' the
	 * same number of times to return to the original window.
	 */
	.align	32
	.globl	__flushw_user
__flushw_user:
	rdpr		%otherwin, %g1
	brz,pn		%g1, 2f			! nothing to flush
	 clr		%g2
1:	save		%sp, -128, %sp
	rdpr		%otherwin, %g1
	brnz,pt		%g1, 1b
	 add		%g2, 1, %g2		! count windows saved
1:	sub		%g2, 1, %g2
	brnz,pt		%g2, 1b
	 restore	%g0, %g0, %g0		! unwind back to caller's window
2:	retl
	 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
new file mode 100644
index 000000000000..50d2af1d98ae
--- /dev/null
+++ b/arch/sparc64/kernel/etrap.S
@@ -0,0 +1,301 @@
1/* $Id: etrap.S,v 1.46 2002/02/09 19:49:30 davem Exp $
2 * etrap.S: Preparing for entry into the kernel on Sparc V9.
3 *
4 * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 */
7
8#include <linux/config.h>
9
10#include <asm/asi.h>
11#include <asm/pstate.h>
12#include <asm/ptrace.h>
13#include <asm/page.h>
14#include <asm/spitfire.h>
15#include <asm/head.h>
16#include <asm/processor.h>
17#include <asm/mmu.h>
18
19#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
20#define ETRAP_PSTATE1 (PSTATE_RMO | PSTATE_PRIV)
21#define ETRAP_PSTATE2 \
22 (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
23
/*
 * On entry, %g7 is return address - 0x4.
 * %g4 and %g5 will be preserved %l4 and %l5 respectively.
 *
 * etrap builds a pt_regs frame (on the kernel stack for traps from
 * kernel mode, at TASK_REGOFF in the thread for traps from user
 * mode), switches to the kernel's primary context, saves all
 * globals/ins into the frame and jumps back to %g7+4 with %g6 =
 * thread_info and %g4 = task.  etrap_irq is the same but with the
 * new PIL already in %g2.
 */

	.text
	.align	64
	.globl	etrap, etrap_irq, etraptl1
etrap:		rdpr	%pil, %g2
etrap_irq:
		rdpr	%tstate, %g1
		sllx	%g2, 20, %g3		! merge PIL into saved TSTATE
		andcc	%g1, TSTATE_PRIV, %g0	! trap from kernel mode?
		or	%g1, %g3, %g1
		bne,pn	%xcc, 1f
		 sub	%sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
		wrpr	%g0, 7, %cleanwin

		/* User-mode trap: regs go at TASK_REGOFF in the thread. */
		sethi	%hi(TASK_REGOFF), %g2
		sethi	%hi(TSTATE_PEF), %g3
		or	%g2, %lo(TASK_REGOFF), %g2
		and	%g1, %g3, %g3
		brnz,pn	%g3, 1f			! FPU enabled? keep %fprs
		 add	%g6, %g2, %g2
		wr	%g0, 0, %fprs
1:		rdpr	%tpc, %g3

		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
		rdpr	%tnpc, %g1
		stx	%g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
		rd	%y, %g3
		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
		st	%g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
		mov	%g6, %l6

		bne,pn	%xcc, 3f		! kernel-mode trap: FP depth path
		 mov	PRIMARY_CONTEXT, %l4
		rdpr	%canrestore, %g3
		rdpr	%wstate, %g2
		wrpr	%g0, 0, %canrestore
		sll	%g2, 3, %g2
		mov	1, %l5
		stb	%l5, [%l6 + TI_FPDEPTH]

		wrpr	%g3, 0, %otherwin
		wrpr	%g2, 0, %wstate
		/* The two sethis below are rewritten at boot by
		 * cheetah_plus_patch_etrap (see etrap.S tail) to load the
		 * cheetah-plus kernel context value.
		 */
cplus_etrap_insn_1:
		sethi	%hi(0), %g3
		sllx	%g3, 32, %g3
cplus_etrap_insn_2:
		sethi	%hi(0), %g2
		or	%g3, %g2, %g3
		stxa	%g3, [%l4] ASI_DMMU	! switch to kernel context
		flush	%l6
		wr	%g0, ASI_AIUS, %asi
2:		wrpr	%g0, 0x0, %tl
		mov	%g4, %l4		! preserve %g4/%g5 per contract
		mov	%g5, %l5

		mov	%g7, %l2
		wrpr	%g0, ETRAP_PSTATE1, %pstate
		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]

		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]
		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]

		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
		wrpr	%g0, ETRAP_PSTATE2, %pstate
		mov	%l6, %g6
#ifdef CONFIG_SMP
		mov	TSB_REG, %g3
		ldxa	[%g3] ASI_IMMU, %g5
#endif
		jmpl	%l2 + 0x4, %g0		! return to caller's %g7 + 4
		 ldx	[%g6 + TI_TASK], %g4

		/* Kernel-mode trap: bump the per-thread FP-save depth and
		 * clear the new level's saved-FP flag, then rejoin at 2.
		 */
3:		ldub	[%l6 + TI_FPDEPTH], %l5
		add	%l6, TI_FPSAVED + 1, %l4
		srl	%l5, 1, %l3
		add	%l5, 2, %l5
		stb	%l5, [%l6 + TI_FPDEPTH]
		ba,pt	%xcc, 2b
		 stb	%g0, [%l4 + %l3]
		nop
120
etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
		 * We place this right after pt_regs on the trap stack.
		 * The layout is (corrected to match the stores below):
		 *	0x00	TL1's TSTATE
		 *	0x08	TL1's TPC
		 *	0x10	TL1's TNPC
		 *	0x18	TL1's TT
		 *	 ...	(0x20 per trap level)
		 *	0x78	TL4's TT
		 *	0x80	TL
		 * Total frame: 4 levels * 4 regs * 8 bytes + 8 = 0x88.
		 */
		sub	%sp, ((4 * 8) * 4) + 8, %g2
		rdpr	%tl, %g1

		wrpr	%g0, 1, %tl
		rdpr	%tstate, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x00]
		rdpr	%tpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x08]
		rdpr	%tnpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x10]
		rdpr	%tt, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x18]

		wrpr	%g0, 2, %tl
		rdpr	%tstate, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x20]
		rdpr	%tpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x28]
		rdpr	%tnpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x30]
		rdpr	%tt, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x38]

		wrpr	%g0, 3, %tl
		rdpr	%tstate, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x40]
		rdpr	%tpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x48]
		rdpr	%tnpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x50]
		rdpr	%tt, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x58]

		wrpr	%g0, 4, %tl
		rdpr	%tstate, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x60]
		rdpr	%tpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x68]
		rdpr	%tnpc, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x70]
		rdpr	%tt, %g3
		stx	%g3, [%g2 + STACK_BIAS + 0x78]

		wrpr	%g1, %tl		! restore original trap level
		stx	%g1, [%g2 + STACK_BIAS + 0x80]

		/* Rejoin etrap with the stack already moved below the
		 * TL save area.
		 */
		rdpr	%tstate, %g1
		sub	%g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
		ba,pt	%xcc, 1b
		 andcc	%g1, TSTATE_PRIV, %g0
182
	/* scetrap: lighter-weight trap entry used for system calls.
	 * Like etrap it builds a pt_regs frame and switches context,
	 * but it returns via 'done' with TNPC set to %g7+4 instead of
	 * jumping back through %g7, and it does not save Y or %g1-%g3
	 * scratch beyond what is stored below.
	 */
	.align	64
	.globl	scetrap
scetrap:	rdpr	%pil, %g2
		rdpr	%tstate, %g1
		sllx	%g2, 20, %g3		! merge PIL into saved TSTATE
		andcc	%g1, TSTATE_PRIV, %g0	! trap from kernel mode?
		or	%g1, %g3, %g1
		bne,pn	%xcc, 1f
		 sub	%sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
		wrpr	%g0, 7, %cleanwin

		sllx	%g1, 51, %g3		! sign bit <- TSTATE_PEF
		sethi	%hi(TASK_REGOFF), %g2
		or	%g2, %lo(TASK_REGOFF), %g2
		brlz,pn	%g3, 1f			! FPU enabled? keep %fprs
		 add	%g6, %g2, %g2
		wr	%g0, 0, %fprs
1:		rdpr	%tpc, %g3
		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]

		rdpr	%tnpc, %g1
		stx	%g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
		mov	%g6, %l6
		bne,pn	%xcc, 2f		! kernel-mode: keep ASI_P
		 mov	ASI_P, %l7
		rdpr	%canrestore, %g3

		rdpr	%wstate, %g2
		wrpr	%g0, 0, %canrestore
		sll	%g2, 3, %g2
		mov	PRIMARY_CONTEXT, %l4
		wrpr	%g3, 0, %otherwin
		wrpr	%g2, 0, %wstate
		/* Patched at boot by cheetah_plus_patch_etrap, like the
		 * cplus_etrap_insn_1/2 pair in etrap above.
		 */
cplus_etrap_insn_3:
		sethi	%hi(0), %g3
		sllx	%g3, 32, %g3
cplus_etrap_insn_4:
		sethi	%hi(0), %g2
		or	%g3, %g2, %g3
		stxa	%g3, [%l4] ASI_DMMU	! switch to kernel context
		flush	%l6

		mov	ASI_AIUS, %l7		! user-mode: secondary user ASI
2:		mov	%g4, %l4		! preserve %g4/%g5 per contract
		mov	%g5, %l5
		add	%g7, 0x4, %l2		! return point for 'done'
		wrpr	%g0, ETRAP_PSTATE1, %pstate
		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
		sllx	%l7, 24, %l7		! position ASI field for TSTATE

		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
		rdpr	%cwp, %l0
		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
		or	%l7, %l0, %l7		! merge CWP into new TSTATE
		sethi	%hi(TSTATE_RMO | TSTATE_PEF), %l0

		or	%l7, %l0, %l7
		wrpr	%l2, %tnpc
		wrpr	%l7, (TSTATE_PRIV | TSTATE_IE), %tstate
		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]

		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]
		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
		mov	%l6, %g6
		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
#ifdef CONFIG_SMP
		mov	TSB_REG, %g3
		ldxa	[%g3] ASI_IMMU, %g5
#endif
		ldx	[%g6 + TI_TASK], %g4
		done
264
#undef TASK_REGOFF
#undef ETRAP_PSTATE1

/* Template instructions: the 32-bit instruction words at these labels
 * are copied over the cplus_etrap_insn_{1..4} patch sites above,
 * turning the placeholder "sethi %hi(0)" pairs into loads of the
 * cheetah-plus kernel context value.
 */
cplus_einsn_1:
	sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %g3
cplus_einsn_2:
	sethi			%hi(CTX_CHEETAH_PLUS_CTX0), %g2

	.globl			cheetah_plus_patch_etrap
cheetah_plus_patch_etrap:
	/* We configure the dTLB512_0 for 4MB pages and the
	 * dTLB512_1 for 8K pages when in context zero.
	 */
	sethi			%hi(cplus_einsn_1), %o0
	sethi			%hi(cplus_etrap_insn_1), %o2
	lduw			[%o0 + %lo(cplus_einsn_1)], %o1		! fetch template insn word
	or			%o2, %lo(cplus_etrap_insn_1), %o2
	stw			%o1, [%o2]				! patch etrap site
	flush			%o2					! flush I-cache for patched insn
	sethi			%hi(cplus_etrap_insn_3), %o2
	or			%o2, %lo(cplus_etrap_insn_3), %o2
	stw			%o1, [%o2]				! patch scetrap site
	flush			%o2

	sethi			%hi(cplus_einsn_2), %o0
	sethi			%hi(cplus_etrap_insn_2), %o2
	lduw			[%o0 + %lo(cplus_einsn_2)], %o1
	or			%o2, %lo(cplus_etrap_insn_2), %o2
	stw			%o1, [%o2]
	flush			%o2
	sethi			%hi(cplus_etrap_insn_4), %o2
	or			%o2, %lo(cplus_etrap_insn_4), %o2
	stw			%o1, [%o2]
	flush			%o2

	retl
	 nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 000000000000..8104a56ca2d8
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,782 @@
1/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
2 * head.S: Initial boot code for the Sparc64 port of Linux.
3 *
4 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 */
9
10#include <linux/config.h>
11#include <linux/version.h>
12#include <linux/errno.h>
13#include <asm/thread_info.h>
14#include <asm/asi.h>
15#include <asm/pstate.h>
16#include <asm/ptrace.h>
17#include <asm/spitfire.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/errno.h>
21#include <asm/signal.h>
22#include <asm/processor.h>
23#include <asm/lsu.h>
24#include <asm/dcr.h>
25#include <asm/dcu.h>
26#include <asm/head.h>
27#include <asm/ttable.h>
28#include <asm/mmu.h>
29
30/* This section from _start to sparc64_boot_end should fit into
31 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
32 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
33 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
34 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
35 */
36
37 .text
38 .globl start, _start, stext, _stext
39_start:
40start:
41_stext:
42stext:
43bootup_user_stack:
44! 0x0000000000404000
45 b sparc64_boot
46 flushw /* Flush register file. */
47
48/* This stuff has to be in sync with SILO and other potential boot loaders
49 * Fields should be kept upward compatible and whenever any change is made,
50 * HdrS version should be incremented.
51 */
52 .global root_flags, ram_flags, root_dev
53 .global sparc_ramdisk_image, sparc_ramdisk_size
54 .global sparc_ramdisk_image64
55
56 .ascii "HdrS"
57 .word LINUX_VERSION_CODE
58
59 /* History:
60 *
61 * 0x0300 : Supports being located at other than 0x4000
62 * 0x0202 : Supports kernel params string
63 * 0x0201 : Supports reboot_command
64 */
65 .half 0x0301 /* HdrS version */
66
67root_flags:
68 .half 1
69root_dev:
70 .half 0
71ram_flags:
72 .half 0
73sparc_ramdisk_image:
74 .word 0
75sparc_ramdisk_size:
76 .word 0
77 .xword reboot_command
78 .xword bootstr_info
79sparc_ramdisk_image64:
80 .xword 0
81 .word _end
82
83 /* We must be careful, 32-bit OpenBOOT will get confused if it
84 * tries to save away a register window to a 64-bit kernel
85 * stack address. Flush all windows, disable interrupts,
86 * remap if necessary, jump onto kernel trap table, then kernel
87 * stack, or else we die.
88 *
89 * PROM entry point is on %o4
90 */
91sparc64_boot:
92 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
93 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
94 ba,pt %xcc, spitfire_boot
95 nop
96
97cheetah_plus_boot:
98 /* Preserve OBP chosen DCU and DCR register settings. */
99 ba,pt %xcc, cheetah_generic_boot
100 nop
101
102cheetah_boot:
103 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
104 wr %g1, %asr18
105
106 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
107 or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
108 sllx %g7, 32, %g7
109 or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
110 stxa %g7, [%g0] ASI_DCU_CONTROL_REG
111 membar #Sync
112
113cheetah_generic_boot:
114 mov TSB_EXTENSION_P, %g3
115 stxa %g0, [%g3] ASI_DMMU
116 stxa %g0, [%g3] ASI_IMMU
117 membar #Sync
118
119 mov TSB_EXTENSION_S, %g3
120 stxa %g0, [%g3] ASI_DMMU
121 membar #Sync
122
123 mov TSB_EXTENSION_N, %g3
124 stxa %g0, [%g3] ASI_DMMU
125 stxa %g0, [%g3] ASI_IMMU
126 membar #Sync
127
128 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
129 wr %g0, 0, %fprs
130
131 /* Just like for Spitfire, we probe itlb-2 for a mapping which
132 * matches our current %pc. We take the physical address in
133 * that mapping and use it to make our own.
134 */
135
136 /* %g5 holds the tlb data */
137 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
138 sllx %g5, 32, %g5
139 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
140
141 /* Put PADDR tlb data mask into %g3. */
142 sethi %uhi(_PAGE_PADDR), %g3
143 or %g3, %ulo(_PAGE_PADDR), %g3
144 sllx %g3, 32, %g3
145 sethi %hi(_PAGE_PADDR), %g7
146 or %g7, %lo(_PAGE_PADDR), %g7
147 or %g3, %g7, %g3
148
149 set 2 << 16, %l0 /* TLB entry walker. */
150 set 0x1fff, %l2 /* Page mask. */
151 rd %pc, %l3
152 andn %l3, %l2, %g2 /* vaddr comparator */
153
1541: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
155 membar #Sync
156 andn %g1, %l2, %g1
157 cmp %g1, %g2
158 be,pn %xcc, cheetah_got_tlbentry
159 nop
160 and %l0, (127 << 3), %g1
161 cmp %g1, (127 << 3)
162 blu,pt %xcc, 1b
163 add %l0, (1 << 3), %l0
164
165 /* Search the small TLB. OBP never maps us like that but
166 * newer SILO can.
167 */
168 clr %l0
169
1701: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
171 membar #Sync
172 andn %g1, %l2, %g1
173 cmp %g1, %g2
174 be,pn %xcc, cheetah_got_tlbentry
175 nop
176 cmp %l0, (15 << 3)
177 blu,pt %xcc, 1b
178 add %l0, (1 << 3), %l0
179
180 /* BUG() if we get here... */
181 ta 0x5
182
183cheetah_got_tlbentry:
184 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
185 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
186 membar #Sync
187 and %g1, %g3, %g1
188 set 0x5fff, %l0
189 andn %g1, %l0, %g1
190 or %g5, %g1, %g5
191
192 /* Clear out any KERNBASE area entries. */
193 set 2 << 16, %l0
194 sethi %hi(KERNBASE), %g3
195 sethi %hi(KERNBASE<<1), %g7
196 mov TLB_TAG_ACCESS, %l7
197
198 /* First, check ITLB */
1991: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
200 membar #Sync
201 andn %g1, %l2, %g1
202 cmp %g1, %g3
203 blu,pn %xcc, 2f
204 cmp %g1, %g7
205 bgeu,pn %xcc, 2f
206 nop
207 stxa %g0, [%l7] ASI_IMMU
208 membar #Sync
209 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
210 membar #Sync
211
2122: and %l0, (127 << 3), %g1
213 cmp %g1, (127 << 3)
214 blu,pt %xcc, 1b
215 add %l0, (1 << 3), %l0
216
217 /* Next, check DTLB */
218 set 2 << 16, %l0
2191: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
220 membar #Sync
221 andn %g1, %l2, %g1
222 cmp %g1, %g3
223 blu,pn %xcc, 2f
224 cmp %g1, %g7
225 bgeu,pn %xcc, 2f
226 nop
227 stxa %g0, [%l7] ASI_DMMU
228 membar #Sync
229 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
230 membar #Sync
231
2322: and %l0, (511 << 3), %g1
233 cmp %g1, (511 << 3)
234 blu,pt %xcc, 1b
235 add %l0, (1 << 3), %l0
236
237 /* On Cheetah+, have to check second DTLB. */
238 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
239 ba,pt %xcc, 9f
240 nop
241
2422: set 3 << 16, %l0
2431: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
244 membar #Sync
245 andn %g1, %l2, %g1
246 cmp %g1, %g3
247 blu,pn %xcc, 2f
248 cmp %g1, %g7
249 bgeu,pn %xcc, 2f
250 nop
251 stxa %g0, [%l7] ASI_DMMU
252 membar #Sync
253 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
254 membar #Sync
255
2562: and %l0, (511 << 3), %g1
257 cmp %g1, (511 << 3)
258 blu,pt %xcc, 1b
259 add %l0, (1 << 3), %l0
260
2619:
262
263 /* Now lock the TTE we created into ITLB-0 and DTLB-0,
264 * entry 15 (and maybe 14 too).
265 */
266 sethi %hi(KERNBASE), %g3
267 set (0 << 16) | (15 << 3), %g7
268 stxa %g3, [%l7] ASI_DMMU
269 membar #Sync
270 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
271 membar #Sync
272 stxa %g3, [%l7] ASI_IMMU
273 membar #Sync
274 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
275 membar #Sync
276 flush %g3
277 membar #Sync
278 sethi %hi(_end), %g3 /* Check for bigkernel case */
279 or %g3, %lo(_end), %g3
280 srl %g3, 23, %g3 /* Check if _end > 8M */
281 brz,pt %g3, 1f
282 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
283 sethi %hi(0x400000), %g3
284 or %g3, %lo(0x400000), %g3
285 add %g5, %g3, %g5 /* New tte data */
286 andn %g5, (_PAGE_G), %g5
287 sethi %hi(KERNBASE+0x400000), %g3
288 or %g3, %lo(KERNBASE+0x400000), %g3
289 set (0 << 16) | (14 << 3), %g7
290 stxa %g3, [%l7] ASI_DMMU
291 membar #Sync
292 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
293 membar #Sync
294 stxa %g3, [%l7] ASI_IMMU
295 membar #Sync
296 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
297 membar #Sync
298 flush %g3
299 membar #Sync
300 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
301 ba,pt %xcc, 1f
302 nop
303
3041: set sun4u_init, %g2
305 jmpl %g2 + %g0, %g0
306 nop
307
308spitfire_boot:
309 /* Typically PROM has already enabled both MMU's and both on-chip
310 * caches, but we do it here anyway just to be paranoid.
311 */
312 mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
313 stxa %g1, [%g0] ASI_LSU_CONTROL
314 membar #Sync
315
316 /*
317 * Make sure we are in privileged mode, have address masking,
318 * using the ordinary globals and have enabled floating
319 * point.
320 *
321 * Again, typically PROM has left %pil at 13 or similar, and
322 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
323 */
324 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
325 wr %g0, 0, %fprs
326
327spitfire_create_mappings:
328 /* %g5 holds the tlb data */
329 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
330 sllx %g5, 32, %g5
331 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
332
333 /* Base of physical memory cannot reliably be assumed to be
334 * at 0x0! Figure out where it happens to be. -DaveM
335 */
336
337 /* Put PADDR tlb data mask into %g3. */
338 sethi %uhi(_PAGE_PADDR_SF), %g3
339 or %g3, %ulo(_PAGE_PADDR_SF), %g3
340 sllx %g3, 32, %g3
341 sethi %hi(_PAGE_PADDR_SF), %g7
342 or %g7, %lo(_PAGE_PADDR_SF), %g7
343 or %g3, %g7, %g3
344
345 /* Walk through entire ITLB, looking for entry which maps
346 * our %pc currently, stick PADDR from there into %g5 tlb data.
347 */
348 clr %l0 /* TLB entry walker. */
349 set 0x1fff, %l2 /* Page mask. */
350 rd %pc, %l3
351 andn %l3, %l2, %g2 /* vaddr comparator */
3521:
353 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
354 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
355 nop
356 nop
357 nop
358 andn %g1, %l2, %g1 /* Get vaddr */
359 cmp %g1, %g2
360 be,a,pn %xcc, spitfire_got_tlbentry
361 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
362 cmp %l0, (63 << 3)
363 blu,pt %xcc, 1b
364 add %l0, (1 << 3), %l0
365
366 /* BUG() if we get here... */
367 ta 0x5
368
369spitfire_got_tlbentry:
370 /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
371 nop
372 nop
373 nop
374 and %g1, %g3, %g1 /* Mask to just get paddr bits. */
375 set 0x5fff, %l3 /* Mask offset to get phys base. */
376 andn %g1, %l3, %g1
377
378 /* NOTE: We hold on to %g1 paddr base as we need it below to lock
379 * NOTE: the PROM cif code into the TLB.
380 */
381
382 or %g5, %g1, %g5 /* Or it into TAG being built. */
383
384 clr %l0 /* TLB entry walker. */
385 sethi %hi(KERNBASE), %g3 /* 4M lower limit */
386 sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
387 mov TLB_TAG_ACCESS, %l7
3881:
389 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
390 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
391 nop
392 nop
393 nop
394 andn %g1, %l2, %g1 /* Get vaddr */
395 cmp %g1, %g3
396 blu,pn %xcc, 2f
397 cmp %g1, %g7
398 bgeu,pn %xcc, 2f
399 nop
400 stxa %g0, [%l7] ASI_IMMU
401 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
402 membar #Sync
4032:
404 cmp %l0, (63 << 3)
405 blu,pt %xcc, 1b
406 add %l0, (1 << 3), %l0
407
408 nop; nop; nop
409
410 clr %l0 /* TLB entry walker. */
4111:
412 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
413 ldxa [%l0] ASI_DTLB_TAG_READ, %g1
414 nop
415 nop
416 nop
417 andn %g1, %l2, %g1 /* Get vaddr */
418 cmp %g1, %g3
419 blu,pn %xcc, 2f
420 cmp %g1, %g7
421 bgeu,pn %xcc, 2f
422 nop
423 stxa %g0, [%l7] ASI_DMMU
424 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
425 membar #Sync
4262:
427 cmp %l0, (63 << 3)
428 blu,pt %xcc, 1b
429 add %l0, (1 << 3), %l0
430
431 nop; nop; nop
432
433
434 /* PROM never puts any TLB entries into the MMU with the lock bit
435 * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
436 */
437
438 sethi %hi(KERNBASE), %g3
439 mov (63 << 3), %g7
440 stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
441 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
442 membar #Sync
443 stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
444 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
445 membar #Sync
446 flush %g3
447 membar #Sync
448 sethi %hi(_end), %g3 /* Check for bigkernel case */
449 or %g3, %lo(_end), %g3
450 srl %g3, 23, %g3 /* Check if _end > 8M */
451 brz,pt %g3, 2f
452 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
453 sethi %hi(0x400000), %g3
454 or %g3, %lo(0x400000), %g3
455 add %g5, %g3, %g5 /* New tte data */
456 andn %g5, (_PAGE_G), %g5
457 sethi %hi(KERNBASE+0x400000), %g3
458 or %g3, %lo(KERNBASE+0x400000), %g3
459 mov (62 << 3), %g7
460 stxa %g3, [%l7] ASI_DMMU
461 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
462 membar #Sync
463 stxa %g3, [%l7] ASI_IMMU
464 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
465 membar #Sync
466 flush %g3
467 membar #Sync
468 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
4692: ba,pt %xcc, 1f
470 nop
4711:
472 set sun4u_init, %g2
473 jmpl %g2 + %g0, %g0
474 nop
475
476sun4u_init:
! NOTE(review): reached from both the cheetah and spitfire boot paths;
! at this point %g3 still holds the KERNBASE tag value set up there.
477 /* Set ctx 0 */
478 mov PRIMARY_CONTEXT, %g7
479 stxa %g0, [%g7] ASI_DMMU ! primary context <- 0
480 membar #Sync
481
482 mov SECONDARY_CONTEXT, %g7
483 stxa %g0, [%g7] ASI_DMMU ! secondary context <- 0
484 membar #Sync
485
486 /* We are now safely (we hope) in Nucleus context (0), rewrite
487 * the KERNBASE TTE's so they no longer have the global bit set.
488 * Don't forget to setup TAG_ACCESS first 8-)
489 */
490 mov TLB_TAG_ACCESS, %g2
491 stxa %g3, [%g2] ASI_IMMU ! TAG_ACCESS <- KERNBASE (insn MMU)
492 stxa %g3, [%g2] ASI_DMMU ! TAG_ACCESS <- KERNBASE (data MMU)
493 membar #Sync
494
495 BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
496
497 ba,pt %xcc, spitfire_tlb_fixup ! not cheetah-family: spitfire path
498 nop
499
500cheetah_tlb_fixup:
! Strip _PAGE_G from the locked KERNBASE TTE at entry (0<<16)|(15<<3),
! record the runtime TLB type, and apply cheetah/cheetah+ code patches.
501 set (0 << 16) | (15 << 3), %g7
502 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0 ! first read discarded into %g0 (NOTE(review): presumably a h/w access quirk -- confirm)
503 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
504 andn %g1, (_PAGE_G), %g1 ! drop global bit
505 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
506 membar #Sync
507
508 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0 ! discarded read, as above
509 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
510 andn %g1, (_PAGE_G), %g1
511 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
512 membar #Sync
513
514 /* Kill instruction prefetch queues. */
515 flush %g3
516 membar #Sync
517
518 mov 2, %g2 /* Set TLB type to cheetah+. */
519 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
520
521 mov 1, %g2 /* Set TLB type to cheetah. */
522
5231: sethi %hi(tlb_type), %g1
524 stw %g2, [%g1 + %lo(tlb_type)] ! publish tlb_type for C code
525
526 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
527 ba,pt %xcc, 2f
528 nop
529
5301: /* Patch context register writes to support nucleus page
531 * size correctly.
532 */
533 call cheetah_plus_patch_etrap
534 nop
535 call cheetah_plus_patch_rtrap
536 nop
537 call cheetah_plus_patch_fpdis
538 nop
539 call cheetah_plus_patch_winfixup
540 nop
541
542
5432: /* Patch copy/page operations to cheetah optimized versions. */
544 call cheetah_patch_copyops
545 nop
546 call cheetah_patch_cachetlbops
547 nop
548
549 ba,pt %xcc, tlb_fixup_done
550 nop
551
552spitfire_tlb_fixup:
! Spitfire locked its KERNBASE TTE at entry 63 (see
! spitfire_create_mappings above); strip _PAGE_G from it and record
! tlb_type = 0 (spitfire).
553 mov (63 << 3), %g7
554 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
555 andn %g1, (_PAGE_G), %g1 ! drop global bit
556 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
557 membar #Sync
558
559 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
560 andn %g1, (_PAGE_G), %g1
561 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
562 membar #Sync
563
564 /* Kill instruction prefetch queues. */
565 flush %g3
566 membar #Sync
567
568 /* Set TLB type to spitfire. */
569 mov 0, %g2
570 sethi %hi(tlb_type), %g1
571 stw %g2, [%g1 + %lo(tlb_type)] ! publish tlb_type for C code
572
573tlb_fixup_done:
574 sethi %hi(init_thread_union), %g6
575 or %g6, %lo(init_thread_union), %g6
576 ldx [%g6 + TI_TASK], %g4
577 mov %sp, %l6
578 mov %o4, %l7
579
580#if 0 /* We don't do it like this anymore, but for historical hack value
581 * I leave this snippet here to show how crazy we can be sometimes. 8-)
582 */
583
584 /* Setup "Linux Current Register", thanks Sun 8-) */
585 wr %g0, 0x1, %pcr
586
587 /* Blackbird errata workaround. See commentary in
588 * smp.c:smp_percpu_timer_interrupt() for more
589 * information.
590 */
591 ba,pt %xcc, 99f
592 nop
593 .align 64
59499: wr %g6, %g0, %pic
595 rd %pic, %g0
596#endif
597
598 wr %g0, ASI_P, %asi
599 mov 1, %g1
600 sllx %g1, THREAD_SHIFT, %g1
601 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
602 add %g6, %g1, %sp
603 mov 0, %fp
604
605 /* Set per-cpu pointer initially to zero, this makes
606 * the boot-cpu use the in-kernel-image per-cpu areas
607 * before setup_per_cpu_area() is invoked.
608 */
609 clr %g5
610
611 wrpr %g0, 0, %wstate
612 wrpr %g0, 0x0, %tl
613
614 /* Clear the bss */
615 sethi %hi(__bss_start), %o0
616 or %o0, %lo(__bss_start), %o0
617 sethi %hi(_end), %o1
618 or %o1, %lo(_end), %o1
619 call __bzero
620 sub %o1, %o0, %o1
621
622 mov %l6, %o1 ! OpenPROM stack
623 call prom_init
624 mov %l7, %o0 ! OpenPROM cif handler
625
626 /* Off we go.... */
627 call start_kernel
628 nop
629 /* Not reached... */
630
631/* IMPORTANT NOTE: Whenever making changes here, check
632 * trampoline.S as well. -jj */
633 .globl setup_tba
634setup_tba: /* i0 = is_starfire */
635 save %sp, -160, %sp
636
637 rdpr %tba, %g7
638 sethi %hi(prom_tba), %o1
639 or %o1, %lo(prom_tba), %o1
640 stx %g7, [%o1]
641
642 /* Setup "Linux" globals 8-) */
643 rdpr %pstate, %o1
644 mov %g6, %o2
645 wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
646 sethi %hi(sparc64_ttable_tl0), %g1
647 wrpr %g1, %tba
648 mov %o2, %g6
649
650 /* Set up MMU globals */
651 wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
652
653 /* Set fixed globals used by dTLB miss handler. */
654#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
655#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
656
657 mov TSB_REG, %g1
658 stxa %g0, [%g1] ASI_DMMU
659 membar #Sync
660 stxa %g0, [%g1] ASI_IMMU
661 membar #Sync
662 mov TLB_SFSR, %g1
663 sethi %uhi(KERN_HIGHBITS), %g2
664 or %g2, %ulo(KERN_HIGHBITS), %g2
665 sllx %g2, 32, %g2
666 or %g2, KERN_LOWBITS, %g2
667
668 BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
669 ba,pt %xcc, spitfire_vpte_base
670 nop
671
672cheetah_vpte_base:
673 sethi %uhi(VPTE_BASE_CHEETAH), %g3
674 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
675 ba,pt %xcc, 2f
676 sllx %g3, 32, %g3
677
678spitfire_vpte_base:
679 sethi %uhi(VPTE_BASE_SPITFIRE), %g3
680 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
681 sllx %g3, 32, %g3
682
6832:
684 clr %g7
685#undef KERN_HIGHBITS
686#undef KERN_LOWBITS
687
688 /* Kill PROM timer */
689 sethi %hi(0x80000000), %o2
690 sllx %o2, 32, %o2
691 wr %o2, 0, %tick_cmpr
692
693 BRANCH_IF_ANY_CHEETAH(o2,o3,1f)
694
695 ba,pt %xcc, 2f
696 nop
697
698 /* Disable STICK_INT interrupts. */
6991:
700 sethi %hi(0x80000000), %o2
701 sllx %o2, 32, %o2
702 wr %o2, %asr25
703
704 /* Ok, we're done setting up all the state our trap mechanism needs,
705 * now get back into normal globals and let the PROM know what is up.
706 */
7072:
708 wrpr %g0, %g0, %wstate
709 wrpr %o1, PSTATE_IE, %pstate
710
711 call init_irqwork_curcpu
712 nop
713
714 call prom_set_trap_table
715 sethi %hi(sparc64_ttable_tl0), %o0
716
717 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
718 ba,pt %xcc, 2f
719 nop
720
7211: /* Start using proper page size encodings in ctx register. */
722 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
723 mov PRIMARY_CONTEXT, %g1
724 sllx %g3, 32, %g3
725 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
726 or %g3, %g2, %g3
727 stxa %g3, [%g1] ASI_DMMU
728 membar #Sync
729
7302:
731 rdpr %pstate, %o1
732 or %o1, PSTATE_IE, %o1
733 wrpr %o1, 0, %pstate
734
735 ret
736 restore
737
738/*
739 * The following skips make sure the trap table in ttable.S is aligned
740 * on a 32K boundary as required by the v9 specs for TBA register.
741 */
742sparc64_boot_end:
743 .skip 0x2000 + _start - sparc64_boot_end
744bootup_user_stack_end:
745 .skip 0x2000
746
747#ifdef CONFIG_SBUS
748/* This is just a hack to fool make depend config.h discovering
749 strategy: As the .S files below need config.h, but
750 make depend does not find it for them, we include config.h
751 in head.S */
752#endif
753
754! 0x0000000000408000
755
756#include "ttable.S"
757#include "systbls.S"
758
759 .align 1024
760 .globl swapper_pg_dir
761swapper_pg_dir:
762 .word 0
763
764#include "etrap.S"
765#include "rtrap.S"
766#include "winfixup.S"
767#include "entry.S"
768
769 /* This is just anal retentiveness on my part... */
770 .align 16384
771
772 .data
773 .align 8
774 .globl prom_tba, tlb_type
775prom_tba: .xword 0
776tlb_type: .word 0 /* Must NOT end up in BSS */
777 .section ".fixup",#alloc,#execinstr
778 .globl __ret_efault
779__ret_efault:
780 ret
781 restore %g0, -EFAULT, %o0
782
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
new file mode 100644
index 000000000000..3b6789e09a72
--- /dev/null
+++ b/arch/sparc64/kernel/idprom.c
@@ -0,0 +1,49 @@
1/* $Id: idprom.c,v 1.3 1999/08/31 06:54:53 davem Exp $
2 * idprom.c: Routines to load the idprom into kernel addresses and
3 * interpret the data contained within.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/init.h>
11
12#include <asm/oplib.h>
13#include <asm/idprom.h>
14
15struct idprom *idprom;
16static struct idprom idprom_buffer;
17
18/* Calculate the IDPROM checksum (xor of the data bytes). */
19static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
20{
21 unsigned char cksum, i, *ptr = (unsigned char *)idprom;
22
23 for (i = cksum = 0; i <= 0x0E; i++)
24 cksum ^= *ptr++;
25
26 return cksum;
27}
28
29/* Create a local IDPROM copy and verify integrity. */
30void __init idprom_init(void)
31{
32 prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
33
34 idprom = &idprom_buffer;
35
36 if (idprom->id_format != 0x01) {
37 prom_printf("IDPROM: Warning, unknown format type!\n");
38 }
39
40 if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
41 prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
42 idprom->id_cksum, calc_idprom_cksum(idprom));
43 }
44
45 printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
46 idprom->id_ethaddr[0], idprom->id_ethaddr[1],
47 idprom->id_ethaddr[2], idprom->id_ethaddr[3],
48 idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
49}
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
new file mode 100644
index 000000000000..329b38fa5c89
--- /dev/null
+++ b/arch/sparc64/kernel/init_task.c
@@ -0,0 +1,35 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init_task.h>
5#include <linux/mqueue.h>
6
7#include <asm/pgtable.h>
8#include <asm/uaccess.h>
9#include <asm/processor.h>
10
11static struct fs_struct init_fs = INIT_FS;
12static struct files_struct init_files = INIT_FILES;
13static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15struct mm_struct init_mm = INIT_MM(init_mm);
16
17EXPORT_SYMBOL(init_mm);
18
19/* .text section in head.S is aligned at 2 page boundary and this gets linked
20 * right after that so that the init_thread_union is aligned properly as well.
21 * We really don't need this special alignment like the Intel does, but
22 * I do it anyway for completeness.
23 */
24__asm__ (".text");
25union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
26
27/*
28 * Initial task structure.
29 *
30 * All other task structs will be allocated on slabs in fork.c
31 */
32EXPORT_SYMBOL(init_task);
33
34__asm__(".data");
35struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
new file mode 100644
index 000000000000..43fc3173d480
--- /dev/null
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -0,0 +1,597 @@
1/* $Id: ioctl32.c,v 1.136 2002/01/14 09:49:52 davem Exp $
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
7 *
8 * These routines maintain argument size conversion between 32bit and 64bit
9 * ioctls.
10 */
11
12#define INCLUDES
13#include "compat_ioctl.c"
14#include <linux/ncp_fs.h>
15#include <linux/syscalls.h>
16#include <asm/fbio.h>
17#include <asm/kbio.h>
18#include <asm/vuid_event.h>
19#include <asm/envctrl.h>
20#include <asm/display7seg.h>
21#include <asm/openpromio.h>
22#include <asm/audioio.h>
23#include <asm/watchdog.h>
24
25/* Use this to get at 32-bit user passed pointers.
26 * See sys_sparc32.c for description about it.
27 */
28#define A(__x) compat_ptr(__x)
29
30static __inline__ void *alloc_user_space(long len)
31{
32 struct pt_regs *regs = current_thread_info()->kregs;
33 unsigned long usp = regs->u_regs[UREG_I6];
34
35 if (!(test_thread_flag(TIF_32BIT)))
36 usp += STACK_BIAS;
37
38 return (void *) (usp - len);
39}
40
41#define CODE
42#include "compat_ioctl.c"
43
44struct fbcmap32 {
45 int index; /* first element (0 origin) */
46 int count;
47 u32 red;
48 u32 green;
49 u32 blue;
50};
51
52#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
53#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
54
55static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
56{
57 struct fbcmap32 __user *argp = (void __user *)arg;
58 struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
59 u32 addr;
60 int ret;
61
62 ret = copy_in_user(p, argp, 2 * sizeof(int));
63 ret |= get_user(addr, &argp->red);
64 ret |= put_user(compat_ptr(addr), &p->red);
65 ret |= get_user(addr, &argp->green);
66 ret |= put_user(compat_ptr(addr), &p->green);
67 ret |= get_user(addr, &argp->blue);
68 ret |= put_user(compat_ptr(addr), &p->blue);
69 if (ret)
70 return -EFAULT;
71 return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
72}
73
74struct fbcursor32 {
75 short set; /* what to set, choose from the list above */
76 short enable; /* cursor on/off */
77 struct fbcurpos pos; /* cursor position */
78 struct fbcurpos hot; /* cursor hot spot */
79 struct fbcmap32 cmap; /* color map info */
80 struct fbcurpos size; /* cursor bit map size */
81 u32 image; /* cursor image bits */
82 u32 mask; /* cursor mask bits */
83};
84
85#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
86#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
87
88static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
89{
90 struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
91 struct fbcursor32 __user *argp = (void __user *)arg;
92 compat_uptr_t addr;
93 int ret;
94
95 ret = copy_in_user(p, argp,
96 2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
97 ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
98 ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
99 ret |= get_user(addr, &argp->cmap.red);
100 ret |= put_user(compat_ptr(addr), &p->cmap.red);
101 ret |= get_user(addr, &argp->cmap.green);
102 ret |= put_user(compat_ptr(addr), &p->cmap.green);
103 ret |= get_user(addr, &argp->cmap.blue);
104 ret |= put_user(compat_ptr(addr), &p->cmap.blue);
105 ret |= get_user(addr, &argp->mask);
106 ret |= put_user(compat_ptr(addr), &p->mask);
107 ret |= get_user(addr, &argp->image);
108 ret |= put_user(compat_ptr(addr), &p->image);
109 if (ret)
110 return -EFAULT;
111 return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
112}
113
114#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
115/* This really belongs in include/linux/drm.h -DaveM */
116#include "../../../drivers/char/drm/drm.h"
117
118typedef struct drm32_version {
119 int version_major; /* Major version */
120 int version_minor; /* Minor version */
121 int version_patchlevel;/* Patch level */
122 int name_len; /* Length of name buffer */
123 u32 name; /* Name of driver */
124 int date_len; /* Length of date buffer */
125 u32 date; /* User-space buffer to hold date */
126 int desc_len; /* Length of desc buffer */
127 u32 desc; /* User-space buffer to hold desc */
128} drm32_version_t;
129#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
130
/* 32-bit compat handler for DRM_IOCTL_VERSION: widen the three 32-bit
 * string-buffer pointers into a native drm_version_t on the compat
 * user stack, forward the ioctl, then copy the version ints and the
 * kernel-updated buffer lengths back to the 32-bit structure.
 */
131static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
132{
133 drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
134 drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
135 compat_uptr_t addr;
136 int n;
137 int ret;
138
	/* Zero the three leading version ints, then copy each length and
	 * widen each buffer pointer; any single failure is -EFAULT.
	 */
139 if (clear_user(p, 3 * sizeof(int)) ||
140 get_user(n, &uversion->name_len) ||
141 put_user(n, &p->name_len) ||
142 get_user(addr, &uversion->name) ||
143 put_user(compat_ptr(addr), &p->name) ||
144 get_user(n, &uversion->date_len) ||
145 put_user(n, &p->date_len) ||
146 get_user(addr, &uversion->date) ||
147 put_user(compat_ptr(addr), &p->date) ||
148 get_user(n, &uversion->desc_len) ||
149 put_user(n, &p->desc_len) ||
150 get_user(addr, &uversion->desc) ||
151 put_user(compat_ptr(addr), &p->desc))
152 return -EFAULT;
153
154 ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
155 if (ret)
156 return ret;
157
	/* Copy the version ints and updated lengths back out; the buffer
	 * contents were written directly by the 64-bit ioctl.
	 */
158 if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
159 get_user(n, &p->name_len) ||
160 put_user(n, &uversion->name_len) ||
161 get_user(n, &p->date_len) ||
162 put_user(n, &uversion->date_len) ||
163 get_user(n, &p->desc_len) ||
164 put_user(n, &uversion->desc_len))
165 return -EFAULT;
166
167 return 0;
168}
169
170typedef struct drm32_unique {
171 int unique_len; /* Length of unique */
172 u32 unique; /* Unique name for driver instantiation */
173} drm32_unique_t;
174#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
175#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
176
177static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
178{
179 drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
180 drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
181 compat_uptr_t addr;
182 int n;
183 int ret;
184
185 if (get_user(n, &uarg->unique_len) ||
186 put_user(n, &p->unique_len) ||
187 get_user(addr, &uarg->unique) ||
188 put_user(compat_ptr(addr), &p->unique))
189 return -EFAULT;
190
191 if (cmd == DRM32_IOCTL_GET_UNIQUE)
192 ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
193 else
194 ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
195
196 if (ret)
197 return ret;
198
199 if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
200 return -EFAULT;
201
202 return 0;
203}
204
205typedef struct drm32_map {
206 u32 offset; /* Requested physical address (0 for SAREA)*/
207 u32 size; /* Requested physical size (bytes) */
208 drm_map_type_t type; /* Type of memory to map */
209 drm_map_flags_t flags; /* Flags */
210 u32 handle; /* User-space: "Handle" to pass to mmap */
211 /* Kernel-space: kernel-virtual address */
212 int mtrr; /* MTRR slot used */
213 /* Private data */
214} drm32_map_t;
215#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
216
/* 32-bit compat handler for DRM_IOCTL_ADD_MAP: marshal the 32-bit
 * drm32_map_t into an on-stack kernel drm_map_t, issue the ioctl
 * under set_fs(KERNEL_DS) so the kernel's copy routines accept the
 * kernel-space argument, then narrow the results back out.
 */
217static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
218{
219 drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
220 drm_map_t karg;
221 mm_segment_t old_fs;
222 u32 tmp;
223 int ret;
224
225 ret = get_user(karg.offset, &uarg->offset);
226 ret |= get_user(karg.size, &uarg->size);
227 ret |= get_user(karg.type, &uarg->type);
228 ret |= get_user(karg.flags, &uarg->flags);
229 ret |= get_user(tmp, &uarg->handle);
230 ret |= get_user(karg.mtrr, &uarg->mtrr);
231 if (ret)
232 return -EFAULT;
233
234 karg.handle = (void *) (unsigned long) tmp; /* widen 32-bit handle */
235
	/* KERNEL_DS window must cover exactly the ioctl call; restore
	 * the old segment immediately after.
	 */
236 old_fs = get_fs();
237 set_fs(KERNEL_DS);
238 ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
239 set_fs(old_fs);
240
241 if (!ret) {
242 ret = put_user(karg.offset, &uarg->offset);
243 ret |= put_user(karg.size, &uarg->size);
244 ret |= put_user(karg.type, &uarg->type);
245 ret |= put_user(karg.flags, &uarg->flags);
		/* NOTE(review): narrowing the kernel handle to 32 bits
		 * drops the upper bits -- presumably the handle is a
		 * mmap cookie that fits in 32 bits; confirm.
		 */
246 tmp = (u32) (long)karg.handle;
247 ret |= put_user(tmp, &uarg->handle);
248 ret |= put_user(karg.mtrr, &uarg->mtrr);
249 if (ret)
250 ret = -EFAULT;
251 }
252
253 return ret;
254}
255
/* 32-bit layout of drm_buf_info_t; 'list' is a user pointer stored
 * as a u32.
 */
typedef struct drm32_buf_info {
	int count;	/* Entries in list */
	u32 list;	/* (drm_buf_desc_t *) */
} drm32_buf_info_t;
#define DRM32_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm32_buf_info_t)

/* Compat wrapper for DRM_IOCTL_INFO_BUFS.
 *
 * Builds a native drm_buf_info_t in the compat user-space scratch
 * area (so no set_fs() games are needed), forwards the ioctl, then
 * copies the updated 'count' back to the 32-bit structure.
 * Returns 0, -EFAULT, or the ioctl's error.
 */
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
	drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int n;
	int ret;

	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
		return -EFAULT;

	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
	if (ret)
		return ret;

	/* Propagate the count the driver filled in. */
	if (get_user(n, &p->count) || put_user(n, &uarg->count))
		return -EFAULT;

	return 0;
}
283
/* 32-bit layout of drm_buf_free_t. */
typedef struct drm32_buf_free {
	int count;
	u32 list;	/* (int *) */
} drm32_buf_free_t;
#define DRM32_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm32_buf_free_t)

/* Compat wrapper for DRM_IOCTL_FREE_BUFS: a write-only ioctl, so
 * only the inbound conversion (count plus widened list pointer)
 * is needed.  Returns -EFAULT or the ioctl's result.
 */
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
	drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int n;

	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
		return -EFAULT;

	return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
}
303
304typedef struct drm32_buf_pub {
305 int idx; /* Index into master buflist */
306 int total; /* Buffer size */
307 int used; /* Amount of buffer in use (for DMA) */
308 u32 address; /* Address of buffer (void *) */
309} drm32_buf_pub_t;
310
311typedef struct drm32_buf_map {
312 int count; /* Length of buflist */
313 u32 virtual; /* Mmaped area in user-virtual (void *) */
314 u32 list; /* Buffer information (drm_buf_pub_t *) */
315} drm32_buf_map_t;
316#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
317
318static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
319{
320 drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
321 drm32_buf_pub_t __user *ulist;
322 drm_buf_map_t __user *arg64;
323 drm_buf_pub_t __user *list;
324 int orig_count, ret, i;
325 int n;
326 compat_uptr_t addr;
327
328 if (get_user(orig_count, &uarg->count))
329 return -EFAULT;
330
331 arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
332 (size_t)orig_count * sizeof(drm_buf_pub_t));
333 list = (void __user *)(arg64 + 1);
334
335 if (put_user(orig_count, &arg64->count) ||
336 put_user(list, &arg64->list) ||
337 get_user(addr, &uarg->virtual) ||
338 put_user(compat_ptr(addr), &arg64->virtual) ||
339 get_user(addr, &uarg->list))
340 return -EFAULT;
341
342 ulist = compat_ptr(addr);
343
344 for (i = 0; i < orig_count; i++) {
345 if (get_user(n, &ulist[i].idx) ||
346 put_user(n, &list[i].idx) ||
347 get_user(n, &ulist[i].total) ||
348 put_user(n, &list[i].total) ||
349 get_user(n, &ulist[i].used) ||
350 put_user(n, &list[i].used) ||
351 get_user(addr, &ulist[i].address) ||
352 put_user(compat_ptr(addr), &list[i].address))
353 return -EFAULT;
354 }
355
356 ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
357 if (ret)
358 return ret;
359
360 for (i = 0; i < orig_count; i++) {
361 void __user *p;
362 if (get_user(n, &list[i].idx) ||
363 put_user(n, &ulist[i].idx) ||
364 get_user(n, &list[i].total) ||
365 put_user(n, &ulist[i].total) ||
366 get_user(n, &list[i].used) ||
367 put_user(n, &ulist[i].used) ||
368 get_user(p, &list[i].address) ||
369 put_user((unsigned long)p, &ulist[i].address))
370 return -EFAULT;
371 }
372
373 if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
374 return -EFAULT;
375
376 return 0;
377}
378
/* 32-bit layout of drm_dma_t; the four pointer members are u32. */
typedef struct drm32_dma {
	/* Indices here refer to the offset into
	   buflist in drm_buf_get_t. */
	int context;		/* Context handle */
	int send_count;		/* Number of buffers to send */
	u32 send_indices;	/* List of handles to buffers (int *) */
	u32 send_sizes;		/* Lengths of data to send (int *) */
	drm_dma_flags_t flags;	/* Flags */
	int request_count;	/* Number of buffers requested */
	int request_size;	/* Desired size for buffers */
	u32 request_indices;	/* Buffer information (int *) */
	u32 request_sizes;	/* (int *) */
	int granted_count;	/* Number of buffers granted */
} drm32_dma_t;
#define DRM32_IOCTL_DMA		DRM_IOWR(0x29, drm32_dma_t)

/* RED PEN	The DRM layer blindly dereferences the send/request
 *		index/size arrays even though they are userland
 *		pointers. -DaveM
 */
/* Compat wrapper for DRM_IOCTL_DMA.
 *
 * Rebuilds the structure in the compat scratch area: the four u32
 * pointer members are widened via compat_ptr(), the scalar members
 * are copied verbatim with copy_in_user().  After the ioctl the
 * scalar result fields are copied back; the pointer members are
 * left exactly as the caller supplied them.
 */
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
	drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int ret;

	/* 'context' and 'send_count' are the two leading ints. */
	if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
	    get_user(addr, &uarg->send_indices) ||
	    put_user(compat_ptr(addr), &p->send_indices) ||
	    get_user(addr, &uarg->send_sizes) ||
	    put_user(compat_ptr(addr), &p->send_sizes) ||
	    copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
	    copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
	    get_user(addr, &uarg->request_indices) ||
	    put_user(compat_ptr(addr), &p->request_indices) ||
	    get_user(addr, &uarg->request_sizes) ||
	    put_user(compat_ptr(addr), &p->request_sizes) ||
	    copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
		return -EFAULT;

	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
	if (ret)
		return ret;

	if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
	    copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
	    copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
	    copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
		return -EFAULT;

	return 0;
}
434
/* 32-bit layout of drm_ctx_res_t. */
typedef struct drm32_ctx_res {
	int count;
	u32 contexts;	/* (drm_ctx_t *) */
} drm32_ctx_res_t;
#define DRM32_IOCTL_RES_CTX	DRM_IOWR(0x26, drm32_ctx_res_t)

/* Compat wrapper for DRM_IOCTL_RES_CTX: convert count plus widened
 * 'contexts' pointer in, forward the ioctl, then copy the updated
 * count back out.  Returns 0, -EFAULT, or the ioctl's error.
 */
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
	drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int ret;

	if (copy_in_user(p, uarg, sizeof(int)) ||
	    get_user(addr, &uarg->contexts) ||
	    put_user(compat_ptr(addr), &p->contexts))
		return -EFAULT;

	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
	if (ret)
		return ret;

	if (copy_in_user(uarg, p, sizeof(int)))
		return -EFAULT;

	return 0;
}
462
463#endif
464
/* Signature shared by all 32->64-bit ioctl translation handlers. */
typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);

/* A COMPATIBLE_IOCTL entry needs no translation and is forwarded
 * straight to sys_ioctl(); HANDLE_IOCTL routes the command to a
 * translation handler above.
 */
#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl32_handler_t)(handler), NULL },
#define IOCTL_TABLE_START \
	struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END \
	};

IOCTL_TABLE_START
#include <linux/compat_ioctl.h>
#define DECLARES
#include "compat_ioctl.c"
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
COMPATIBLE_IOCTL(TIOCSLTC)
/* Sun framebuffer ioctls */
COMPATIBLE_IOCTL(FBIOGTYPE)
COMPATIBLE_IOCTL(FBIOSATTR)
COMPATIBLE_IOCTL(FBIOGATTR)
COMPATIBLE_IOCTL(FBIOSVIDEO)
COMPATIBLE_IOCTL(FBIOGVIDEO)
COMPATIBLE_IOCTL(FBIOGCURSOR32)	/* This is not implemented yet. Later it should be converted... */
COMPATIBLE_IOCTL(FBIOSCURPOS)
COMPATIBLE_IOCTL(FBIOGCURPOS)
COMPATIBLE_IOCTL(FBIOGCURMAX)
/* Little k */
COMPATIBLE_IOCTL(KIOCTYPE)
COMPATIBLE_IOCTL(KIOCLAYOUT)
COMPATIBLE_IOCTL(KIOCGTRANS)
COMPATIBLE_IOCTL(KIOCTRANS)
COMPATIBLE_IOCTL(KIOCCMD)
COMPATIBLE_IOCTL(KIOCSDIRECT)
COMPATIBLE_IOCTL(KIOCSLED)
COMPATIBLE_IOCTL(KIOCGLED)
COMPATIBLE_IOCTL(KIOCSRATE)
COMPATIBLE_IOCTL(KIOCGRATE)
COMPATIBLE_IOCTL(VUIDSFORMAT)
COMPATIBLE_IOCTL(VUIDGFORMAT)
/* Little p (/dev/rtc, /dev/envctrl, /dev/d7s) */
COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
COMPATIBLE_IOCTL(D7SIOCWR)
COMPATIBLE_IOCTL(D7SIOCTM)
/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
 * embedded pointers in the arg which we'd need to clean up...
 */
COMPATIBLE_IOCTL(OPROMGETOPT)
COMPATIBLE_IOCTL(OPROMSETOPT)
COMPATIBLE_IOCTL(OPROMNXTOPT)
COMPATIBLE_IOCTL(OPROMSETOPT2)
COMPATIBLE_IOCTL(OPROMNEXT)
COMPATIBLE_IOCTL(OPROMCHILD)
COMPATIBLE_IOCTL(OPROMGETPROP)
COMPATIBLE_IOCTL(OPROMNXTPROP)
COMPATIBLE_IOCTL(OPROMU2P)
COMPATIBLE_IOCTL(OPROMGETCONS)
COMPATIBLE_IOCTL(OPROMGETFBNAME)
COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
COMPATIBLE_IOCTL(OPROMSETCUR)
COMPATIBLE_IOCTL(OPROMPCI2NODE)
COMPATIBLE_IOCTL(OPROMPATH2NODE)
/* Big L */
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
/* Big A */
COMPATIBLE_IOCTL(AUDIO_GETINFO)
COMPATIBLE_IOCTL(AUDIO_SETINFO)
COMPATIBLE_IOCTL(AUDIO_DRAIN)
COMPATIBLE_IOCTL(AUDIO_GETDEV)
COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
COMPATIBLE_IOCTL(AUDIO_FLUSH)
COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
#endif /* DRM */
COMPATIBLE_IOCTL(WIOCSTART)
COMPATIBLE_IOCTL(WIOCSTOP)
COMPATIBLE_IOCTL(WIOCGSTAT)
/* And these ioctls need translation */
/* Note: SIOCRTMSG is gone, so this is safe and the user would have
 * seen just an -EINVAL anyways.
 */
HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
#endif /* DRM */
#if 0
HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_SET, do_rtc_ioctl)
#endif
/* take care of sizeof(sizeof()) breakage */
IOCTL_TABLE_END

int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 000000000000..12c93a3eee26
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,231 @@
1/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
2 * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include "iommu_common.h"
8
9/* You are _strongly_ advised to enable the following debugging code
10 * any time you make changes to the sg code below, run it for a while
11 * with filesystems mounted read-only before buying the farm... -DaveM
12 */
13
14#ifdef VERIFY_SG
15static int verify_lengths(struct scatterlist *sg, int nents, int npages)
16{
17 int sg_len, dma_len;
18 int i, pgcount;
19
20 sg_len = 0;
21 for (i = 0; i < nents; i++)
22 sg_len += sg[i].length;
23
24 dma_len = 0;
25 for (i = 0; i < nents && sg[i].dma_length; i++)
26 dma_len += sg[i].dma_length;
27
28 if (sg_len != dma_len) {
29 printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
30 sg_len, dma_len);
31 return -1;
32 }
33
34 pgcount = 0;
35 for (i = 0; i < nents && sg[i].dma_length; i++) {
36 unsigned long start, end;
37
38 start = sg[i].dma_address;
39 start = start & IO_PAGE_MASK;
40
41 end = sg[i].dma_address + sg[i].dma_length;
42 end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
43
44 pgcount += ((end - start) >> IO_PAGE_SHIFT);
45 }
46
47 if (pgcount != npages) {
48 printk("verify_lengths: Error, page count wrong, "
49 "npages[%d] pgcount[%d]\n",
50 npages, pgcount);
51 return -1;
52 }
53
54 /* This test passes... */
55 return 0;
56}
57
58static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
59{
60 struct scatterlist *sg = *__sg;
61 iopte_t *iopte = *__iopte;
62 u32 dlen = dma_sg->dma_length;
63 u32 daddr;
64 unsigned int sglen;
65 unsigned long sgaddr;
66
67 daddr = dma_sg->dma_address;
68 sglen = sg->length;
69 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
70 while (dlen > 0) {
71 unsigned long paddr;
72
73 /* SG and DMA_SG must begin at the same sub-page boundary. */
74 if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
75 printk("verify_one_map: Wrong start offset "
76 "sg[%08lx] dma[%08x]\n",
77 sgaddr, daddr);
78 nents = -1;
79 goto out;
80 }
81
82 /* Verify the IOPTE points to the right page. */
83 paddr = iopte_val(*iopte) & IOPTE_PAGE;
84 if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
85 printk("verify_one_map: IOPTE[%08lx] maps the "
86 "wrong page, should be [%08lx]\n",
87 iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
88 nents = -1;
89 goto out;
90 }
91
92 /* If this SG crosses a page, adjust to that next page
93 * boundary and loop.
94 */
95 if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
96 unsigned long next_page, diff;
97
98 next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
99 diff = next_page - sgaddr;
100 sgaddr += diff;
101 daddr += diff;
102 sglen -= diff;
103 dlen -= diff;
104 if (dlen > 0)
105 iopte++;
106 continue;
107 }
108
109 /* SG wholly consumed within this page. */
110 daddr += sglen;
111 dlen -= sglen;
112
113 if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
114 iopte++;
115
116 sg++;
117 if (--nents <= 0)
118 break;
119 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
120 sglen = sg->length;
121 }
122 if (dlen < 0) {
123 /* Transfer overrun, big problems. */
124 printk("verify_one_map: Transfer overrun by %d bytes.\n",
125 -dlen);
126 nents = -1;
127 } else {
128 /* Advance to next dma_sg implies that the next iopte will
129 * begin it.
130 */
131 iopte++;
132 }
133
134out:
135 *__sg = sg;
136 *__iopte = iopte;
137 return nents;
138}
139
/* Verify every coalesced DMA entry against the underlying sg list
 * and iopte array via verify_one_map().  Iteration stops at the
 * first DMA entry with a zero dma_length (presumably the
 * end-of-list sentinel -- confirm against prepare_sg's callers).
 * Returns 0 if everything checks out, -1 otherwise.
 */
static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *orig_dma_sg = dma_sg;
	int orig_nents = nents;

	for (;;) {
		/* Consumes sg entries/ioptes; returns remaining nents. */
		nents = verify_one_map(dma_sg, &sg, nents, &iopte);
		if (nents <= 0)
			break;
		dma_sg++;
		if (dma_sg->dma_length == 0)
			break;
	}

	if (nents > 0) {
		printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
		       nents);
		return -1;
	}

	if (nents < 0) {
		printk("verify_maps: Error, messed up mappings, "
		       "at sg %d dma_sg %d\n",
		       (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
		return -1;
	}

	/* This test passes... */
	return 0;
}
171
/* Run both verifiers over a freshly built sg mapping and dump the
 * whole list if either check fails.  Debug-only (VERIFY_SG).
 *
 * NOTE(review): the %016lx specifiers assume dma_address/dma_length
 * are long-sized; if they are u32 in asm/scatterlist.h this is a
 * printk format mismatch -- verify before relying on the dump.
 */
void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages)
{
	if (verify_lengths(sg, nents, npages) < 0 ||
	    verify_maps(sg, nents, iopte) < 0) {
		int i;

		printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
		printk("%016lx.\n", sg->dma_address & IO_PAGE_MASK);

		for (i = 0; i < nents; i++) {
			printk("sg(%d): page_addr(%p) off(%x) length(%x) "
			       "dma_address[%016lx] dma_length[%016lx]\n",
			       i,
			       page_address(sg[i].page), sg[i].offset,
			       sg[i].length,
			       sg[i].dma_address, sg[i].dma_length);
		}
	}

	/* Seems to be ok */
}
194
/* Coalesce a scatterlist for IOMMU mapping.
 *
 * Walks the 'nents' input entries and merges runs that are
 * virtually contiguous (see VCONTIG) into single DMA segments,
 * writing each segment's dma_address/dma_length into the leading
 * entries of the same list.  dma_address is filled in relative to
 * the start of the mapping (io-page index << IO_PAGE_SHIFT plus the
 * in-page offset); presumably the sbus/pci iommu callers rebase it
 * onto the real IOMMU window afterwards -- confirm there.
 *
 * Returns the total number of IO pages the mapping will occupy.
 */
unsigned long prepare_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *dma_sg = sg;
	unsigned long prev;
	u32 dent_addr, dent_len;

	/* First segment starts at the first buffer's offset within
	 * its IO page.
	 */
	prev = (unsigned long) (page_address(sg->page) + sg->offset);
	prev += (unsigned long) (dent_len = sg->length);
	dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
			   & (IO_PAGE_SIZE - 1UL));
	while (--nents) {
		unsigned long addr;

		sg++;
		addr = (unsigned long) (page_address(sg->page) + sg->offset);
		if (! VCONTIG(prev, addr)) {
			/* Discontinuity: close out the current DMA
			 * segment and start a new one on the next IO
			 * page boundary, preserving addr's in-page
			 * offset.
			 */
			dma_sg->dma_address = dent_addr;
			dma_sg->dma_length = dent_len;
			dma_sg++;

			dent_addr = ((dent_addr +
				      dent_len +
				      (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
			dent_addr <<= IO_PAGE_SHIFT;
			dent_addr += addr & (IO_PAGE_SIZE - 1UL);
			dent_len = 0;
		}
		dent_len += sg->length;
		prev = addr + sg->length;
	}
	/* Close out the final segment. */
	dma_sg->dma_address = dent_addr;
	dma_sg->dma_length = dent_len;

	return ((unsigned long) dent_addr +
		(unsigned long) dent_len +
		(IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
new file mode 100644
index 000000000000..ad791014419c
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -0,0 +1,48 @@
1/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
2 * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/scatterlist.h>

/*
 * These give mapping size of each iommu pte/tlb.
 */
#define IO_PAGE_SHIFT			13
#define IO_PAGE_SIZE			(1UL << IO_PAGE_SHIFT)	/* 8K IOMMU pages */
#define IO_PAGE_MASK			(~(IO_PAGE_SIZE-1))
#define IO_PAGE_ALIGN(addr)		(((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK)

#define IO_TSB_ENTRIES			(128*1024)
#define IO_TSB_SIZE			(IO_TSB_ENTRIES * 8)	/* 8 bytes per TSB entry */

/*
 * This is the hardwired shift in the iotlb tag/data parts.
 */
#define IOMMU_PAGE_SHIFT		13

/* You are _strongly_ advised to enable the following debugging code
 * any time you make changes to the sg code below, run it for a while
 * with filesystems mounted read-only before buying the farm... -DaveM
 */
#undef VERIFY_SG	/* define to enable the sglist verifier */

#ifdef VERIFY_SG
extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages);
#endif

/* Two addresses are "virtually contiguous" if and only if:
 * 1) They are equal, or...
 * 2) They are both on a page boundary
 * (the shift discards everything but the low PAGE_SHIFT bits, so
 * the OR is zero exactly when both values are page-aligned)
 */
#define VCONTIG(__X, __Y)	(((__X) == (__Y)) || \
				 (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)

/* Coalesces 'sg' in place; returns the number of IO pages needed. */
extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 000000000000..a38cb5036df0
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,1269 @@
1/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/ptrace.h>
13#include <linux/errno.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/slab.h>
19#include <linux/random.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/atomic.h>
28#include <asm/system.h>
29#include <asm/irq.h>
30#include <asm/sbus.h>
31#include <asm/iommu.h>
32#include <asm/upa.h>
33#include <asm/oplib.h>
34#include <asm/timer.h>
35#include <asm/smp.h>
36#include <asm/starfire.h>
37#include <asm/uaccess.h>
38#include <asm/cache.h>
39#include <asm/cpudata.h>
40
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int irq_worklists[16];	/* one pending-list head per PIL */
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its' accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * Upper 2b (i.e. the top 16 bits) of irqaction->flags holds the ino;
 * 0xdead marks the PIL0 dummy bucket.  irqaction->mask holds the smp
 * affinity information.
 *
 * NOTE: put_ino_in_irqaction expands to multiple statements; only
 * use it as a full statement, never as the body of an unbraced if.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48; \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)
122
/* /proc/interrupts seq_file show method: print one row per
 * registered IRQ with per-CPU counts under SMP, followed by each
 * attached action's name:ino pair.  Rows with no handler are
 * skipped.  Always returns 0.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		/* Nothing registered on this line -> no output. */
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		/* Any further handlers sharing this line. */
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
160
/* Now these are always passed a true fully specified sun4u INO. */

/* Enable delivery of an interrupt source by writing this CPU's
 * interrupt target ID plus IMAP_VALID into the source's IMAP
 * register.  The target ID format depends on the platform: JBUS ID
 * when %ver reads 0x003e0016, Safari AID on other cheetah/
 * cheetah_plus, UPA MID otherwise, or a Starfire translation.
 * A zero imap (e.g. the PIL0 dummy bucket) is silently ignored.
 */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	/* We read per-CPU ID registers below; don't migrate. */
	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}
217
218/* This now gets passed true ino's as well. */
219void disable_irq(unsigned int irq)
220{
221 struct ino_bucket *bucket = __bucket(irq);
222 unsigned long imap;
223
224 imap = bucket->imap;
225 if (imap != 0UL) {
226 u32 tmp;
227
228 /* NOTE: We do not want to futz with the IRQ clear registers
229 * and move the state to IDLE, the SCSI code does call
230 * disable_irq() to assure atomicity in the queue cmd
231 * SCSI adapter driver code. Thus we'd lose interrupts.
232 */
233 tmp = upa_readl(imap);
234 tmp &= ~IMAP_VALID;
235 upa_writel(tmp, imap);
236 }
237}
238
239/* The timer is the one "weird" interrupt which is generated by
240 * the CPU %tick register and not by some normal vectored interrupt
241 * source. To handle this special case, we use this dummy INO bucket.
242 */
243static struct ino_bucket pil0_dummy_bucket = {
244 0, /* irq_chain */
245 0, /* pil */
246 0, /* pending */
247 0, /* flags */
248 0, /* __unused */
249 NULL, /* irq_info */
250 0UL, /* iclr */
251 0UL, /* imap */
252};
253
254unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
255{
256 struct ino_bucket *bucket;
257 int ino;
258
259 if (pil == 0) {
260 if (iclr != 0UL || imap != 0UL) {
261 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
262 iclr, imap);
263 prom_halt();
264 }
265 return __irq(&pil0_dummy_bucket);
266 }
267
268 /* RULE: Both must be specified in all other cases. */
269 if (iclr == 0UL || imap == 0UL) {
270 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
271 pil, inofixup, iclr, imap);
272 prom_halt();
273 }
274
275 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
276 if (ino > NUM_IVECS) {
277 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
278 ino, pil, inofixup, iclr, imap);
279 prom_halt();
280 }
281
282 /* Ok, looks good, set it up. Don't touch the irq_chain or
283 * the pending flag.
284 */
285 bucket = &ivector_table[ino];
286 if ((bucket->flags & IBF_ACTIVE) ||
287 (bucket->irq_info != NULL)) {
288 /* This is a gross fatal error if it happens here. */
289 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
290 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
291 ino, pil, inofixup, iclr, imap);
292 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
293 bucket->pil, bucket->iclr, bucket->imap);
294 prom_printf("IRQ: Cannot continue, halting...\n");
295 prom_halt();
296 }
297 bucket->imap = imap;
298 bucket->iclr = iclr;
299 bucket->pil = pil;
300 bucket->flags = 0;
301
302 bucket->irq_info = NULL;
303
304 return __irq(bucket);
305}
306
/* Push 'bucket' onto this CPU's per-PIL pending work list.
 *
 * Interrupt delivery is disabled across the list update (PSTATE_IE
 * cleared via wrpr) so the vectored-interrupt trap handler, which
 * feeds the same per-cpu lists, cannot run concurrently on this
 * CPU; the old pstate is restored afterwards.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	/* Chain the old list head behind this bucket. */
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
320
321int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
322 unsigned long irqflags, const char *name, void *dev_id)
323{
324 struct irqaction *action, *tmp = NULL;
325 struct ino_bucket *bucket = __bucket(irq);
326 unsigned long flags;
327 int pending = 0;
328
329 if ((bucket != &pil0_dummy_bucket) &&
330 (bucket < &ivector_table[0] ||
331 bucket >= &ivector_table[NUM_IVECS])) {
332 unsigned int *caller;
333
334 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
335 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
336 "from %p, irq %08x.\n", caller, irq);
337 return -EINVAL;
338 }
339 if (!handler)
340 return -EINVAL;
341
342 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
343 /*
344 * This function might sleep, we want to call it first,
345 * outside of the atomic block. In SA_STATIC_ALLOC case,
346 * random driver's kmalloc will fail, but it is safe.
347 * If already initialized, random driver will not reinit.
348 * Yes, this might clear the entropy pool if the wrong
349 * driver is attempted to be loaded, without actually
350 * installing a new handler, but is this really a problem,
351 * only the sysadmin is able to do this.
352 */
353 rand_initialize_irq(irq);
354 }
355
356 spin_lock_irqsave(&irq_action_lock, flags);
357
358 action = *(bucket->pil + irq_action);
359 if (action) {
360 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
361 for (tmp = action; tmp->next; tmp = tmp->next)
362 ;
363 else {
364 spin_unlock_irqrestore(&irq_action_lock, flags);
365 return -EBUSY;
366 }
367 action = NULL; /* Or else! */
368 }
369
370 /* If this is flagged as statically allocated then we use our
371 * private struct which is never freed.
372 */
373 if (irqflags & SA_STATIC_ALLOC) {
374 if (static_irq_count < MAX_STATIC_ALLOC)
375 action = &static_irqaction[static_irq_count++];
376 else
377 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
378 "using kmalloc\n", irq, name);
379 }
380 if (action == NULL)
381 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
382 GFP_ATOMIC);
383
384 if (!action) {
385 spin_unlock_irqrestore(&irq_action_lock, flags);
386 return -ENOMEM;
387 }
388
389 if (bucket == &pil0_dummy_bucket) {
390 bucket->irq_info = action;
391 bucket->flags |= IBF_ACTIVE;
392 } else {
393 if ((bucket->flags & IBF_ACTIVE) != 0) {
394 void *orig = bucket->irq_info;
395 void **vector = NULL;
396
397 if ((bucket->flags & IBF_PCI) == 0) {
398 printk("IRQ: Trying to share non-PCI bucket.\n");
399 goto free_and_ebusy;
400 }
401 if ((bucket->flags & IBF_MULTI) == 0) {
402 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
403 if (vector == NULL)
404 goto free_and_enomem;
405
406 /* We might have slept. */
407 if ((bucket->flags & IBF_MULTI) != 0) {
408 int ent;
409
410 kfree(vector);
411 vector = (void **)bucket->irq_info;
412 for(ent = 0; ent < 4; ent++) {
413 if (vector[ent] == NULL) {
414 vector[ent] = action;
415 break;
416 }
417 }
418 if (ent == 4)
419 goto free_and_ebusy;
420 } else {
421 vector[0] = orig;
422 vector[1] = action;
423 vector[2] = NULL;
424 vector[3] = NULL;
425 bucket->irq_info = vector;
426 bucket->flags |= IBF_MULTI;
427 }
428 } else {
429 int ent;
430
431 vector = (void **)orig;
432 for (ent = 0; ent < 4; ent++) {
433 if (vector[ent] == NULL) {
434 vector[ent] = action;
435 break;
436 }
437 }
438 if (ent == 4)
439 goto free_and_ebusy;
440 }
441 } else {
442 bucket->irq_info = action;
443 bucket->flags |= IBF_ACTIVE;
444 }
445 pending = bucket->pending;
446 if (pending)
447 bucket->pending = 0;
448 }
449
450 action->handler = handler;
451 action->flags = irqflags;
452 action->name = name;
453 action->next = NULL;
454 action->dev_id = dev_id;
455 put_ino_in_irqaction(action, irq);
456 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
457
458 if (tmp)
459 tmp->next = action;
460 else
461 *(bucket->pil + irq_action) = action;
462
463 enable_irq(irq);
464
465 /* We ate the IVEC already, this makes sure it does not get lost. */
466 if (pending) {
467 atomic_bucket_insert(bucket);
468 set_softint(1 << bucket->pil);
469 }
470 spin_unlock_irqrestore(&irq_action_lock, flags);
471 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
472 register_irq_proc(__irq_ino(irq));
473
474#ifdef CONFIG_SMP
475 distribute_irqs();
476#endif
477 return 0;
478
479free_and_ebusy:
480 kfree(action);
481 spin_unlock_irqrestore(&irq_action_lock, flags);
482 return -EBUSY;
483
484free_and_enomem:
485 kfree(action);
486 spin_unlock_irqrestore(&irq_action_lock, flags);
487 return -ENOMEM;
488}
489
490EXPORT_SYMBOL(request_irq);
491
492void free_irq(unsigned int irq, void *dev_id)
493{
494 struct irqaction *action;
495 struct irqaction *tmp = NULL;
496 unsigned long flags;
497 struct ino_bucket *bucket = __bucket(irq), *bp;
498
499 if ((bucket != &pil0_dummy_bucket) &&
500 (bucket < &ivector_table[0] ||
501 bucket >= &ivector_table[NUM_IVECS])) {
502 unsigned int *caller;
503
504 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
505 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
506 "from %p, irq %08x.\n", caller, irq);
507 return;
508 }
509
510 spin_lock_irqsave(&irq_action_lock, flags);
511
512 action = *(bucket->pil + irq_action);
513 if (!action->handler) {
514 printk("Freeing free IRQ %d\n", bucket->pil);
515 return;
516 }
517 if (dev_id) {
518 for ( ; action; action = action->next) {
519 if (action->dev_id == dev_id)
520 break;
521 tmp = action;
522 }
523 if (!action) {
524 printk("Trying to free free shared IRQ %d\n", bucket->pil);
525 spin_unlock_irqrestore(&irq_action_lock, flags);
526 return;
527 }
528 } else if (action->flags & SA_SHIRQ) {
529 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
530 spin_unlock_irqrestore(&irq_action_lock, flags);
531 return;
532 }
533
534 if (action->flags & SA_STATIC_ALLOC) {
535 printk("Attempt to free statically allocated IRQ %d (%s)\n",
536 bucket->pil, action->name);
537 spin_unlock_irqrestore(&irq_action_lock, flags);
538 return;
539 }
540
541 if (action && tmp)
542 tmp->next = action->next;
543 else
544 *(bucket->pil + irq_action) = action->next;
545
546 spin_unlock_irqrestore(&irq_action_lock, flags);
547
548 synchronize_irq(irq);
549
550 spin_lock_irqsave(&irq_action_lock, flags);
551
552 if (bucket != &pil0_dummy_bucket) {
553 unsigned long imap = bucket->imap;
554 void **vector, *orig;
555 int ent;
556
557 orig = bucket->irq_info;
558 vector = (void **)orig;
559
560 if ((bucket->flags & IBF_MULTI) != 0) {
561 int other = 0;
562 void *orphan = NULL;
563 for (ent = 0; ent < 4; ent++) {
564 if (vector[ent] == action)
565 vector[ent] = NULL;
566 else if (vector[ent] != NULL) {
567 orphan = vector[ent];
568 other++;
569 }
570 }
571
572 /* Only free when no other shared irq
573 * uses this bucket.
574 */
575 if (other) {
576 if (other == 1) {
577 /* Convert back to non-shared bucket. */
578 bucket->irq_info = orphan;
579 bucket->flags &= ~(IBF_MULTI);
580 kfree(vector);
581 }
582 goto out;
583 }
584 } else {
585 bucket->irq_info = NULL;
586 }
587
588 /* This unique interrupt source is now inactive. */
589 bucket->flags &= ~IBF_ACTIVE;
590
591 /* See if any other buckets share this bucket's IMAP
592 * and are still active.
593 */
594 for (ent = 0; ent < NUM_IVECS; ent++) {
595 bp = &ivector_table[ent];
596 if (bp != bucket &&
597 bp->imap == imap &&
598 (bp->flags & IBF_ACTIVE) != 0)
599 break;
600 }
601
602 /* Only disable when no other sub-irq levels of
603 * the same IMAP are active.
604 */
605 if (ent == NUM_IVECS)
606 disable_irq(irq);
607 }
608
609out:
610 kfree(action);
611 spin_unlock_irqrestore(&irq_action_lock, flags);
612}
613
614EXPORT_SYMBOL(free_irq);
615
#ifdef CONFIG_SMP
/* Spin until no cpu is currently running the handler chain for this
 * IRQ cookie.  handler_irq() sets IBF_INPROGRESS around handler
 * invocation; free_irq() relies on this before tearing state down.
 */
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with a INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
648
/* Interrupt vector arrived for a bucket with no active handler.
 * Record it in bucket->pending; request_irq() replays it once a
 * handler is installed.
 */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register.
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
669
/* Tune this... */
#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP

/* Heuristically retarget this interrupt's IMAP to a neighbouring cpu.
 * Called from handler_irq() after servicing, at most once per trap.
 */
static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask;
	unsigned int buddy, ticks;

	/* An empty stored affinity mask means "any online cpu". */
	cpu_mask = get_smpaff_in_irqaction(ap);
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			/* No allowed cpu found at all: drop the stored
			 * affinity and give up this round.
			 */
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
			goto out;
		}
	}

	if (buddy == cpu)
		goto out;

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}

#endif
738
/* Top-level PIL interrupt dispatcher.  'irq' is the PIL level; the
 * per-cpu irq_work slot holds a chain of ino_buckets whose vectors
 * arrived at this level.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	/* Forward at most one interrupt per trap to a buddy cpu. */
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;

		/* Unlink the bucket before running handlers; it may be
		 * re-queued by a new vector while we service it.
		 */
		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		/* Seen by synchronize_irq() spinning on this flag. */
		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				int ret;

				ret = ap->handler(__irq(bp), ap->dev_id, regs);
				if (ret == IRQ_HANDLED)
					random |= ap->flags;
			} else {
				/* Shared PCI bucket: up to 4 actions. */
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						int ret;

						ret = ap->handler(__irq(bp),
								  ap->dev_id,
								  regs);
						if (ret == IRQ_HANDLED)
							random |= ap->flags;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				/* Re-arm this interrupt source. */
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}
835
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

/* Fast-path floppy interrupt entry (installed via request_fast_irq).
 * Mirrors the handler_irq() bookkeeping for a single unshared bucket:
 * clear the work slot, mark in-progress, run the handler, re-arm ICLR.
 */
void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	bucket->flags |= IBF_INPROGRESS;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	bucket->flags &= ~IBF_INPROGRESS;

	irq_exit();
}
#endif
861
/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)

/* Patch the TL0 trap table entry for 'cpu_irq' to branch straight to
 * 'handler' (a "ba" plus delay-slot nop), then flush the instruction
 * so the cpu sees the new code.  0x820 is the byte offset of the
 * first interrupt-level vector; each table entry is 32 bytes (<< 5).
 */
static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
886
/* Register a "fast" interrupt: the trap table entry is patched to
 * branch directly to 'handler', bypassing handler_irq() dispatch.
 * Fast IRQs can never be shared, and skip the entropy and SMP
 * affinity support of the normal request_irq() path.
 */
int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	/* PIL 0 has no trap table slot; level 14 carries the tick
	 * interrupt (see handler_irq).  Neither may be taken over.
	 */
	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	/* Fast IRQs are exclusive: any existing action is a conflict. */
	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	/* Patch the trap table entry for this PIL. */
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}
968
/* IRQ autoprobing is a no-op on the Sparc; this stub exists only
 * because drivers and modules expect the exported symbol.
 */
unsigned long probe_irq_on(void)
{
	/* Report "no interrupts observed". */
	return 0UL;
}
976
977EXPORT_SYMBOL(probe_irq_on);
978
/* IRQ autoprobe teardown stub; always reports "no irq found". */
int probe_irq_off(unsigned long mask)
{
	(void) mask;	/* unused on sparc64 */
	return 0;
}
983
984EXPORT_SYMBOL(probe_irq_off);
985
986#ifdef CONFIG_SMP
987static int retarget_one_irq(struct irqaction *p, int goal_cpu)
988{
989 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
990 unsigned long imap = bucket->imap;
991 unsigned int tid;
992
993 while (!cpu_online(goal_cpu)) {
994 if (++goal_cpu >= NR_CPUS)
995 goal_cpu = 0;
996 }
997
998 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
999 tid = goal_cpu << 26;
1000 tid &= IMAP_AID_SAFARI;
1001 } else if (this_is_starfire == 0) {
1002 tid = goal_cpu << 26;
1003 tid &= IMAP_TID_UPA;
1004 } else {
1005 tid = (starfire_translate(imap, goal_cpu) << 26);
1006 tid &= IMAP_TID_UPA;
1007 }
1008 upa_writel(tid | IMAP_VALID, imap);
1009
1010 while (!cpu_online(goal_cpu)) {
1011 if (++goal_cpu >= NR_CPUS)
1012 goal_cpu = 0;
1013 }
1014
1015 return goal_cpu;
1016}
1017
1018/* Called from request_irq. */
1019static void distribute_irqs(void)
1020{
1021 unsigned long flags;
1022 int cpu, level;
1023
1024 spin_lock_irqsave(&irq_action_lock, flags);
1025 cpu = 0;
1026
1027 /*
1028 * Skip the timer at [0], and very rare error/power intrs at [15].
1029 * Also level [12], it causes problems on Ex000 systems.
1030 */
1031 for (level = 1; level < NR_IRQS; level++) {
1032 struct irqaction *p = irq_action[level];
1033 if (level == 12) continue;
1034 while(p) {
1035 cpu = retarget_one_irq(p, cpu);
1036 p = p->next;
1037 }
1038 }
1039 spin_unlock_irqrestore(&irq_action_lock, flags);
1040}
1041#endif
1042
1043
1044struct sun5_timer *prom_timers;
1045static u64 prom_limit0, prom_limit1;
1046
1047static void map_prom_timers(void)
1048{
1049 unsigned int addr[3];
1050 int tnode, err;
1051
1052 /* PROM timer node hangs out in the top level of device siblings... */
1053 tnode = prom_finddevice("/counter-timer");
1054
1055 /* Assume if node is not present, PROM uses different tick mechanism
1056 * which we should not care about.
1057 */
1058 if (tnode == 0 || tnode == -1) {
1059 prom_timers = (struct sun5_timer *) 0;
1060 return;
1061 }
1062
1063 /* If PROM is really using this, it must be mapped by him. */
1064 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
1065 if (err == -1) {
1066 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1067 prom_timers = (struct sun5_timer *) 0;
1068 return;
1069 }
1070 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1071}
1072
/* Silence the PROM's counter-timer and consume any interrupt packet
 * it has already sent.  The limit values are saved so that
 * enable_prom_timer() can restore them later.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
" mov 0x40, %%g2\n"
" ldxa [%%g0] %0, %%g1\n"
" ldxa [%%g2] %1, %%g1\n"
" stxa %%g0, [%%g0] %0\n"
" membar #Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
1099
/* Restore the PROM counter-timer limits saved by kill_prom_timer()
 * and restart both counters from zero.
 */
void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
1111
/* Per-cpu setup: zero this cpu's irq_work area and point %g6 of the
 * interrupt-global register set at it.  Must run with interrupts
 * (PSTATE_IE) disabled -- verified below.
 */
void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals: switch to the interrupt-global register
	 * set (PSTATE_IG), load workp into %g6 there, then switch back.
	 */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr %%pstate, %0\n\t"
	"wrpr %0, %1, %%pstate\n\t"
	"mov %2, %%g6\n\t"
	"wrpr %0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}
1142
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 *
	 * The asm below sets PSTATE_IE, enabling interrupts.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
1168
1169static struct proc_dir_entry * root_irq_dir;
1170static struct proc_dir_entry * irq_dir [NUM_IVECS];
1171
1172#ifdef CONFIG_SMP
1173
1174static int irq_affinity_read_proc (char *page, char **start, off_t off,
1175 int count, int *eof, void *data)
1176{
1177 struct ino_bucket *bp = ivector_table + (long)data;
1178 struct irqaction *ap = bp->irq_info;
1179 cpumask_t mask;
1180 int len;
1181
1182 mask = get_smpaff_in_irqaction(ap);
1183 if (cpus_empty(mask))
1184 mask = cpu_online_map;
1185
1186 len = cpumask_scnprintf(page, count, mask);
1187 if (count - len < 2)
1188 return -EINVAL;
1189 len += sprintf(page + len, "\n");
1190 return len;
1191}
1192
/* Store the user-requested hw-cpu affinity mask in the bucket's
 * irqaction.  No IMAP register write happens here; see below.
 */
static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}
1206
1207static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
1208 unsigned long count, void *data)
1209{
1210 int irq = (long) data, full_count = count, err;
1211 cpumask_t new_value;
1212
1213 err = cpumask_parse(buffer, count, new_value);
1214
1215 /*
1216 * Do not allow disabling IRQs completely - it's a too easy
1217 * way to make the system unusable accidentally :-) At least
1218 * one online CPU still has to be targeted.
1219 */
1220 cpus_and(new_value, new_value, cpu_online_map);
1221 if (cpus_empty(new_value))
1222 return -EINVAL;
1223
1224 set_intr_affinity(irq, new_value);
1225
1226 return full_count;
1227}
1228
1229#endif
1230
1231#define MAX_NAMELEN 10
1232
1233static void register_irq_proc (unsigned int irq)
1234{
1235 char name [MAX_NAMELEN];
1236
1237 if (!root_irq_dir || irq_dir[irq])
1238 return;
1239
1240 memset(name, 0, MAX_NAMELEN);
1241 sprintf(name, "%x", irq);
1242
1243 /* create /proc/irq/1234 */
1244 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1245
1246#ifdef CONFIG_SMP
1247 /* XXX SMP affinity not supported on starfire yet. */
1248 if (this_is_starfire == 0) {
1249 struct proc_dir_entry *entry;
1250
1251 /* create /proc/irq/1234/smp_affinity */
1252 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1253
1254 if (entry) {
1255 entry->nlink = 1;
1256 entry->data = (void *)(long)irq;
1257 entry->read_proc = irq_affinity_read_proc;
1258 entry->write_proc = irq_affinity_write_proc;
1259 }
1260 }
1261#endif
1262}
1263
/* Create the /proc/irq root directory; per-irq subdirectories are
 * added later by register_irq_proc().
 */
void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}
1269
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
new file mode 100644
index 000000000000..30862abee611
--- /dev/null
+++ b/arch/sparc64/kernel/isa.c
@@ -0,0 +1,329 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/pci.h>
4#include <linux/slab.h>
5#include <asm/oplib.h>
6#include <asm/isa.h>
7
8struct sparc_isa_bridge *isa_chain;
9
/* Print an ISA probe failure reason; callers follow up with prom_halt(). */
static void __init fatal_err(const char *reason)
{
	prom_printf("ISA: fatal error, %s.\n", reason);
}
14
15static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
16{
17 if (child)
18 printk(" (%s)", isa_dev->prom_name);
19 else
20 printk(" [%s", isa_dev->prom_name);
21}
22
23static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev,
24 struct linux_prom_registers *pregs,
25 int pregs_size)
26{
27 unsigned long base, len;
28 int prop_len;
29
30 prop_len = prom_getproperty(isa_dev->prom_node, "reg",
31 (char *) pregs, pregs_size);
32
33 if (prop_len <= 0)
34 return;
35
36 /* Only the first one is interesting. */
37 len = pregs[0].reg_size;
38 base = (((unsigned long)pregs[0].which_io << 32) |
39 (unsigned long)pregs[0].phys_addr);
40 base += isa_dev->bus->parent->io_space.start;
41
42 isa_dev->resource.start = base;
43 isa_dev->resource.end = (base + len - 1UL);
44 isa_dev->resource.flags = IORESOURCE_IO;
45 isa_dev->resource.name = isa_dev->prom_name;
46
47 request_resource(&isa_dev->bus->parent->io_space,
48 &isa_dev->resource);
49}
50
/* I can't believe they didn't put a real INO in the isa device
 * interrupts property.  The whole point of the OBP properties
 * is to shield the kernel from IRQ routing details.
 *
 * The P1275 standard for ISA devices seems to also have been
 * totally ignored.
 *
 * On later systems, an interrupt-map and interrupt-map-mask scheme
 * akin to EBUS is used.
 */
/* Fixed OBP "interrupts" value -> PCI INO translation table,
 * presumably for the "Grover" platform (per the name) -- used by
 * isa_dev_get_irq() only when the bridge has no interrupt-map.
 */
static struct {
	int obp_irq;
	int pci_ino;
} grover_irq_table[] = {
	{ 1, 0x00 },	/* dma, unknown ino at this point */
	{ 2, 0x27 },	/* floppy */
	{ 3, 0x22 },	/* parallel */
	{ 4, 0x2b },	/* serial */
	{ 5, 0x25 },	/* acpi power management */

	{ 0, 0x00 }	/* end of table */
};
73
74static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
75 struct sparc_isa_bridge *isa_br,
76 int *interrupt,
77 struct linux_prom_registers *pregs)
78{
79 unsigned int hi, lo, irq;
80 int i;
81
82 hi = pregs->which_io & isa_br->isa_intmask.phys_hi;
83 lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo;
84 irq = *interrupt & isa_br->isa_intmask.interrupt;
85 for (i = 0; i < isa_br->num_isa_intmap; i++) {
86 if ((isa_br->isa_intmap[i].phys_hi == hi) &&
87 (isa_br->isa_intmap[i].phys_lo == lo) &&
88 (isa_br->isa_intmap[i].interrupt == irq)) {
89 *interrupt = isa_br->isa_intmap[i].cinterrupt;
90 return 0;
91 }
92 }
93 return -1;
94}
95
96static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
97 struct linux_prom_registers *pregs)
98{
99 int irq_prop;
100
101 irq_prop = prom_getintdefault(isa_dev->prom_node,
102 "interrupts", -1);
103 if (irq_prop <= 0) {
104 goto no_irq;
105 } else {
106 struct pci_controller_info *pcic;
107 struct pci_pbm_info *pbm;
108 int i;
109
110 if (isa_dev->bus->num_isa_intmap) {
111 if (!isa_dev_get_irq_using_imap(isa_dev,
112 isa_dev->bus,
113 &irq_prop,
114 pregs))
115 goto route_irq;
116 }
117
118 for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
119 if (grover_irq_table[i].obp_irq == irq_prop) {
120 int ino = grover_irq_table[i].pci_ino;
121
122 if (ino == 0)
123 goto no_irq;
124
125 irq_prop = ino;
126 goto route_irq;
127 }
128 }
129 goto no_irq;
130
131route_irq:
132 pbm = isa_dev->bus->parent;
133 pcic = pbm->parent;
134 isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
135 return;
136 }
137
138no_irq:
139 isa_dev->irq = PCI_IRQ_NONE;
140}
141
/* Enumerate the OBP children of 'parent_isa_dev', allocating and
 * linking a sparc_isa_device for each one.  Resources and IRQs are
 * resolved the same way as for top-level devices.  Halts via PROM
 * on allocation or property failure (boot-time only).
 */
static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
{
	int node = prom_getchild(parent_isa_dev->prom_node);

	if (node == 0)
		return;

	printk(" ->");
	while (node != 0) {
		struct linux_prom_registers regs[PROMREG_MAX];
		struct sparc_isa_device *isa_dev;
		int prop_len;

		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
		if (!isa_dev) {
			fatal_err("cannot allocate child isa_dev");
			prom_halt();
		}

		memset(isa_dev, 0, sizeof(*isa_dev));

		/* Link it in to parent. */
		isa_dev->next = parent_isa_dev->child;
		parent_isa_dev->child = isa_dev;

		isa_dev->bus = parent_isa_dev->bus;
		isa_dev->prom_node = node;
		prop_len = prom_getproperty(node, "name",
					    (char *) isa_dev->prom_name,
					    sizeof(isa_dev->prom_name));
		if (prop_len <= 0) {
			fatal_err("cannot get child isa_dev OBP node name");
			prom_halt();
		}

		prop_len = prom_getproperty(node, "compatible",
					    (char *) isa_dev->compatible,
					    sizeof(isa_dev->compatible));

		/* Not having this is OK. */
		if (prop_len <= 0)
			isa_dev->compatible[0] = '\0';

		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
		isa_dev_get_irq(isa_dev, regs);

		report_dev(isa_dev, 1);

		node = prom_getsibling(node);
	}
}
193
/* Enumerate the top-level OBP children of an ISA bridge, building the
 * bridge's device list (tail-insert, preserving OBP order), resolving
 * each device's I/O resource and IRQ, then recursing into its children.
 * Halts via PROM on allocation or property failure (boot-time only).
 */
static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
{
	int node = prom_getchild(isa_br->prom_node);

	while (node != 0) {
		struct linux_prom_registers regs[PROMREG_MAX];
		struct sparc_isa_device *isa_dev;
		int prop_len;

		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
		if (!isa_dev) {
			fatal_err("cannot allocate isa_dev");
			prom_halt();
		}

		memset(isa_dev, 0, sizeof(*isa_dev));

		/* Link it in at the tail so listing order matches OBP. */
		isa_dev->next = NULL;
		if (isa_br->devices == NULL) {
			isa_br->devices = isa_dev;
		} else {
			struct sparc_isa_device *tmp = isa_br->devices;

			while (tmp->next)
				tmp = tmp->next;

			tmp->next = isa_dev;
		}

		isa_dev->bus = isa_br;
		isa_dev->prom_node = node;
		prop_len = prom_getproperty(node, "name",
					    (char *) isa_dev->prom_name,
					    sizeof(isa_dev->prom_name));
		if (prop_len <= 0) {
			fatal_err("cannot get isa_dev OBP node name");
			prom_halt();
		}

		prop_len = prom_getproperty(node, "compatible",
					    (char *) isa_dev->compatible,
					    sizeof(isa_dev->compatible));

		/* Not having this is OK. */
		if (prop_len <= 0)
			isa_dev->compatible[0] = '\0';

		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
		isa_dev_get_irq(isa_dev, regs);

		report_dev(isa_dev, 0);

		isa_fill_children(isa_dev);

		/* Close the "[name" group opened by report_dev(). */
		printk("]");

		node = prom_getsibling(node);
	}
}
254
/* Probe for ALI M1533 ISA bridges, build the global sparc_isa_bridge
 * chain, pull in each bridge's OBP "ranges"/"interrupt-map" data, and
 * enumerate the ISA devices beneath it.  Called once at boot; halts
 * via PROM on allocation failure.
 */
void __init isa_init(void)
{
	struct pci_dev *pdev;
	unsigned short vendor, device;
	int index = 0;

	vendor = PCI_VENDOR_ID_AL;
	device = PCI_DEVICE_ID_AL_M1533;

	pdev = NULL;
	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		struct pcidev_cookie *pdev_cookie;
		struct pci_pbm_info *pbm;
		struct sparc_isa_bridge *isa_br;
		int prop_len;

		pdev_cookie = pdev->sysdata;
		if (!pdev_cookie) {
			printk("ISA: Warning, ISA bridge ignored due to "
			       "lack of OBP data.\n");
			continue;
		}
		pbm = pdev_cookie->pbm;

		isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL);
		if (!isa_br) {
			fatal_err("cannot allocate sparc_isa_bridge");
			prom_halt();
		}

		memset(isa_br, 0, sizeof(*isa_br));

		/* Link it in. */
		isa_br->next = isa_chain;
		isa_chain = isa_br;

		isa_br->parent = pbm;
		isa_br->self = pdev;
		isa_br->index = index++;
		isa_br->prom_node = pdev_cookie->prom_node;
		/* NOTE(review): strncpy does not NUL-terminate when the
		 * source fills the buffer -- assumes both prom_name fields
		 * have the same size; confirm in asm/isa.h.
		 */
		strncpy(isa_br->prom_name, pdev_cookie->prom_name,
			sizeof(isa_br->prom_name));

		/* "ranges" translates child I/O addresses; absent is OK. */
		prop_len = prom_getproperty(isa_br->prom_node,
					    "ranges",
					    (char *) isa_br->isa_ranges,
					    sizeof(isa_br->isa_ranges));
		if (prop_len <= 0)
			isa_br->num_isa_ranges = 0;
		else
			isa_br->num_isa_ranges =
				(prop_len / sizeof(struct linux_prom_isa_ranges));

		/* Optional EBUS-style interrupt-map/-mask used for IRQ
		 * routing by isa_dev_get_irq_using_imap().
		 */
		prop_len = prom_getproperty(isa_br->prom_node,
					    "interrupt-map",
					    (char *) isa_br->isa_intmap,
					    sizeof(isa_br->isa_intmap));
		if (prop_len <= 0)
			isa_br->num_isa_intmap = 0;
		else
			isa_br->num_isa_intmap =
				(prop_len / sizeof(struct linux_prom_isa_intmap));

		prop_len = prom_getproperty(isa_br->prom_node,
					    "interrupt-map-mask",
					    (char *) &(isa_br->isa_intmask),
					    sizeof(isa_br->isa_intmask));

		printk("isa%d:", isa_br->index);

		isa_fill_devices(isa_br);

		printk("\n");
	}
}
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
new file mode 100644
index 000000000000..b5e32dfa4fbc
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -0,0 +1,83 @@
/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
 * itlb_base.S: Front end to ITLB miss replacement strategy.
 * This is included directly into the trap table.
 *
 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#if PAGE_SHIFT == 13
/*
 * To compute vpte offset, we need to do ((addr >> 13) << 3),
 * which can be optimized to (addr >> 10) if bits 10/11/12 can
 * be guaranteed to be 0 ... mmu_context.h does guarantee this
 * by only using 10 bits in the hwcontext value.
 */
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, 10, r2
#define CREATE_VPTE_OFFSET2(r1, r2)
#define CREATE_VPTE_NOP		nop
#else /* PAGE_SHIFT */
/* General case: shift out the page offset, then scale by
 * sizeof(pte) == 8 to index the virtual page table.
 */
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
				sllx	r2, 3, r2
#define CREATE_VPTE_NOP
#endif /* PAGE_SHIFT */


/* Ways we can get here:
 *
 * 1) Nucleus instruction misses from module code.
 * 2) All user instruction misses.
 *
 * All real page faults merge their code paths to the
 * sparc64_realfault_common label below.
 *
 * NOTE(review): this text is spliced into the trap table, so each
 * "ICACHE line" group below appears to be laid out to fit one 32-byte
 * (8-instruction) I-cache line -- confirm before reordering anything.
 */

/* ITLB ** ICACHE line 1: Quick user TLB misses		*/
	ldxa		[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
	ldxa		[%g3 + %g6] ASI_P, %g5		! Load VPTE
1:	brgez,pn	%g5, 3f				! Not valid, branch out
	 sethi		%hi(_PAGE_EXEC), %g4		! Delay-slot
	andcc		%g5, %g4, %g0			! Executable?
	be,pn		%xcc, 3f			! Nope, branch.
	 nop						! Delay-slot
2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
	retry						! Trap return
3:	rdpr		%pstate, %g4			! Move into alternate globals

/* ITLB ** ICACHE line 2: Real faults			*/
	wrpr		%g4, PSTATE_AG|PSTATE_MG, %pstate
	rdpr		%tpc, %g5			! And load faulting VA
	mov		FAULT_CODE_ITLB, %g4		! It was read from ITLB
sparc64_realfault_common:				! Called by TL0 dtlb_miss too
	stb		%g4, [%g6 + TI_FAULT_CODE]
	stx		%g5, [%g6 + TI_FAULT_ADDR]
	ba,pt		%xcc, etrap			! Save state
1:	 rd		%pc, %g7			! ...
	nop

/* ITLB ** ICACHE line 3: Finish faults + window fixups	*/
	call		do_sparc64_fault		! Call fault handler
	 add		%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
	 nop
winfix_trampoline:
	rdpr		%tpc, %g3			! Prepare winfixup TNPC
	or		%g3, 0x7c, %g3			! Compute offset to branch
	wrpr		%g3, %tnpc			! Write it into TNPC
	done						! Do it to it

/* ITLB ** ICACHE line 4: Unused...	*/
	nop
	nop
	nop
	nop
	CREATE_VPTE_NOP

#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
new file mode 100644
index 000000000000..7066d7ba667a
--- /dev/null
+++ b/arch/sparc64/kernel/kprobes.c
@@ -0,0 +1,394 @@
1/* arch/sparc64/kernel/kprobes.c
2 *
3 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/kprobes.h>
9
10#include <asm/kdebug.h>
11#include <asm/signal.h>
12
13/* We do not have hardware single-stepping on sparc64.
14 * So we implement software single-stepping with breakpoint
15 * traps. The top-level scheme is similar to that used
16 * in the x86 kprobes implementation.
17 *
18 * In the kprobe->ainsn.insn[] array we store the original
19 * instruction at index zero and a break instruction at
20 * index one.
21 *
22 * When we hit a kprobe we:
23 * - Run the pre-handler
24 * - Remember "regs->tnpc" and interrupt level stored in
25 * "regs->tstate" so we can restore them later
26 * - Disable PIL interrupts
27 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
28 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
29 * - Mark that we are actively in a kprobe
30 *
31 * At this point we wait for the second breakpoint at
32 * kprobe->ainsn.insn[1] to hit. When it does we:
33 * - Run the post-handler
34 * - Set regs->tpc to "remembered" regs->tnpc stored above,
35 * restore the PIL interrupt level in "regs->tstate" as well
36 * - Make any adjustments necessary to regs->tnpc in order
37 * to handle relative branches correctly. See below.
38 * - Mark that we are no longer actively in a kprobe.
39 */
40
/* Architecture hook run at kprobe registration time.  sparc64 needs no
 * extra validation or resources here, so registration always succeeds.
 */
int arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
45
/* Set up the two-slot single-step area: the original instruction at
 * index 0, followed by a second breakpoint so we trap again right
 * after the stepped instruction executes.
 */
void arch_copy_kprobe(struct kprobe *p)
{
	p->ainsn.insn[0] = *p->addr;
	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
}
51
/* Architecture hook at unregistration time; nothing to tear down on
 * sparc64 (no memory was allocated in arch_prepare_kprobe()).
 */
void arch_remove_kprobe(struct kprobe *p)
{
}
55
56/* kprobe_status settings */
57#define KPROBE_HIT_ACTIVE 0x00000001
58#define KPROBE_HIT_SS 0x00000002
59
60static struct kprobe *current_kprobe;
61static unsigned long current_kprobe_orig_tnpc;
62static unsigned long current_kprobe_orig_tstate_pil;
63static unsigned int kprobe_status;
64
/* Arrange for probe P's original instruction to be single-stepped.
 * Saves the original tnpc and the PIL bit of tstate in the
 * current_kprobe_orig_* globals so resume_execution()/disarm_kprobe()
 * can restore them, and masks PIL interrupts during the step.
 */
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	current_kprobe_orig_tnpc = regs->tnpc;
	current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
	regs->tstate |= TSTATE_PIL;

	/* Single-step inline if the original opcode is itself a
	 * breakpoint instruction: just re-execute at the probe address.
	 */
	if (p->opcode == BREAKPOINT_INSTRUCTION) {
		regs->tpc = (unsigned long) p->addr;
		regs->tnpc = current_kprobe_orig_tnpc;
	} else {
		/* Step the copied instruction; the breakpoint stored at
		 * insn[1] brings us back via DIE_DEBUG_2. */
		regs->tpc = (unsigned long) &p->ainsn.insn[0];
		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
	}
}
80
/* Put the original opcode back at p->addr and resume execution there
 * with the previously saved tnpc and PIL state.  flushi() keeps the
 * I-cache coherent with the rewritten instruction word.
 */
static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	flushi(p->addr);

	regs->tpc = (unsigned long) p->addr;
	regs->tnpc = current_kprobe_orig_tnpc;
	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
			current_kprobe_orig_tstate_pil);
}
91
/* Top-level breakpoint ("ta 0x70") handler.  Returns 1 when the trap
 * was consumed by kprobes, 0 to let normal kernel trap handling run.
 * Preemption is disabled on entry and stays disabled on the return-1
 * paths; it is re-balanced by post_kprobe_handler() or the jprobe
 * break handler path -- presumably by design, verify against the
 * generic kprobes core.
 */
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;

	preempt_disable();

	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		 * Disarm the probe we just hit, and ignore it.
		 */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				/* Recursive hit while single-stepping:
				 * restore PIL and bail out. */
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					current_kprobe_orig_tstate_pil);
				unlock_kprobes();
				goto no_kprobe;
			}
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			/* Not a probe breakpoint; give the active probe's
			 * break_handler (jprobes) a chance to claim it. */
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	/* A pre_handler returning non-zero (jprobes) means it changed
	 * regs itself and wants no single-step. */
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
155
156/* If INSN is a relative control transfer instruction,
157 * return the corrected branch destination value.
158 *
159 * The original INSN location was REAL_PC, it actually
160 * executed at PC and produced destination address NPC.
161 */
162static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
163 unsigned long pc, unsigned long npc)
164{
165 /* Branch not taken, no mods necessary. */
166 if (npc == pc + 0x4UL)
167 return real_pc + 0x4UL;
168
169 /* The three cases are call, branch w/prediction,
170 * and traditional branch.
171 */
172 if ((insn & 0xc0000000) == 0x40000000 ||
173 (insn & 0xc1c00000) == 0x00400000 ||
174 (insn & 0xc1c00000) == 0x00800000) {
175 /* The instruction did all the work for us
176 * already, just apply the offset to the correct
177 * instruction location.
178 */
179 return (real_pc + (npc - pc));
180 }
181
182 return real_pc + 0x4UL;
183}
184
185/* If INSN is an instruction which writes it's PC location
186 * into a destination register, fix that up.
187 */
/* If INSN is an instruction which writes its PC location into a
 * destination register (call / jmpl), rewrite that register so it
 * holds REAL_PC instead of the address of the single-step copy.
 */
static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
{
	unsigned long *slot = NULL;

	/* Simplest case is call, which always uses %o7 */
	if ((insn & 0xc0000000) == 0x40000000) {
		slot = &regs->u_regs[UREG_I7];
	}

	/* Jmpl encodes the register inside of the opcode */
	if ((insn & 0xc1f80000) == 0x81c00000) {
		unsigned long rd = ((insn >> 25) & 0x1f);

		if (rd <= 15) {
			/* %g0-%o7 live in pt_regs directly. */
			slot = &regs->u_regs[rd];
		} else {
			/* Hard case, it goes onto the stack.
			 * %l0-%i7 live in the register window save
			 * area of the frame, so spill the windows
			 * first and patch the saved copy. */
			flushw_all();

			rd -= 16;
			slot = (unsigned long *)
				(regs->u_regs[UREG_FP] + STACK_BIAS);
			slot += rd;
		}
	}
	if (slot != NULL)
		*slot = real_pc;
}
216
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction word that was replaced by the breakpoint instruction.
 * To avoid the SMP problems that can occur when we temporarily put
 * back the original opcode to single-step, we single-stepped a copy
 * of the instruction, located at p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap: it restores tpc/tnpc as if the original
 * instruction had executed in place, and restores the PIL level.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	u32 insn = p->ainsn.insn[0];

	regs->tpc = current_kprobe_orig_tnpc;
	/* Correct pc-relative branch targets computed from the copy... */
	regs->tnpc = relbranch_fixup(insn,
				     (unsigned long) p->addr,
				     (unsigned long) &p->ainsn.insn[0],
				     regs->tnpc);
	/* ...and any register that captured the copy's PC (call/jmpl). */
	retpc_fixup(regs, insn, (unsigned long) p->addr);

	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
			current_kprobe_orig_tstate_pil);
}
242
/* Handles the second breakpoint ("ta 0x71") that fires right after the
 * single-stepped copy.  Runs the probe's post_handler, rewrites the
 * register state back to the real instruction stream, and drops the
 * lock/preemption taken by kprobe_handler().  Returns 0 if no probe
 * was active so the trap is reported normally.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if (current_kprobe->post_handler)
		current_kprobe->post_handler(current_kprobe, regs, 0);

	resume_execution(current_kprobe, regs);

	unlock_kprobes();
	preempt_enable_no_resched();

	return 1;
}
258
/* Interrupts disabled, kprobe_lock held.
 * Called when a fault occurs while a probe is active; gives the
 * probe's fault_handler first refusal, otherwise unwinds any
 * in-progress single-step so the fault is reported against the real
 * instruction.  Returns 1 if the fault was fully handled.
 */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (current_kprobe->fault_handler
	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		/* Fault hit mid-step: restore real PC state and release
		 * the resources kprobe_handler() took. */
		resume_execution(current_kprobe, regs);

		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}
274
275/*
276 * Wrapper routine to for handling exceptions.
277 */
278int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
279 void *data)
280{
281 struct die_args *args = (struct die_args *)data;
282 switch (val) {
283 case DIE_DEBUG:
284 if (kprobe_handler(args->regs))
285 return NOTIFY_STOP;
286 break;
287 case DIE_DEBUG_2:
288 if (post_kprobe_handler(args->regs))
289 return NOTIFY_STOP;
290 break;
291 case DIE_GPF:
292 if (kprobe_running() &&
293 kprobe_fault_handler(args->regs, args->trapnr))
294 return NOTIFY_STOP;
295 break;
296 case DIE_PAGE_FAULT:
297 if (kprobe_running() &&
298 kprobe_fault_handler(args->regs, args->trapnr))
299 return NOTIFY_STOP;
300 break;
301 default:
302 break;
303 }
304 return NOTIFY_DONE;
305}
306
/* Trap-table entry for the two kprobe software traps.  User-mode use
 * of these trap numbers is not kprobes' business, so it is punted to
 * bad_trap(); kernel-mode hits are fed through the die notifier chain
 * (and thus kprobe_exceptions_notify() above).
 */
asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
{
	BUG_ON(trap_level != 0x170 && trap_level != 0x171);

	if (user_mode(regs)) {
		local_irq_enable();
		bad_trap(regs, trap_level);
		return;
	}

	/* trap_level == 0x170 --> ta 0x70
	 * trap_level == 0x171 --> ta 0x71
	 */
	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
		       (trap_level == 0x170) ? "debug" : "debug_2",
		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
		bad_trap(regs, trap_level);
}
325
326/* Jprobes support. */
327static struct pt_regs jprobe_saved_regs;
328static struct pt_regs *jprobe_saved_regs_location;
329static struct sparc_stackf jprobe_saved_stack;
330
/* Jprobe pre-handler: snapshot the full register state and the current
 * stack frame, then redirect execution into the jprobe entry function.
 * Returning 1 tells kprobe_handler() not to single-step anything.
 */
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	jprobe_saved_regs_location = regs;
	memcpy(&jprobe_saved_regs, regs, sizeof(*regs));

	/* Save a whole stack frame, this gets arguments
	 * pushed onto the stack after using up all the
	 * arg registers.
	 */
	memcpy(&jprobe_saved_stack,
	       (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
	       sizeof(jprobe_saved_stack));

	/* Enter jp->entry with PIL interrupts masked. */
	regs->tpc = (unsigned long) jp->entry;
	regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
	regs->tstate |= TSTATE_PIL;

	return 1;
}
352
/* Called by the jprobe entry function when it is done.  Issues the
 * kprobe breakpoint trap at a known, labelled address so that
 * longjmp_break_handler() can recognize it and restore the state
 * saved by setjmp_pre_handler().
 */
void jprobe_return(void)
{
	preempt_enable_no_resched();
	__asm__ __volatile__(
		".globl	jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
		"ta 0x70");
}
361
362extern void jprobe_return_trap_instruction(void);
363
364extern void __show_regs(struct pt_regs * regs);
365
/* Break handler for jprobes: fires on the "ta 0x70" planted by
 * jprobe_return().  Restores the pt_regs and stack frame captured in
 * setjmp_pre_handler(), effectively longjmp()ing back to the original
 * probed call site.  Returns 1 when the trap was the jprobe return,
 * 0 for any other breakpoint.
 */
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	u32 *addr = (u32 *) regs->tpc;

	if (addr == (u32 *) jprobe_return_trap_instruction) {
		/* Sanity check: the regs we are unwinding must be the
		 * very ones we saved on entry. */
		if (jprobe_saved_regs_location != regs) {
			printk("JPROBE: Current regs (%p) does not match "
			       "saved regs (%p).\n",
			       regs, jprobe_saved_regs_location);
			printk("JPROBE: Saved registers\n");
			__show_regs(jprobe_saved_regs_location);
			printk("JPROBE: Current registers\n");
			__show_regs(regs);
			BUG();
		}
		/* Restore old register state. Do pt_regs
		 * first so that UREG_FP is the original one for
		 * the stack frame restore.
		 */
		memcpy(regs, &jprobe_saved_regs, sizeof(*regs));

		memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
		       &jprobe_saved_stack,
		       sizeof(jprobe_saved_stack));

		return 1;
	}
	return 0;
}
diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c
new file mode 100644
index 000000000000..6c83e372f75d
--- /dev/null
+++ b/arch/sparc64/kernel/module.c
@@ -0,0 +1,209 @@
1/* Kernel module help for sparc64.
2 *
3 * Copyright (C) 2001 Rusty Russell.
4 * Copyright (C) 2002 David S. Miller.
5 */
6
7#include <linux/moduleloader.h>
8#include <linux/kernel.h>
9#include <linux/elf.h>
10#include <linux/vmalloc.h>
11#include <linux/fs.h>
12#include <linux/string.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15#include <linux/mm.h>
16
17#include <asm/processor.h>
18#include <asm/spitfire.h>
19
/* Map SIZE bytes of vmalloc-backed memory inside the module area
 * [MODULES_VADDR, MODULES_END), so module text stays within branch
 * reach of the kernel.  Returns NULL on zero/oversized requests or
 * allocation failure.
 */
static void *module_map(unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || size > MODULES_LEN)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
	if (!area)
		return NULL;

	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
}
34
35void *module_alloc(unsigned long size)
36{
37 void *ret;
38
39 /* We handle the zero case fine, unlike vmalloc */
40 if (size == 0)
41 return NULL;
42
43 ret = module_map(size);
44 if (!ret)
45 ret = ERR_PTR(-ENOMEM);
46 else
47 memset(ret, 0, size);
48
49 return ret;
50}
51
/* Free memory returned from module_core_alloc/module_init_alloc.
 * vfree(NULL) is a no-op, so callers need not check module_region.
 */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
           table entries. */
}
59
/* Make generic code ignore STT_REGISTER dummy undefined symbols.
 * SPARC ELF objects carry %g2/%g3 register-usage markers as undefined
 * STT_REGISTER symbols; flipping them to SHN_ABS keeps the generic
 * module loader from failing symbol resolution on them.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	unsigned int symidx;
	Elf64_Sym *sym;
	const char *strtab;
	int i;

	/* Locate the symbol table section; a module without one is
	 * malformed. */
	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
		if (symidx == hdr->e_shnum-1) {
			printk("%s: no symtab found.\n", mod->name);
			return -ENOEXEC;
		}
	}
	sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;

	/* Entry 0 is the reserved null symbol, so start at 1. */
	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
		if (sym[i].st_shndx == SHN_UNDEF &&
		    ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
			sym[i].st_shndx = SHN_ABS;
	}
	return 0;
}
87
88int apply_relocate(Elf64_Shdr *sechdrs,
89 const char *strtab,
90 unsigned int symindex,
91 unsigned int relsec,
92 struct module *me)
93{
94 printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
95 me->name);
96 return -ENOEXEC;
97}
98
/* Apply one RELA relocation section to a module being loaded.
 * Each entry patches the location (section base + r_offset) with the
 * symbol value plus addend, encoded per the SPARC relocation type.
 */
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	u8 *location;
	u32 *loc32;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		Elf64_Addr v;

		/* This is where to make the change */
		location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		loc32 = (u32 *) location;

		/* Module text lives in the low 4GB mapping; a location
		 * above it would indicate a corrupted module. */
		BUG_ON(((u64)location >> (u64)32) != (u64)0);

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);
		v = sym->st_value + rel[i].r_addend;

		/* OLO10 carries an extra addend in the high bits of the
		 * relocation type, hence the "& 0xff" here. */
		switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
		case R_SPARC_64:
			/* Full 64-bit value, stored byte-wise since the
			 * target may be unaligned. */
			location[0] = v >> 56;
			location[1] = v >> 48;
			location[2] = v >> 40;
			location[3] = v >> 32;
			location[4] = v >> 24;
			location[5] = v >> 16;
			location[6] = v >>  8;
			location[7] = v >>  0;
			break;

		case R_SPARC_32:
			location[0] = v >> 24;
			location[1] = v >> 16;
			location[2] = v >>  8;
			location[3] = v >>  0;
			break;

		case R_SPARC_WDISP30:
			/* 30-bit word displacement (call). */
			v -= (Elf64_Addr) location;
			*loc32 = (*loc32 & ~0x3fffffff) |
				((v >> 2) & 0x3fffffff);
			break;

		case R_SPARC_WDISP22:
			/* 22-bit word displacement (branch). */
			v -= (Elf64_Addr) location;
			*loc32 = (*loc32 & ~0x3fffff) |
				((v >> 2) & 0x3fffff);
			break;

		case R_SPARC_WDISP19:
			/* 19-bit word displacement (branch w/prediction). */
			v -= (Elf64_Addr) location;
			*loc32 = (*loc32 & ~0x7ffff) |
				((v >> 2) & 0x7ffff);
			break;

		case R_SPARC_LO10:
			*loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
			break;

		case R_SPARC_HI22:
			*loc32 = (*loc32 & ~0x3fffff) |
				((v >> 10) & 0x3fffff);
			break;

		case R_SPARC_OLO10:
			*loc32 = (*loc32 & ~0x1fff) |
				(((v & 0x3ff) +
				  (ELF64_R_TYPE(rel[i].r_info) >> 8))
				 & 0x1fff);
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %x\n",
			       me->name,
			       (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
			return -ENOEXEC;
		};
	}
	return 0;
}
189
/* Post-load fixups.  Cheetah's I-cache is fully coherent, so only
 * Spitfire-class CPUs need the I-cache tags invalidated after the
 * module's instructions were written through the D-side.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long va;

		flushw_all();
		/* Spitfire I-cache appears to be indexed over two pages
		 * of 32-byte lines here -- confirm against the CPU
		 * manual before changing the bounds. */
		for (va =  0; va < (PAGE_SIZE << 1); va += 32)
			spitfire_put_icache_tag(va, 0x0);
		__asm__ __volatile__("flush %g6");
	}

	return 0;
}
206
/* Architecture hook at module unload; nothing arch-specific to undo. */
void module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
new file mode 100644
index 000000000000..bba140d98b1b
--- /dev/null
+++ b/arch/sparc64/kernel/pci.c
@@ -0,0 +1,805 @@
1/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
2 * pci.c: UltraSparc PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/sched.h>
14#include <linux/capability.h>
15#include <linux/errno.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18
19#include <asm/uaccess.h>
20#include <asm/pbm.h>
21#include <asm/pgtable.h>
22#include <asm/irq.h>
23#include <asm/ebus.h>
24#include <asm/isa.h>
25
26unsigned long pci_memspace_mask = 0xffffffffUL;
27
28#ifndef CONFIG_PCI
29/* A "nop" PCI implementation. */
/* Stub syscall for CONFIG_PCI=n kernels: reports success without
 * touching *buf. */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
				  unsigned long off, unsigned long len,
				  unsigned char *buf)
{
	return 0;
}
/* Stub syscall for CONFIG_PCI=n kernels: discards the write. */
asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
				   unsigned long off, unsigned long len,
				   unsigned char *buf)
{
	return 0;
}
42#else
43
44/* List of all PCI controllers found in the system. */
45struct pci_controller_info *pci_controller_root = NULL;
46
47/* Each PCI controller found gets a unique index. */
48int pci_num_controllers = 0;
49
50/* At boot time the user can give the kernel a command
51 * line option which controls if and how PCI devices
52 * are reordered at PCI bus probing time.
53 */
54int pci_device_reorder = 0;
55
56volatile int pci_poke_in_progress;
57volatile int pci_poke_cpu = -1;
58volatile int pci_poke_faulted;
59
60static DEFINE_SPINLOCK(pci_poke_lock);
61
/* Read one byte of PCI config space through the physical-bypass ASI.
 * The pci_poke_* globals tell the trap handlers that a config-space
 * "poke" is in flight so a bus error is swallowed and merely sets
 * pci_poke_faulted; on fault, *ret is left untouched.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
83
/* 16-bit variant of pci_config_read8(); same fault-swallowing
 * protocol via the pci_poke_* globals. */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
105
/* 32-bit variant of pci_config_read8(); same fault-swallowing
 * protocol via the pci_poke_* globals. */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
127
/* Write one byte of PCI config space; faults during the store are
 * silently absorbed by the trap handlers via the pci_poke_* protocol.
 */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
146
/* 16-bit variant of pci_config_write8(). */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
165
/* 32-bit variant of pci_config_write8(). */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
184
185/* Probe for all PCI controllers in the system. */
186extern void sabre_init(int, char *);
187extern void psycho_init(int, char *);
188extern void schizo_init(int, char *);
189extern void schizo_plus_init(int, char *);
190extern void tomatillo_init(int, char *);
191
192static struct {
193 char *model_name;
194 void (*init)(int, char *);
195} pci_controller_table[] __initdata = {
196 { "SUNW,sabre", sabre_init },
197 { "pci108e,a000", sabre_init },
198 { "pci108e,a001", sabre_init },
199 { "SUNW,psycho", psycho_init },
200 { "pci108e,8000", psycho_init },
201 { "SUNW,schizo", schizo_init },
202 { "pci108e,8001", schizo_init },
203 { "SUNW,schizo+", schizo_plus_init },
204 { "pci108e,8002", schizo_plus_init },
205 { "SUNW,tomatillo", tomatillo_init },
206 { "pci108e,a801", tomatillo_init },
207};
208#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
209 sizeof(pci_controller_table[0]))
210
211static int __init pci_controller_init(char *model_name, int namelen, int node)
212{
213 int i;
214
215 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
216 if (!strncmp(model_name,
217 pci_controller_table[i].model_name,
218 namelen)) {
219 pci_controller_table[i].init(node, model_name);
220 return 1;
221 }
222 }
223 printk("PCI: Warning unknown controller, model name [%s]\n",
224 model_name);
225 printk("PCI: Ignoring controller...\n");
226
227 return 0;
228}
229
230static int __init pci_is_controller(char *model_name, int namelen, int node)
231{
232 int i;
233
234 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
235 if (!strncmp(model_name,
236 pci_controller_table[i].model_name,
237 namelen)) {
238 return 1;
239 }
240 }
241 return 0;
242}
243
/* Walk the PROM device tree looking for "pci" nodes, invoke HANDLER
 * on each node's "model" (or "compatible") property value, and return
 * how many nodes the handler accepted.
 */
static int __init pci_controller_scan(int (*handler)(char *, int, int))
{
	char namebuf[64];
	int node;
	int count = 0;

	node = prom_getchild(prom_root_node);
	while ((node = prom_searchsiblings(node, "pci")) != 0) {
		int len;

		/* "model" takes precedence over "compatible". */
		if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
		    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
			int item_len = 0;

			/* Our value may be a multi-valued string in the
			 * case of some compatible properties. For sanity,
			 * only try the first one. */

			while (namebuf[item_len] && len) {
				len--;
				item_len++;
			}

			if (handler(namebuf, item_len, node))
				count++;
		}

		node = prom_getsibling(node);
		if (!node)
			break;
	}

	return count;
}
278
279
280/* Is there some PCI controller in the system? */
281int __init pcic_present(void)
282{
283 return pci_controller_scan(pci_is_controller);
284}
285
/* Find each controller in the system, attach and initialize
 * software state structure for each and link into the
 * pci_controller_root.  Setup the controller enough such
 * that bus scanning can be done.
 */
static void __init pci_controller_probe(void)
{
	printk("PCI: Probing for controllers.\n");

	pci_controller_scan(pci_controller_init);
}
297
298static void __init pci_scan_each_controller_bus(void)
299{
300 struct pci_controller_info *p;
301
302 for (p = pci_controller_root; p; p = p->next)
303 p->scan_bus(p);
304}
305
/* Reorder the pci_dev chain, so that onboard devices come first
 * and then come the pluggable cards.
 *
 * NOTE(review): the (__irq_ino(irq) & 0x20) test presumably
 * distinguishes onboard interrupt sources -- verify against the
 * controller IRQ encoding before relying on it.
 */
static void __init pci_reorder_devs(void)
{
	struct list_head *pci_onboard = &pci_devices;
	struct list_head *walk = pci_onboard->next;

	while (walk != pci_onboard) {
		struct pci_dev *pdev = pci_dev_g(walk);
		/* Save the successor now; list_del/list_add below
		 * rewrites walk's links. */
		struct list_head *walk_next = walk->next;

		if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
			list_del(walk);
			list_add(walk, pci_onboard);
		}

		walk = walk_next;
	}
}
326
327extern void clock_probe(void);
328extern void power_init(void);
329
/* Late-boot PCI bring-up: probe controllers, scan their buses,
 * optionally reorder devices (see pcibios_setup()), then initialize
 * the subsystems that hang off PCI (ISA bridges, EBUS, clock, power).
 * A system with no PCI controllers exits early and successfully.
 */
static int __init pcibios_init(void)
{
	pci_controller_probe();
	if (pci_controller_root == NULL)
		return 0;

	pci_scan_each_controller_bus();

	if (pci_device_reorder)
		pci_reorder_devs();

	isa_init();
	ebus_init();
	clock_probe();
	power_init();

	return 0;
}
348
349subsys_initcall(pcibios_init);
350
/* Point the bus at its PBM's own I/O and MEM resource roots. */
void pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	/* Generic PCI bus probing sets these to point at
	 * &io{port,mem}_resouce which is wrong for us.
	 */
	pbus->resource[0] = &pbm->io_space;
	pbus->resource[1] = &pbm->mem_space;
}
361
/* Claim an already-assigned device resource under the owning PBM's
 * I/O or MEM root.  The controller's resource_adjust hook translates
 * the resource into the root's address space first.  Returns 0 on
 * success or a negative errno.
 */
int pci_claim_resource(struct pci_dev *pdev, int resource)
{
	struct pci_pbm_info *pbm = pdev->bus->sysdata;
	struct resource *res = &pdev->resource[resource];
	struct resource *root;

	if (!pbm)
		return -EINVAL;

	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else
		root = &pbm->mem_space;

	pbm->parent->resource_adjust(pdev, res, root);

	return request_resource(root, res);
}
380
381/*
382 * Given the PCI bus a device resides on, try to
383 * find an acceptable resource allocation for a
384 * specific device resource..
385 */
/*
 * Given the PCI bus a device resides on, try to
 * find an acceptable resource allocation for a
 * specific device resource: try each of the bus's
 * resource windows whose type (IO vs MEM) matches,
 * and take the first that can fit SIZE bytes at or
 * above MIN.  Returns 0 on success, -EBUSY when no
 * window can accommodate the request.
 */
static int pci_assign_bus_resource(const struct pci_bus *bus,
				   struct pci_dev *dev,
				   struct resource *res,
				   unsigned long size,
				   unsigned long min,
				   int resno)
{
	unsigned int type_mask;
	int i;

	type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	for (i = 0 ; i < 4; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/* Ok, try it out.. (alignment == size for PCI BARs) */
		if (allocate_resource(r, res, size, min, -1, size, NULL, NULL) < 0)
			continue;

		/* PCI config space updated by caller. */
		return 0;
	}
	return -EBUSY;
}
415
/* Allocate address space for one device BAR out of the owning PBM's
 * windows and, on success, write the new address back into the
 * device's config space through the controller hook.  I/O
 * allocations skip the first 0x400 bytes of the I/O window
 * (traditionally reserved for legacy/ISA ports -- confirm).
 */
int pci_assign_resource(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res = &pdev->resource[resource];
	unsigned long min, size;
	int err;

	if (res->flags & IORESOURCE_IO)
		min = pbm->io_space.start + 0x400UL;
	else
		min = pbm->mem_space.start;

	size = res->end - res->start + 1;

	err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);

	if (err < 0) {
		printk("PCI: Failed to allocate resource %d for %s\n",
		       resource, pci_name(pdev));
	} else {
		/* Update PCI config space. */
		pbm->parent->base_address_update(pdev, resource);
	}

	return err;
}
443
/* Sort resources by alignment: insert each unassigned, non-zero-size
 * resource of DEV into the singly linked list at HEAD, keeping the
 * list ordered by decreasing alignment.  For ordinary BARs the
 * alignment key is the size (r->end - r->start + 1); for bridge
 * windows it is r->start.  Panics on allocation failure since this
 * runs during bus setup with no recovery path.
 */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *r;
		struct resource_list *list, *tmp;
		unsigned long r_align;

		r = &dev->resource[i];
		r_align = r->end - r->start;

		/* Skip unused resources and ones already claimed. */
		if (!(r->flags) || r->parent)
			continue;
		if (!r_align) {
			printk(KERN_WARNING "PCI: Ignore bogus resource %d "
			       "[%lx:%lx] of %s\n",
			       i, r->start, r->end, pci_name(dev));
			continue;
		}
		r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
		/* Walk the list until the next entry's alignment key is
		 * smaller than ours, then splice in before it. */
		for (list = head; ; list = list->next) {
			unsigned long align = 0;
			struct resource_list *ln = list->next;
			int idx;

			if (ln) {
				idx = ln->res - &ln->dev->resource[0];
				align = (idx < PCI_BRIDGE_RESOURCES) ?
					ln->res->end - ln->res->start + 1 :
					ln->res->start;
			}
			if (r_align > align) {
				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
				if (!tmp)
					panic("pdev_sort_resources(): "
					      "kmalloc() failed!\n");
				tmp->next = ln;
				tmp->res = r;
				tmp->dev = dev;
				list->next = tmp;
				break;
			}
		}
	}
}
491
/* No-op: IRQ routing is fully handled by the controller drivers. */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
495
/* No-op: allocate_resource() with alignment == size (see
 * pci_assign_bus_resource()) already yields valid BAR alignment. */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}
500
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	/* Devices are fully enabled (command register bits, BARs)
	 * during the firmware/fixup passes; always succeed here.
	 */
	return 0;
}
505
506void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
507 struct resource *res)
508{
509 struct pci_pbm_info *pbm = pdev->bus->sysdata;
510 struct resource zero_res, *root;
511
512 zero_res.start = 0;
513 zero_res.end = 0;
514 zero_res.flags = res->flags;
515
516 if (res->flags & IORESOURCE_IO)
517 root = &pbm->io_space;
518 else
519 root = &pbm->mem_space;
520
521 pbm->parent->resource_adjust(pdev, &zero_res, root);
522
523 region->start = res->start - zero_res.start;
524 region->end = res->end - zero_res.start;
525}
526
527void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
528 struct pci_bus_region *region)
529{
530 struct pci_pbm_info *pbm = pdev->bus->sysdata;
531 struct resource *root;
532
533 res->start = region->start;
534 res->end = region->end;
535
536 if (res->flags & IORESOURCE_IO)
537 root = &pbm->io_space;
538 else
539 root = &pbm->mem_space;
540
541 pbm->parent->resource_adjust(pdev, res, root);
542}
543
544char * __init pcibios_setup(char *str)
545{
546 if (!strcmp(str, "onboardfirst")) {
547 pci_device_reorder = 1;
548 return NULL;
549 }
550 if (!strcmp(str, "noreorder")) {
551 pci_device_reorder = 0;
552 return NULL;
553 }
554 return str;
555}
556
557/* Platform support for /proc/bus/pci/X/Y mmap()s. */
558
559/* If the user uses a host-bridge as the PCI device, he may use
560 * this to perform a raw mmap() of the I/O or MEM space behind
561 * that controller.
562 *
563 * This can be useful for execution of x86 PCI bios initialization code
564 * on a PCI card, like the xfree86 int10 stuff does.
565 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm;
	struct pci_controller_info *p;
	unsigned long space_size, user_offset, user_size;

	if (!pcp)
		return -ENXIO;
	pbm = pcp->pbm;
	if (!pbm)
		return -ENXIO;

	/* First pass: compute the total size of the space the user may
	 * map.  When both PBMs share one domain the space spans the
	 * union of PBM A's and PBM B's ranges; otherwise only this
	 * PBM's own range counts.
	 */
	p = pbm->parent;
	if (p->pbms_same_domain) {
		unsigned long lowest, highest;

		lowest = ~0UL; highest = 0UL;
		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags) {
				lowest = p->pbm_A.io_space.start;
				highest = p->pbm_A.io_space.end + 1;
			}
			if (p->pbm_B.io_space.flags) {
				if (lowest > p->pbm_B.io_space.start)
					lowest = p->pbm_B.io_space.start;
				if (highest < p->pbm_B.io_space.end + 1)
					highest = p->pbm_B.io_space.end + 1;
			}
			space_size = highest - lowest;
		} else {
			if (p->pbm_A.mem_space.flags) {
				lowest = p->pbm_A.mem_space.start;
				highest = p->pbm_A.mem_space.end + 1;
			}
			if (p->pbm_B.mem_space.flags) {
				if (lowest > p->pbm_B.mem_space.start)
					lowest = p->pbm_B.mem_space.start;
				if (highest < p->pbm_B.mem_space.end + 1)
					highest = p->pbm_B.mem_space.end + 1;
			}
			space_size = highest - lowest;
		}
	} else {
		if (mmap_state == pci_mmap_io) {
			space_size = (pbm->io_space.end -
				      pbm->io_space.start) + 1;
		} else {
			space_size = (pbm->mem_space.end -
				      pbm->mem_space.start) + 1;
		}
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	/* Second pass: rebase vm_pgoff onto the physical start of the
	 * space (the lowest configured start when PBMs share a domain,
	 * this PBM's own start otherwise).
	 */
	if (p->pbms_same_domain) {
		unsigned long lowest = ~0UL;

		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags)
				lowest = p->pbm_A.io_space.start;
			if (p->pbm_B.io_space.flags &&
			    lowest > p->pbm_B.io_space.start)
				lowest = p->pbm_B.io_space.start;
		} else {
			if (p->pbm_A.mem_space.flags)
				lowest = p->pbm_A.mem_space.start;
			if (p->pbm_B.mem_space.flags &&
			    lowest > p->pbm_B.mem_space.start)
				lowest = p->pbm_B.mem_space.start;
		}
		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
	} else {
		if (mmap_state == pci_mmap_io) {
			vma->vm_pgoff = (pbm->io_space.start +
					 user_offset) >> PAGE_SHIFT;
		} else {
			vma->vm_pgoff = (pbm->mem_space.start +
					 user_offset) >> PAGE_SHIFT;
		}
	}

	return 0;
}
657
658/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
659 * to the 32-bit pci bus offset for DEV requested by the user.
660 *
661 * Basically, the user finds the base address for his device which he wishes
662 * to mmap. They read the 32-bit value from the config space base register,
663 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
664 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
665 *
666 * Returns negative error code on failure, zero on success.
667 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long user32 = user_offset & pci_memspace_mask;
	unsigned long largest_base, this_base, addr32;
	int i;

	/* Host bridges get a raw mapping of the whole space instead. */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

	/* Figure out which base address this is for. */
	largest_base = 0UL;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			/* The ROM BAR only maps as MEM space. */
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		this_base = rp->start;

		/* Reduce the BAR's physical base to the same 32-bit
		 * bus-relative form the user supplied.
		 */
		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

		if (mmap_state == pci_mmap_io)
			addr32 &= 0xffffff;

		/* Keep the highest base that still lies at or below
		 * the user's requested offset.
		 */
		if (addr32 <= user32 && this_base > largest_base)
			largest_base = this_base;
	}

	/* No BAR matched the requested offset. */
	if (largest_base == 0UL)
		return -EINVAL;

	/* Now construct the final physical address. */
	if (mmap_state == pci_mmap_io)
		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
	else
		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

	return 0;
}
722
723/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
724 * mapping.
725 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	/* Mark the VMA as device I/O memory that must not be swapped
	 * or included in core dumps.
	 */
	vma->vm_flags |= (VM_IO | VM_RESERVED);
}
731
732/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
733 * device mapping.
734 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_page_range/io_remap_pfn_range takes care of this,
	 * do nothing here.
	 */
}
741
742/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
743 * for this architecture. The region in the process to map is described by vm_start
744 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
745 * The pci device structure is provided so that architectures may make mapping
746 * decisions on a per-device or per-bus basis.
747 *
748 * Returns a negative error code on failure, zero on success.
749 */
750int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
751 enum pci_mmap_state mmap_state,
752 int write_combine)
753{
754 int ret;
755
756 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
757 if (ret < 0)
758 return ret;
759
760 __pci_mmap_set_flags(dev, vma, mmap_state);
761 __pci_mmap_set_pgprot(dev, vma, mmap_state);
762
763 ret = io_remap_pfn_range(vma, vma->vm_start,
764 vma->vm_pgoff,
765 vma->vm_end - vma->vm_start,
766 vma->vm_page_prot);
767 if (ret)
768 return ret;
769
770 vma->vm_flags |= VM_IO;
771 return 0;
772}
773
/* Return the domain number for this pci bus */
775
776int pci_domain_nr(struct pci_bus *pbus)
777{
778 struct pci_pbm_info *pbm = pbus->sysdata;
779 int ret;
780
781 if (pbm == NULL || pbm->parent == NULL) {
782 ret = -ENXIO;
783 } else {
784 struct pci_controller_info *p = pbm->parent;
785
786 ret = p->index;
787 if (p->pbms_same_domain == 0)
788 ret = ((ret << 1) +
789 ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
790 }
791
792 return ret;
793}
794EXPORT_SYMBOL(pci_domain_nr);
795
int pcibios_prep_mwi(struct pci_dev *dev)
{
	/* We set correct PCI_CACHE_LINE_SIZE register values for every
	 * device probed on this platform.  So there is nothing to check
	 * and this always succeeds.
	 */
	return 0;
}
804
805#endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
new file mode 100644
index 000000000000..58310aacea28
--- /dev/null
+++ b/arch/sparc64/kernel/pci_common.c
@@ -0,0 +1,1040 @@
1/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
2 * pci_common.c: PCI controller common support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/string.h>
8#include <linux/slab.h>
9#include <linux/init.h>
10
11#include <asm/pbm.h>
12
13/* Fix self device of BUS and hook it into BUS->self.
14 * The pci_scan_bus does not do this for the host bridge.
15 */
16void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
17{
18 struct pci_dev *pdev;
19
20 list_for_each_entry(pdev, &pbus->devices, bus_list) {
21 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
22 pbus->self = pdev;
23 return;
24 }
25 }
26
27 prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
28 prom_halt();
29}
30
31/* Find the OBP PROM device tree node for a PCI device.
32 * Return zero if not found.
33 */
34static int __init find_device_prom_node(struct pci_pbm_info *pbm,
35 struct pci_dev *pdev,
36 int bus_prom_node,
37 struct linux_prom_pci_registers *pregs,
38 int *nregs)
39{
40 int node;
41
42 /*
43 * Return the PBM's PROM node in case we are it's PCI device,
44 * as the PBM's reg property is different to standard PCI reg
45 * properties. We would delete this device entry otherwise,
46 * which confuses XFree86's device probing...
47 */
48 if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
49 (pdev->vendor == PCI_VENDOR_ID_SUN) &&
50 (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
51 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
52 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
53 pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
54 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) {
55 *nregs = 0;
56 return bus_prom_node;
57 }
58
59 node = prom_getchild(bus_prom_node);
60 while (node != 0) {
61 int err = prom_getproperty(node, "reg",
62 (char *)pregs,
63 sizeof(*pregs) * PROMREG_MAX);
64 if (err == 0 || err == -1)
65 goto do_next_sibling;
66 if (((pregs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
67 *nregs = err / sizeof(*pregs);
68 return node;
69 }
70
71 do_next_sibling:
72 node = prom_getsibling(node);
73 }
74 return 0;
75}
76
77/* Older versions of OBP on PCI systems encode 64-bit MEM
78 * space assignments incorrectly, this fixes them up. We also
79 * take the opportunity here to hide other kinds of bogus
80 * assignments.
81 */
82static void __init fixup_obp_assignments(struct pci_dev *pdev,
83 struct pcidev_cookie *pcp)
84{
85 int i;
86
87 if (pdev->vendor == PCI_VENDOR_ID_AL &&
88 (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
89 pdev->device == PCI_DEVICE_ID_AL_M1533)) {
90 int i;
91
92 /* Zap all of the normal resources, they are
93 * meaningless and generate bogus resource collision
94 * messages. This is OpenBoot's ill-fated attempt to
95 * represent the implicit resources that these devices
96 * have.
97 */
98 pcp->num_prom_assignments = 0;
99 for (i = 0; i < 6; i++) {
100 pdev->resource[i].start =
101 pdev->resource[i].end =
102 pdev->resource[i].flags = 0;
103 }
104 pdev->resource[PCI_ROM_RESOURCE].start =
105 pdev->resource[PCI_ROM_RESOURCE].end =
106 pdev->resource[PCI_ROM_RESOURCE].flags = 0;
107 return;
108 }
109
110 for (i = 0; i < pcp->num_prom_assignments; i++) {
111 struct linux_prom_pci_registers *ap;
112 int space;
113
114 ap = &pcp->prom_assignments[i];
115 space = ap->phys_hi >> 24;
116 if ((space & 0x3) == 2 &&
117 (space & 0x4) != 0) {
118 ap->phys_hi &= ~(0x7 << 24);
119 ap->phys_hi |= 0x3 << 24;
120 }
121 }
122}
123
124/* Fill in the PCI device cookie sysdata for the given
125 * PCI device. This cookie is the means by which one
126 * can get to OBP and PCI controller specific information
127 * for a PCI device.
128 */
static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
				      struct pci_dev *pdev,
				      int bus_prom_node)
{
	struct linux_prom_pci_registers pregs[PROMREG_MAX];
	struct pcidev_cookie *pcp;
	int device_prom_node, nregs, err;

	device_prom_node = find_device_prom_node(pbm, pdev, bus_prom_node,
						 pregs, &nregs);
	if (device_prom_node == 0) {
		/* If it is not in the OBP device tree then
		 * there must be a damn good reason for it.
		 *
		 * So what we do is delete the device from the
		 * PCI device tree completely.  This scenario
		 * is seen, for example, on CP1500 for the
		 * second EBUS/HappyMeal pair if the external
		 * connector for it is not present.
		 */
		pci_remove_bus_device(pdev);
		return;
	}

	/* GFP_ATOMIC because this runs during early boot; on failure
	 * we cannot continue, so halt at the PROM.
	 */
	pcp = kmalloc(sizeof(*pcp), GFP_ATOMIC);
	if (pcp == NULL) {
		prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
		prom_halt();
	}
	pcp->pbm = pbm;
	pcp->prom_node = device_prom_node;
	memcpy(pcp->prom_regs, pregs, sizeof(pcp->prom_regs));
	pcp->num_prom_regs = nregs;
	/* NUL-terminate the node name; fall back to an empty string
	 * when the property is missing.
	 */
	err = prom_getproperty(device_prom_node, "name",
			       pcp->prom_name, sizeof(pcp->prom_name));
	if (err > 0)
		pcp->prom_name[err] = 0;
	else
		pcp->prom_name[0] = 0;

	err = prom_getproperty(device_prom_node,
			       "assigned-addresses",
			       (char *)pcp->prom_assignments,
			       sizeof(pcp->prom_assignments));
	if (err == 0 || err == -1)
		pcp->num_prom_assignments = 0;
	else
		pcp->num_prom_assignments =
			(err / sizeof(pcp->prom_assignments[0]));

	if (strcmp(pcp->prom_name, "ebus") == 0) {
		struct linux_prom_ebus_ranges erng[PROM_PCIRNG_MAX];
		int iter;

		/* EBUS is special...  Its address assignments come from
		 * the "ranges" property, which we translate into the
		 * same prom_assignments format (parent-side address,
		 * 32-bit size).
		 */
		err = prom_getproperty(device_prom_node, "ranges",
				       (char *)&erng[0], sizeof(erng));
		if (err == 0 || err == -1) {
			prom_printf("EBUS: Fatal error, no range property\n");
			prom_halt();
		}
		err = (err / sizeof(erng[0]));
		for(iter = 0; iter < err; iter++) {
			struct linux_prom_ebus_ranges *ep = &erng[iter];
			struct linux_prom_pci_registers *ap;

			ap = &pcp->prom_assignments[iter];

			ap->phys_hi = ep->parent_phys_hi;
			ap->phys_mid = ep->parent_phys_mid;
			ap->phys_lo = ep->parent_phys_lo;
			ap->size_hi = 0;
			ap->size_lo = ep->size;
		}
		pcp->num_prom_assignments = err;
	}

	fixup_obp_assignments(pdev, pcp);

	pdev->sysdata = pcp;
}
210
211void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
212 struct pci_pbm_info *pbm,
213 int prom_node)
214{
215 struct pci_dev *pdev, *pdev_next;
216 struct pci_bus *this_pbus, *pbus_next;
217
218 /* This must be _safe because the cookie fillin
219 routine can delete devices from the tree. */
220 list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
221 pdev_cookie_fillin(pbm, pdev, prom_node);
222
223 list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
224 struct pcidev_cookie *pcp = this_pbus->self->sysdata;
225
226 pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
227 }
228}
229
230static void __init bad_assignment(struct pci_dev *pdev,
231 struct linux_prom_pci_registers *ap,
232 struct resource *res,
233 int do_prom_halt)
234{
235 prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
236 pdev->bus->number, pdev->devfn);
237 if (ap)
238 prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
239 ap->phys_hi, ap->phys_mid, ap->phys_lo,
240 ap->size_hi, ap->size_lo);
241 if (res)
242 prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
243 res->start, res->end, res->flags);
244 prom_printf("Please email this information to davem@redhat.com\n");
245 if (do_prom_halt)
246 prom_halt();
247}
248
249static struct resource *
250__init get_root_resource(struct linux_prom_pci_registers *ap,
251 struct pci_pbm_info *pbm)
252{
253 int space = (ap->phys_hi >> 24) & 3;
254
255 switch (space) {
256 case 0:
257 /* Configuration space, silently ignore it. */
258 return NULL;
259
260 case 1:
261 /* 16-bit IO space */
262 return &pbm->io_space;
263
264 case 2:
265 /* 32-bit MEM space */
266 return &pbm->mem_space;
267
268 case 3:
269 /* 64-bit MEM space, these are allocated out of
270 * the 32-bit mem_space range for the PBM, ie.
271 * we just zero out the upper 32-bits.
272 */
273 return &pbm->mem_space;
274
275 default:
276 printk("PCI: What is resource space %x? "
277 "Tell davem@redhat.com about it!\n", space);
278 return NULL;
279 };
280}
281
282static struct resource *
283__init get_device_resource(struct linux_prom_pci_registers *ap,
284 struct pci_dev *pdev)
285{
286 struct resource *res;
287 int breg = (ap->phys_hi & 0xff);
288
289 switch (breg) {
290 case PCI_ROM_ADDRESS:
291 /* Unfortunately I have seen several cases where
292 * buggy FCODE uses a space value of '1' (I/O space)
293 * in the register property for the ROM address
294 * so disable this sanity check for now.
295 */
296#if 0
297 {
298 int space = (ap->phys_hi >> 24) & 3;
299
300 /* It had better be MEM space. */
301 if (space != 2)
302 bad_assignment(pdev, ap, NULL, 0);
303 }
304#endif
305 res = &pdev->resource[PCI_ROM_RESOURCE];
306 break;
307
308 case PCI_BASE_ADDRESS_0:
309 case PCI_BASE_ADDRESS_1:
310 case PCI_BASE_ADDRESS_2:
311 case PCI_BASE_ADDRESS_3:
312 case PCI_BASE_ADDRESS_4:
313 case PCI_BASE_ADDRESS_5:
314 res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
315 break;
316
317 default:
318 bad_assignment(pdev, ap, NULL, 0);
319 res = NULL;
320 break;
321 };
322
323 return res;
324}
325
326static int __init pdev_resource_collisions_expected(struct pci_dev *pdev)
327{
328 if (pdev->vendor != PCI_VENDOR_ID_SUN)
329 return 0;
330
331 if (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS ||
332 pdev->device == PCI_DEVICE_ID_SUN_RIO_1394 ||
333 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
334 return 1;
335
336 return 0;
337}
338
static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
					   struct pci_dev *pdev)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	int i;

	/* For every PROM "assigned-addresses" entry, sanity-check it
	 * against the matching device resource, translate it into this
	 * PBM's physical address space, and reserve it in the root
	 * resource tree.
	 */
	for (i = 0; i < pcp->num_prom_assignments; i++) {
		struct linux_prom_pci_registers *ap;
		struct resource *root, *res;

		/* The format of this property is specified in
		 * the PCI Bus Binding to IEEE1275-1994.
		 */
		ap = &pcp->prom_assignments[i];
		root = get_root_resource(ap, pbm);
		res = get_device_resource(ap, pdev);
		if (root == NULL || res == NULL ||
		    res->flags == 0)
			continue;

		/* Ok we know which resource this PROM assignment is
		 * for, sanity check it.
		 */
		if ((res->start & 0xffffffffUL) != ap->phys_lo)
			bad_assignment(pdev, ap, res, 1);

		/* If it is a 64-bit MEM space assignment, verify that
		 * the resource is too and that the upper 32-bits match.
		 */
		if (((ap->phys_hi >> 24) & 3) == 3) {
			if (((res->flags & IORESOURCE_MEM) == 0) ||
			    ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
			     != PCI_BASE_ADDRESS_MEM_TYPE_64))
				bad_assignment(pdev, ap, res, 1);
			if ((res->start >> 32) != ap->phys_mid)
				bad_assignment(pdev, ap, res, 1);

			/* PBM cannot generate cpu initiated PIOs
			 * to the full 64-bit space.  Therefore the
			 * upper 32-bits better be zero.  If it is
			 * not, just skip it and we will assign it
			 * properly ourselves.
			 */
			if ((res->start >> 32) != 0UL) {
				printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
				       "%016lx for region %ld on device %s\n",
				       res->start, (res - &pdev->resource[0]), pci_name(pdev));
				continue;
			}
		}

		/* Adjust the resource into the physical address space
		 * of this PBM.
		 */
		pbm->parent->resource_adjust(pdev, res, root);

		if (request_resource(root, res) < 0) {
			/* OK, there is some conflict.  But this is fine
			 * since we'll reassign it in the fixup pass.
			 *
			 * We notify the user that OBP made an error if it
			 * is a case we don't expect.
			 */
			if (!pdev_resource_collisions_expected(pdev)) {
				printk(KERN_ERR "PCI: Address space collision on region %ld "
				       "[%016lx:%016lx] of device %s\n",
				       (res - &pdev->resource[0]),
				       res->start, res->end,
				       pci_name(pdev));
			}
		}
	}
}
412
413void __init pci_record_assignments(struct pci_pbm_info *pbm,
414 struct pci_bus *pbus)
415{
416 struct pci_dev *dev;
417 struct pci_bus *bus;
418
419 list_for_each_entry(dev, &pbus->devices, bus_list)
420 pdev_record_assignments(pbm, dev);
421
422 list_for_each_entry(bus, &pbus->children, node)
423 pci_record_assignments(pbm, bus);
424}
425
426/* Return non-zero if PDEV has implicit I/O resources even
427 * though it may not have an I/O base address register
428 * active.
429 */
430static int __init has_implicit_io(struct pci_dev *pdev)
431{
432 int class = pdev->class >> 8;
433
434 if (class == PCI_CLASS_NOT_DEFINED ||
435 class == PCI_CLASS_NOT_DEFINED_VGA ||
436 class == PCI_CLASS_STORAGE_IDE ||
437 (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
438 return 1;
439
440 return 0;
441}
442
/* Allocate space for every resource OBP left unassigned on PDEV,
 * then enable the appropriate PCI command register bits.
 */
static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
					  struct pci_dev *pdev)
{
	u32 reg;
	u16 cmd;
	int i, io_seen, mem_seen;

	io_seen = mem_seen = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *root, *res;
		unsigned long size, min, max, align;

		res = &pdev->resource[i];

		/* Track which space types the device uses so we can
		 * enable the matching command bits below.
		 */
		if (res->flags & IORESOURCE_IO)
			io_seen++;
		else if (res->flags & IORESOURCE_MEM)
			mem_seen++;

		/* If it is already assigned or the resource does
		 * not exist, there is nothing to do.
		 */
		if (res->parent != NULL || res->flags == 0UL)
			continue;

		/* Determine the root we allocate from. */
		if (res->flags & IORESOURCE_IO) {
			root = &pbm->io_space;
			min = root->start + 0x400UL;
			max = root->end;
		} else {
			root = &pbm->mem_space;
			min = root->start;
			max = min + 0x80000000UL;
		}

		/* BARs are naturally aligned, so align == length. */
		size = res->end - res->start;
		align = size + 1;
		if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
			/* uh oh */
			prom_printf("PCI: Failed to allocate resource %d for %s\n",
				    i, pci_name(pdev));
			prom_halt();
		}

		/* Update PCI config space. */
		pbm->parent->base_address_update(pdev, i);
	}

	/* Special case, disable the ROM.  Several devices
	 * act funny (ie. do not respond to memory space writes)
	 * when it is left enabled.  A good example are Qlogic,ISP
	 * adapters.
	 */
	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
	reg &= ~PCI_ROM_ADDRESS_ENABLE;
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);

	/* If we saw I/O or MEM resources, enable appropriate
	 * bits in PCI command register.
	 */
	if (io_seen || mem_seen) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (io_seen || has_implicit_io(pdev))
			cmd |= PCI_COMMAND_IO;
		if (mem_seen)
			cmd |= PCI_COMMAND_MEMORY;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	/* If this is a PCI bridge or an IDE controller,
	 * enable bus mastering.  In the former case also
	 * set the cache line size correctly.
	 */
	if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
	    (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
	     ((pdev->class & 0x80) != 0))) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);

		/* 64-byte cache line, expressed in 32-bit words. */
		if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			pci_write_config_byte(pdev,
					      PCI_CACHE_LINE_SIZE,
					      (64 / sizeof(u32)));
	}
}
530
531void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
532 struct pci_bus *pbus)
533{
534 struct pci_dev *dev;
535 struct pci_bus *bus;
536
537 list_for_each_entry(dev, &pbus->devices, bus_list)
538 pdev_assign_unassigned(pbm, dev);
539
540 list_for_each_entry(bus, &pbus->children, node)
541 pci_assign_unassigned(pbm, bus);
542}
543
/* Translate *INTERRUPT through the controller's (or an intermediate
 * bridge's) OBP "interrupt-map"/"interrupt-map-mask" properties.
 * Returns 1 and rewrites *INTERRUPT on a match, 0 otherwise.
 */
static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
{
	struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap;
	struct linux_prom_pci_intmask bridge_local_intmask, *intmask;
	struct pcidev_cookie *dev_pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = dev_pcp->pbm;
	struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs;
	unsigned int hi, mid, lo, irq;
	int i, num_intmap, map_slot;

	/* Default to the PBM's own map/mask. */
	intmap = &pbm->pbm_intmap[0];
	intmask = &pbm->pbm_intmask;
	num_intmap = pbm->num_pbm_intmap;
	map_slot = 0;

	/* If we are underneath a PCI bridge, use PROM register
	 * property of the parent bridge which is closest to
	 * the PBM.
	 *
	 * However if that parent bridge has interrupt map/mask
	 * properties of its own we use the PROM register property
	 * of the next child device on the path to PDEV.
	 *
	 * In detail the two cases are (note that the 'X' below is the
	 * 'next child on the path to PDEV' mentioned above):
	 *
	 * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV
	 *
	 *    Here we use regs of 'PCI bus' device.
	 *
	 * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV
	 *
	 *    Here we use regs of 'X'.  Note that X can be PDEV.
	 */
	if (pdev->bus->number != pbm->pci_first_busno) {
		struct pcidev_cookie *bus_pcp, *regs_pcp;
		struct pci_dev *bus_dev, *regs_dev;
		int plen;

		bus_dev = pdev->bus->self;
		regs_dev = pdev;

		/* Walk up: bus_dev ends at the bridge closest to the
		 * PBM, regs_dev at its child on the path to PDEV.
		 */
		while (bus_dev->bus &&
		       bus_dev->bus->number != pbm->pci_first_busno) {
			regs_dev = bus_dev;
			bus_dev = bus_dev->bus->self;
		}

		regs_pcp = regs_dev->sysdata;
		pregs = regs_pcp->prom_regs;

		bus_pcp = bus_dev->sysdata;

		/* But if the PCI bridge has its own interrupt map
		 * and mask properties, use that and the regs of the
		 * PCI entity at the next level down on the path to the
		 * device.
		 */
		plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map",
					(char *) &bridge_local_intmap[0],
					sizeof(bridge_local_intmap));
		if (plen != -1) {
			intmap = &bridge_local_intmap[0];
			num_intmap = plen / sizeof(struct linux_prom_pci_intmap);
			plen = prom_getproperty(bus_pcp->prom_node,
						"interrupt-map-mask",
						(char *) &bridge_local_intmask,
						sizeof(bridge_local_intmask));
			if (plen == -1) {
				printk("pci_intmap_match: Warning! Bridge has intmap "
				       "but no intmask.\n");
				printk("pci_intmap_match: Trying to recover.\n");
				return 0;
			}

			if (pdev->bus->self != bus_dev)
				map_slot = 1;
		} else {
			pregs = bus_pcp->prom_regs;
			map_slot = 1;
		}
	}

	/* Apply the standard bridge interrupt swizzle (INTA-INTD
	 * rotate with the slot number) before matching.
	 */
	if (map_slot) {
		*interrupt = ((*interrupt
			       - 1
			       + PCI_SLOT(pdev->devfn)) & 0x3) + 1;
	}

	/* Mask the unit address and interrupt down to the fields the
	 * map discriminates on, then search for an exact match.
	 */
	hi = pregs->phys_hi & intmask->phys_hi;
	mid = pregs->phys_mid & intmask->phys_mid;
	lo = pregs->phys_lo & intmask->phys_lo;
	irq = *interrupt & intmask->interrupt;

	for (i = 0; i < num_intmap; i++) {
		if (intmap[i].phys_hi == hi &&
		    intmap[i].phys_mid == mid &&
		    intmap[i].phys_lo == lo &&
		    intmap[i].interrupt == irq) {
			*interrupt = intmap[i].cinterrupt;
			printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n",
			       pdev->bus->number, PCI_SLOT(pdev->devfn),
			       map_slot, *interrupt);
			return 1;
		}
	}

	/* We will run this code even if pbm->num_pbm_intmap is zero, just so
	 * we can apply the slot mapping to the PROM interrupt property value.
	 * So do not spit out these warnings in that case.
	 */
	if (num_intmap != 0) {
		/* Print it both to OBP console and kernel one so that if bootup
		 * hangs here the user has the information to report.
		 */
		prom_printf("pci_intmap_match: bus %02x, devfn %02x: ",
			    pdev->bus->number, pdev->devfn);
		prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
			    pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
		prom_printf("Please email this information to davem@redhat.com\n");

		printk("pci_intmap_match: bus %02x, devfn %02x: ",
		       pdev->bus->number, pdev->devfn);
		printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
		       pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
		printk("Please email this information to davem@redhat.com\n");
	}

	return 0;
}
674
/* Compute and program PDEV's IRQ from its OBP "interrupts" property,
 * trying (in order): an already fully-specified value, the on-board
 * encoding, the interrupt-map, and finally a hand-computed
 * bus/slot/line encoding.
 */
static void __init pdev_fixup_irq(struct pci_dev *pdev)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct pci_controller_info *p = pbm->parent;
	unsigned int portid = pbm->portid;
	unsigned int prom_irq;
	int prom_node = pcp->prom_node;
	int err;

	/* If this is an empty EBUS device, sometimes OBP fails to
	 * give it a valid fully specified interrupts property.
	 * The EBUS hooked up to SunHME on PCI I/O boards of
	 * Ex000 systems is one such case.
	 *
	 * The interrupt is not important so just ignore it.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
	    !prom_getchild(prom_node)) {
		pdev->irq = 0;
		return;
	}

	/* No interrupts property at all: leave the device without
	 * an IRQ.
	 */
	err = prom_getproperty(prom_node, "interrupts",
			       (char *)&prom_irq, sizeof(prom_irq));
	if (err == 0 || err == -1) {
		pdev->irq = 0;
		return;
	}

	/* Fully specified already? (IGN field matches our portid) */
	if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
		pdev->irq = p->irq_build(pbm, pdev, prom_irq);
		goto have_irq;
	}

	/* An onboard device? (bit 5 set) */
	if ((prom_irq & PCI_IRQ_INO) & 0x20) {
		pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
		goto have_irq;
	}

	/* Can we find a matching entry in the interrupt-map? */
	if (pci_intmap_match(pdev, &prom_irq)) {
		pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
		goto have_irq;
	}

	/* Ok, we have to do it the hard way. */
	{
		unsigned int bus, slot, line;

		/* PBM B sets bit 4 in the INO encoding. */
		bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;

		/* If we have a legal interrupt property, use it as
		 * the IRQ line.
		 */
		if (prom_irq > 0 && prom_irq < 5) {
			line = ((prom_irq - 1) & 3);
		} else {
			u8 pci_irq_line;

			/* Else just directly consult PCI config space. */
			pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
			line = ((pci_irq_line - 1) & 3);
		}

		/* Now figure out the slot.
		 *
		 * Basically, device number zero on the top-level bus is
		 * always the PCI host controller.  Slot 0 is then device 1.
		 * PBM A supports two external slots (0 and 1), and PBM B
		 * supports 4 external slots (0, 1, 2, and 3).  On-board PCI
		 * devices are wired to device numbers outside of these
		 * ranges. -DaveM
		 */
		if (pdev->bus->number == pbm->pci_first_busno) {
			slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
		} else {
			struct pci_dev *bus_dev;

			/* Underneath a bridge, use slot number of parent
			 * bridge which is closest to the PBM.
			 */
			bus_dev = pdev->bus->self;
			while (bus_dev->bus &&
			       bus_dev->bus->number != pbm->pci_first_busno)
				bus_dev = bus_dev->bus->self;

			slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
		}
		/* The slot occupies bits [4:2] of the INO. */
		slot = slot << 2;

		pdev->irq = p->irq_build(pbm, pdev,
					 ((portid << 6) & PCI_IRQ_IGN) |
					 (bus | slot | line));
	}

have_irq:
	/* Record the INO in config space for drivers/firmware. */
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
			      pdev->irq & PCI_IRQ_INO);
}
778
779void __init pci_fixup_irq(struct pci_pbm_info *pbm,
780 struct pci_bus *pbus)
781{
782 struct pci_dev *dev;
783 struct pci_bus *bus;
784
785 list_for_each_entry(dev, &pbus->devices, bus_list)
786 pdev_fixup_irq(dev);
787
788 list_for_each_entry(bus, &pbus->children, node)
789 pci_fixup_irq(pbm, bus);
790}
791
/* Enable bus mastering on PDEV and program a sensible latency
 * timer.  IS_66MHZ says whether the bus segment runs at 66MHz,
 * which halves the chosen timer values.  Devices that cannot
 * master, bridges/cardbus headers, and devices whose timer was
 * already programmed are left untouched.
 */
static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
{
	u16 cmd;
	u8 hdr_type, min_gnt, ltimer;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/* Read it back, if the mastering bit did not
	 * get set, the device does not support bus
	 * mastering so we have nothing to do here.
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & PCI_COMMAND_MASTER) == 0)
		return;

	/* Set correct cache line size, 64-byte on all
	 * Sparc64 PCI systems.  Note that the value is
	 * measured in 32-bit words.
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			      64 / sizeof(u32));

	/* Only normal (type 0) headers get a latency timer; mask off
	 * the multi-function bit before comparing.
	 */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
	hdr_type &= ~0x80;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL)
		return;

	/* If the latency timer is already programmed with a non-zero
	 * value, assume whoever set it (OBP or whoever) knows what
	 * they are doing.
	 */
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
	if (ltimer != 0)
		return;

	/* XXX Since I'm tipping off the min grant value to
	 * XXX choose a suitable latency timer value, I also
	 * XXX considered making use of the max latency value
	 * XXX as well.  Unfortunately I've seen too many bogusly
	 * XXX low settings for it to the point where it lacks
	 * XXX any usefulness.  In one case, an ethernet card
	 * XXX claimed a min grant of 10 and a max latency of 5.
	 * XXX Now, if I had two such cards on the same bus I
	 * XXX could not set the desired burst period (calculated
	 * XXX from min grant) without violating the max latency
	 * XXX bound.  Duh...
	 * XXX
	 * XXX I blame dumb PC bios implementors for stuff like
	 * XXX this, most of them don't even try to do something
	 * XXX sensible with latency timer values and just set some
	 * XXX default value (usually 32) into every device.
	 */

	pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);

	if (min_gnt == 0) {
		/* If no min_gnt setting then use a default
		 * value.
		 */
		if (is_66mhz)
			ltimer = 16;
		else
			ltimer = 32;
	} else {
		int shift_factor;

		if (is_66mhz)
			shift_factor = 2;
		else
			shift_factor = 3;

		/* Use a default value when the min_gnt value
		 * is erroneously high.
		 */
		if (((unsigned int) min_gnt << shift_factor) > 512 ||
		    ((min_gnt << shift_factor) & 0xff) == 0) {
			ltimer = 8 << shift_factor;
		} else {
			ltimer = min_gnt << shift_factor;
		}
	}

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
}
878
879void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
880 struct pci_bus *pbus)
881{
882 struct pci_dev *pdev;
883 int all_are_66mhz;
884 u16 status;
885
886 if (pbm->is_66mhz_capable == 0) {
887 all_are_66mhz = 0;
888 goto out;
889 }
890
891 all_are_66mhz = 1;
892 list_for_each_entry(pdev, &pbus->devices, bus_list) {
893 pci_read_config_word(pdev, PCI_STATUS, &status);
894 if (!(status & PCI_STATUS_66MHZ)) {
895 all_are_66mhz = 0;
896 break;
897 }
898 }
899out:
900 pbm->all_devs_66mhz = all_are_66mhz;
901
902 printk("PCI%d(PBM%c): Bus running at %dMHz\n",
903 pbm->parent->index,
904 (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
905 (all_are_66mhz ? 66 : 33));
906}
907
908void pci_setup_busmastering(struct pci_pbm_info *pbm,
909 struct pci_bus *pbus)
910{
911 struct pci_dev *dev;
912 struct pci_bus *bus;
913 int is_66mhz;
914
915 is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
916
917 list_for_each_entry(dev, &pbus->devices, bus_list)
918 pdev_setup_busmastering(dev, is_66mhz);
919
920 list_for_each_entry(bus, &pbus->children, node)
921 pci_setup_busmastering(pbm, bus);
922}
923
924void pci_register_legacy_regions(struct resource *io_res,
925 struct resource *mem_res)
926{
927 struct resource *p;
928
929 /* VGA Video RAM. */
930 p = kmalloc(sizeof(*p), GFP_KERNEL);
931 if (!p)
932 return;
933
934 memset(p, 0, sizeof(*p));
935 p->name = "Video RAM area";
936 p->start = mem_res->start + 0xa0000UL;
937 p->end = p->start + 0x1ffffUL;
938 p->flags = IORESOURCE_BUSY;
939 request_resource(mem_res, p);
940
941 p = kmalloc(sizeof(*p), GFP_KERNEL);
942 if (!p)
943 return;
944
945 memset(p, 0, sizeof(*p));
946 p->name = "System ROM";
947 p->start = mem_res->start + 0xf0000UL;
948 p->end = p->start + 0xffffUL;
949 p->flags = IORESOURCE_BUSY;
950 request_resource(mem_res, p);
951
952 p = kmalloc(sizeof(*p), GFP_KERNEL);
953 if (!p)
954 return;
955
956 memset(p, 0, sizeof(*p));
957 p->name = "Video ROM";
958 p->start = mem_res->start + 0xc0000UL;
959 p->end = p->start + 0x7fffUL;
960 p->flags = IORESOURCE_BUSY;
961 request_resource(mem_res, p);
962}
963
964/* Generic helper routines for PCI error reporting. */
965void pci_scan_for_target_abort(struct pci_controller_info *p,
966 struct pci_pbm_info *pbm,
967 struct pci_bus *pbus)
968{
969 struct pci_dev *pdev;
970 struct pci_bus *bus;
971
972 list_for_each_entry(pdev, &pbus->devices, bus_list) {
973 u16 status, error_bits;
974
975 pci_read_config_word(pdev, PCI_STATUS, &status);
976 error_bits =
977 (status & (PCI_STATUS_SIG_TARGET_ABORT |
978 PCI_STATUS_REC_TARGET_ABORT));
979 if (error_bits) {
980 pci_write_config_word(pdev, PCI_STATUS, error_bits);
981 printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n",
982 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
983 pci_name(pdev), status);
984 }
985 }
986
987 list_for_each_entry(bus, &pbus->children, node)
988 pci_scan_for_target_abort(p, pbm, bus);
989}
990
991void pci_scan_for_master_abort(struct pci_controller_info *p,
992 struct pci_pbm_info *pbm,
993 struct pci_bus *pbus)
994{
995 struct pci_dev *pdev;
996 struct pci_bus *bus;
997
998 list_for_each_entry(pdev, &pbus->devices, bus_list) {
999 u16 status, error_bits;
1000
1001 pci_read_config_word(pdev, PCI_STATUS, &status);
1002 error_bits =
1003 (status & (PCI_STATUS_REC_MASTER_ABORT));
1004 if (error_bits) {
1005 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1006 printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n",
1007 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1008 pci_name(pdev), status);
1009 }
1010 }
1011
1012 list_for_each_entry(bus, &pbus->children, node)
1013 pci_scan_for_master_abort(p, pbm, bus);
1014}
1015
1016void pci_scan_for_parity_error(struct pci_controller_info *p,
1017 struct pci_pbm_info *pbm,
1018 struct pci_bus *pbus)
1019{
1020 struct pci_dev *pdev;
1021 struct pci_bus *bus;
1022
1023 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1024 u16 status, error_bits;
1025
1026 pci_read_config_word(pdev, PCI_STATUS, &status);
1027 error_bits =
1028 (status & (PCI_STATUS_PARITY |
1029 PCI_STATUS_DETECTED_PARITY));
1030 if (error_bits) {
1031 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1032 printk("PCI%d(PBM%c): Device [%s] saw Parity Error [%016x]\n",
1033 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1034 pci_name(pdev), status);
1035 }
1036 }
1037
1038 list_for_each_entry(bus, &pbus->children, node)
1039 pci_scan_for_parity_error(p, pbm, bus);
1040}
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
new file mode 100644
index 000000000000..6c3205962544
--- /dev/null
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -0,0 +1,49 @@
1/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
2 * pci_impl.h: Helper definitions for PCI controller support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef PCI_IMPL_H
8#define PCI_IMPL_H
9
10#include <linux/types.h>
11#include <linux/spinlock.h>
12#include <asm/io.h>
13
14extern struct pci_controller_info *pci_controller_root;
15
16extern int pci_num_controllers;
17
18/* PCI bus scanning and fixup support. */
19extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
20extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
21 struct pci_pbm_info *pbm,
22 int prom_node);
23extern void pci_record_assignments(struct pci_pbm_info *pbm,
24 struct pci_bus *pbus);
25extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
26 struct pci_bus *pbus);
27extern void pci_fixup_irq(struct pci_pbm_info *pbm,
28 struct pci_bus *pbus);
29extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
30 struct pci_bus *pbus);
31extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
32 struct pci_bus *pbus);
33extern void pci_register_legacy_regions(struct resource *io_res,
34 struct resource *mem_res);
35
36/* Error reporting support. */
37extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
38extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
39extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
40
41/* Configuration space access. */
42extern void pci_config_read8(u8 *addr, u8 *ret);
43extern void pci_config_read16(u16 *addr, u16 *ret);
44extern void pci_config_read32(u32 *addr, u32 *ret);
45extern void pci_config_write8(u8 *addr, u8 val);
46extern void pci_config_write16(u16 *addr, u16 val);
47extern void pci_config_write32(u32 *addr, u32 val);
48
49#endif /* !(PCI_IMPL_H) */
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
new file mode 100644
index 000000000000..292983413ae2
--- /dev/null
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -0,0 +1,855 @@
1/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
2 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/mm.h>
11
12#include <asm/pbm.h>
13
14#include "iommu_common.h"
15
16#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
17 ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
18
19/* Accessing IOMMU and Streaming Buffer registers.
20 * REG parameter is a physical address. All registers
21 * are 64-bits in size.
22 */
23#define pci_iommu_read(__reg) \
24({ u64 __ret; \
25 __asm__ __volatile__("ldxa [%1] %2, %0" \
26 : "=r" (__ret) \
27 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
28 : "memory"); \
29 __ret; \
30})
31#define pci_iommu_write(__reg, __val) \
32 __asm__ __volatile__("stxa %0, [%1] %2" \
33 : /* no outputs */ \
34 : "r" (__val), "r" (__reg), \
35 "i" (ASI_PHYS_BYPASS_EC_E))
36
/* Must be invoked under the IOMMU lock. */
/* Flush the entire IOMMU TLB by zeroing each of the 16 diagnostic
 * tag entries, force completion with a register read, then reset
 * every cluster's flush point to its current allocation rotor
 * since no stale translations remain.
 */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	/* (0xa580 - 0x0210): offset from the IOMMU flush register to
	 * the TLB tag diagnostic registers -- TODO confirm against the
	 * controller programmer's manual.
	 */
	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8;	/* registers are 64-bit, 8 bytes apart */
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
58
59#define IOPTE_CONSISTENT(CTX) \
60 (IOPTE_VALID | IOPTE_CACHE | \
61 (((CTX) << 47) & IOPTE_CONTEXT))
62
63#define IOPTE_STREAMING(CTX) \
64 (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
65
66/* Existing mappings are never marked invalid, instead they
67 * are pointed to a dummy page.
68 */
69#define IOPTE_IS_DUMMY(iommu, iopte) \
70 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
71
72static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
73{
74 unsigned long val = iopte_val(*iopte);
75
76 val &= ~IOPTE_PAGE;
77 val |= iommu->dummy_page_pa;
78
79 iopte_val(*iopte) = val;
80}
81
82void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
83{
84 int i;
85
86 tsbsize /= sizeof(iopte_t);
87
88 for (i = 0; i < tsbsize; i++)
89 iopte_make_dummy(iommu, &iommu->page_table[i]);
90}
91
/* Allocate a slot of NPAGES streaming IOPTEs from the size-class
 * cluster that fits NPAGES.  Free entries are those pointing at
 * the dummy page.  Returns the first IOPTE of the run, or NULL if
 * the cluster is exhausted.  Caller must hold iommu->lock.
 */
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	/* cnum = ceil(log2(npages)): each slot in cluster cnum holds
	 * 2^cnum pages.
	 */
	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	/* Cluster 0 is shared with consistent mappings, which grow
	 * down from the top; stop at the lowest consistent entry.
	 */
	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	/* Resume scanning at the rotor left by the last allocation. */
	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			/* Free slot found: advance the rotor (wrapping at
			 * the cluster end) and flush the IOMMU if we just
			 * caught up with the deferred flush point.
			 */
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		/* Wrapped all the way around: cluster is full. */
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
147
/* Release the streaming slot backing BASE (a bus offset relative
 * to page_table_map_base).  The IOPTEs themselves are not cleared
 * here; we only guarantee the slot is flushed before it can be
 * reused.  Caller must hold iommu->lock.
 */
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	/* cnum = ceil(log2(npages)): size class of the slot. */
	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	/* Recover the slot number within the cluster from the bus
	 * offset via shift arithmetic.
	 */
	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
169
/* We allocate consistent mappings from the end of cluster zero. */
/* Scan cluster zero top-down for NPAGES contiguous free (dummy)
 * IOPTEs.  On success returns the lowest IOPTE of the run and
 * lowers lowest_consistent_map if needed; returns NULL when no
 * run fits.  Caller must hold iommu->lock.
 *
 * NOTE(review): for npages > 1 the inner scan decrements iopte
 * without re-checking against the table base -- presumably free
 * space never reaches entry 0 in practice; confirm.
 */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			/* Check the npages-1 entries below are free too. */
			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			/* tmp reached 0 only if the whole run was free. */
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
197
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	/* Refuse absurdly large requests. */
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* The bus address is determined by the IOPTE's position in
	 * the page table.
	 */
	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	first_page = __pa(first_page);
	/* Fill one consistent, writable IOPTE per IO page. */
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	/* Flush any stale IOMMU TLB entries covering the new range. */
	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
263
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* If this was the lowest consistent mapping, walk upward to
	 * find the next in-use entry and record it as the new floor.
	 */
	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	/* The context field lives at bit 47 of the IOPTE. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	/* Retire each entry by pointing it at the dummy page. */
	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	/* One context flush covers the whole range; otherwise flush
	 * each page individually.
	 */
	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Mirrors the order >= 10 rejection in pci_alloc_consistent. */
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
325
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.  Returns the bus address to hand to the
 * device, or PCI_DMA_ERROR_CODE if no IOMMU slot was available.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Number of IO pages spanned, accounting for the sub-page
	 * offset of PTR.
	 */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	/* Preserve the within-page offset in the returned address. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	/* Streaming protection only when the streaming buffer is on;
	 * device reads-only unless the direction allows writes.
	 */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
381
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, i, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary.
	 * Use one context-match flush when the hardware supports it,
	 * else flush page by page; then perform the flush-sync
	 * handshake and spin until the hardware sets our flush flag.
	 */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while(((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
449
450#define SG_ENT_PHYS_ADDRESS(SG) \
451 (__pa(page_address((SG)->page)) + (SG)->offset)
452
/* Populate IOPTE entries for a prepared scatterlist.  SG has
 * NELEMS raw entries which prepare_sg() coalesced into NUSED
 * DMA segments (dma_address/dma_length already set, relative to
 * zero).  Each DMA segment is expanded back into per-IO-page
 * IOPTEs carrying IOPTE_PROTECTION.
 */
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		/* pteval == ~0UL means "no current page". */
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		/* IO pages covered by this DMA segment, including the
		 * partial pages at either end.
		 */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			/* Emit one IOPTE per IO page of this run. */
			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
525
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * hard time to keep this routine out of using stack slots for holding variables.
 *
 * Returns the number of coalesced DMA segments actually used, or
 * PCI_DMA_ERROR_CODE on allocation failure.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.  prepare_sg() left them
	 * relative to zero; rebase onto the allocated cluster.  Used
	 * segments are those with a non-zero dma_length.
	 */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg (base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
608
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Find the last used segment (dma_length == 0 terminates the
	 * list early) to compute the total page span.
	 */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary.
	 * Context-match flush when available, else page-by-page, then
	 * the flush-sync handshake.
	 */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = (u32) bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while(((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
682
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	/* Without a streaming buffer there is nothing to flush. */
	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		/* Context lives at bit 47 of the IOPTE. */
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while(((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}
743
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	/* Without a streaming buffer there is nothing to flush. */
	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		/* Context lives at bit 47 of the IOPTE. */
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i, npages;
		u32 bus_addr;

		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;

		/* Find the last used segment (dma_length == 0 ends the
		 * list early) to compute the total page span.
		 */
		for(i = 1; i < nelems; i++)
			if (!sglist[i].dma_length)
				break;
		i--;
		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}
808
809static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
810{
811 struct pci_dev *ali_isa_bridge;
812 u8 val;
813
814 /* ALI sound chips generate 31-bits of DMA, a special register
815 * determines what bit 31 is emitted as.
816 */
817 ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
818 PCI_DEVICE_ID_AL_M1533,
819 NULL);
820
821 pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
822 if (set_bit)
823 val |= 0x01;
824 else
825 val &= ~0x01;
826 pci_write_config_byte(ali_isa_bridge, 0x7e, val);
827 pci_dev_put(ali_isa_bridge);
828}
829
830int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
831{
832 struct pcidev_cookie *pcp = pdev->sysdata;
833 u64 dma_addr_mask;
834
835 if (pdev == NULL) {
836 dma_addr_mask = 0xffffffff;
837 } else {
838 struct pci_iommu *iommu = pcp->pbm->iommu;
839
840 dma_addr_mask = iommu->dma_addr_mask;
841
842 if (pdev->vendor == PCI_VENDOR_ID_AL &&
843 pdev->device == PCI_DEVICE_ID_AL_M5451 &&
844 device_mask == 0x7fffffff) {
845 ali_sound_dma_hack(pdev,
846 (dma_addr_mask & 0x80000000) != 0);
847 return 1;
848 }
849 }
850
851 if (device_mask >= (1UL << 32UL))
852 return 0;
853
854 return (device_mask & dma_addr_mask) == dma_addr_mask;
855}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
new file mode 100644
index 000000000000..3567fa879e1f
--- /dev/null
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -0,0 +1,1560 @@
1/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
2 * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15
16#include <asm/pbm.h>
17#include <asm/iommu.h>
18#include <asm/irq.h>
19#include <asm/starfire.h>
20
21#include "pci_impl.h"
22#include "iommu_common.h"
23
24/* All PSYCHO registers are 64-bits. The following accessor
25 * routines are how they are accessed. The REG parameter
26 * is a physical address.
27 */
/* Read a 64-bit PSYCHO register; __reg is a PHYSICAL address,
 * accessed via the cache-bypassing ASI so no mapping is needed.
 */
#define psycho_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
/* Write a 64-bit PSYCHO register at physical address __reg. */
#define psycho_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")
42
43/* Misc. PSYCHO PCI controller register offsets and definitions. */
44#define PSYCHO_CONTROL 0x0010UL
45#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
46#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
47#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
48#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
49#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
50#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
51#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
52#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
53#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
54#define PSYCHO_PCIA_CTRL 0x2000UL
55#define PSYCHO_PCIB_CTRL 0x4000UL
56#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
57#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
58#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
59#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
60#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
61#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
62#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
63#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
64#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
65#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
66#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
67#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
68
69/* U2P Programmer's Manual, page 13-55, configuration space
70 * address format:
71 *
72 * 32 24 23 16 15 11 10 8 7 2 1 0
73 * ---------------------------------------------------------
74 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
75 * ---------------------------------------------------------
76 */
77#define PSYCHO_CONFIG_BASE(PBM) \
78 ((PBM)->config_space | (1UL << 24))
79#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
80 (((unsigned long)(BUS) << 16) | \
81 ((unsigned long)(DEVFN) << 8) | \
82 ((unsigned long)(REG)))
83
84static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
85 unsigned char bus,
86 unsigned int devfn,
87 int where)
88{
89 if (!pbm)
90 return NULL;
91 return (void *)
92 (PSYCHO_CONFIG_BASE(pbm) |
93 PSYCHO_CONFIG_ENCODE(bus, devfn, where));
94}
95
96static int psycho_out_of_range(struct pci_pbm_info *pbm,
97 unsigned char bus,
98 unsigned char devfn)
99{
100 return ((pbm->parent == 0) ||
101 ((pbm == &pbm->parent->pbm_B) &&
102 (bus == pbm->pci_first_busno) &&
103 PCI_SLOT(devfn) > 8) ||
104 ((pbm == &pbm->parent->pbm_A) &&
105 (bus == pbm->pci_first_busno) &&
106 PCI_SLOT(devfn) > 8));
107}
108
109/* PSYCHO PCI configuration space accessors. */
110
/* Read SIZE bytes of config space at (bus, devfn, where).
 *
 * *value is pre-set to all-ones so out-of-range or unmapped
 * accesses look like an absent device; every exit path deliberately
 * returns PCIBIOS_SUCCESSFUL so the PCI core keeps scanning.
 */
static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
			       int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;
	u16 tmp16;
	u8 tmp8;

	/* Default to master-abort style all-ones before probing. */
	switch (size) {
	case 1:
		*value = 0xff;
		break;
	case 2:
		*value = 0xffff;
		break;
	case 4:
		*value = 0xffffffff;
		break;
	}

	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (psycho_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;
	switch (size) {
	case 1:
		pci_config_read8((u8 *)addr, &tmp8);
		*value = (u32) tmp8;
		break;

	case 2:
		/* Misaligned accesses are logged and reported as the
		 * pre-set all-ones value rather than failed.
		 */
		if (where & 0x01) {
			printk("pci_read_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read16((u16 *)addr, &tmp16);
		*value = (u32) tmp16;
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_read_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read32(addr, value);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
165
/* Write SIZE bytes of config space at (bus, devfn, where).
 *
 * Out-of-range, unmapped and misaligned accesses are silently
 * dropped (after a log message for misalignment); all paths
 * return PCIBIOS_SUCCESSFUL, mirroring the read accessor.
 */
static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;

	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (psycho_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	switch (size) {
	case 1:
		pci_config_write8((u8 *)addr, value);
		break;

	case 2:
		if (where & 0x01) {
			printk("pci_write_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write16((u16 *)addr, value);
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_write_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write32(addr, value);
	}
	return PCIBIOS_SUCCESSFUL;
}
204
/* Config space accessors handed to the generic PCI layer. */
static struct pci_ops psycho_ops = {
	.read =		psycho_read_pci_cfg,
	.write =	psycho_write_pci_cfg,
};
209
/* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0	0x0c00UL
#define PSYCHO_IMAP_B_SLOT0	0x0c20UL
/* Map a PCI slot INO to its IMAP register offset: bit 4 of the INO
 * selects bus A vs. B, bits [3:2] select the slot within the bus.
 */
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
	unsigned long base;
	unsigned long slot = (ino >> 2) & 0x3;

	if (ino & 0x10)
		base = PSYCHO_IMAP_B_SLOT0;
	else
		base = PSYCHO_IMAP_A_SLOT0;

	return base + (slot * 8);
}
223
224#define PSYCHO_IMAP_SCSI 0x1000UL
225#define PSYCHO_IMAP_ETH 0x1008UL
226#define PSYCHO_IMAP_BPP 0x1010UL
227#define PSYCHO_IMAP_AU_REC 0x1018UL
228#define PSYCHO_IMAP_AU_PLAY 0x1020UL
229#define PSYCHO_IMAP_PFAIL 0x1028UL
230#define PSYCHO_IMAP_KMS 0x1030UL
231#define PSYCHO_IMAP_FLPY 0x1038UL
232#define PSYCHO_IMAP_SHW 0x1040UL
233#define PSYCHO_IMAP_KBD 0x1048UL
234#define PSYCHO_IMAP_MS 0x1050UL
235#define PSYCHO_IMAP_SER 0x1058UL
236#define PSYCHO_IMAP_TIM0 0x1060UL
237#define PSYCHO_IMAP_TIM1 0x1068UL
238#define PSYCHO_IMAP_UE 0x1070UL
239#define PSYCHO_IMAP_CE 0x1078UL
240#define PSYCHO_IMAP_A_ERR 0x1080UL
241#define PSYCHO_IMAP_B_ERR 0x1088UL
242#define PSYCHO_IMAP_PMGMT 0x1090UL
243#define PSYCHO_IMAP_GFX 0x1098UL
244#define PSYCHO_IMAP_EUPA 0x10a0UL
245
/* IMAP register offsets for the fixed onboard interrupt sources,
 * indexed by (INO - PSYCHO_ONBOARD_IRQ_BASE).
 */
static unsigned long __onboard_imap_off[] = {
/*0x20*/	PSYCHO_IMAP_SCSI,
/*0x21*/	PSYCHO_IMAP_ETH,
/*0x22*/	PSYCHO_IMAP_BPP,
/*0x23*/	PSYCHO_IMAP_AU_REC,
/*0x24*/	PSYCHO_IMAP_AU_PLAY,
/*0x25*/	PSYCHO_IMAP_PFAIL,
/*0x26*/	PSYCHO_IMAP_KMS,
/*0x27*/	PSYCHO_IMAP_FLPY,
/*0x28*/	PSYCHO_IMAP_SHW,
/*0x29*/	PSYCHO_IMAP_KBD,
/*0x2a*/	PSYCHO_IMAP_MS,
/*0x2b*/	PSYCHO_IMAP_SER,
/*0x2c*/	PSYCHO_IMAP_TIM0,
/*0x2d*/	PSYCHO_IMAP_TIM1,
/*0x2e*/	PSYCHO_IMAP_UE,
/*0x2f*/	PSYCHO_IMAP_CE,
/*0x30*/	PSYCHO_IMAP_A_ERR,
/*0x31*/	PSYCHO_IMAP_B_ERR,
/*0x32*/	PSYCHO_IMAP_PMGMT
};
#define PSYCHO_ONBOARD_IRQ_BASE		0x20
#define PSYCHO_ONBOARD_IRQ_LAST		0x32
#define psycho_onboard_imap_offset(__ino) \
	__onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
271
272#define PSYCHO_ICLR_A_SLOT0 0x1400UL
273#define PSYCHO_ICLR_SCSI 0x1800UL
274
275#define psycho_iclr_offset(ino) \
276 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
277 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
278
/* PCI PSYCHO INO number to Sparc PIL level.
 * A zero entry means "no fixed PIL"; psycho_ino_to_pil() then picks
 * one from the device's PCI base class.
 */
static unsigned char psycho_pil_table[] = {
/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
/*0x20*/4,		/* SCSI				*/
/*0x21*/5,		/* Ethernet			*/
/*0x22*/8,		/* Parallel Port		*/
/*0x23*/13,		/* Audio Record			*/
/*0x24*/14,		/* Audio Playback		*/
/*0x25*/15,		/* PowerFail			*/
/*0x26*/4,		/* second SCSI			*/
/*0x27*/11,		/* Floppy			*/
/*0x28*/4,		/* Spare Hardware		*/
/*0x29*/9,		/* Keyboard			*/
/*0x2a*/4,		/* Mouse			*/
/*0x2b*/12,		/* Serial			*/
/*0x2c*/10,		/* Timer 0			*/
/*0x2d*/11,		/* Timer 1			*/
/*0x2e*/15,		/* Uncorrectable ECC		*/
/*0x2f*/15,		/* Correctable ECC		*/
/*0x30*/15,		/* PCI Bus A Error		*/
/*0x31*/15,		/* PCI Bus B Error		*/
/*0x32*/15,		/* Power Management		*/
};
309
310static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
311{
312 int ret;
313
314 ret = psycho_pil_table[ino];
315 if (ret == 0 && pdev == NULL) {
316 ret = 4;
317 } else if (ret == 0) {
318 switch ((pdev->class >> 16) & 0xff) {
319 case PCI_BASE_CLASS_STORAGE:
320 ret = 4;
321 break;
322
323 case PCI_BASE_CLASS_NETWORK:
324 ret = 6;
325 break;
326
327 case PCI_BASE_CLASS_DISPLAY:
328 ret = 9;
329 break;
330
331 case PCI_BASE_CLASS_MULTIMEDIA:
332 case PCI_BASE_CLASS_MEMORY:
333 case PCI_BASE_CLASS_BRIDGE:
334 case PCI_BASE_CLASS_SERIAL:
335 ret = 10;
336 break;
337
338 default:
339 ret = 4;
340 break;
341 };
342 }
343
344 return ret;
345}
346
/* Build the Linux IRQ for INO under PBM. PDEV may be NULL for
 * controller-internal interrupt sources; it is only consulted to
 * pick a PIL for slot interrupts without a fixed assignment.
 */
static unsigned int __init psycho_irq_build(struct pci_pbm_info *pbm,
					    struct pci_dev *pdev,
					    unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, inofixup = 0;

	ino &= PCI_IRQ_INO;
	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
		/* PCI slot interrupt. */
		imap_off = psycho_pcislot_imap_offset(ino);
	} else {
		/* Onboard device; anything past the last known onboard
		 * INO is unrecoverable at this early boot stage.
		 */
		if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
			prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = psycho_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	pil = psycho_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	imap = pbm->controller_regs + imap_off;
	/* NOTE(review): the +4 appears to address the low 32 bits of
	 * the 64-bit IMAP/ICLR registers -- confirm against the U2P
	 * programmer's manual.
	 */
	imap += 4;

	iclr_off = psycho_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* Slot interrupts fold the INTA..INTD line number into the IRQ. */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	return __irq(bucket);
}
390
/* PSYCHO error handling support. */
/* Which top-level error source triggered the diagnostics; threaded
 * through the IOMMU/streaming-cache probe helpers.
 */
enum psycho_error_type {
	UE_ERR, CE_ERR, PCI_ERR
};
395
396/* Helper function of IOMMU error checking, which checks out
397 * the state of the streaming buffers. The IOMMU lock is
398 * held when this is called.
399 *
400 * For the PCI error case we know which PBM (and thus which
401 * streaming buffer) caused the error, but for the uncorrectable
402 * error case we do not. So we always check both streaming caches.
403 */
404#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
405#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
406#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
407#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
408#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
409#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
410#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
411#define PSYCHO_STRBUF_FLUSH_A 0x2808UL
412#define PSYCHO_STRBUF_FLUSH_B 0x4808UL
413#define PSYCHO_STRBUF_FSYNC_A 0x2810UL
414#define PSYCHO_STRBUF_FSYNC_B 0x4810UL
415#define PSYCHO_STC_DATA_A 0xb000UL
416#define PSYCHO_STC_DATA_B 0xc000UL
417#define PSYCHO_STC_ERR_A 0xb400UL
418#define PSYCHO_STC_ERR_B 0xc400UL
419#define PSYCHO_STCERR_WRITE 0x0000000000000002UL /* Write Error */
420#define PSYCHO_STCERR_READ 0x0000000000000001UL /* Read Error */
421#define PSYCHO_STC_TAG_A 0xb800UL
422#define PSYCHO_STC_TAG_B 0xc800UL
423#define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL /* Physical Page Number */
424#define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL /* Virtual Page Number */
425#define PSYCHO_STCTAG_VALID 0x0000000000000002UL /* Valid */
426#define PSYCHO_STCTAG_WRITE 0x0000000000000001UL /* Writable */
427#define PSYCHO_STC_LINE_A 0xb900UL
428#define PSYCHO_STC_LINE_B 0xc900UL
429#define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL /* LRU Index */
430#define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL /* Dirty Data Start Pointer */
431#define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL /* Line Address */
432#define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL /* Dirty Data End Pointer */
433#define PSYCHO_STCLINE_VALID 0x0000000000000002UL /* Valid */
434#define PSYCHO_STCLINE_FOFN 0x0000000000000001UL /* Fetch Outstanding / Flush Necessary */
435
/* stc_buf_lock protects the scratch buffers below, which hold a
 * one-shot snapshot of a streaming cache's diagnostic state while
 * it is decoded and logged to the console.
 */
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
440
/* Snapshot, clear and log the diagnostic state (error words, tags,
 * line state) of one PBM's streaming cache. Called with the IOMMU
 * lock held; takes stc_buf_lock for the shared snapshot buffers.
 */
static void __psycho_check_one_stc(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	struct pci_strbuf *strbuf = &pbm->stc;
	/* Diagnostic registers for both PBMs live in the one shared
	 * controller register block, hence pbm_A's base either way.
	 */
	unsigned long regbase = p->pbm_A.controller_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	if (is_pbm_a) {
		err_base = regbase + PSYCHO_STC_ERR_A;
		tag_base = regbase + PSYCHO_STC_TAG_A;
		line_base = regbase + PSYCHO_STC_LINE_A;
	} else {
		err_base = regbase + PSYCHO_STC_ERR_B;
		tag_base = regbase + PSYCHO_STC_TAG_B;
		line_base = regbase + PSYCHO_STC_LINE_B;
	}

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous. When we put the
	 * streaming buffer into diagnostic mode to probe
	 * it's tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer. If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = psycho_read(strbuf->strbuf_control);
	psycho_write(strbuf->strbuf_control,
		     (control | PSYCHO_STRBUF_CTRL_DENAB));
	/* Snapshot and clear the 128 per-entry error words. */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = psycho_read(err_base + (i * 8UL));
		psycho_write(err_base + (i * 8UL), 0UL);
		stc_error_buf[i] = val;
	}
	/* Snapshot and clear the 16 tag/line register pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = psycho_read(tag_base + (i * 8UL));
		stc_line_buf[i] = psycho_read(line_base + (i * 8UL));
		psycho_write(tag_base + (i * 8UL), 0UL);
		psycho_write(line_base + (i * 8UL), 0UL);
	}

	/* OK, state is logged, exit diagnostic mode. */
	psycho_write(strbuf->strbuf_control, control);

	/* Decode: each of the 16 tags covers 8 of the 128 error words;
	 * tag/line state is only printed for tags that saw an error.
	 */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("PSYCHO%d(PBM%c): STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       p->index,
				       (is_pbm_a ? 'A' : 'B'),
				       j,
				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("PSYCHO%d(PBM%c): STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)W(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
			       (tagval & PSYCHO_STCTAG_VPN),
			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
			printk("PSYCHO%d(PBM%c): STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
537
538static void __psycho_check_stc_error(struct pci_controller_info *p,
539 unsigned long afsr,
540 unsigned long afar,
541 enum psycho_error_type type)
542{
543 struct pci_pbm_info *pbm;
544
545 pbm = &p->pbm_A;
546 if (pbm->stc.strbuf_enabled)
547 __psycho_check_one_stc(p, pbm, 1);
548
549 pbm = &p->pbm_B;
550 if (pbm->stc.strbuf_enabled)
551 __psycho_check_one_stc(p, pbm, 0);
552}
553
554/* When an Uncorrectable Error or a PCI Error happens, we
555 * interrogate the IOMMU state to see if it is the cause.
556 */
557#define PSYCHO_IOMMU_CONTROL 0x0200UL
558#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
559#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
560#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
561#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
562#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
563#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
564#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
565#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
566#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
567#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
568#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
569#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
570#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
571#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
572#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
573#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
574#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
575#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
576#define PSYCHO_IOMMU_TSBBASE 0x0208UL
577#define PSYCHO_IOMMU_FLUSH 0x0210UL
578#define PSYCHO_IOMMU_TAG 0xa580UL
579#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
580#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
581#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
582#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
583#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
584#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
585#define PSYCHO_IOMMU_DATA 0xa600UL
586#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
587#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
588#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
/* Interrogate the IOMMU after an uncorrectable or PCI error to see
 * whether a translation error was the cause; dump and clear any TLB
 * entries with error status, then probe the streaming caches too.
 */
static void psycho_check_iommu_error(struct pci_controller_info *p,
				     unsigned long afsr,
				     unsigned long afar,
				     enum psycho_error_type type)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = psycho_read(iommu->iommu_control);
	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
		psycho_write(iommu->iommu_control, control);

		/* Decode the 2-bit translation error status field. */
		switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("PSYCHO%d: IOMMU Error, type[%s]\n",
		       p->index, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * it's TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further. But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		psycho_write(iommu->iommu_control,
			     control | PSYCHO_IOMMU_CTRL_DENAB);
		/* Snapshot then clear all 16 TLB tag/data pairs. */
		for (i = 0; i < 16; i++) {
			unsigned long base = p->pbm_A.controller_regs;

			iommu_tag[i] =
				psycho_read(base + PSYCHO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				psycho_read(base + PSYCHO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			psycho_write(base + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
			psycho_write(base + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		psycho_write(iommu->iommu_control, control);

		/* Report only entries that carry the error bit. */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & PSYCHO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("PSYCHO%d: IOMMU TAG(%d)[error(%s) wr(%d) str(%d) sz(%dK) vpg(%08lx)]\n",
			       p->index, i, type_string,
			       ((tag & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("PSYCHO%d: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       p->index, i,
			       ((data & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	/* Always look at the streaming caches as well. */
	__psycho_check_stc_error(p, afsr, afar, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
695
696/* Uncorrectable Errors. Cause of the error and the address are
697 * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
698 * relating to UPA interface transactions.
699 */
700#define PSYCHO_UE_AFSR 0x0030UL
701#define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
702#define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
703#define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
704#define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
705#define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
706#define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
707#define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
708#define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
709#define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
710#define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
711#define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
712#define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
713#define PSYCHO_UE_AFAR 0x0038UL
714
/* Uncorrectable error interrupt handler. Latches and clears the
 * UE AFSR/AFAR, logs primary and secondary causes, then checks the
 * IOMMU for a related translation error. Shared AFSR/AFAR live in
 * the controller register block (pbm_A's base covers both PBMs).
 */
static irqreturn_t psycho_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
		 PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PSYCHO%d: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_UEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	psycho_check_iommu_error(p, afsr, afar, UE_ERR);

	return IRQ_HANDLED;
}
774
775/* Correctable Errors. */
776#define PSYCHO_CE_AFSR 0x0040UL
777#define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
778#define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
779#define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
780#define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
781#define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
782#define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
783#define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
784#define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
785#define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
786#define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Double Offset */
787#define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
788#define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
789#define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
790#define PSYCHO_CE_AFAR 0x0040UL
791
/* Correctable (ECC) error interrupt handler: latch/clear the CE
 * AFSR, log primary and secondary causes plus syndrome. CE errors
 * are recoverable, so no IOMMU interrogation is done here.
 */
static irqreturn_t psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFSR;
	/* NOTE(review): PSYCHO_CE_AFAR is defined with the same offset
	 * (0x0040) as PSYCHO_CE_AFSR, so this reads the status register
	 * twice and the logged "CE AFAR" is really the AFSR value --
	 * verify the AFAR offset (0x0048?) against the U2P manual.
	 */
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
		 PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_CEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_CEAFSR_PDWR) ?
		   "DMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
	       (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_CEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
854
855/* PCI Errors. They are signalled by the PCI bus module since they
856 * are associated with a specific bus segment.
857 */
858#define PSYCHO_PCI_AFSR_A 0x2010UL
859#define PSYCHO_PCI_AFSR_B 0x4010UL
860#define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL /* Primary Master Abort Error */
861#define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL /* Primary Target Abort Error */
862#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
863#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
864#define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort Error */
865#define PSYCHO_PCIAFSR_STA 0x0400000000000000UL /* Secondary Target Abort Error */
866#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
867#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
868#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL /* Reserved */
869#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
870#define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL /* Trans was block operation */
871#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL /* Reserved */
872#define PSYCHO_PCIAFSR_MID 0x000000003e000000UL /* MID causing the error */
873#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL /* Reserved */
874#define PSYCHO_PCI_AFAR_A 0x2018UL
875#define PSYCHO_PCI_AFAR_B 0x4018UL
876
/* Fallback when a PCI error interrupt fired but the AFSR showed no
 * primary/secondary bits: check the PBM control register for SERR /
 * streaming-byte-hole conditions and the bridge's PCI_STATUS word.
 * Returns IRQ_HANDLED only if some error source was found.
 */
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm_a)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	if (is_pbm_a) {
		csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
	} else {
		csr_reg = pbm->controller_regs + PSYCHO_PCIB_CTRL;
	}
	csr = psycho_read(csr_reg);
	csr_error_bits =
		csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors (writing the value back acknowledges
		 * the write-1-to-clear error bits).
		 */
		psycho_write(csr_reg, csr);

		/* Log 'em. */
		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n", pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Also check and clear the bridge's own PCI status bits. */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
916
/* PCI error interrupt handler for one PBM (dev_id points at the
 * struct pci_pbm_info).  Latches the PCI AFSR/AFAR pair, clears and
 * logs the primary and secondary error causes, and scans the PBM's
 * bus for devices that logged the matching error type.
 */
static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int is_pbm_a, reported;

	/* Both PBMs share one controller register block (psycho_init
	 * assigns the same controller_regs to pbm_A and pbm_B), so the
	 * pbm_A base is used for either PBM; only the AFSR/AFAR offsets
	 * differ.
	 */
	is_pbm_a = (pbm == &pbm->parent->pbm_A);
	if (is_pbm_a) {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_A;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_A;
	} else {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_B;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_B;
	}

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	if (!error_bits)
		/* No AFSR-reported cause; try the secondary sources. */
		return psycho_pcierr_intr_other(pbm, is_pbm_a);
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d(PBM%c): PCI Error, primary error type[%s]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	/* Shift counts extract the BMSK/MID fields defined above. */
	printk("PSYCHO%d(PBM%c): bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk("PSYCHO%d(PBM%c): PCI AFAR [%016lx]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'), afar);
	printk("PSYCHO%d(PBM%c): PCI Secondary errors [",
	       p->index, (is_pbm_a ? 'A' : 'B'));
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(p, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
1017
1018/* XXX What about PowerFail/PowerManagement??? -DaveM */
1019#define PSYCHO_ECC_CTRL 0x0020
1020#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
1021#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
1022#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */
1023#define PSYCHO_UE_INO 0x2e
1024#define PSYCHO_CE_INO 0x2f
1025#define PSYCHO_PCIERR_A_INO 0x30
1026#define PSYCHO_PCIERR_B_INO 0x31
/* Register UE/CE and per-PBM PCI error interrupt handlers for a
 * controller, then enable ECC checking and PCI error reporting in
 * hardware.  Called after the PCI bus scan so pbm->pci_bus is valid
 * when the handlers fire.  Any registration failure is fatal
 * (prom_halt).
 */
static void __init psycho_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
	unsigned long base = p->pbm_A.controller_regs;
	unsigned int irq, portid = pbm->portid;
	u64 tmp;

	/* Build IRQs and register handlers. */
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
	if (request_irq(irq, psycho_ue_intr,
			SA_SHIRQ, "PSYCHO UE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
			    p->index);
		prom_halt();
	}

	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
	if (request_irq(irq, psycho_ce_intr,
			SA_SHIRQ, "PSYCHO CE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* pbm already points at pbm_A; reassigned here only for symmetry
	 * with the PBM B case below.  The PCI error handlers get the
	 * specific PBM as dev_id, unlike UE/CE which get the controller.
	 */
	pbm = &p->pbm_A;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
		prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	pbm = &p->pbm_B;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
		prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* Enable UE and CE interrupts for controller. */
	psycho_write(base + PSYCHO_ECC_CTRL,
		     (PSYCHO_ECCCTRL_EE |
		      PSYCHO_ECCCTRL_UE |
		      PSYCHO_ECCCTRL_CE));

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.
	 */
	tmp = psycho_read(base + PSYCHO_PCIA_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	/* Streaming-byte-hole interrupt stays masked. */
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(base + PSYCHO_PCIB_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIB_CTRL, tmp);
}
1092
1093/* PSYCHO boot time probing and initialization. */
1094static void __init psycho_resource_adjust(struct pci_dev *pdev,
1095 struct resource *res,
1096 struct resource *root)
1097{
1098 res->start += root->start;
1099 res->end += root->start;
1100}
1101
/* Write an assigned resource back into a device's PCI base address
 * register (or expansion ROM register), converting from the kernel's
 * resource view to the bus-relative value the hardware expects.
 */
static void __init psycho_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		/* Standard BARs are dwords starting at PCI_BASE_ADDRESS_0. */
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* NOTE(review): assumes res->end is inclusive, so 'size' is
	 * (length - 1) and serves as the mask of address bits the BAR
	 * implements -- confirm against the resource conventions of
	 * this kernel version.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	/* Keep the BAR bits below the size mask, install the new
	 * bus-relative base in the bits above it.
	 */
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
1146
1147static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
1148{
1149 u8 *addr;
1150
1151 /* Set cache-line size to 64 bytes, this is actually
1152 * a nop but I do it for completeness.
1153 */
1154 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1155 0, PCI_CACHE_LINE_SIZE);
1156 pci_config_write8(addr, 64 / sizeof(u32));
1157
1158 /* Set PBM latency timer to 64 PCI clocks. */
1159 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1160 0, PCI_LATENCY_TIMER);
1161 pci_config_write8(addr, 64);
1162}
1163
/* Scan one PBM's PCI bus and run the common sparc64 fixups
 * (cookies, resource assignments, IRQ routing, 66MHz disposition,
 * bus mastering).  Allocation failure is fatal at boot (prom_halt).
 */
static void __init pbm_scan_bus(struct pci_controller_info *p,
				struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("PSYCHO: Critical allocation failure.\n");
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
				    p->pci_ops,
				    pbm);
	/* The host bridge device itself gets the PBM-only cookie built
	 * above as its sysdata.
	 */
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
1191
/* Top-level bus scan entry for a PSYCHO controller: configure and
 * scan both PBMs, then hook up the error interrupt handlers (which
 * require pci_bus to be populated).
 */
static void __init psycho_scan_bus(struct pci_controller_info *p)
{
	pbm_config_busmastering(&p->pbm_B);
	p->pbm_B.is_66mhz_capable = 0;
	pbm_config_busmastering(&p->pbm_A);
	p->pbm_A.is_66mhz_capable = 1;
	pbm_scan_bus(p, &p->pbm_B);
	pbm_scan_bus(p, &p->pbm_A);

	/* After the PCI bus scan is complete, we can register
	 * the error interrupt handlers.
	 */
	psycho_register_error_handlers(p);
}
1206
/* Bring up the IOMMU shared by both PBMs of a PSYCHO controller:
 * initialize software state, invalidate the hardware TLB, allocate
 * the dummy page and the IOPTE table, and enable translation.
 * Allocation failures are fatal at boot (prom_halt).
 */
static void __init psycho_iommu_init(struct pci_controller_info *p)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long tsbbase, i;
	u64 control;

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->iommu_cur_ctx = 0;

	/* Register addresses. */
	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
	iommu->iommu_flush    = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH;
	/* PSYCHO's IOMMU lacks ctx flushing. */
	iommu->iommu_ctxflush = 0;

	/* We use the main control register of PSYCHO as the write
	 * completion register.
	 */
	iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL;

	/*
	 * Invalidate TLB Entries.
	 */
	/* Diagnostic mode must be on to write the TAG/DATA entries. */
	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control |= PSYCHO_IOMMU_CTRL_DENAB;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
	/* 16 TLB entries, each with an 8-byte tag and data register. */
	for(i = 0; i < 16; i++) {
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
	}

	/* Leave diag mode enabled for full-flushing done
	 * in pci_iommu.c
	 */

	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
	if (!tsbbase) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;
	/* 2^17 == 128K entries, matching the TSBSZ_128K setting below. */
	iommu->page_table_sz_bits = 17;
	iommu->page_table_map_base = 0xc0000000;
	iommu->dma_addr_mask = 0xffffffff;
	pci_iommu_table_init(iommu, IO_TSB_SIZE);

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map =
		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);

	for (i = 0; i < PBM_NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Point the hardware at the table, then enable translation with
	 * the 128K TSB size.
	 */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));

	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
	control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);

	/* If necessary, hook us up for starfire IRQ translations. */
	if(this_is_starfire)
		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
	else
		p->starfire_cookie = NULL;
}
1289
1290#define PSYCHO_IRQ_RETRY 0x1a00UL
1291#define PSYCHO_PCIA_DIAG 0x2020UL
1292#define PSYCHO_PCIB_DIAG 0x4020UL
1293#define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */
1294#define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */
1295#define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */
1296#define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
1297#define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */
1298#define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */
1299#define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */
1300#define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */
1301
/* One-time hardware initialization of the PSYCHO controller: bump the
 * IRQ retry limit, enable the PCI arbiters, and apply the U2P
 * erratum workaround on both bus segments.
 */
static void psycho_controller_hwinit(struct pci_controller_info *p)
{
	u64 tmp;

	/* PROM sets the IRQ retry value too low, increase it. */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);

	/* Enable arbiter for all PCI slots. */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp);

	/* Disable DMA write / PIO read synchronization on
	 * both PCI bus segments.
	 * [ U2P Erratum 1243770, STP2223BGA data sheet ]
	 */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
}
1330
1331static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
1332 struct pci_pbm_info *pbm)
1333{
1334 char *name = pbm->name;
1335
1336 sprintf(name, "PSYCHO%d PBM%c",
1337 p->index,
1338 (pbm == &p->pbm_A ? 'A' : 'B'));
1339 pbm->io_space.name = pbm->mem_space.name = name;
1340
1341 request_resource(&ioport_resource, &pbm->io_space);
1342 request_resource(&iomem_resource, &pbm->mem_space);
1343 pci_register_legacy_regions(&pbm->io_space,
1344 &pbm->mem_space);
1345}
1346
/* Initialize one PBM's streaming buffer: record its register
 * addresses, set up the cache-line-aligned flush flag word, and
 * enable the buffer with LRU locking cleared.
 */
static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	unsigned long base = pbm->controller_regs;
	u64 control;

	if (is_pbm_a) {
		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_A;
		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_A;
		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_A;
	} else {
		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_B;
		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_B;
		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_B;
	}
	/* PSYCHO's streaming buffer lacks ctx flushing. */
	pbm->stc.strbuf_ctxflush      = 0;
	pbm->stc.strbuf_ctxmatch_base = 0;

	/* Round the flush flag buffer up to the next 64-byte boundary
	 * so the flag occupies its own cache line.
	 */
	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&pbm->stc.__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	pbm->stc.strbuf_flushflag_pa = (unsigned long)
		__pa(pbm->stc.strbuf_flushflag);

	/* Enable the streaming buffer.  We have to be careful
	 * just in case OBP left it with LRU locking enabled.
	 *
	 * It is possible to control if PBM will be rerun on
	 * line misses.  Currently I just retain whatever setting
	 * OBP left us with.  All checks so far show it having
	 * a value of zero.
	 */
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
	control = psycho_read(pbm->stc.strbuf_control);
	control |= PSYCHO_STRBUF_CTRL_ENAB;
	control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
	control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
	control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
	psycho_write(pbm->stc.strbuf_control, control);

	pbm->stc.strbuf_enabled = 1;
}
1398
1399#define PSYCHO_IOSPACE_A 0x002000000UL
1400#define PSYCHO_IOSPACE_B 0x002010000UL
1401#define PSYCHO_IOSPACE_SIZE 0x00000ffffUL
1402#define PSYCHO_MEMSPACE_A 0x100000000UL
1403#define PSYCHO_MEMSPACE_B 0x180000000UL
1404#define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL
1405
/* Initialize one PBM: set up its IO/MEM resource windows, read the
 * PROM properties (ranges, interrupt-map, bus-range) that later
 * drive resource and IRQ routing, and bring up its streaming buffer.
 * Missing mandatory properties are fatal (prom_halt).
 */
static void psycho_pbm_init(struct pci_controller_info *p,
			    int prom_node, int is_pbm_a)
{
	unsigned int busrange[2];
	struct pci_pbm_info *pbm;
	int err;

	/* Each PBM has its own IO/MEM window offsets within the
	 * controller's physical address space.
	 */
	if (is_pbm_a) {
		pbm = &p->pbm_A;
		pbm->pci_first_slot = 1;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
	} else {
		pbm = &p->pbm_B;
		pbm->pci_first_slot = 2;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
	}

	pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
	pbm->chip_version =
		prom_getintdefault(prom_node, "version#", 0);
	pbm->chip_revision =
		prom_getintdefault(prom_node, "module-revision#", 0);

	pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
	pbm->io_space.flags = IORESOURCE_IO;
	pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
	pbm->mem_space.flags = IORESOURCE_MEM;
	pbm_register_toplevel_resources(p, pbm);

	pbm->parent = p;
	pbm->prom_node = prom_node;
	prom_getstring(prom_node, "name",
		       pbm->prom_name,
		       sizeof(pbm->prom_name));

	/* prom_getproperty returns the property length in bytes, or -1
	 * if the property does not exist.
	 */
	err = prom_getproperty(prom_node, "ranges",
			       (char *)pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err != -1)
		pbm->num_pbm_ranges =
			(err / sizeof(struct linux_prom_pci_ranges));
	else
		pbm->num_pbm_ranges = 0;

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		/* An interrupt-map without its mask is unusable. */
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("PSYCHO-PBM: Fatal error, no "
				    "interrupt-map-mask.\n");
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO-PBM: Fatal error, no bus-range.\n");
		prom_halt();
	}
	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

	psycho_pbm_strbuf_init(p, pbm, is_pbm_a);
}
1482
1483#define PSYCHO_CONFIGSPACE 0x001000000UL
1484
/* Probe-time entry point for a PSYCHO PCI host controller node.
 * Each PSYCHO has two PBMs that appear as separate PROM nodes with
 * the same upa-portid: the first call allocates and initializes the
 * controller, the second call only initializes the other PBM.
 * Fatal errors halt into the PROM.
 */
void __init psycho_init(int node, char *model_name)
{
	struct linux_prom64_registers pr_regs[3];
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	u32 upa_portid;
	int is_pbm_a, err;

	upa_portid = prom_getintdefault(node, "upa-portid", 0xff);

	/* If we already know this portid, this node is the controller's
	 * second PBM; just initialize it and return.
	 * NOTE(review): is_pbm_a is derived from prom_node == 0, i.e.
	 * "the slot not yet filled in" -- presumably the PBM structs
	 * record their node when initialized; confirm against
	 * psycho_pbm_init's callers.
	 */
	for(p = pci_controller_root; p; p = p->next) {
		if (p->pbm_A.portid == upa_portid) {
			is_pbm_a = (p->pbm_A.prom_node == 0);
			psycho_pbm_init(p, node, is_pbm_a);
			return;
		}
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));
	/* One IOMMU is shared by both PBMs. */
	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = p->pbm_B.iommu = iommu;

	/* Link into the global controller list. */
	p->next = pci_controller_root;
	pci_controller_root = p;

	p->pbm_A.portid = upa_portid;
	p->pbm_B.portid = upa_portid;
	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;
	/* Controller method table. */
	p->scan_bus = psycho_scan_bus;
	p->irq_build = psycho_irq_build;
	p->base_address_update = psycho_base_address_update;
	p->resource_adjust = psycho_resource_adjust;
	p->pci_ops = &psycho_ops;

	err = prom_getproperty(node, "reg",
			       (char *)&pr_regs[0],
			       sizeof(pr_regs));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO: Fatal error, no reg property.\n");
		prom_halt();
	}

	/* reg[2] holds the controller register base; both PBMs use it. */
	p->pbm_A.controller_regs = pr_regs[2].phys_addr;
	p->pbm_B.controller_regs = pr_regs[2].phys_addr;
	printk("PCI: Found PSYCHO, control regs at %016lx\n",
	       p->pbm_A.controller_regs);

	p->pbm_A.config_space = p->pbm_B.config_space =
		(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
	printk("PSYCHO: Shared PCI config space at %016lx\n",
	       p->pbm_A.config_space);

	/*
	 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
	 * we need to adjust our MEM space mask.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	psycho_controller_hwinit(p);

	psycho_iommu_init(p);

	/* NOTE(review): bit test on reg[0] distinguishes which PBM this
	 * node describes -- presumably per the U2P register layout;
	 * confirm against the STP2223BGA data sheet.
	 */
	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
	psycho_pbm_init(p, node, is_pbm_a);
}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
new file mode 100644
index 000000000000..5525d1ec4af8
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -0,0 +1,1702 @@
1/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
2 * pci_sabre.c: Sabre specific PCI controller support.
3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15
16#include <asm/apb.h>
17#include <asm/pbm.h>
18#include <asm/iommu.h>
19#include <asm/irq.h>
20#include <asm/smp.h>
21#include <asm/oplib.h>
22
23#include "pci_impl.h"
24#include "iommu_common.h"
25
26/* All SABRE registers are 64-bits. The following accessor
27 * routines are how they are accessed. The REG parameter
28 * is a physical address.
29 */
30#define sabre_read(__reg) \
31({ u64 __ret; \
32 __asm__ __volatile__("ldxa [%1] %2, %0" \
33 : "=r" (__ret) \
34 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
35 : "memory"); \
36 __ret; \
37})
38#define sabre_write(__reg, __val) \
39 __asm__ __volatile__("stxa %0, [%1] %2" \
40 : /* no outputs */ \
41 : "r" (__val), "r" (__reg), \
42 "i" (ASI_PHYS_BYPASS_EC_E) \
43 : "memory")
44
45/* SABRE PCI controller register offsets and definitions. */
46#define SABRE_UE_AFSR 0x0030UL
47#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
48#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
49#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
50#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
51#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
52#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
53#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
54#define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */
55#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
56#define SABRE_UECE_AFAR 0x0038UL
57#define SABRE_CE_AFSR 0x0040UL
58#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
59#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
60#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
61#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
62#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
63#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
64#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
65#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
66#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
67#define SABRE_IOMMU_CONTROL 0x0200UL
68#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
69#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
70#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
71#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
72#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
73#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
74#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
75#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
76#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
77#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
78#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
79#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
80#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
81#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
82#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
83#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
84#define SABRE_IOMMU_TSBBASE 0x0208UL
85#define SABRE_IOMMU_FLUSH 0x0210UL
86#define SABRE_IMAP_A_SLOT0 0x0c00UL
87#define SABRE_IMAP_B_SLOT0 0x0c20UL
88#define SABRE_IMAP_SCSI 0x1000UL
89#define SABRE_IMAP_ETH 0x1008UL
90#define SABRE_IMAP_BPP 0x1010UL
91#define SABRE_IMAP_AU_REC 0x1018UL
92#define SABRE_IMAP_AU_PLAY 0x1020UL
93#define SABRE_IMAP_PFAIL 0x1028UL
94#define SABRE_IMAP_KMS 0x1030UL
95#define SABRE_IMAP_FLPY 0x1038UL
96#define SABRE_IMAP_SHW 0x1040UL
97#define SABRE_IMAP_KBD 0x1048UL
98#define SABRE_IMAP_MS 0x1050UL
99#define SABRE_IMAP_SER 0x1058UL
100#define SABRE_IMAP_UE 0x1070UL
101#define SABRE_IMAP_CE 0x1078UL
102#define SABRE_IMAP_PCIERR 0x1080UL
103#define SABRE_IMAP_GFX 0x1098UL
104#define SABRE_IMAP_EUPA 0x10a0UL
105#define SABRE_ICLR_A_SLOT0 0x1400UL
106#define SABRE_ICLR_B_SLOT0 0x1480UL
107#define SABRE_ICLR_SCSI 0x1800UL
108#define SABRE_ICLR_ETH 0x1808UL
109#define SABRE_ICLR_BPP 0x1810UL
110#define SABRE_ICLR_AU_REC 0x1818UL
111#define SABRE_ICLR_AU_PLAY 0x1820UL
112#define SABRE_ICLR_PFAIL 0x1828UL
113#define SABRE_ICLR_KMS 0x1830UL
114#define SABRE_ICLR_FLPY 0x1838UL
115#define SABRE_ICLR_SHW 0x1840UL
116#define SABRE_ICLR_KBD 0x1848UL
117#define SABRE_ICLR_MS 0x1850UL
118#define SABRE_ICLR_SER 0x1858UL
119#define SABRE_ICLR_UE 0x1870UL
120#define SABRE_ICLR_CE 0x1878UL
121#define SABRE_ICLR_PCIERR 0x1880UL
122#define SABRE_WRSYNC 0x1c20UL
123#define SABRE_PCICTRL 0x2000UL
124#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
125#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
126#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
127#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
128#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
129#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
130#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
131#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
132#define SABRE_PIOAFSR 0x2010UL
133#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
134#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
135#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
136#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
137#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
138#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
139#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
140#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
141#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
142#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
143#define SABRE_PIOAFAR 0x2018UL
144#define SABRE_PCIDIAG 0x2020UL
145#define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
146#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
147#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
148#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
149#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
150#define SABRE_PCITASR 0x2028UL
151#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
152#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
153#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
154#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
155#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
156#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
157#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
158#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
159#define SABRE_PIOBUF_DIAG 0x5000UL
160#define SABRE_DMABUF_DIAGLO 0x5100UL
161#define SABRE_DMABUF_DIAGHI 0x51c0UL
162#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
163#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
164#define SABRE_IOMMU_VADIAG 0xa400UL
165#define SABRE_IOMMU_TCDIAG 0xa408UL
166#define SABRE_IOMMU_TAG 0xa580UL
167#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
168#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
169#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
170#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
171#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
172#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
173#define SABRE_IOMMU_DATA 0xa600UL
174#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
175#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
176#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
177#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
178#define SABRE_PCI_IRQSTATE 0xa800UL
179#define SABRE_OBIO_IRQSTATE 0xa808UL
180#define SABRE_FFBCFG 0xf000UL
181#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
182#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
183#define SABRE_MCCTRL0 0xf010UL
184#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
185#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
186#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
187#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
188#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
189#define SABRE_MCCTRL1 0xf018UL
190#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
191#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
192#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
193#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
194#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
195#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
196#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
197#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
198#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
199#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
200#define SABRE_RESETCTRL 0xf020UL
201
202#define SABRE_CONFIGSPACE 0x001000000UL
203#define SABRE_IOSPACE 0x002000000UL
204#define SABRE_IOSPACE_SIZE 0x000ffffffUL
205#define SABRE_MEMSPACE 0x100000000UL
206#define SABRE_MEMSPACE_SIZE 0x07fffffffUL
207
208/* UltraSparc-IIi Programmer's Manual, page 325, PCI
209 * configuration space address format:
210 *
211 * 32 24 23 16 15 11 10 8 7 2 1 0
212 * ---------------------------------------------------------
213 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
214 * ---------------------------------------------------------
215 */
216#define SABRE_CONFIG_BASE(PBM) \
217 ((PBM)->config_space | (1UL << 24))
218#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
219 (((unsigned long)(BUS) << 16) | \
220 ((unsigned long)(DEVFN) << 8) | \
221 ((unsigned long)(REG)))
222
223static int hummingbird_p;
224static struct pci_bus *sabre_root_bus;
225
226static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm,
227 unsigned char bus,
228 unsigned int devfn,
229 int where)
230{
231 if (!pbm)
232 return NULL;
233 return (void *)
234 (SABRE_CONFIG_BASE(pbm) |
235 SABRE_CONFIG_ENCODE(bus, devfn, where));
236}
237
238static int sabre_out_of_range(unsigned char devfn)
239{
240 if (hummingbird_p)
241 return 0;
242
243 return (((PCI_SLOT(devfn) == 0) && (PCI_FUNC(devfn) > 0)) ||
244 ((PCI_SLOT(devfn) == 1) && (PCI_FUNC(devfn) > 1)) ||
245 (PCI_SLOT(devfn) > 1));
246}
247
248static int __sabre_out_of_range(struct pci_pbm_info *pbm,
249 unsigned char bus,
250 unsigned char devfn)
251{
252 if (hummingbird_p)
253 return 0;
254
255 return ((pbm->parent == 0) ||
256 ((pbm == &pbm->parent->pbm_B) &&
257 (bus == pbm->pci_first_busno) &&
258 PCI_SLOT(devfn) > 8) ||
259 ((pbm == &pbm->parent->pbm_A) &&
260 (bus == pbm->pci_first_busno) &&
261 PCI_SLOT(devfn) > 8));
262}
263
/* Raw SABRE config space read at the requested natural size.
 * Out-of-range or unmappable accesses silently return the all-ones
 * sentinel preloaded into *value, mimicking a PCI master abort.
 * Always returns PCIBIOS_SUCCESSFUL.
 */
static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;
	u16 tmp16;
	u8 tmp8;

	/* Preload the "no device" sentinel so the early exits below
	 * report all-ones without further work.
	 */
	switch (size) {
	case 1:
		*value = 0xff;
		break;
	case 2:
		*value = 0xffff;
		break;
	case 4:
		*value = 0xffffffff;
		break;
	}

	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (__sabre_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	/* Perform the access at its natural size; word and dword
	 * accesses must be naturally aligned.
	 */
	switch (size) {
	case 1:
		pci_config_read8((u8 *) addr, &tmp8);
		*value = tmp8;
		break;

	case 2:
		if (where & 0x01) {
			printk("pci_read_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read16((u16 *) addr, &tmp16);
		*value = tmp16;
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_read_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read32(addr, value);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
320
/* Config space read entry point handed to the generic PCI layer.
 * Non-existent devices on bus 0 return all-ones.  Accesses to the
 * controller itself (bus 0, slot 0) are re-sized below because its
 * registers only respond at their natural access size.
 */
static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *value)
{
	if (!bus->number && sabre_out_of_range(devfn)) {
		switch (size) {
		case 1:
			*value = 0xff;
			break;
		case 2:
			*value = 0xffff;
			break;
		case 4:
			*value = 0xffffffff;
			break;
		}
		return PCIBIOS_SUCCESSFUL;
	}

	/* Anything that is not the controller itself goes straight
	 * through at the requested size.
	 */
	if (bus->number || PCI_SLOT(devfn))
		return __sabre_read_pci_cfg(bus, devfn, where, size, value);

	/* When accessing PCI config space of the PCI controller itself (bus
	 * 0, device slot 0, function 0) there are restrictions.  Each
	 * register must be accessed as it's natural size.  Thus, for example
	 * the Vendor ID must be accessed as a 16-bit quantity.
	 */

	switch (size) {
	case 1:
		if (where < 8) {
			/* Registers below 8 are 16-bit; read the containing
			 * word and extract the requested byte.
			 */
			u32 tmp32;
			u16 tmp16;

			__sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
			tmp16 = (u16) tmp32;
			if (where & 1)
				*value = tmp16 >> 8;
			else
				*value = tmp16 & 0xff;
		} else
			return __sabre_read_pci_cfg(bus, devfn, where, 1, value);
		break;

	case 2:
		if (where < 8)
			return __sabre_read_pci_cfg(bus, devfn, where, 2, value);
		else {
			/* Above 8 the registers are byte-wide; assemble the
			 * word from two byte reads (little-endian order).
			 */
			u32 tmp32;
			u8 tmp8;

			__sabre_read_pci_cfg(bus, devfn, where, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value = tmp8;
			__sabre_read_pci_cfg(bus, devfn, where + 1, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value |= tmp8 << 8;
		}
		break;

	case 4: {
		/* Dwords are always split into two word reads, which
		 * recurse through this function to get the per-word
		 * sizing rules above.
		 */
		u32 tmp32;
		u16 tmp16;

		sabre_read_pci_cfg(bus, devfn, where, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value = tmp16;
		sabre_read_pci_cfg(bus, devfn, where + 2, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value |= tmp16 << 16;
		break;
	}
	}
	return PCIBIOS_SUCCESSFUL;
}
395
/* Raw SABRE config space write at the requested natural size.
 * Out-of-range or unmappable accesses are silently dropped.
 * Always returns PCIBIOS_SUCCESSFUL.
 */
static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				 int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;

	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (__sabre_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	/* Perform the access at its natural size; word and dword
	 * accesses must be naturally aligned.
	 */
	switch (size) {
	case 1:
		pci_config_write8((u8 *) addr, value);
		break;

	case 2:
		if (where & 0x01) {
			printk("pci_write_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write16((u16 *) addr, value);
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_write_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write32(addr, value);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
436
437static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
438 int where, int size, u32 value)
439{
440 if (bus->number)
441 return __sabre_write_pci_cfg(bus, devfn, where, size, value);
442
443 if (sabre_out_of_range(devfn))
444 return PCIBIOS_SUCCESSFUL;
445
446 switch (size) {
447 case 1:
448 if (where < 8) {
449 u32 tmp32;
450 u16 tmp16;
451
452 __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
453 tmp16 = (u16) tmp32;
454 if (where & 1) {
455 value &= 0x00ff;
456 value |= tmp16 << 8;
457 } else {
458 value &= 0xff00;
459 value |= tmp16;
460 }
461 tmp32 = (u32) tmp16;
462 return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32);
463 } else
464 return __sabre_write_pci_cfg(bus, devfn, where, 1, value);
465 break;
466 case 2:
467 if (where < 8)
468 return __sabre_write_pci_cfg(bus, devfn, where, 2, value);
469 else {
470 __sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff);
471 __sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8);
472 }
473 break;
474 case 4:
475 sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff);
476 sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16);
477 break;
478 }
479 return PCIBIOS_SUCCESSFUL;
480}
481
/* Config space accessors handed to the generic PCI layer. */
static struct pci_ops sabre_ops = {
	.read = sabre_read_pci_cfg,
	.write = sabre_write_pci_cfg,
};
486
487static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
488{
489 unsigned int bus = (ino & 0x10) >> 4;
490 unsigned int slot = (ino & 0x0c) >> 2;
491
492 if (bus == 0)
493 return SABRE_IMAP_A_SLOT0 + (slot * 8);
494 else
495 return SABRE_IMAP_B_SLOT0 + (slot * 8);
496}
497
/* IMAP register offsets for onboard devices, indexed by
 * (INO - SABRE_ONBOARD_IRQ_BASE).  Table order must match the
 * 0x20-0x30 INO assignments noted beside each entry.
 */
static unsigned long __onboard_imap_off[] = {
/*0x20*/ SABRE_IMAP_SCSI,
/*0x21*/ SABRE_IMAP_ETH,
/*0x22*/ SABRE_IMAP_BPP,
/*0x23*/ SABRE_IMAP_AU_REC,
/*0x24*/ SABRE_IMAP_AU_PLAY,
/*0x25*/ SABRE_IMAP_PFAIL,
/*0x26*/ SABRE_IMAP_KMS,
/*0x27*/ SABRE_IMAP_FLPY,
/*0x28*/ SABRE_IMAP_SHW,
/*0x29*/ SABRE_IMAP_KBD,
/*0x2a*/ SABRE_IMAP_MS,
/*0x2b*/ SABRE_IMAP_SER,
/*0x2c*/ 0 /* reserved */,
/*0x2d*/ 0 /* reserved */,
/*0x2e*/ SABRE_IMAP_UE,
/*0x2f*/ SABRE_IMAP_CE,
/*0x30*/ SABRE_IMAP_PCIERR,
};
#define SABRE_ONBOARD_IRQ_BASE		0x20
#define SABRE_ONBOARD_IRQ_LAST		0x30
#define sabre_onboard_imap_offset(__ino) \
	__onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]

/* ICLR offset for INO: bit 5 set means an onboard source (ICLR
 * registers start at SABRE_ICLR_SCSI), clear means a PCI slot
 * source (registers start at SABRE_ICLR_A_SLOT0); each register
 * is 8 bytes apart.
 */
#define sabre_iclr_offset(ino)					      \
	((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) :  \
			(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
525
/* PCI SABRE INO number to Sparc PIL level.  Zero entries (the PCI
 * slot interrupt lines) are resolved at runtime by sabre_ino_to_pil()
 * from the device's PCI class code.
 */
static unsigned char sabre_pil_table[] = {
/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
/*0x20*/4,		/* SCSI				*/
/*0x21*/5,		/* Ethernet			*/
/*0x22*/8,		/* Parallel Port		*/
/*0x23*/13,		/* Audio Record			*/
/*0x24*/14,		/* Audio Playback		*/
/*0x25*/15,		/* PowerFail			*/
/*0x26*/4,		/* second SCSI			*/
/*0x27*/11,		/* Floppy			*/
/*0x28*/4,		/* Spare Hardware		*/
/*0x29*/9,		/* Keyboard			*/
/*0x2a*/4,		/* Mouse			*/
/*0x2b*/12,		/* Serial			*/
/*0x2c*/10,		/* Timer 0			*/
/*0x2d*/11,		/* Timer 1			*/
/*0x2e*/15,		/* Uncorrectable ECC		*/
/*0x2f*/15,		/* Correctable ECC		*/
/*0x30*/15,		/* PCI Bus A Error		*/
/*0x31*/15,		/* PCI Bus B Error		*/
/*0x32*/15,		/* Power Management		*/
};
556
557static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
558{
559 int ret;
560
561 if (pdev &&
562 pdev->vendor == PCI_VENDOR_ID_SUN &&
563 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
564 return 9;
565
566 ret = sabre_pil_table[ino];
567 if (ret == 0 && pdev == NULL) {
568 ret = 4;
569 } else if (ret == 0) {
570 switch ((pdev->class >> 16) & 0xff) {
571 case PCI_BASE_CLASS_STORAGE:
572 ret = 4;
573 break;
574
575 case PCI_BASE_CLASS_NETWORK:
576 ret = 6;
577 break;
578
579 case PCI_BASE_CLASS_DISPLAY:
580 ret = 9;
581 break;
582
583 case PCI_BASE_CLASS_MULTIMEDIA:
584 case PCI_BASE_CLASS_MEMORY:
585 case PCI_BASE_CLASS_BRIDGE:
586 case PCI_BASE_CLASS_SERIAL:
587 ret = 10;
588 break;
589
590 default:
591 ret = 4;
592 break;
593 };
594 }
595 return ret;
596}
597
/* Build the IRQ bucket for interrupt source INO on this SABRE and
 * return the cookie callers pass to request_irq().  PDEV may be NULL
 * for controller-internal sources.
 */
static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
					   struct pci_dev *pdev,
					   unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, inofixup = 0;

	ino &= PCI_IRQ_INO;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = sabre_pcislot_imap_offset(ino);
	} else {
		/* onboard device */
		if (ino > SABRE_ONBOARD_IRQ_LAST) {
			prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = sabre_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	pil = sabre_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	/* NOTE(review): the +4 presumably addresses the interesting
	 * 32-bit half of the 64-bit IMAP/ICLR registers -- confirm
	 * against the UltraSPARC-IIi manual.
	 */
	imap = pbm->controller_regs + imap_off;
	imap += 4;

	iclr_off = sabre_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* PCI slot sources (INO bit 5 clear) carry the INTA-D line
	 * number in the low two INO bits; onboard sources need none.
	 */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	if (pdev) {
		struct pcidev_cookie *pcp = pdev->sysdata;

		/* When a device lives behind a bridge deeper in the
		 * PCI bus topology than APB, a special sequence must
		 * run to make sure all pending DMA transfers at the
		 * time of IRQ delivery are visible in the coherency
		 * domain by the cpu.  This sequence is to perform
		 * a read on the far side of the non-APB bridge, then
		 * perform a read of Sabre's DMA write-sync register.
		 *
		 * Currently, the PCI_CONFIG register for the device
		 * is used for this read from the far side of the bridge.
		 */
		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
			bucket->flags |= IBF_DMA_SYNC;
			bucket->synctab_ent = dma_sync_reg_table_entry++;
			dma_sync_reg_table[bucket->synctab_ent] =
				(unsigned long) sabre_pci_config_mkaddr(
					pcp->pbm,
					pdev->bus->number, pdev->devfn, PCI_COMMAND);
		}
	}
	return __irq(bucket);
}
664
/* SABRE error handling support. */

/* Interrogate the IOMMU after an uncorrectable/PCI error: if the
 * IOMMU control register flags an error, enter diagnostic mode,
 * snapshot and clear all 16 IOTLB tag/data entries, then log any
 * entry that recorded an error.  AFSR/AFAR are currently unused
 * here but passed for context.
 */
static void sabre_check_iommu_error(struct pci_controller_info *p,
				    unsigned long afsr,
				    unsigned long afar)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = sabre_read(iommu->iommu_control);
	if (control & SABRE_IOMMUCTRL_ERR) {
		char *type_string;

		/* Clear the error encountered bit.
		 * NOTE: On Sabre this is write 1 to clear,
		 * which is different from Psycho.
		 */
		sabre_write(iommu->iommu_control, control);
		switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
		case 1:
			type_string = "Invalid Error";
			break;
		case 3:
			type_string = "ECC Error";
			break;
		default:
			type_string = "Unknown";
			break;
		};
		printk("SABRE%d: IOMMU Error, type[%s]\n",
		       p->index, type_string);

		/* Enter diagnostic mode and probe for error'd
		 * entries in the IOTLB.
		 */
		control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
		sabre_write(iommu->iommu_control,
			    (control | SABRE_IOMMUCTRL_DENAB));
		for (i = 0; i < 16; i++) {
			unsigned long base = p->pbm_A.controller_regs;

			/* Snapshot then zero each IOTLB entry while in
			 * diagnostic mode.
			 */
			iommu_tag[i] =
				sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL));
			sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
			sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
		}
		/* Leave diagnostic mode before reporting. */
		sabre_write(iommu->iommu_control, control);

		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SABRE_IOMMUTAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) {
			case 1:
				type_string = "Invalid Error";
				break;
			case 3:
				type_string = "ECC Error";
				break;
			default:
				type_string = "Unknown";
				break;
			};
			printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
			       p->index, i, tag, type_string,
			       ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
			       ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
			       ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT));
			printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
			       p->index, i, data,
			       ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
			       ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT));
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
753
/* Uncorrectable error interrupt handler.  Latches AFSR/AFAR, clears
 * the error bits (write-1-to-clear), logs the primary and secondary
 * causes, and probes the IOMMU for related IOTLB errors.
 */
static irqreturn_t sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Uncorrectable Error, primary error type[%s%s]\n",
	       p->index,
	       ((error_bits & SABRE_UEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_UEAFSR_PDWR) ?
		 "DMA Write" : "???")),
	       ((error_bits & SABRE_UEAFSR_PDTE) ?
		":Translation Error" : ""));
	printk("SABRE%d: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_UEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (afsr & SABRE_UEAFSR_SDTE) {
		reported++;
		printk("(Translation Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	sabre_check_iommu_error(p, afsr, afar);

	return IRQ_HANDLED;
}
813
/* Correctable error interrupt handler.  Latches AFSR/AFAR, clears
 * the error bits (write-1-to-clear), and logs syndrome and
 * primary/secondary causes.  No corrective action is required.
 */
static irqreturn_t sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_CE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       ((error_bits & SABRE_CEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_CEAFSR_PDWR) ?
		 "DMA Write" : "???")));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
	       (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_CEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
869
/* Fallback PCI error handler for when the PIO AFSR shows no error:
 * checks the PCI control register for SERR assertion and the root
 * bridge's PCI_STATUS for abort/parity conditions, clearing and
 * logging whatever is found.  Returns IRQ_NONE if nothing was.
 */
static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = p->pbm_A.controller_regs + SABRE_PCICTRL;
	csr = sabre_read(csr_reg);
	csr_error_bits =
		csr & SABRE_PCICTRL_SERR;
	if (csr_error_bits) {
		/* Clear the errors. */
		sabre_write(csr_reg, csr);

		/* Log 'em. */
		if (csr_error_bits & SABRE_PCICTRL_SERR)
			printk("SABRE%d: PCI SERR signal asserted.\n",
			       p->index);
		ret = IRQ_HANDLED;
	}
	/* Inspect the root bridge's own status register too. */
	pci_read_config_word(sabre_root_bus->self,
			     PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
		       p->index, stat);
		/* PCI_STATUS error bits are write-1-to-clear. */
		pci_write_config_word(sabre_root_bus->self,
				      PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
905
/* PCI error interrupt handler.  Latches the PIO AFSR/AFAR, clears
 * the error bits (write-1-to-clear), logs the primary and secondary
 * causes, and scans both PCI buses for devices that logged matching
 * error types.  Falls back to sabre_pcierr_intr_other() when the
 * AFSR records no error.
 */
static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = p->pbm_A.controller_regs + SABRE_PIOAFSR;
	afar_reg = p->pbm_A.controller_regs + SABRE_PIOAFAR;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_PTA |
		 SABRE_PIOAFSR_PRTRY | SABRE_PIOAFSR_PPERR |
		 SABRE_PIOAFSR_SMA | SABRE_PIOAFSR_STA |
		 SABRE_PIOAFSR_SRTRY | SABRE_PIOAFSR_SPERR);
	if (!error_bits)
		return sabre_pcierr_intr_other(p);
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: PCI Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SABRE_PIOAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SABRE_PIOAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SABRE_PIOAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SABRE_PIOAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("SABRE%d: bytemask[%04lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_PIOAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_PIOAFSR_BLK) ? 1 : 0);
	printk("SABRE%d: PCI AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: PCI Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_PIOAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SABRE_PIOAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SABRE_PIOAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SABRE_PIOAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan both PCI buses for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
		sabre_check_iommu_error(p, afsr, afar);
		pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
		pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	/* For excessive retries, SABRE/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
		pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
	}

	return IRQ_HANDLED;
}
1000
1001/* XXX What about PowerFail/PowerManagement??? -DaveM */
1002#define SABRE_UE_INO 0x2e
1003#define SABRE_CE_INO 0x2f
1004#define SABRE_PCIERR_INO 0x30
/* Register the UE (uncorrectable), CE (correctable) and PCI error
 * interrupt handlers, then enable error reporting in the SABRE PCI
 * control register.  Interrupt numbers are built from the UPA
 * portid combined with the fixed INO of each error source.
 */
static void __init sabre_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
	unsigned long base = pbm->controller_regs;
	unsigned long irq, portid = pbm->portid;
	u64 tmp;

	/* We clear the error bits in the appropriate AFSR before
	 * registering the handler so that we don't get spurious
	 * interrupts.
	 */
	sabre_write(base + SABRE_UE_AFSR,
		    (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		     SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		     SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
	if (request_irq(irq, sabre_ue_intr,
			SA_SHIRQ, "SABRE UE", p) < 0) {
		prom_printf("SABRE%d: Cannot register UE interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* Same dance for correctable errors. */
	sabre_write(base + SABRE_CE_AFSR,
		    (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		     SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
	if (request_irq(irq, sabre_ce_intr,
			SA_SHIRQ, "SABRE CE", p) < 0) {
		prom_printf("SABRE%d: Cannot register CE interrupt.\n",
			    p->index);
		prom_halt();
	}

	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
	if (request_irq(irq, sabre_pcierr_intr,
			SA_SHIRQ, "SABRE PCIERR", p) < 0) {
		prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* Handlers are in place; now turn on error reporting. */
	tmp = sabre_read(base + SABRE_PCICTRL);
	tmp |= SABRE_PCICTRL_ERREN;
	sabre_write(base + SABRE_PCICTRL, tmp);
}
1051
1052static void __init sabre_resource_adjust(struct pci_dev *pdev,
1053 struct resource *res,
1054 struct resource *root)
1055{
1056 struct pci_pbm_info *pbm = pdev->bus->sysdata;
1057 unsigned long base;
1058
1059 if (res->flags & IORESOURCE_IO)
1060 base = pbm->controller_regs + SABRE_IOSPACE;
1061 else
1062 base = pbm->controller_regs + SABRE_MEMSPACE;
1063
1064 res->start += base;
1065 res->end += base;
1066}
1067
/* Propagate the assigned value of pdev's RESOURCE into the device's
 * BAR (or expansion ROM base) register in config space.  The value
 * written is relative to the controller's PCI address space base,
 * since the resource itself was adjusted to a CPU physical address.
 */
static void __init sabre_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res;
	unsigned long base;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		/* Standard BARs live at PCI_BASE_ADDRESS_0 + 4*n. */
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		base = pbm->controller_regs + SABRE_IOSPACE;
	else {
		base = pbm->controller_regs + SABRE_MEMSPACE;
		/* 64-bit memory BARs consume a second dword below. */
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* NOTE: 'size' is really (length - 1); it is used as a mask
	 * that preserves the BAR's low type/flag bits while replacing
	 * the address bits above them.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - base)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
1113
/* Initialize every APB (Simba) PCI-PCI bridge found on the SABRE
 * bus: enable bus mastering and IO/MEM decoding, clear stale status
 * bits, set sane latency timers, and enable error forwarding.
 */
static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {

		if (pdev->vendor == PCI_VENDOR_ID_SUN &&
		    pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
			u32 word32;
			u16 word16;

			/* The 2-byte config accessor works through a
			 * u32; only the low 16 bits are meaningful.
			 */
			sabre_read_pci_cfg(pdev->bus, pdev->devfn,
					   PCI_COMMAND, 2, &word32);
			word16 = (u16) word32;
			word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
				PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
				PCI_COMMAND_IO;
			word32 = (u32) word16;
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_COMMAND, 2, word32);

			/* Status register bits are "write 1 to clear". */
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_STATUS, 2, 0xffff);
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_SEC_STATUS, 2, 0xffff);

			/* Use a primary/secondary latency timer value
			 * of 64.
			 */
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_LATENCY_TIMER, 1, 64);
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_SEC_LATENCY_TIMER, 1, 64);

			/* Enable reporting/forwarding of master aborts,
			 * parity, and SERR.
			 */
			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
					    PCI_BRIDGE_CONTROL, 1,
					    (PCI_BRIDGE_CTL_PARITY |
					     PCI_BRIDGE_CTL_SERR |
					     PCI_BRIDGE_CTL_MASTER_ABORT));
		}
	}
}
1160
1161static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
1162{
1163 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
1164
1165 if (!cookie) {
1166 prom_printf("SABRE: Critical allocation failure.\n");
1167 prom_halt();
1168 }
1169
1170 /* All we care about is the PBM. */
1171 memset(cookie, 0, sizeof(*cookie));
1172 cookie->pbm = pbm;
1173
1174 return cookie;
1175}
1176
/* Scan the PCI bus below the SABRE host bridge, attach PBM state
 * to each APB segment found (or directly to the root bus on APB-less
 * Hummingbird systems), perform resource/IRQ fixups, and finally
 * register the controller's error interrupt handlers.
 */
static void __init sabre_scan_bus(struct pci_controller_info *p)
{
	static int once;
	struct pci_bus *sabre_bus, *pbus;
	struct pci_pbm_info *pbm;
	struct pcidev_cookie *cookie;
	int sabres_scanned;

	/* The APB bridge speaks to the Sabre host PCI bridge
	 * at 66Mhz, but the front side of APB runs at 33Mhz
	 * for both segments.
	 */
	p->pbm_A.is_66mhz_capable = 0;
	p->pbm_B.is_66mhz_capable = 0;

	/* This driver has not been verified to handle
	 * multiple SABREs yet, so trap this.
	 *
	 * Also note that the SABRE host bridge is hardwired
	 * to live at bus 0.
	 */
	if (once != 0) {
		prom_printf("SABRE: Multiple controllers unsupported.\n");
		prom_halt();
	}
	once++;

	/* Cookie for the host bridge device itself. */
	cookie = alloc_bridge_cookie(&p->pbm_A);

	sabre_bus = pci_scan_bus(p->pci_first_busno,
				 p->pci_ops,
				 &p->pbm_A);
	pci_fixup_host_bridge_self(sabre_bus);
	sabre_bus->self->sysdata = cookie;

	sabre_root_bus = sabre_bus;

	apb_init(p, sabre_bus);

	sabres_scanned = 0;

	/* Match each child bus to the PBM (APB segment) that owns
	 * its bus number range, then run the common fixups on it.
	 */
	list_for_each_entry(pbus, &sabre_bus->children, node) {

		if (pbus->number == p->pbm_A.pci_first_busno) {
			pbm = &p->pbm_A;
		} else if (pbus->number == p->pbm_B.pci_first_busno) {
			pbm = &p->pbm_B;
		} else
			continue;

		cookie = alloc_bridge_cookie(pbm);
		pbus->self->sysdata = cookie;

		sabres_scanned++;

		pbus->sysdata = pbm;
		pbm->pci_bus = pbus;
		pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
		pci_record_assignments(pbm, pbus);
		pci_assign_unassigned(pbm, pbus);
		pci_fixup_irq(pbm, pbus);
		pci_determine_66mhz_disposition(pbm, pbus);
		pci_setup_busmastering(pbm, pbus);
	}

	if (!sabres_scanned) {
		/* Hummingbird, no APBs.  The root bus itself is the
		 * (single) PCI segment; run the same fixups on it.
		 */
		pbm = &p->pbm_A;
		sabre_bus->sysdata = pbm;
		pbm->pci_bus = sabre_bus;
		pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
		pci_record_assignments(pbm, sabre_bus);
		pci_assign_unassigned(pbm, sabre_bus);
		pci_fixup_irq(pbm, sabre_bus);
		pci_determine_66mhz_disposition(pbm, sabre_bus);
		pci_setup_busmastering(pbm, sabre_bus);
	}

	sabre_register_error_handlers(p);
}
1257
/* Bring up the SABRE IOMMU: set up the software state shared by
 * both PBMs, invalidate the hardware TLB, allocate and install the
 * TSB (translation table), and enable translation with the given
 * TSBSIZE (in K entries), DVMA base offset and DMA address mask.
 */
static void __init sabre_iommu_init(struct pci_controller_info *p,
				    int tsbsize, unsigned long dvma_offset,
				    u32 dma_mask)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long tsbbase, i, order;
	u64 control;

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->iommu_cur_ctx = 0;

	/* Register addresses. */
	iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
	iommu->iommu_flush = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH;
	iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC;
	/* Sabre's IOMMU lacks ctx flushing. */
	iommu->iommu_ctxflush = 0;

	/* Invalidate TLB Entries.  Diagnostic mode gives us direct
	 * access to the 16 TLB tag/data entries so we can zero them.
	 */
	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
	control |= SABRE_IOMMUCTRL_DENAB;
	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);

	for(i = 0; i < 16; i++) {
		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0);
		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
	}

	/* Leave diag mode enabled for full-flushing done
	 * in pci_iommu.c
	 */

	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* TSB is tsbsize K entries of 8 bytes each. */
	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
	if (!tsbbase) {
		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;
	iommu->page_table_map_base = dvma_offset;
	iommu->dma_addr_mask = dma_mask;
	pci_iommu_table_init(iommu, PAGE_SIZE << order);

	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));

	/* Program TSB size and enable translation.  Only 64K and
	 * 128K entry tables are supported here.
	 */
	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
	control |= SABRE_IOMMUCTRL_ENAB;
	switch(tsbsize) {
	case 64:
		control |= SABRE_IOMMU_TSBSZ_64K;
		iommu->page_table_sz_bits = 16;
		break;
	case 128:
		control |= SABRE_IOMMU_TSBSZ_128K;
		iommu->page_table_sz_bits = 17;
		break;
	default:
		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
		prom_halt();
		break;
	}
	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map =
		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);

	for (i = 0; i < PBM_NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}
}
1340
1341static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
1342 struct pci_pbm_info *pbm)
1343{
1344 char *name = pbm->name;
1345 unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
1346 unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1347 unsigned int devfn;
1348 unsigned long first, last, i;
1349 u8 *addr, map;
1350
1351 sprintf(name, "SABRE%d PBM%c",
1352 p->index,
1353 (pbm == &p->pbm_A ? 'A' : 'B'));
1354 pbm->io_space.name = pbm->mem_space.name = name;
1355
1356 devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
1357 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
1358 map = 0;
1359 pci_config_read8(addr, &map);
1360
1361 first = 8;
1362 last = 0;
1363 for (i = 0; i < 8; i++) {
1364 if ((map & (1 << i)) != 0) {
1365 if (first > i)
1366 first = i;
1367 if (last < i)
1368 last = i;
1369 }
1370 }
1371 pbm->io_space.start = ibase + (first << 21UL);
1372 pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1);
1373 pbm->io_space.flags = IORESOURCE_IO;
1374
1375 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
1376 map = 0;
1377 pci_config_read8(addr, &map);
1378
1379 first = 8;
1380 last = 0;
1381 for (i = 0; i < 8; i++) {
1382 if ((map & (1 << i)) != 0) {
1383 if (first > i)
1384 first = i;
1385 if (last < i)
1386 last = i;
1387 }
1388 }
1389 pbm->mem_space.start = mbase + (first << 29UL);
1390 pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1);
1391 pbm->mem_space.flags = IORESOURCE_MEM;
1392
1393 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1394 prom_printf("Cannot register PBM-%c's IO space.\n",
1395 (pbm == &p->pbm_A ? 'A' : 'B'));
1396 prom_halt();
1397 }
1398 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1399 prom_printf("Cannot register PBM-%c's MEM space.\n",
1400 (pbm == &p->pbm_A ? 'A' : 'B'));
1401 prom_halt();
1402 }
1403
1404 /* Register legacy regions if this PBM covers that area. */
1405 if (pbm->io_space.start == ibase &&
1406 pbm->mem_space.start == mbase)
1407 pci_register_legacy_regions(&pbm->io_space,
1408 &pbm->mem_space);
1409}
1410
/* Probe for APB (Simba) bridge nodes underneath SABRE_NODE and set
 * up one PBM per Simba found, reading its bus-range, ranges and
 * interrupt-map properties from the PROM.  If no Simba is found
 * this is assumed to be a Hummingbird system whose PCI bus hangs
 * directly off the controller, and PBM A is configured to cover
 * the whole bus with hand-built top-level resources.
 */
static void __init sabre_pbm_init(struct pci_controller_info *p, int sabre_node, u32 dma_begin)
{
	struct pci_pbm_info *pbm;
	char namebuf[128];
	u32 busrange[2];
	int node, simbas_found;

	simbas_found = 0;
	node = prom_getchild(sabre_node);
	while ((node = prom_searchsiblings(node, "pci")) != 0) {
		int err;

		/* Only "SUNW,simba" model PCI nodes are APBs. */
		err = prom_getproperty(node, "model", namebuf, sizeof(namebuf));
		if ((err <= 0) || strncmp(namebuf, "SUNW,simba", err))
			goto next_pci;

		err = prom_getproperty(node, "bus-range",
				       (char *)&busrange[0], sizeof(busrange));
		if (err == 0 || err == -1) {
			prom_printf("APB: Error, cannot get PCI bus-range.\n");
			prom_halt();
		}

		simbas_found++;
		/* Bus 1 is segment B; everything else is segment A. */
		if (busrange[0] == 1)
			pbm = &p->pbm_B;
		else
			pbm = &p->pbm_A;
		pbm->chip_type = PBM_CHIP_TYPE_SABRE;
		pbm->parent = p;
		pbm->prom_node = node;
		pbm->pci_first_slot = 1;
		pbm->pci_first_busno = busrange[0];
		pbm->pci_last_busno = busrange[1];

		prom_getstring(node, "name", pbm->prom_name, sizeof(pbm->prom_name));
		/* prom_getproperty returns the property length in
		 * bytes, or -1 when the property does not exist.
		 */
		err = prom_getproperty(node, "ranges",
				       (char *)pbm->pbm_ranges,
				       sizeof(pbm->pbm_ranges));
		if (err != -1)
			pbm->num_pbm_ranges =
				(err / sizeof(struct linux_prom_pci_ranges));
		else
			pbm->num_pbm_ranges = 0;

		err = prom_getproperty(node, "interrupt-map",
				       (char *)pbm->pbm_intmap,
				       sizeof(pbm->pbm_intmap));
		if (err != -1) {
			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
			err = prom_getproperty(node, "interrupt-map-mask",
					       (char *)&pbm->pbm_intmask,
					       sizeof(pbm->pbm_intmask));
			if (err == -1) {
				prom_printf("APB: Fatal error, no interrupt-map-mask.\n");
				prom_halt();
			}
		} else {
			pbm->num_pbm_intmap = 0;
			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
		}

		pbm_register_toplevel_resources(p, pbm);

	next_pci:
		node = prom_getsibling(node);
		if (!node)
			break;
	}
	if (simbas_found == 0) {
		int err;

		/* No APBs underneath, probably this is a hummingbird
		 * system.
		 */
		pbm = &p->pbm_A;
		pbm->parent = p;
		pbm->prom_node = sabre_node;
		pbm->pci_first_busno = p->pci_first_busno;
		pbm->pci_last_busno = p->pci_last_busno;

		prom_getstring(sabre_node, "name", pbm->prom_name, sizeof(pbm->prom_name));
		err = prom_getproperty(sabre_node, "ranges",
				       (char *) pbm->pbm_ranges,
				       sizeof(pbm->pbm_ranges));
		if (err != -1)
			pbm->num_pbm_ranges =
				(err / sizeof(struct linux_prom_pci_ranges));
		else
			pbm->num_pbm_ranges = 0;

		err = prom_getproperty(sabre_node, "interrupt-map",
				       (char *) pbm->pbm_intmap,
				       sizeof(pbm->pbm_intmap));

		if (err != -1) {
			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
			err = prom_getproperty(sabre_node, "interrupt-map-mask",
					       (char *)&pbm->pbm_intmask,
					       sizeof(pbm->pbm_intmask));
			if (err == -1) {
				prom_printf("Hummingbird: Fatal error, no interrupt-map-mask.\n");
				prom_halt();
			}
		} else {
			pbm->num_pbm_intmap = 0;
			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
		}


		sprintf(pbm->name, "SABRE%d PBM%c", p->index,
			(pbm == &p->pbm_A ? 'A' : 'B'));
		pbm->io_space.name = pbm->mem_space.name = pbm->name;

		/* Hack up top-level resources.  IO space is a fixed
		 * 16MB window; MEM space runs up to the DVMA base.
		 */
		pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
		pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL;
		pbm->io_space.flags = IORESOURCE_IO;

		pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE;
		pbm->mem_space.end = pbm->mem_space.start + (unsigned long)dma_begin - 1UL;
		pbm->mem_space.flags = IORESOURCE_MEM;

		if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
			prom_printf("Cannot register Hummingbird's IO space.\n");
			prom_halt();
		}
		if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
			prom_printf("Cannot register Hummingbird's MEM space.\n");
			prom_halt();
		}

		pci_register_legacy_regions(&pbm->io_space,
					    &pbm->mem_space);
	}
}
1547
/* Top-level probe entry for a SABRE (UltraSPARC-IIi) or Hummingbird
 * (UltraSPARC-IIe) PCI controller PROM node.  Allocates controller
 * and IOMMU software state, maps the register space, clears pending
 * interrupts, programs the IOMMU from the virtual-dma property, and
 * finally probes for APB bridges underneath.
 */
void __init sabre_init(int pnode, char *model_name)
{
	struct linux_prom64_registers pr_regs[2];
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	int tsbsize, err;
	u32 busrange[2];
	u32 vdma[2];
	u32 upa_portid, dma_mask;
	u64 clear_irq;

	/* Hummingbird detection: either the model/compatible
	 * property says "pci108e,a001", or the boot CPU is an
	 * UltraSPARC-IIe.
	 */
	hummingbird_p = 0;
	if (!strcmp(model_name, "pci108e,a001"))
		hummingbird_p = 1;
	else if (!strcmp(model_name, "SUNW,sabre")) {
		char compat[64];

		if (prom_getproperty(pnode, "compatible",
				     compat, sizeof(compat)) > 0 &&
		    !strcmp(compat, "pci108e,a001")) {
			hummingbird_p = 1;
		} else {
			int cpu_node;

			/* Of course, Sun has to encode things a thousand
			 * different ways, inconsistently.
			 */
			cpu_find_by_instance(0, &cpu_node, NULL);
			if (prom_getproperty(cpu_node, "name",
					     compat, sizeof(compat)) > 0 &&
			    !strcmp(compat, "SUNW,UltraSPARC-IIe"))
				hummingbird_p = 1;
		}
	}

	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (!p) {
		prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));

	/* Both PBMs share one IOMMU on this chip. */
	iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = p->pbm_B.iommu = iommu;

	upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);

	/* Link into the global controller list. */
	p->next = pci_controller_root;
	pci_controller_root = p;

	p->pbm_A.portid = upa_portid;
	p->pbm_B.portid = upa_portid;
	p->index = pci_num_controllers++;
	p->pbms_same_domain = 1;
	p->scan_bus = sabre_scan_bus;
	p->irq_build = sabre_irq_build;
	p->base_address_update = sabre_base_address_update;
	p->resource_adjust = sabre_resource_adjust;
	p->pci_ops = &sabre_ops;

	/*
	 * Map in SABRE register set and report the presence of this SABRE.
	 */
	err = prom_getproperty(pnode, "reg",
			       (char *)&pr_regs[0], sizeof(pr_regs));
	if(err == 0 || err == -1) {
		prom_printf("SABRE: Error, cannot get U2P registers "
			    "from PROM.\n");
		prom_halt();
	}

	/*
	 * First REG in property is base of entire SABRE register space.
	 */
	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;

	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
	       p->pbm_A.controller_regs, pci_dma_wsync);

	/* Clear interrupts */

	/* PCI first */
	for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);

	/* Then OBIO */
	for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);

	/* Error interrupts are enabled later after the bus scan. */
	sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL,
		    (SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
		     SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));

	/* Now map in PCI config space for entire SABRE. */
	p->pbm_A.config_space = p->pbm_B.config_space =
		(p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
	printk("SABRE: Shared PCI config space at %016lx\n",
	       p->pbm_A.config_space);

	err = prom_getproperty(pnode, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if(err == 0 || err == -1) {
		prom_printf("SABRE: Error, cannot get virtual-dma property "
			    "from PROM.\n");
		prom_halt();
	}

	/* vdma[0] is the DVMA base, vdma[1] the size; pick the TSB
	 * size that covers the DVMA window.
	 */
	dma_mask = vdma[0];
	switch(vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;
		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 128;
			break;
		default:
			prom_printf("SABRE: strange virtual-dma size.\n");
			prom_halt();
	}

	sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);

	printk("SABRE: DVMA at %08x [%08x]\n", vdma[0], vdma[1]);

	err = prom_getproperty(pnode, "bus-range",
			       (char *)&busrange[0], sizeof(busrange));
	if(err == 0 || err == -1) {
		prom_printf("SABRE: Error, cannot get PCI bus-range "
			    " from PROM.\n");
		prom_halt();
	}

	p->pci_first_busno = busrange[0];
	p->pci_last_busno = busrange[1];

	/*
	 * Look for APB underneath.
	 */
	sabre_pbm_init(p, pnode, vdma[0]);
}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
new file mode 100644
index 000000000000..e93fcadc3722
--- /dev/null
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -0,0 +1,2187 @@
1/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
2 * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
3 *
4 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/pci.h>
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/interrupt.h>
13
14#include <asm/pbm.h>
15#include <asm/iommu.h>
16#include <asm/irq.h>
17#include <asm/upa.h>
18
19#include "pci_impl.h"
20#include "iommu_common.h"
21
/* All SCHIZO registers are 64-bits.  The following accessor
 * routines are how they are accessed.  The REG parameter
 * is a physical address.
 *
 * The ldxa/stxa with ASI_PHYS_BYPASS_EC_E perform 64-bit
 * physically-addressed, cache-bypassing accesses, so no mapping
 * of the register space is required.  The "memory" clobber keeps
 * the compiler from reordering other memory operations around
 * these register accesses.
 */
#define schizo_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define schizo_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")
40
41/* This is a convention that at least Excalibur and Merlin
42 * follow. I suppose the SCHIZO used in Starcat and friends
43 * will do similar.
44 *
45 * The only way I could see this changing is if the newlink
46 * block requires more space in Schizo's address space than
47 * they predicted, thus requiring an address space reorg when
48 * the newer Schizo is taped out.
49 */
50
51/* Streaming buffer control register. */
52#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
53#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
54#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
55#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
56#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
57
58/* IOMMU control register. */
59#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
60#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
61#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
62#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
63#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
64#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
65#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
66#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
67#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
68#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
69#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
70#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
71#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
72#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
73#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
74#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
75#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
76#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
77
78/* Schizo config space address format is nearly identical to
79 * that of PSYCHO:
80 *
81 * 32 24 23 16 15 11 10 8 7 2 1 0
82 * ---------------------------------------------------------
83 * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
84 * ---------------------------------------------------------
85 */
86#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
87#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
88 (((unsigned long)(BUS) << 16) | \
89 ((unsigned long)(DEVFN) << 8) | \
90 ((unsigned long)(REG)))
91
92static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
93 unsigned char bus,
94 unsigned int devfn,
95 int where)
96{
97 if (!pbm)
98 return NULL;
99 bus -= pbm->pci_first_busno;
100 return (void *)
101 (SCHIZO_CONFIG_BASE(pbm) |
102 SCHIZO_CONFIG_ENCODE(bus, devfn, where));
103}
104
105/* Just make sure the bus number is in range. */
106static int schizo_out_of_range(struct pci_pbm_info *pbm,
107 unsigned char bus,
108 unsigned char devfn)
109{
110 if (bus < pbm->pci_first_busno ||
111 bus > pbm->pci_last_busno)
112 return 1;
113 return 0;
114}
115
116/* SCHIZO PCI configuration space accessors. */
117
/* Read SIZE bytes at config-space offset WHERE for (bus, devfn)
 * into *VALUE.  Out-of-range or unmappable accesses leave *VALUE
 * as all-ones (the PCI "no device" pattern) and still return
 * PCIBIOS_SUCCESSFUL; misaligned accesses are logged and dropped.
 */
static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
			       int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;
	u16 tmp16;
	u8 tmp8;

	/* Pre-fill with all-ones so early exits read as "no device". */
	switch (size) {
	case 1:
		*value = 0xff;
		break;
	case 2:
		*value = 0xffff;
		break;
	case 4:
		*value = 0xffffffff;
		break;
	}

	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (schizo_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;
	switch (size) {
	case 1:
		pci_config_read8((u8 *)addr, &tmp8);
		*value = tmp8;
		break;

	case 2:
		if (where & 0x01) {
			printk("pci_read_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read16((u16 *)addr, &tmp16);
		*value = tmp16;
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_read_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read32(addr, value);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
172
173static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
174 int where, int size, u32 value)
175{
176 struct pci_pbm_info *pbm = bus_dev->sysdata;
177 unsigned char bus = bus_dev->number;
178 u32 *addr;
179
180 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
181 if (!addr)
182 return PCIBIOS_SUCCESSFUL;
183
184 if (schizo_out_of_range(pbm, bus, devfn))
185 return PCIBIOS_SUCCESSFUL;
186
187 switch (size) {
188 case 1:
189 pci_config_write8((u8 *)addr, value);
190 break;
191
192 case 2:
193 if (where & 0x01) {
194 printk("pci_write_config_word: misaligned reg [%x]\n",
195 where);
196 return PCIBIOS_SUCCESSFUL;
197 }
198 pci_config_write16((u16 *)addr, value);
199 break;
200
201 case 4:
202 if (where & 0x03) {
203 printk("pci_write_config_dword: misaligned reg [%x]\n",
204 where);
205 return PCIBIOS_SUCCESSFUL;
206 }
207
208 pci_config_write32(addr, value);
209 }
210 return PCIBIOS_SUCCESSFUL;
211}
212
/* pci_ops vector routing config-space accesses through the SCHIZO
 * accessors above; installed on the buses this controller owns.
 */
static struct pci_ops schizo_ops = {
	.read =		schizo_read_pci_cfg,
	.write =	schizo_write_pci_cfg,
};
217
/* SCHIZO interrupt mapping support.  Unlike Psycho, for this controller the
 * imap/iclr registers are per-PBM.
 */
#define SCHIZO_IMAP_BASE	0x1000UL
#define SCHIZO_ICLR_BASE	0x1400UL

/* Byte offset of the IMAP register for interrupt number INO
 * (one 8-byte register per INO).
 */
static unsigned long schizo_imap_offset(unsigned long ino)
{
	return SCHIZO_IMAP_BASE + (ino << 3);
}

/* Byte offset of the ICLR register for interrupt number INO. */
static unsigned long schizo_iclr_offset(unsigned long ino)
{
	return SCHIZO_ICLR_BASE + (ino << 3);
}
233
/* PCI SCHIZO INO number to Sparc PIL level.  This table only matters for
 * INOs which will not have an associated PCI device struct, ie. onboard
 * EBUS devices and PCI controller internal error interrupts.
 *
 * A zero entry means "no fixed PIL": schizo_ino_to_pil() falls back
 * to a PIL derived from the device's PCI base class (or 4 when
 * there is no device).
 */
static unsigned char schizo_pil_table[] = {
/*0x00*/0, 0, 0, 0,	/* PCI slot 0  Int A, B, C, D	*/
/*0x04*/0, 0, 0, 0,	/* PCI slot 1  Int A, B, C, D	*/
/*0x08*/0, 0, 0, 0,	/* PCI slot 2  Int A, B, C, D	*/
/*0x0c*/0, 0, 0, 0,	/* PCI slot 3  Int A, B, C, D	*/
/*0x10*/0, 0, 0, 0,	/* PCI slot 4  Int A, B, C, D	*/
/*0x14*/0, 0, 0, 0,	/* PCI slot 5  Int A, B, C, D	*/
/*0x18*/4,		/* SCSI				*/
/*0x19*/4,		/* second SCSI			*/
/*0x1a*/0,		/* UNKNOWN			*/
/*0x1b*/0,		/* UNKNOWN			*/
/*0x1c*/8,		/* Parallel			*/
/*0x1d*/5,		/* Ethernet			*/
/*0x1e*/8,		/* Firewire-1394		*/
/*0x1f*/9,		/* USB				*/
/*0x20*/13,		/* Audio Record			*/
/*0x21*/14,		/* Audio Playback		*/
/*0x22*/12,		/* Serial			*/
/*0x23*/4,		/* EBUS I2C 			*/
/*0x24*/10,		/* RTC Clock			*/
/*0x25*/11,		/* Floppy			*/
/*0x26*/0,		/* UNKNOWN			*/
/*0x27*/0,		/* UNKNOWN			*/
/*0x28*/0,		/* UNKNOWN			*/
/*0x29*/0,		/* UNKNOWN			*/
/*0x2a*/10,		/* UPA 1			*/
/*0x2b*/10,		/* UPA 2			*/
/*0x2c*/0,		/* UNKNOWN			*/
/*0x2d*/0,		/* UNKNOWN			*/
/*0x2e*/0,		/* UNKNOWN			*/
/*0x2f*/0,		/* UNKNOWN			*/
/*0x30*/15,		/* Uncorrectable ECC		*/
/*0x31*/15,		/* Correctable ECC		*/
/*0x32*/15,		/* PCI Bus A Error		*/
/*0x33*/15,		/* PCI Bus B Error		*/
/*0x34*/15,		/* Safari Bus Error		*/
/*0x35*/0,		/* Reserved			*/
/*0x36*/0,		/* Reserved			*/
/*0x37*/0,		/* Reserved			*/
/*0x38*/0,		/* Reserved for NewLink		*/
/*0x39*/0,		/* Reserved for NewLink		*/
/*0x3a*/0,		/* Reserved for NewLink		*/
/*0x3b*/0,		/* Reserved for NewLink		*/
/*0x3c*/0,		/* Reserved for NewLink		*/
/*0x3d*/0,		/* Reserved for NewLink		*/
/*0x3e*/0,		/* Reserved for NewLink		*/
/*0x3f*/0,		/* Reserved for NewLink		*/
};
286
287static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
288{
289 int ret;
290
291 if (pdev &&
292 pdev->vendor == PCI_VENDOR_ID_SUN &&
293 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
294 return 9;
295
296 ret = schizo_pil_table[ino];
297 if (ret == 0 && pdev == NULL) {
298 ret = 4;
299 } else if (ret == 0) {
300 switch ((pdev->class >> 16) & 0xff) {
301 case PCI_BASE_CLASS_STORAGE:
302 ret = 4;
303 break;
304
305 case PCI_BASE_CLASS_NETWORK:
306 ret = 6;
307 break;
308
309 case PCI_BASE_CLASS_DISPLAY:
310 ret = 9;
311 break;
312
313 case PCI_BASE_CLASS_MULTIMEDIA:
314 case PCI_BASE_CLASS_MEMORY:
315 case PCI_BASE_CLASS_BRIDGE:
316 case PCI_BASE_CLASS_SERIAL:
317 ret = 10;
318 break;
319
320 default:
321 ret = 4;
322 break;
323 };
324 }
325
326 return ret;
327}
328
/* Build the IRQ/ino_bucket cookie for interrupt INO on PBM @pbm.
 * @pdev may be NULL for controller-internal interrupt sources.
 * Returns a cookie suitable for request_irq().
 */
static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
{
	struct ino_bucket *bucket;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int pil, ign_fixup;

	ino &= PCI_IRQ_INO;
	imap_off = schizo_imap_offset(ino);

	/* Now build the IRQ bucket. */
	pil = schizo_ino_to_pil(pdev, ino);

	if (PIL_RESERVED(pil))
		BUG();

	/* NOTE(review): the "+ 4" below apparently selects the low
	 * 32 bits of the 64-bit imap/iclr registers for 32-bit
	 * access (it matches the "+ 4" used when programming imaps
	 * in tomatillo_register_error_handlers) -- confirm against
	 * the chip documentation.
	 */
	imap = pbm->pbm_regs + imap_off;
	imap += 4;

	iclr_off = schizo_iclr_offset(ino);
	iclr = pbm->pbm_regs + iclr_off;
	iclr += 4;

	/* On Schizo, no inofixup occurs.  This is because each
	 * INO has its own IMAP register.  On Psycho and Sabre
	 * there is only one IMAP register for each PCI slot even
	 * though four different INOs can be generated by each
	 * PCI slot.
	 *
	 * But, for JBUS variants (essentially, Tomatillo), we have
	 * to fixup the lowest bit of the interrupt group number.
	 */
	ign_fixup = 0;
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		if (pbm->portid & 1)
			ign_fixup = (1 << 6);
	}

	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	return __irq(bucket);
}
374
/* SCHIZO error handling support. */
enum schizo_error_type {
	UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
};

/* Scratch buffers for snapshotting streaming-cache diagnostic state
 * in __schizo_check_stc_error_pbm(); shared across controllers, so
 * all access is serialized by stc_buf_lock.
 */
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];

/* INOs of the controller-internal error interrupt sources. */
#define SCHIZO_UE_INO		0x30 /* Uncorrectable ECC error */
#define SCHIZO_CE_INO		0x31 /* Correctable ECC error */
#define SCHIZO_PCIERR_A_INO	0x32 /* PBM A PCI bus error */
#define SCHIZO_PCIERR_B_INO	0x33 /* PBM B PCI bus error */
#define SCHIZO_SERR_INO		0x34 /* Safari interface error */
390
391struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
392{
393 ino &= IMAP_INO;
394 if (p->pbm_A.ino_bitmap & (1UL << ino))
395 return &p->pbm_A;
396 if (p->pbm_B.ino_bitmap & (1UL << ino))
397 return &p->pbm_B;
398
399 printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps "
400 "PBM_A[%016lx] PBM_B[%016lx]",
401 p->index, ino,
402 p->pbm_A.ino_bitmap,
403 p->pbm_B.ino_bitmap);
404 printk("PCI%d: Using PBM_A, report this problem immediately.\n",
405 p->index);
406
407 return &p->pbm_A;
408}
409
/* After handling a chip-wide error interrupt (UE/CE/Safari), also
 * idle the matching interrupt's ICLR on the sibling PBM, since the
 * same chip-wide source is wired to both PBMs.  Per-PBM PCI bus
 * error interrupts are left untouched.
 */
static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
{
	struct pci_pbm_info *pbm;
	struct ino_bucket *bucket;
	unsigned long iclr;

	/* Do not clear the interrupt for the other PCI bus.
	 *
	 * This "ACK both PBM IRQs" only needs to be performed
	 * for chip-wide error interrupts.
	 */
	if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
	    (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
		return;

	/* Find the PBM we did NOT just handle... */
	pbm = pbm_for_ino(p, irq);
	if (pbm == &p->pbm_A)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	/* ...rebuild the IRQ cookie for the same INO on that PBM
	 * (portid forms the interrupt group number bits), and mark
	 * its ICLR idle.
	 */
	irq = schizo_irq_build(pbm, NULL,
			       (pbm->portid << 6) | (irq & IMAP_INO));
	bucket = __bucket(irq);
	iclr = bucket->iclr;

	upa_writel(ICLR_IDLE, iclr);
}
438
439#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
440#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
441#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
442
443#define SCHIZO_STCERR_WRITE 0x2UL
444#define SCHIZO_STCERR_READ 0x1UL
445
446#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
447#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
448#define SCHIZO_STCTAG_VALID 0x8000000000000000UL
449#define SCHIZO_STCTAG_READ 0x4000000000000000UL
450
451#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
452#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
453#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
454#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
455#define SCHIZO_STCLINE_VALID 0x0000000000600000UL
456#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
457
/* Snapshot, clear, and report one PBM's streaming cache (STC)
 * diagnostic state after an error.  Runs under stc_buf_lock because
 * the snapshot buffers are shared by all controllers.  @type is
 * currently unused here; it is kept for symmetry with the IOMMU
 * checker.
 */
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct pci_strbuf *strbuf = &pbm->stc;
	unsigned long regbase = pbm->pbm_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	err_base = regbase + SCHIZO_STC_ERR;
	tag_base = regbase + SCHIZO_STC_TAG;
	line_base = regbase + SCHIZO_STC_LINE;

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous.  When we put the
	 * streaming buffer into diagnostic mode to probe
	 * its tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer.  If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = schizo_read(strbuf->strbuf_control);
	schizo_write(strbuf->strbuf_control,
		     (control | SCHIZO_STRBUF_CTRL_DENAB));
	/* Snapshot then zero the 128 per-entry error words... */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = schizo_read(err_base + (i * 8UL));
		schizo_write(err_base + (i * 8UL), 0UL);
		stc_error_buf[i] = val;
	}
	/* ...and the 16 tag/line register pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
		stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
		schizo_write(tag_base + (i * 8UL), 0UL);
		schizo_write(line_base + (i * 8UL), 0UL);
	}

	/* OK, state is logged, exit diagnostic mode. */
	schizo_write(strbuf->strbuf_control, control);

	/* Report: 8 error words per line tag. */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       pbm->name,
				       j,
				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
			       pbm->name,
			       i,
			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
			       (tagval & SCHIZO_STCTAG_VPN),
			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));

			/* XXX Should spit out per-bank error information... -DaveM */
			printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       pbm->name,
			       i,
			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
546
547/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
548 * controller level errors.
549 */
550
551#define SCHIZO_IOMMU_TAG 0xa580UL
552#define SCHIZO_IOMMU_DATA 0xa600UL
553
554#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
555#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
556#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
557#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
558#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
559#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
560#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
561
562#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
563#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
564#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
565
/* Probe one PBM's IOMMU for translation errors.  If the control
 * register reports one, snapshot and clear the 16 TLB tag/data
 * pairs in diagnostic mode and print every entry carrying error
 * status.  Finishes by running the streaming-cache check when that
 * unit is enabled.  Runs with the IOMMU lock held and IRQs off.
 */
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = schizo_read(iommu->iommu_control);
	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
		unsigned long base;
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
		schizo_write(iommu->iommu_control, control);

		/* Decode the translation error status field. */
		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("%s: IOMMU Error, type[%s]\n",
		       pbm->name, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * its TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further.  But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		schizo_write(iommu->iommu_control,
			     control | SCHIZO_IOMMU_CTRL_DENAB);

		base = pbm->pbm_regs;

		/* Snapshot and clear all 16 TLB entries. */
		for (i = 0; i < 16; i++) {
			iommu_tag[i] =
				schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
			schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		schizo_write(iommu->iommu_control, control);

		/* Report every entry flagged with error status. */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SCHIZO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
			       "sz(%dK) vpg(%08lx)]\n",
			       pbm->name, i, type_string,
			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       pbm->name, i,
			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	if (pbm->stc.strbuf_enabled)
		__schizo_check_stc_error_pbm(pbm, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
675
676static void schizo_check_iommu_error(struct pci_controller_info *p,
677 enum schizo_error_type type)
678{
679 schizo_check_iommu_error_pbm(&p->pbm_A, type);
680 schizo_check_iommu_error_pbm(&p->pbm_B, type);
681}
682
683/* Uncorrectable ECC error status gathering. */
684#define SCHIZO_UE_AFSR 0x10030UL
685#define SCHIZO_UE_AFAR 0x10038UL
686
687#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */
688#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */
689#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */
690#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */
691#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */
692#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */
693#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */
694#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */
695#define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */
696#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */
697#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */
698#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */
699#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */
700#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */
701
/* Interrupt handler for chip-wide uncorrectable ECC errors.
 * The UE AFSR/AFAR pair lives in the PBM B register bank.  Latches
 * and clears the error status, logs the decoded primary/secondary
 * bits, then interrogates both IOMMUs.  Returns IRQ_NONE when no
 * UE status bits were actually set.
 */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFSR;
	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported, limit;

	/* Latch uncorrectable error status. */
	afar = schizo_read(afar_reg);

	/* If either of the error pending bits are set in the
	 * AFSR, the error status is being actively updated by
	 * the hardware and we must re-read to get a clean value.
	 * Bounded at 1000 iterations so we cannot wedge here.
	 */
	limit = 1000;
	do {
		afsr = schizo_read(afsr_reg);
	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
	if (!error_bits)
		return IRQ_NONE;
	schizo_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PCI%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
	printk("PCI%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PCI%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SCHIZO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SCHIZO_UEAFSR_SDMA) {
		reported++;
		printk("(DMA)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	schizo_check_iommu_error(p, UE_ERR);

	schizo_clear_other_err_intr(p, irq);

	return IRQ_HANDLED;
}
773
774#define SCHIZO_CE_AFSR 0x10040UL
775#define SCHIZO_CE_AFAR 0x10048UL
776
777#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
778#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
779#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
780#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
781#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
782#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
783#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
784#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
785#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
786#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
787#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
788#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
789#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
790#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
791
792static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
793{
794 struct pci_controller_info *p = dev_id;
795 unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR;
796 unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR;
797 unsigned long afsr, afar, error_bits;
798 int reported, limit;
799
800 /* Latch error status. */
801 afar = schizo_read(afar_reg);
802
803 /* If either of the error pending bits are set in the
804 * AFSR, the error status is being actively updated by
805 * the hardware and we must re-read to get a clean value.
806 */
807 limit = 1000;
808 do {
809 afsr = schizo_read(afsr_reg);
810 } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
811
812 /* Clear primary/secondary error status bits. */
813 error_bits = afsr &
814 (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
815 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
816 if (!error_bits)
817 return IRQ_NONE;
818 schizo_write(afsr_reg, error_bits);
819
820 /* Log the error. */
821 printk("PCI%d: Correctable Error, primary error type[%s]\n",
822 p->index,
823 (((error_bits & SCHIZO_CEAFSR_PPIO) ?
824 "PIO" :
825 ((error_bits & SCHIZO_CEAFSR_PDRD) ?
826 "DMA Read" :
827 ((error_bits & SCHIZO_CEAFSR_PDWR) ?
828 "DMA Write" : "???")))));
829
830 /* XXX Use syndrome and afar to print out module string just like
831 * XXX UDB CE trap handler does... -DaveM
832 */
833 printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
834 p->index,
835 (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
836 (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
837 (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
838 printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
839 p->index,
840 (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
841 (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
842 (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
843 (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
844 (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
845 printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar);
846 printk("PCI%d: CE Secondary errors [", p->index);
847 reported = 0;
848 if (afsr & SCHIZO_CEAFSR_SPIO) {
849 reported++;
850 printk("(PIO)");
851 }
852 if (afsr & SCHIZO_CEAFSR_SDMA) {
853 reported++;
854 printk("(DMA)");
855 }
856 if (!reported)
857 printk("(none)");
858 printk("]\n");
859
860 schizo_clear_other_err_intr(p, irq);
861
862 return IRQ_HANDLED;
863}
864
865#define SCHIZO_PCI_AFSR 0x2010UL
866#define SCHIZO_PCI_AFAR 0x2018UL
867
868#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */
869#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */
870#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */
871#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */
872#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */
873#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */
874#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */
875#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */
876#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */
877#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */
878#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */
879#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */
880#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */
881#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */
882#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */
883#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */
884#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */
885
886#define SCHIZO_PCI_CTRL (0x2000UL)
887#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
888#define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */
889#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
890#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
891#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */
892#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */
893#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */
894#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */
895#define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */
896#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */
897#define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */
898#define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */
899#define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */
900#define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */
901#define SCHIZO_PCICTRL_PTO_SHIFT 24UL
902#define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */
903#define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */
904#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
905#define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */
906#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */
907#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
908#define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */
909#define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */
910#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */
911#define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */
912#define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */
913
/* Secondary PCI error check, run when the PCI AFSR showed no
 * error bits.  Inspects the PBM control register for bus-level
 * error conditions and the bridge's PCI_STATUS word, clearing
 * whatever is found.  Returns IRQ_HANDLED only if some error was
 * actually present.
 */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
	csr = schizo_read(csr_reg);
	csr_error_bits =
		csr & (SCHIZO_PCICTRL_BUS_UNUS |
		       SCHIZO_PCICTRL_TTO_ERR |
		       SCHIZO_PCICTRL_RTRY_ERR |
		       SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_ERR |
		       SCHIZO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors.  (NOTE(review): writing the full
		 * csr value back is assumed to ack the latched error
		 * bits -- presumably write-one-to-clear semantics;
		 * confirm against the chip documentation.)
		 */
		schizo_write(csr_reg, csr);

		/* Log 'em.  */
		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
			printk("%s: Bus unusable error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
			printk("%s: PCI TRDY# timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
			printk("%s: PCI excessive retry error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
			printk("%s: PCI discard timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Also check (and clear by writing all-ones) the bridge's
	 * own PCI status word.
	 */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
967
/* Interrupt handler for per-PBM PCI bus errors.  Latches and clears
 * the PCI AFSR/AFAR pair, logs the decoded primary and secondary
 * error bits, then scans the PBM's bus for devices whose config
 * status matches.  When the AFSR shows nothing, falls through to
 * schizo_pcierr_intr_other() for control-register level checks.
 */
static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg, base;
	unsigned long afsr, afar, error_bits;
	int reported;

	base = pbm->pbm_regs;

	afsr_reg = base + SCHIZO_PCI_AFSR;
	afar_reg = base + SCHIZO_PCI_AFAR;

	/* Latch error status. */
	afar = schizo_read(afar_reg);
	afsr = schizo_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
	if (!error_bits)
		return schizo_pcierr_intr_other(pbm);
	schizo_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("%s: PCI Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
		    "Parity Error" :
		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
		     "Timeout" :
		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
		      "Bus Unusable" : "???"))))))));
	printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
	       pbm->name,
	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
		"Config" :
		((afsr & SCHIZO_PCIAFSR_MEM) ?
		 "Memory" :
		 ((afsr & SCHIZO_PCIAFSR_IO) ?
		  "I/O" : "???"))));
	printk("%s: PCI AFAR [%016lx]\n",
	       pbm->name, afar);
	printk("%s: PCI Secondary errors [",
	       pbm->name);
	reported = 0;
	if (afsr & SCHIZO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SCHIZO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (afsr & SCHIZO_PCIAFSR_STTO) {
		reported++;
		printk("(Timeout)");
	}
	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
		reported++;
		printk("(Bus Unusable)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
		schizo_check_iommu_error(p, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	schizo_clear_other_err_intr(p, irq);

	return IRQ_HANDLED;
}
1085
1086#define SCHIZO_SAFARI_ERRLOG 0x10018UL
1087
1088#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
1089
1090#define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */
1091#define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */
1092#define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */
1093#define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */
1094#define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */
1095#define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */
1096#define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */
1097#define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */
1098#define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */
1099#define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */
1100#define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */
1101#define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */
1102#define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */
1103#define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */
1104#define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */
1105#define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */
1106#define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */
1107#define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */
1108#define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */
1109#define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */
1110#define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */
1111#define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */
1112#define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */
1113#define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */
1114#define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */
1115#define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */
1116#define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */
1117#define BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */
1118#define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */
1119#define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */
1120#define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */
1121
1122/* We only expect UNMAP errors here. The rest of the Safari errors
1123 * are marked fatal and thus cause a system reset.
1124 */
1125static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
1126{
1127 struct pci_controller_info *p = dev_id;
1128 u64 errlog;
1129
1130 errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG);
1131 schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG,
1132 errlog & ~(SAFARI_ERRLOG_ERROUT));
1133
1134 if (!(errlog & BUS_ERROR_UNMAP)) {
1135 printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
1136 p->index, errlog);
1137
1138 schizo_clear_other_err_intr(p, irq);
1139 return IRQ_HANDLED;
1140 }
1141
1142 printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
1143 p->index);
1144 schizo_check_iommu_error(p, SAFARI_ERR);
1145
1146 schizo_clear_other_err_intr(p, irq);
1147 return IRQ_HANDLED;
1148}
1149
1150/* Nearly identical to PSYCHO equivalents... */
1151#define SCHIZO_ECC_CTRL 0x10020UL
1152#define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
1153#define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
1154#define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */
1155
1156#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
1157#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
1158#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
1159#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
1160
1161/* How the Tomatillo IRQs are routed around is pure guesswork here.
1162 *
1163 * All the Tomatillo devices I see in prtconf dumps seem to have only
 * a single PCI bus unit attached to it. It would seem they are separate
1165 * devices because their PortID (ie. JBUS ID) values are all different
1166 * and thus the registers are mapped to totally different locations.
1167 *
1168 * However, two Tomatillo's look "similar" in that the only difference
1169 * in their PortID is the lowest bit.
1170 *
1171 * So if we were to ignore this lower bit, it certainly looks like two
1172 * PCI bus units of the same Tomatillo. I still have not really
1173 * figured this out...
1174 */
/* Register UE, CE, per-PBM PCI error and JBUS (Safari) error
 * interrupt handlers for a Tomatillo controller, then enable the
 * corresponding error detection/reporting bits in the hardware.
 *
 * Called from __schizo_scan_bus() only after both PCI busses have
 * been scanned, so pbm_for_ino() lookups are valid.
 */
static void __init tomatillo_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm;
	unsigned int irq;
	struct ino_bucket *bucket;
	u64 tmp, err_mask, err_no_mask;

	/* Build IRQs and register handlers. */
	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
	if (request_irq(irq, schizo_ue_intr,
			SA_SHIRQ, "TOMATILLO UE", p) < 0) {
		prom_printf("%s: Cannot register UE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	/* After each registration the IMAP value is copied into the
	 * word at imap-offset + 4 inside the PBM register block.
	 * NOTE(review): the purpose of this mirror write is not
	 * visible here -- confirm against Tomatillo documentation.
	 */
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_UE_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
	if (request_irq(irq, schizo_ce_intr,
			SA_SHIRQ, "TOMATILLO CE", p) < 0) {
		prom_printf("%s: Cannot register CE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_CE_INO) + 4));

	/* The per-PBM PCI error handlers get the PBM itself as their
	 * handler argument, unlike UE/CE/SERR which get the controller.
	 */
	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
					   SCHIZO_PCIERR_A_INO));
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
					    SCHIZO_PCIERR_B_INO));
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
	if (request_irq(irq, schizo_safarierr_intr,
			SA_SHIRQ, "TOMATILLO SERR", p) < 0) {
		prom_printf("%s: Cannot register SafariERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));

	/* Enable UE and CE interrupts for controller. */
	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
		     (SCHIZO_ECCCTRL_EE |
		      SCHIZO_ECCCTRL_UE |
		      SCHIZO_ECCCTRL_CE));

	schizo_write(p->pbm_B.controller_regs + SCHIZO_ECC_CTRL,
		     (SCHIZO_ECCCTRL_EE |
		      SCHIZO_ECCCTRL_UE |
		      SCHIZO_ECCCTRL_CE));

	/* Enable PCI Error interrupts and clear error
	 * bits.
	 */
	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
		    SCHIZO_PCICTRL_TTO_ERR |
		    SCHIZO_PCICTRL_RTRY_ERR |
		    SCHIZO_PCICTRL_SERR |
		    SCHIZO_PCICTRL_EEN);

	/* Discard-timeout error reporting is explicitly disabled. */
	err_no_mask = SCHIZO_PCICTRL_DTO_ERR;

	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	/* Clear any latched primary/secondary AFSR error bits. */
	err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		    SCHIZO_PCIAFSR_PTTO |
		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		    SCHIZO_PCIAFSR_STTO);

	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, err_mask);

	/* Enable detection of the full set of JBUS error conditions. */
	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
		    BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
		    BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
		    BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
		    BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
		    BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
		    BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
		    BUS_ERROR_APERR | BUS_ERROR_UNMAP |
		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);

	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
		     (SCHIZO_SAFERRCTRL_EN | err_mask));
	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRCTRL,
		     (SCHIZO_SAFERRCTRL_EN | err_mask));

	/* Only UNMAP errors generate an interrupt; the rest are
	 * detected (above) but not interrupt-routed.
	 */
	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_IRQCTRL,
		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
}
1312
/* Register UE, CE, per-PBM PCI error and Safari error interrupt
 * handlers for a Schizo/Schizo+ controller and program the error
 * detection/reporting enables.
 *
 * Called from __schizo_scan_bus() only after both PCI busses have
 * been scanned, so pbm_for_ino() lookups are valid.
 */
static void __init schizo_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm;
	unsigned int irq;
	struct ino_bucket *bucket;
	u64 tmp, err_mask, err_no_mask;

	/* Build IRQs and register handlers. */
	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
	if (request_irq(irq, schizo_ue_intr,
			SA_SHIRQ, "SCHIZO UE", p) < 0) {
		prom_printf("%s: Cannot register UE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	/* Mirror the IMAP value into the word at imap-offset + 4.
	 * NOTE(review): purpose not visible here -- see the analogous
	 * writes in tomatillo_register_error_handlers().
	 */
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
	if (request_irq(irq, schizo_ce_intr,
			SA_SHIRQ, "SCHIZO CE", p) < 0) {
		prom_printf("%s: Cannot register CE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO);
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) {
		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO);
	/* NOTE(review): PBM B's handler argument is hard-coded to
	 * &p->pbm_B while PBM A above passes the pbm_for_ino() result;
	 * these should agree -- confirm pbm_for_ino() maps
	 * SCHIZO_PCIERR_B_INO to pbm_B before unifying.
	 */
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));

	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
	if (request_irq(irq, schizo_safarierr_intr,
			SA_SHIRQ, "SCHIZO SERR", p) < 0) {
		prom_printf("%s: Cannot register SafariERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	bucket = __bucket(irq);
	tmp = upa_readl(bucket->imap);
	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));

	/* Enable UE and CE interrupts for controller.  Unlike
	 * Tomatillo, a single write suffices since both PBMs share
	 * the front-end controller register block.
	 */
	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
		     (SCHIZO_ECCCTRL_EE |
		      SCHIZO_ECCCTRL_UE |
		      SCHIZO_ECCCTRL_CE));

	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
		    SCHIZO_PCICTRL_ESLCK |
		    SCHIZO_PCICTRL_TTO_ERR |
		    SCHIZO_PCICTRL_RTRY_ERR |
		    SCHIZO_PCICTRL_SBH_ERR |
		    SCHIZO_PCICTRL_SERR |
		    SCHIZO_PCICTRL_EEN);

	/* Discard-timeout errors and streaming-byte-hole interrupts
	 * are explicitly disabled.
	 */
	err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_INT);

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.
	 */
	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR,
		     (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		      SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		      SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		      SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		      SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		      SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));

	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR,
		     (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		      SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		      SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		      SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		      SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		      SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));

	/* Make all Safari error conditions fatal except unmapped
	 * errors which we make generate interrupts.
	 */
	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
		    BUS_ERROR_BADMA | BUS_ERROR_BADMB |
		    BUS_ERROR_BADMC |
		    BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
		    BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
		    BUS_ERROR_CIQTO |
		    BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
		    BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
		    BUS_ERROR_ILL);
#if 1
	/* XXX Something wrong with some Excalibur systems
	 * XXX Sun is shipping.  The behavior on a 2-cpu
	 * XXX machine is that both CPU1 parity error bits
	 * XXX are set and are immediately set again when
	 * XXX their error status bits are cleared.  Just
	 * XXX ignore them for now.  -DaveM
	 */
	err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
		      BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
#endif

	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
		     (SCHIZO_SAFERRCTRL_EN | err_mask));

	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
}
1458
1459static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
1460{
1461 u8 *addr;
1462
1463 /* Set cache-line size to 64 bytes, this is actually
1464 * a nop but I do it for completeness.
1465 */
1466 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1467 0, PCI_CACHE_LINE_SIZE);
1468 pci_config_write8(addr, 64 / sizeof(u32));
1469
1470 /* Set PBM latency timer to 64 PCI clocks. */
1471 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1472 0, PCI_LATENCY_TIMER);
1473 pci_config_write8(addr, 64);
1474}
1475
1476static void __init pbm_scan_bus(struct pci_controller_info *p,
1477 struct pci_pbm_info *pbm)
1478{
1479 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
1480
1481 if (!cookie) {
1482 prom_printf("%s: Critical allocation failure.\n", pbm->name);
1483 prom_halt();
1484 }
1485
1486 /* All we care about is the PBM. */
1487 memset(cookie, 0, sizeof(*cookie));
1488 cookie->pbm = pbm;
1489
1490 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1491 p->pci_ops,
1492 pbm);
1493 pci_fixup_host_bridge_self(pbm->pci_bus);
1494 pbm->pci_bus->self->sysdata = cookie;
1495
1496 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1497 pci_record_assignments(pbm, pbm->pci_bus);
1498 pci_assign_unassigned(pbm, pbm->pci_bus);
1499 pci_fixup_irq(pbm, pbm->pci_bus);
1500 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1501 pci_setup_busmastering(pbm, pbm->pci_bus);
1502}
1503
1504static void __init __schizo_scan_bus(struct pci_controller_info *p,
1505 int chip_type)
1506{
1507 if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
1508 printk("PCI: Only one PCI bus module of controller found.\n");
1509 printk("PCI: Ignoring entire controller.\n");
1510 return;
1511 }
1512
1513 pbm_config_busmastering(&p->pbm_B);
1514 p->pbm_B.is_66mhz_capable =
1515 prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
1516 pbm_config_busmastering(&p->pbm_A);
1517 p->pbm_A.is_66mhz_capable =
1518 prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
1519 pbm_scan_bus(p, &p->pbm_B);
1520 pbm_scan_bus(p, &p->pbm_A);
1521
1522 /* After the PCI bus scan is complete, we can register
1523 * the error interrupt handlers.
1524 */
1525 if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
1526 tomatillo_register_error_handlers(p);
1527 else
1528 schizo_register_error_handlers(p);
1529}
1530
/* scan_bus entry point for Schizo and Schizo+ controllers. */
static void __init schizo_scan_bus(struct pci_controller_info *p)
{
	__schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
}
1535
/* scan_bus entry point for Tomatillo controllers. */
static void __init tomatillo_scan_bus(struct pci_controller_info *p)
{
	__schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
}
1540
1541static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
1542{
1543 struct pcidev_cookie *pcp = pdev->sysdata;
1544 struct pci_pbm_info *pbm = pcp->pbm;
1545 struct resource *res, *root;
1546 u32 reg;
1547 int where, size, is_64bit;
1548
1549 res = &pdev->resource[resource];
1550 if (resource < 6) {
1551 where = PCI_BASE_ADDRESS_0 + (resource * 4);
1552 } else if (resource == PCI_ROM_RESOURCE) {
1553 where = pdev->rom_base_reg;
1554 } else {
1555 /* Somebody might have asked allocation of a non-standard resource */
1556 return;
1557 }
1558
1559 is_64bit = 0;
1560 if (res->flags & IORESOURCE_IO)
1561 root = &pbm->io_space;
1562 else {
1563 root = &pbm->mem_space;
1564 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1565 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1566 is_64bit = 1;
1567 }
1568
1569 size = res->end - res->start;
1570 pci_read_config_dword(pdev, where, &reg);
1571 reg = ((reg & size) |
1572 (((u32)(res->start - root->start)) & ~size));
1573 if (resource == PCI_ROM_RESOURCE) {
1574 reg |= PCI_ROM_ADDRESS_ENABLE;
1575 res->flags |= IORESOURCE_ROM_ENABLE;
1576 }
1577 pci_write_config_dword(pdev, where, reg);
1578
1579 /* This knows that the upper 32-bits of the address
1580 * must be zero. Our PCI common layer enforces this.
1581 */
1582 if (is_64bit)
1583 pci_write_config_dword(pdev, where + 4, 0);
1584}
1585
1586static void __init schizo_resource_adjust(struct pci_dev *pdev,
1587 struct resource *res,
1588 struct resource *root)
1589{
1590 res->start += root->start;
1591 res->end += root->start;
1592}
1593
1594/* Use ranges property to determine where PCI MEM, I/O, and Config
1595 * space are for this PCI bus module.
1596 */
1597static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
1598{
1599 int i, saw_cfg, saw_mem, saw_io;
1600
1601 saw_cfg = saw_mem = saw_io = 0;
1602 for (i = 0; i < pbm->num_pbm_ranges; i++) {
1603 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
1604 unsigned long a;
1605 int type;
1606
1607 type = (pr->child_phys_hi >> 24) & 0x3;
1608 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
1609 ((unsigned long)pr->parent_phys_lo << 0UL));
1610
1611 switch (type) {
1612 case 0:
1613 /* PCI config space, 16MB */
1614 pbm->config_space = a;
1615 saw_cfg = 1;
1616 break;
1617
1618 case 1:
1619 /* 16-bit IO space, 16MB */
1620 pbm->io_space.start = a;
1621 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
1622 pbm->io_space.flags = IORESOURCE_IO;
1623 saw_io = 1;
1624 break;
1625
1626 case 2:
1627 /* 32-bit MEM space, 2GB */
1628 pbm->mem_space.start = a;
1629 pbm->mem_space.end = a + (0x80000000UL - 1UL);
1630 pbm->mem_space.flags = IORESOURCE_MEM;
1631 saw_mem = 1;
1632 break;
1633
1634 default:
1635 break;
1636 };
1637 }
1638
1639 if (!saw_cfg || !saw_io || !saw_mem) {
1640 prom_printf("%s: Fatal error, missing %s PBM range.\n",
1641 pbm->name,
1642 ((!saw_cfg ?
1643 "CFG" :
1644 (!saw_io ?
1645 "IO" : "MEM"))));
1646 prom_halt();
1647 }
1648
1649 printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
1650 pbm->name,
1651 pbm->config_space,
1652 pbm->io_space.start,
1653 pbm->mem_space.start);
1654}
1655
1656static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
1657 struct pci_pbm_info *pbm)
1658{
1659 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1660
1661 request_resource(&ioport_resource, &pbm->io_space);
1662 request_resource(&iomem_resource, &pbm->mem_space);
1663 pci_register_legacy_regions(&pbm->io_space,
1664 &pbm->mem_space);
1665}
1666
/* Streaming buffer register offsets, relative to the PBM
 * register block (see schizo_pbm_strbuf_init).
 */
#define SCHIZO_STRBUF_CONTROL		(0x02800UL)
#define SCHIZO_STRBUF_FLUSH		(0x02808UL)
#define SCHIZO_STRBUF_FSYNC		(0x02810UL)
#define SCHIZO_STRBUF_CTXFLUSH		(0x02818UL)
#define SCHIZO_STRBUF_CTXMATCH		(0x10000UL)
1672
1673static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1674{
1675 unsigned long base = pbm->pbm_regs;
1676 u64 control;
1677
1678 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1679 /* TOMATILLO lacks streaming cache. */
1680 return;
1681 }
1682
1683 /* SCHIZO has context flushing. */
1684 pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
1685 pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
1686 pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
1687 pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
1688 pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
1689
1690 pbm->stc.strbuf_flushflag = (volatile unsigned long *)
1691 ((((unsigned long)&pbm->stc.__flushflag_buf[0])
1692 + 63UL)
1693 & ~63UL);
1694 pbm->stc.strbuf_flushflag_pa = (unsigned long)
1695 __pa(pbm->stc.strbuf_flushflag);
1696
1697 /* Turn off LRU locking and diag mode, enable the
1698 * streaming buffer and leave the rerun-disable
1699 * setting however OBP set it.
1700 */
1701 control = schizo_read(pbm->stc.strbuf_control);
1702 control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
1703 SCHIZO_STRBUF_CTRL_LENAB |
1704 SCHIZO_STRBUF_CTRL_DENAB);
1705 control |= SCHIZO_STRBUF_CTRL_ENAB;
1706 schizo_write(pbm->stc.strbuf_control, control);
1707
1708 pbm->stc.strbuf_enabled = 1;
1709}
1710
/* IOMMU register offsets, relative to the PBM register block
 * (see schizo_pbm_iommu_init).
 */
#define SCHIZO_IOMMU_CONTROL		(0x00200UL)
#define SCHIZO_IOMMU_TSBBASE		(0x00208UL)
#define SCHIZO_IOMMU_FLUSH		(0x00210UL)
#define SCHIZO_IOMMU_CTXFLUSH		(0x00218UL)
1715
1716static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1717{
1718 struct pci_iommu *iommu = pbm->iommu;
1719 unsigned long tsbbase, i, tagbase, database, order;
1720 u32 vdma[2], dma_mask;
1721 u64 control;
1722 int err, tsbsize;
1723
1724 err = prom_getproperty(pbm->prom_node, "virtual-dma",
1725 (char *)&vdma[0], sizeof(vdma));
1726 if (err == 0 || err == -1) {
1727 /* No property, use default values. */
1728 vdma[0] = 0xc0000000;
1729 vdma[1] = 0x40000000;
1730 }
1731
1732 dma_mask = vdma[0];
1733 switch (vdma[1]) {
1734 case 0x20000000:
1735 dma_mask |= 0x1fffffff;
1736 tsbsize = 64;
1737 break;
1738
1739 case 0x40000000:
1740 dma_mask |= 0x3fffffff;
1741 tsbsize = 128;
1742 break;
1743
1744 case 0x80000000:
1745 dma_mask |= 0x7fffffff;
1746 tsbsize = 128;
1747 break;
1748
1749 default:
1750 prom_printf("SCHIZO: strange virtual-dma size.\n");
1751 prom_halt();
1752 };
1753
1754 /* Setup initial software IOMMU state. */
1755 spin_lock_init(&iommu->lock);
1756 iommu->iommu_cur_ctx = 0;
1757
1758 /* Register addresses, SCHIZO has iommu ctx flushing. */
1759 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
1760 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
1761 iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
1762 iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
1763
1764 /* We use the main control/status register of SCHIZO as the write
1765 * completion register.
1766 */
1767 iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
1768
1769 /*
1770 * Invalidate TLB Entries.
1771 */
1772 control = schizo_read(iommu->iommu_control);
1773 control |= SCHIZO_IOMMU_CTRL_DENAB;
1774 schizo_write(iommu->iommu_control, control);
1775
1776 tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
1777
1778 for(i = 0; i < 16; i++) {
1779 schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0);
1780 schizo_write(pbm->pbm_regs + database + (i * 8UL), 0);
1781 }
1782
1783 /* Leave diag mode enabled for full-flushing done
1784 * in pci_iommu.c
1785 */
1786
1787 iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
1788 if (!iommu->dummy_page) {
1789 prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
1790 prom_halt();
1791 }
1792 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
1793 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
1794
1795 /* Using assumed page size 8K with 128K entries we need 1MB iommu page
1796 * table (128K ioptes * 8 bytes per iopte). This is
1797 * page order 7 on UltraSparc.
1798 */
1799 order = get_order(tsbsize * 8 * 1024);
1800 tsbbase = __get_free_pages(GFP_KERNEL, order);
1801 if (!tsbbase) {
1802 prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
1803 prom_halt();
1804 }
1805
1806 iommu->page_table = (iopte_t *)tsbbase;
1807 iommu->page_table_map_base = vdma[0];
1808 iommu->dma_addr_mask = dma_mask;
1809 pci_iommu_table_init(iommu, PAGE_SIZE << order);
1810
1811 switch (tsbsize) {
1812 case 64:
1813 iommu->page_table_sz_bits = 16;
1814 break;
1815
1816 case 128:
1817 iommu->page_table_sz_bits = 17;
1818 break;
1819
1820 default:
1821 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1822 prom_halt();
1823 break;
1824 };
1825
1826 /* We start with no consistent mappings. */
1827 iommu->lowest_consistent_map =
1828 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
1829
1830 for (i = 0; i < PBM_NCLUSTERS; i++) {
1831 iommu->alloc_info[i].flush = 0;
1832 iommu->alloc_info[i].next = 0;
1833 }
1834
1835 schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
1836
1837 control = schizo_read(iommu->iommu_control);
1838 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
1839 switch (tsbsize) {
1840 case 64:
1841 control |= SCHIZO_IOMMU_TSBSZ_64K;
1842 break;
1843 case 128:
1844 control |= SCHIZO_IOMMU_TSBSZ_128K;
1845 break;
1846 };
1847
1848 control |= SCHIZO_IOMMU_CTRL_ENAB;
1849 schizo_write(iommu->iommu_control, control);
1850}
1851
/* PCI IRQ retry, diagnostic and Tomatillo IOC register
 * definitions used by schizo_pbm_hw_init().
 */
#define SCHIZO_PCI_IRQ_RETRY	(0x1a00UL)
#define  SCHIZO_IRQ_RETRY_INF	0xffUL

#define SCHIZO_PCI_DIAG			(0x2020UL)
#define  SCHIZO_PCIDIAG_D_BADECC	(1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
#define  SCHIZO_PCIDIAG_D_BYPASS	(1UL <<  9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_D_TTO		(1UL <<  8UL) /* Disable TTO errors (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_D_RTRYARB	(1UL <<  7UL) /* Disable retry arbitration (Schizo) */
#define  SCHIZO_PCIDIAG_D_RETRY		(1UL <<  6UL) /* Disable retry limit (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_D_INTSYNC	(1UL <<  5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_I_DMA_PARITY	(1UL <<  3UL) /* Invert DMA parity (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_I_PIOD_PARITY	(1UL <<  2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
#define  SCHIZO_PCIDIAG_I_PIOA_PARITY	(1UL <<  1UL) /* Invert PIO address parity (Schizo/Tomatillo) */

#define TOMATILLO_PCI_IOC_CSR		(0x2248UL)
#define TOMATILLO_IOC_PART_WPENAB	0x0000000000080000UL
#define TOMATILLO_IOC_RDMULT_PENAB	0x0000000000040000UL
#define TOMATILLO_IOC_RDONE_PENAB	0x0000000000020000UL
#define TOMATILLO_IOC_RDLINE_PENAB	0x0000000000010000UL
#define TOMATILLO_IOC_RDMULT_PLEN	0x000000000000c000UL
#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT	14UL
#define TOMATILLO_IOC_RDONE_PLEN	0x0000000000003000UL
#define TOMATILLO_IOC_RDONE_PLEN_SHIFT	12UL
#define TOMATILLO_IOC_RDLINE_PLEN	0x0000000000000c00UL
#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT	10UL
#define TOMATILLO_IOC_PREF_OFF		0x00000000000003f8UL
#define TOMATILLO_IOC_PREF_OFF_SHIFT	3UL
#define TOMATILLO_IOC_RDMULT_CPENAB	0x0000000000000004UL
#define TOMATILLO_IOC_RDONE_CPENAB	0x0000000000000002UL
#define TOMATILLO_IOC_RDLINE_CPENAB	0x0000000000000001UL

#define TOMATILLO_PCI_IOC_TDIAG		(0x2250UL)
#define TOMATILLO_PCI_IOC_DDIAG		(0x2290UL)
1885
/* Program per-PBM hardware state: IRQ retry limit, PCI control and
 * diagnostic registers, plus Tomatillo-specific IOC setup.  Contains
 * several chip-type/revision specific workarounds.
 */
static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
{
	u64 tmp;

	/* Set IRQ retry to infinity. */
	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
		     SCHIZO_IRQ_RETRY_INF);

	/* Enable arbiter for all PCI slots.  Also, disable PCI interval
	 * timer so that DTO (Discard TimeOuts) are not reported because
	 * some Schizo revisions report them erroneously.
	 */
	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
	/* Schizo+ version 5 rev 1 only gets the low four arbiter bits. */
	if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
	    pbm->chip_version == 0x5 &&
	    pbm->chip_revision == 0x1)
		tmp |= 0x0f;
	else
		tmp |= 0xff;

	/* PCI timeout (PTO) field: 0x3 on Tomatillo >= version 2,
	 * 0x1 everywhere else.
	 */
	tmp &= ~SCHIZO_PCICTRL_PTO;
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version >= 0x2)
		tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
	else
		tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;

	if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
		tmp |= SCHIZO_PCICTRL_PARK;

	/* NOTE(review): the meaning of control register bit 61 is not
	 * visible here; it is set only on Tomatillo version <= 1 --
	 * confirm against chip documentation before touching.
	 */
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version <= 0x1)
		tmp |= (1UL << 61);
	else
		tmp &= ~(1UL << 61);

	/* Tomatillo gets memory-read-multiple/read-one/read-line
	 * prefetching enabled.
	 */
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
		tmp |= (SCHIZO_PCICTRL_MRM_PREF |
			SCHIZO_PCICTRL_RDO_PREF |
			SCHIZO_PCICTRL_RDL_PREF);

	schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp);

	/* Clear the retry-arbitration, retry-limit and interrupt/DMA
	 * sync disable bits in the diagnostic register.
	 */
	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG);
	tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
		 SCHIZO_PCIDIAG_D_RETRY |
		 SCHIZO_PCIDIAG_D_INTSYNC);
	schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp);

	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		/* Clear prefetch lengths to workaround a bug in
		 * Jalapeno...
		 */
		tmp = (TOMATILLO_IOC_PART_WPENAB |
		       (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
		       TOMATILLO_IOC_RDMULT_CPENAB |
		       TOMATILLO_IOC_RDONE_CPENAB |
		       TOMATILLO_IOC_RDLINE_CPENAB);

		schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR,
			     tmp);
	}
}
1949
/* Initialize one PBM from its OBP node: parse the "reg" property to
 * locate the register blocks, program the hardware, pull the ranges /
 * interrupt-map / ino-bitmap / bus-range properties, and finally set
 * up the IOMMU and streaming buffer.  Any missing mandatory property
 * halts via the PROM.
 */
static void __init schizo_pbm_init(struct pci_controller_info *p,
				   int prom_node, u32 portid,
				   int chip_type)
{
	struct linux_prom64_registers pr_regs[4];
	unsigned int busrange[2];
	struct pci_pbm_info *pbm;
	const char *chipset_name;
	u32 ino_bitmap[2];
	int is_pbm_a;
	int err;

	switch (chip_type) {
	case PBM_CHIP_TYPE_TOMATILLO:
		chipset_name = "TOMATILLO";
		break;

	case PBM_CHIP_TYPE_SCHIZO_PLUS:
		chipset_name = "SCHIZO+";
		break;

	case PBM_CHIP_TYPE_SCHIZO:
	default:
		chipset_name = "SCHIZO";
		break;
	};

	/* For SCHIZO, three OBP regs:
	 * 1) PBM controller regs
	 * 2) Schizo front-end controller regs (same for both PBMs)
	 * 3) PBM PCI config space
	 *
	 * For TOMATILLO, four OBP regs:
	 * 1) PBM controller regs
	 * 2) Tomatillo front-end controller regs
	 * 3) PBM PCI config space
	 * 4) Ichip regs
	 */
	err = prom_getproperty(prom_node, "reg",
			       (char *)&pr_regs[0],
			       sizeof(pr_regs));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no reg property.\n",
			    chipset_name);
		prom_halt();
	}

	/* PBM A is identified by bits 22:20 of the first reg
	 * physical address being 0x6.
	 */
	is_pbm_a = ((pr_regs[0].phys_addr & 0x00700000) == 0x00600000);

	if (is_pbm_a)
		pbm = &p->pbm_A;
	else
		pbm = &p->pbm_B;

	pbm->portid = portid;
	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->chip_type = chip_type;
	pbm->chip_version =
		prom_getintdefault(prom_node, "version#", 0);
	pbm->chip_revision =
		prom_getintdefault(prom_node, "module-revision#", 0);

	/* The front-end controller block starts 0x10000 before the
	 * address OBP reports in the second reg entry.
	 */
	pbm->pbm_regs = pr_regs[0].phys_addr;
	pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;

	sprintf(pbm->name,
		(chip_type == PBM_CHIP_TYPE_TOMATILLO ?
		 "TOMATILLO%d PBM%c" :
		 "SCHIZO%d PBM%c"),
		p->index,
		(pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: ver[%x:%x], portid %x, "
	       "cregs[%lx] pregs[%lx]\n",
	       pbm->name,
	       pbm->chip_version, pbm->chip_revision,
	       pbm->portid,
	       pbm->controller_regs,
	       pbm->pbm_regs);

	schizo_pbm_hw_init(pbm);

	prom_getstring(prom_node, "name",
		       pbm->prom_name,
		       sizeof(pbm->prom_name));

	/* "ranges" is mandatory; it drives the MEM/IO/CFG layout. */
	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	schizo_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	/* "interrupt-map" is optional, but if present its companion
	 * "interrupt-map-mask" must exist too.
	 */
	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("%s: Fatal error, no "
				    "interrupt-map-mask.\n", pbm->name);
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	/* Merge the two 32-bit ino-bitmap words into one 64-bit mask. */
	err = prom_getproperty(prom_node, "ino-bitmap",
			       (char *) &ino_bitmap[0],
			       sizeof(ino_bitmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ino-bitmap.\n", pbm->name);
		prom_halt();
	}
	pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
			   ((u64)ino_bitmap[0] << 0UL));

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}
	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

	schizo_pbm_iommu_init(pbm);
	schizo_pbm_strbuf_init(pbm);
}
2095
2096static inline int portid_compare(u32 x, u32 y, int chip_type)
2097{
2098 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) {
2099 if (x == (y ^ 1))
2100 return 1;
2101 return 0;
2102 }
2103 return (x == y);
2104}
2105
2106static void __init __schizo_init(int node, char *model_name, int chip_type)
2107{
2108 struct pci_controller_info *p;
2109 struct pci_iommu *iommu;
2110 int is_pbm_a;
2111 u32 portid;
2112
2113 portid = prom_getintdefault(node, "portid", 0xff);
2114
2115 for(p = pci_controller_root; p; p = p->next) {
2116 struct pci_pbm_info *pbm;
2117
2118 if (p->pbm_A.prom_node && p->pbm_B.prom_node)
2119 continue;
2120
2121 pbm = (p->pbm_A.prom_node ?
2122 &p->pbm_A :
2123 &p->pbm_B);
2124
2125 if (portid_compare(pbm->portid, portid, chip_type)) {
2126 is_pbm_a = (p->pbm_A.prom_node == 0);
2127 schizo_pbm_init(p, node, portid, chip_type);
2128 return;
2129 }
2130 }
2131
2132 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
2133 if (!p) {
2134 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2135 prom_halt();
2136 }
2137 memset(p, 0, sizeof(*p));
2138
2139 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2140 if (!iommu) {
2141 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2142 prom_halt();
2143 }
2144 memset(iommu, 0, sizeof(*iommu));
2145 p->pbm_A.iommu = iommu;
2146
2147 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2148 if (!iommu) {
2149 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2150 prom_halt();
2151 }
2152 memset(iommu, 0, sizeof(*iommu));
2153 p->pbm_B.iommu = iommu;
2154
2155 p->next = pci_controller_root;
2156 pci_controller_root = p;
2157
2158 p->index = pci_num_controllers++;
2159 p->pbms_same_domain = 0;
2160 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2161 tomatillo_scan_bus :
2162 schizo_scan_bus);
2163 p->irq_build = schizo_irq_build;
2164 p->base_address_update = schizo_base_address_update;
2165 p->resource_adjust = schizo_resource_adjust;
2166 p->pci_ops = &schizo_ops;
2167
2168 /* Like PSYCHO we have a 2GB aligned area for memory space. */
2169 pci_memspace_mask = 0x7fffffffUL;
2170
2171 schizo_pbm_init(p, node, portid, chip_type);
2172}
2173
/* Probe entry point for a "schizo" OBP node. */
void __init schizo_init(int node, char *model_name)
{
	__schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO);
}
2178
/* Probe entry point for a Schizo+ OBP node. */
void __init schizo_plus_init(int node, char *model_name)
{
	__schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO_PLUS);
}
2183
/* Probe entry point for a Tomatillo OBP node. */
void __init tomatillo_init(int node, char *model_name)
{
	__schizo_init(node, model_name, PBM_CHIP_TYPE_TOMATILLO);
}
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
new file mode 100644
index 000000000000..52f14e399b1c
--- /dev/null
+++ b/arch/sparc64/kernel/power.c
@@ -0,0 +1,150 @@
1/* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
2 * power.c: Power management driver.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/signal.h>
13#include <linux/delay.h>
14#include <linux/interrupt.h>
15
16#include <asm/system.h>
17#include <asm/ebus.h>
18#include <asm/auxio.h>
19
20#define __KERNEL_SYSCALLS__
21#include <linux/unistd.h>
22
/*
 * sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off()
 */
int scons_pwroff = 1;	/* non-zero: power off even when on a serial console */
28
#ifdef CONFIG_PCI
/* Mapped AUXIO power control register; NULL until power_init() finds one. */
static void __iomem *power_reg;

/* powerd sleeps here until power_handler() reports a button press. */
static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
static int button_pressed;
34
35static irqreturn_t power_handler(int irq, void *dev_id, struct pt_regs *regs)
36{
37 if (button_pressed == 0) {
38 button_pressed = 1;
39 wake_up(&powerd_wait);
40 }
41
42 /* FIXME: Check registers for status... */
43 return IRQ_HANDLED;
44}
45#endif /* CONFIG_PCI */
46
extern void machine_halt(void);
extern void machine_alt_power_off(void);
/* Default to PROM power-off until an AUXIO power register is located. */
static void (*poweroff_method)(void) = machine_alt_power_off;
50
/* Power down the machine.  On serial-console systems the power-off can
 * be suppressed via scons_pwroff; if no power-off mechanism is available
 * (or it returns) we fall back to machine_halt().
 */
void machine_power_off(void)
{
	if (!serial_console || scons_pwroff) {
#ifdef CONFIG_PCI
		if (power_reg) {
			/* Both register bits seem to have the
			 * same effect, so until I figure out
			 * what the difference is...
			 */
			writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
		} else
#endif /* CONFIG_PCI */
		if (poweroff_method != NULL) {
			poweroff_method();
			/* not reached */
		}
	}
	machine_halt();
}

EXPORT_SYMBOL(machine_power_off);
72
73#ifdef CONFIG_PCI
/* Kernel thread that performs a clean shutdown when the power button
 * is pressed: sleep on powerd_wait until power_handler() sets
 * button_pressed, then exec /sbin/shutdown.  On exec failure it re-arms
 * and waits for the next press.
 */
static int powerd(void *__unused)
{
	static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
	DECLARE_WAITQUEUE(wait, current);

	daemonize("powerd");

	add_wait_queue(&powerd_wait, &wait);
again:
	for (;;) {
		set_task_state(current, TASK_INTERRUPTIBLE);
		if (button_pressed)
			break;
		/* Discard stray signals; only the button press matters. */
		flush_signals(current);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&powerd_wait, &wait);

	/* Ok, down we go... */
	button_pressed = 0;
	if (execve("/sbin/shutdown", argv, envp) < 0) {
		printk("powerd: shutdown execution failed\n");
		add_wait_queue(&powerd_wait, &wait);
		goto again;
	}
	return 0;
}
103
104static int __init has_button_interrupt(struct linux_ebus_device *edev)
105{
106 if (edev->irqs[0] == PCI_IRQ_NONE)
107 return 0;
108 if (!prom_node_has_property(edev->prom_node, "button"))
109 return 0;
110
111 return 1;
112}
113
114void __init power_init(void)
115{
116 struct linux_ebus *ebus;
117 struct linux_ebus_device *edev;
118 static int invoked;
119
120 if (invoked)
121 return;
122 invoked = 1;
123
124 for_each_ebus(ebus) {
125 for_each_ebusdev(edev, ebus) {
126 if (!strcmp(edev->prom_name, "power"))
127 goto found;
128 }
129 }
130 return;
131
132found:
133 power_reg = ioremap(edev->resource[0].start, 0x4);
134 printk("power: Control reg at %p ... ", power_reg);
135 poweroff_method = machine_halt; /* able to use the standard halt */
136 if (has_button_interrupt(edev)) {
137 if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
138 printk("Failed to start power daemon.\n");
139 return;
140 }
141 printk("powerd running.\n");
142
143 if (request_irq(edev->irqs[0],
144 power_handler, SA_SHIRQ, "power", NULL) < 0)
145 printk("power: Error, cannot register IRQ handler.\n");
146 } else {
147 printk("not using powerd.\n");
148 }
149}
150#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
new file mode 100644
index 000000000000..26d3ec41da1c
--- /dev/null
+++ b/arch/sparc64/kernel/process.c
@@ -0,0 +1,869 @@
1/* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/process.c
3 *
4 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9/*
10 * This file handles the architecture-dependent parts of process handling..
11 */
12
13#include <stdarg.h>
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/kallsyms.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/stddef.h>
25#include <linux/ptrace.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/config.h>
30#include <linux/reboot.h>
31#include <linux/delay.h>
32#include <linux/compat.h>
33#include <linux/init.h>
34
35#include <asm/oplib.h>
36#include <asm/uaccess.h>
37#include <asm/system.h>
38#include <asm/page.h>
39#include <asm/pgalloc.h>
40#include <asm/pgtable.h>
41#include <asm/processor.h>
42#include <asm/pstate.h>
43#include <asm/elf.h>
44#include <asm/fpumacro.h>
45#include <asm/head.h>
46#include <asm/cpudata.h>
47#include <asm/unistd.h>
48
49/* #define VERBOSE_SHOWREGS */
50
/*
 * Nothing special yet...
 * Hook for a low-power idle routine; currently a no-op.
 */
void default_idle(void)
{
}
57
58#ifndef CONFIG_SMP
59
/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
	/* Only the boot idle task (pid 0) may enter the idle loop. */
	if (current->pid != 0)
		return;

	/* endless idle loop with no priority at all */
	for (;;) {
		/* If current->work.need_resched is zero we should really
		 * setup for a system wakup event and execute a shutdown
		 * instruction.
		 *
		 * But this requires writing back the contents of the
		 * L2 cache etc. so implement this later. -DaveM
		 */
		while (!need_resched())
			barrier();

		schedule();
		check_pgt_cache();
	}
	return;
}
85
86#else
87
/*
 * the idle loop on a UltraMultiPenguin...
 */
/* Bump/clear this cpu's advertised idleness for the buddy algorithm. */
#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
void cpu_idle(void)
{
	/* Poll need_resched() rather than taking reschedule interrupts. */
	set_thread_flag(TIF_POLLING_NRFLAG);
	while(1) {
		if (need_resched()) {
			unidle_me();
			clear_thread_flag(TIF_POLLING_NRFLAG);
			schedule();
			set_thread_flag(TIF_POLLING_NRFLAG);
			check_pgt_cache();
		}
		idle_me_harder();

		/* The store ordering is so that IRQ handlers on
		 * other cpus see our increasing idleness for the buddy
		 * redistribution algorithm.  -DaveM
		 */
		membar("#StoreStore | #StoreLoad");
	}
}
113
114#endif
115
extern char reboot_command [];

/* PROM hooks installed elsewhere; either may be NULL when not provided. */
extern void (*prom_palette)(int);
extern void (*prom_keyboard)(void);
120
/* Restore the PROM display palette and keyboard, then drop into the
 * PROM halt.  prom_halt() is not expected to return.
 */
void machine_halt(void)
{
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt();
	panic("Halt failed!");
}

EXPORT_SYMBOL(machine_halt);
132
/* Power off via the PROM after restoring its palette and keyboard.
 * prom_halt_power_off() is not expected to return.
 */
void machine_alt_power_off(void)
{
	if (!serial_console && prom_palette)
		prom_palette(1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt_power_off();
	panic("Power-off failed!");
}
142
143void machine_restart(char * cmd)
144{
145 char *p;
146
147 p = strchr (reboot_command, '\n');
148 if (p) *p = 0;
149 if (!serial_console && prom_palette)
150 prom_palette (1);
151 if (prom_keyboard)
152 prom_keyboard();
153 if (cmd)
154 prom_reboot(cmd);
155 if (*reboot_command)
156 prom_reboot(reboot_command);
157 prom_reboot("");
158 panic("Reboot failed!");
159}
160
161EXPORT_SYMBOL(machine_restart);
162
/* Dump the 32-bit user register window addressed by %sp
 * (regs->u_regs[14]).  The window is copied in from user space under
 * USER_DS; on fault we silently print nothing.
 */
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	/* Flush live register windows out to the user stack first. */
	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
188
/* Dump the register window addressed by %sp for a 64-bit context.
 * Kernel frames are read in place; user frames are copied in under
 * USER_DS.  32-bit user tasks are delegated to show_regwindow32().
 */
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		/* Flush live windows to the stack before reading them. */
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			/* User frame: copy it in, then print the copy. */
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	/* For kernel frames, symbolize the caller's return address. */
	if (regs->tstate & TSTATE_PRIV)
		print_symbol("I7: <%s>\n", rwk->ins[7]);
}
227
/* Dump a 64-bit stack frame: register save area, argument slots, and
 * the local stack words between this frame and its caller's frame
 * pointer.  NOTE(review): the do/while assumes fp > sf + STACKFRAME_SZ;
 * a degenerate frame would underflow `size` — debug-only code.
 */
void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	/* Dump the words between the fixed frame area and the caller's fp. */
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	do {
		printk("s%d: %016lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}
255
/* 32-bit counterpart of show_stackframe(): dump a compat stack frame
 * and the stack words up to the caller's frame pointer.
 */
void show_stackframe32(struct sparc_stackf32 *sf)
{
	unsigned long size;
	unsigned *stk;
	int i;

	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
	       sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	/* Dump the words between the fixed frame area and the caller's fp. */
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME32_SZ;
	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
	i = 0;
	do {
		printk("s%d: %08x\n", i++, *stk++);
	} while ((size -= sizeof(unsigned)));
}
283
#ifdef CONFIG_SMP
/* Serializes __show_regs() output across cpus. */
static DEFINE_SPINLOCK(regdump_lock);
#endif
287
/* Print the full pt_regs contents plus the current register window.
 * On SMP, interrupts are disabled via %pstate before taking
 * regdump_lock so a cross-call IPI cannot deadlock against us.
 */
void __show_regs(struct pt_regs * regs)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	/* Protect against xcall ipis which might lead to livelock on the lock */
	__asm__ __volatile__("rdpr      %%pstate, %0\n\t"
			     "wrpr      %0, %1, %%pstate"
			     : "=r" (flags)
			     : "i" (PSTATE_IE));
	spin_lock(&regdump_lock);
#endif
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	print_symbol("TPC: <%s>\n", regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
	show_regwindow(regs);
#ifdef CONFIG_SMP
	spin_unlock(&regdump_lock);
	/* Restore the saved %pstate (re-enabling interrupts if they were on). */
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (flags));
#endif
}
323
#ifdef VERBOSE_SHOWREGS
/* Debug aid: dump 9 instruction words around a (word-aligned) user PC,
 * marking the faulting instruction with <...>.
 */
static void idump_from_user (unsigned int *pc)
{
	int i;
	int code;

	/* Bail on a misaligned PC; we cannot fetch instructions there. */
	if((((unsigned long) pc) & 3))
		return;

	pc -= 3;
	for(i = -3; i < 6; i++) {
		get_user(code, pc);
		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
		pc++;
	}
	printk("\n");
}
#endif
342
/* Public register-dump entry point: print this cpu's regs and, on SMP,
 * ask every other cpu to report too.  The VERBOSE variant also dumps
 * the parent trap frame when the PC lies inside the etrap path.
 */
void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
	extern long etrap, etraptl1;
#endif
	__show_regs(regs);
#ifdef CONFIG_SMP
	{
		extern void smp_report_regs(void);

		smp_report_regs();
	}
#endif

#ifdef VERBOSE_SHOWREGS
	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
		printk ("*********parent**********\n");
		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
		printk ("*********endpar**********\n");
	}
#endif
}
368
/* Print a 32-bit (compat) pt_regs32: PSR/PC/NPC/Y plus the global and
 * output registers.  No register window is dumped here.
 */
void show_regs32(struct pt_regs32 *regs)
{
	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x    %s\n", regs->psr,
	       regs->pc, regs->npc, regs->y, print_tainted());
	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
}
386
387unsigned long thread_saved_pc(struct task_struct *tsk)
388{
389 struct thread_info *ti = tsk->thread_info;
390 unsigned long ret = 0xdeadbeefUL;
391
392 if (ti && ti->ksp) {
393 unsigned long *sp;
394 sp = (unsigned long *)(ti->ksp + STACK_BIAS);
395 if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
396 sp[14]) {
397 unsigned long *fp;
398 fp = (unsigned long *)(sp[14] + STACK_BIAS);
399 if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
400 ret = fp[15];
401 }
402 }
403 return ret;
404}
405
/* Free current thread data structures etc..
 * Drops the (refcounted) user trap table and disables any active
 * performance counters for the exiting thread.
 */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	if (t->utraps) {
		/* utraps[0] is a reference count; free on last user. */
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}

	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}
}
424
/* Reset per-thread state for exec: apply a pending 32/64-bit ABI flip,
 * reload the MMU's pgd cache for the (possibly new) ABI, clear saved
 * register windows, perf counters, FPU state and signal disposition.
 */
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();

	/* Commit an ABI change requested before the exec. */
	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);

	if (t->task->mm) {
		unsigned long pgd_cache = 0UL;
		if (test_thread_flag(TIF_32BIT)) {
			struct mm_struct *mm = t->task->mm;
			pgd_t *pgd0 = &mm->pgd[0];
			pud_t *pud0 = pud_offset(pgd0, 0);

			/* 32-bit tasks need pgd[0] populated so the
			 * cached value below is valid.
			 */
			if (pud_none(*pud0)) {
				pmd_t *page = pmd_alloc_one(mm, 0);
				pud_set(pud0, page);
			}
			pgd_cache = get_pgd_cache(pgd0);
		}
		/* Write the new pgd cache value into the DMMU TSB register. */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (pgd_cache),
				       "r" (TSB_REG),
				       "i" (ASI_DMMU));
	}
	set_thread_wsaved(0);

	/* Turn off performance counters if on. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;

	/* Make sure the new image runs with the user address space limit. */
	if (get_thread_current_ds() != ASI_AIUS)
		set_fs(USER_DS);

	/* Init new signal delivery disposition. */
	clear_thread_flag(TIF_NEWSIGNALS);
}
470
/* It's a bit more tricky when 64-bit tasks are involved...
 * Copy the parent's active stack frame (from psp up to its saved frame
 * pointer) onto the child's new stack at csp, fixing up the saved frame
 * pointer inside the copy.  Returns the child's (bias-adjusted) stack
 * pointer, or 0 on fault.
 */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	unsigned long fp, distance, rval;

	if (!(test_thread_flag(TIF_32BIT))) {
		/* 64-bit: stack pointers carry the STACK_BIAS offset. */
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now 8-byte align the stack as this is mandatory in the
	 * Sparc ABI due to how register windows work.  This hides
	 * the restriction from thread libraries etc.  -DaveM
	 */
	csp &= ~7UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (test_thread_flag(TIF_32BIT)) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}
508
509/* Standard stuff. */
510static inline void shift_window_buffer(int first_win, int last_win,
511 struct thread_info *t)
512{
513 int i;
514
515 for (i = first_win; i < last_win; i++) {
516 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
517 memcpy(&t->reg_window[i], &t->reg_window[i+1],
518 sizeof(struct reg_window));
519 }
520}
521
/* Try to write any register windows buffered in thread_info back to
 * the user stack.  Windows whose stack page faults are kept buffered;
 * successfully written windows are removed from the buffer.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		/* 32-bit windows are smaller and use unbiased stack pointers. */
		if (test_thread_flag(TIF_32BIT))
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
549
/* Write ALL buffered register windows back to the user stack, faulting
 * the pages in as needed.  Unlike synchronize_user_stack(), failure is
 * fatal: the task is killed with SIGILL.
 */
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	if (test_thread_flag(TIF_32BIT))
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;

	flush_user_windows();
	window = get_thread_wsaved();

	if (window != 0) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (copy_to_user((char __user *)sp, rwin, winsize))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	/* Record how many windows remain unwritten, then die. */
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}
582
/* Arch entry for the fork/clone family: pick up the parent/child TID
 * pointers from %i2/%i4 (compat-sized for 32-bit tasks) and hand off
 * to the generic do_fork().
 */
asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	return do_fork(clone_flags, stack_start,
		       regs, stack_size,
		       parent_tid_ptr, child_tid_ptr);
}
605
/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 *   Parent -->  %o0 == childs  pid, %o1 == 0
 *   Child  -->  %o0 == parents pid, %o1 == 1
 *
 * Builds the child's kernel stack: a copy of the parent's trap frame
 * plus a stack frame, and sets up thread_info so the child resumes in
 * the right window/address-space context.  Returns 0 or -EFAULT if the
 * user stack could not be cloned.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *t = p->thread_info;
	char *child_trap_frame;

#ifdef CONFIG_DEBUG_SPINLOCK
	p->thread.smp_lock_count = 0;
	p->thread.smp_lock_pc = 0;
#endif

	/* Calculate offset to stack_frame & pt_regs */
	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));

	/* Reset CWP/current_ds in the flags word, mark the child new,
	 * and store the window pointer the child will restore into.
	 */
	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
		_TIF_NEWCHILD |
		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (regs->tstate & TSTATE_PRIV) {
		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (via KMOD, NFS, or similar) we must
		 * disable performance counters in the child because the
		 * address space and protection realm are changing.
		 */
		if (t->flags & _TIF_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~_TIF_PERFCTR;
		}
		t->kregs->u_regs[UREG_FP] = t->ksp;
		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
		flush_register_windows();
		memcpy((void *)(t->ksp + STACK_BIAS),
		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
		       sizeof(struct sparc_stackf));
		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
	} else {
		/* User child: truncate pointers for 32-bit tasks and,
		 * for a new user stack, clone the parent's frame there.
		 */
		if (t->flags & _TIF_32BIT) {
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		/* Child shares the parent's user trap table. */
		if (t->utraps)
			t->utraps[0]++;
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
684
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be free'd until both the parent and the child have exited.
 *
 * Implemented as a raw clone() trap followed, in the child, by a jump
 * to fn(arg) and an exit() trap with fn's return value.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	/* If the parent runs before fn(arg) is called by the child,
	 * the input registers of this function can be clobbered.
	 * So we stash 'fn' and 'arg' into global registers which
	 * will not be modified by the parent.
	 */
	__asm__ __volatile__("mov %4, %%g2\n\t"    /* Save FN into global */
			     "mov %5, %%g3\n\t"    /* Save ARG into global */
			     "mov %1, %%g1\n\t"    /* Clone syscall nr. */
			     "mov %2, %%o0\n\t"    /* Clone flags. */
			     "mov 0, %%o1\n\t"     /* usp arg == 0 */
			     "t 0x6d\n\t"          /* Linux/Sparc clone(). */
			     "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
			     " mov %%o0, %0\n\t"
			     "jmpl %%g2, %%o7\n\t"   /* Call the function. */
			     " mov %%g3, %%o0\n\t"   /* Set arg in delay. */
			     "mov %3, %%g1\n\t"
			     "t 0x6d\n\t"          /* Linux/Sparc exit(). */
			     /* Notreached by child. */
			     "1:" :
			     "=r" (retval) :
			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}
722
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	/* Only should be used for SunOS and ancient a.out
	 * SparcLinux binaries...  Not worth implementing.
	 */
	memset(dump, 0, sizeof(struct user));
}
733
/* 32-bit (compat) layout of the ELF FPU register set used by dump_fpu()
 * when dumping a 32-bit task.
 */
typedef struct {
	union {
		unsigned int	pr_regs[32];
		unsigned long	pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t32;
746
/*
 * fill in the fpu structure for a core dump.
 * Copies whichever halves of the FP register file were actually saved
 * (per the FPRS_DL/FPRS_DU bits in fpsaved[0]); unsaved halves are
 * zeroed.  Always reports success (returns 1).
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if(fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
798
/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 * On success the FPU state is reset for the fresh image and any
 * pending single-step tracing is cancelled.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if (regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I1],
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I2], regs);
	putname(filename);
	if (!error) {
		/* New image: drop the old FPU state entirely. */
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
out:
	return error;
}
836
/* Walk up to 16 kernel stack frames of a sleeping task and return the
 * first PC outside the scheduler; 0 if the task is running, is the
 * caller, or nothing useful is found.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	unsigned long thread_info_base;
	struct reg_window *rw;
        unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
            task->state == TASK_RUNNING)
		goto out;

	thread_info_base = (unsigned long) task->thread_info;
	bias = STACK_BIAS;
	fp = task->thread_info->ksp + bias;

	do {
		/* Bogus frame pointer? */
		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
		    fp >= (thread_info_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
new file mode 100644
index 000000000000..1722dc51b0d8
--- /dev/null
+++ b/arch/sparc64/kernel/ptrace.c
@@ -0,0 +1,646 @@
1/* ptrace.c: Sparc process tracing support.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
7 * and David Mosberger.
8 *
9 * Added Linux support -miguel (weird, eh?, the original code was meant
10 * to emulate SunOS).
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/errno.h>
17#include <linux/ptrace.h>
18#include <linux/user.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/security.h>
22
23#include <asm/asi.h>
24#include <asm/pgtable.h>
25#include <asm/system.h>
26#include <asm/uaccess.h>
27#include <asm/psrcompat.h>
28#include <asm/visasm.h>
29#include <asm/spitfire.h>
30
31/* Returning from ptrace is a bit tricky because the syscall return
32 * low level code assumes any value returned which is negative and
33 * is a valid errno will mean setting the condition codes to indicate
34 * an error return. This doesn't work, so we have this hook.
35 */
36static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
37{
38 regs->u_regs[UREG_I0] = error;
39 regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
40 regs->tpc = regs->tnpc;
41 regs->tnpc += 4;
42}
43
44static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
45{
46 regs->u_regs[UREG_I0] = value;
47 regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
48 regs->tpc = regs->tnpc;
49 regs->tnpc += 4;
50}
51
52static inline void
53pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
54{
55 if (test_thread_flag(TIF_32BIT)) {
56 if (put_user(value, (unsigned int __user *) addr)) {
57 pt_error_return(regs, EFAULT);
58 return;
59 }
60 } else {
61 if (put_user(value, (long __user *) addr)) {
62 pt_error_return(regs, EFAULT);
63 return;
64 }
65 }
66 regs->u_regs[UREG_I0] = 0;
67 regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
68 regs->tpc = regs->tnpc;
69 regs->tnpc += 4;
70}
71
72static void
73pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
74{
75 if (current->personality == PER_SUNOS)
76 pt_succ_return (regs, val);
77 else
78 pt_succ_return_linux (regs, val, addr);
79}
80
/* #define ALLOW_INIT_TRACING */
/* #define DEBUG_PTRACE */

#ifdef DEBUG_PTRACE
/* Printable names for the low ptrace request numbers, indexed by
 * request code; used only by the DEBUG_PTRACE printk in do_ptrace().
 */
char *pt_rq [] = {
	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
	/* 24 */ "SYSCALL", ""
};
#endif
95
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.  No such per-task
 * debug state is maintained here, so this is a no-op.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}
105
/* Entry point for the ptrace syscall trap.  The request and its
 * arguments arrive in the caller's input registers; results are
 * delivered through the pt_*_return helpers above rather than via
 * the ordinary syscall return path.
 */
asmlinkage void do_ptrace(struct pt_regs *regs)
{
	int request = regs->u_regs[UREG_I0];
	pid_t pid = regs->u_regs[UREG_I1];
	unsigned long addr = regs->u_regs[UREG_I2];
	unsigned long data = regs->u_regs[UREG_I3];
	unsigned long addr2 = regs->u_regs[UREG_I4];
	struct task_struct *child;
	int ret;

	/* A 32-bit tracer only supplies 32 meaningful bits per arg. */
	if (test_thread_flag(TIF_32BIT)) {
		addr &= 0xffffffffUL;
		data &= 0xffffffffUL;
		addr2 &= 0xffffffffUL;
	}
	lock_kernel();
#ifdef DEBUG_PTRACE
	{
		char *s;

		if ((request >= 0) && (request <= 24))
			s = pt_rq [request];
		else
			s = "unknown";

		if (request == PTRACE_POKEDATA && data == 0x91d02001){
			printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
				pid, addr, addr2);
		} else
			printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
			       s, request, pid, addr, data, addr2);
	}
#endif
	if (request == PTRACE_TRACEME) {
		int ret;

		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED) {
			pt_error_return(regs, EPERM);
			goto out;
		}
		ret = security_ptrace(current->parent, current);
		if (ret) {
			pt_error_return(regs, -ret);
			goto out;
		}

		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		pt_succ_return(regs, 0);
		goto out;
	}
#ifndef ALLOW_INIT_TRACING
	if (pid == 1) {
		/* Can't dork with init. */
		pt_error_return(regs, EPERM);
		goto out;
	}
#endif
	/* Look the target up and pin it with a reference; everything
	 * past this point must exit through out_tsk to drop it.
	 */
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);

	if (!child) {
		pt_error_return(regs, ESRCH);
		goto out;
	}

	/* SunOS personality uses SUNATTACH where Linux uses ATTACH. */
	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
		if (ptrace_attach(child)) {
			pt_error_return(regs, EPERM);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0) {
		pt_error_return(regs, -ret);
		goto out_tsk;
	}

	/* The *64 request numbers (plain code + 30) take their
	 * addresses from %g2/%g3 instead; remap them onto the
	 * ordinary requests.
	 */
	if (!(test_thread_flag(TIF_32BIT))	&&
	    ((request == PTRACE_READDATA64)		||
	     (request == PTRACE_WRITEDATA64)	||
	     (request == PTRACE_READTEXT64)		||
	     (request == PTRACE_WRITETEXT64)	||
	     (request == PTRACE_PEEKTEXT64)		||
	     (request == PTRACE_POKETEXT64)		||
	     (request == PTRACE_PEEKDATA64)		||
	     (request == PTRACE_POKEDATA64))) {
		addr = regs->u_regs[UREG_G2];
		addr2 = regs->u_regs[UREG_G3];
		request -= 30; /* wheee... */
	}

	switch(request) {
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int res, copied;

		res = -EIO;
		/* Word size follows the tracer's ABI. */
		if (test_thread_flag(TIF_32BIT)) {
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 0);
			tmp64 = (unsigned long) tmp32;
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 0);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_os_succ_return(regs, tmp64, (void __user *) data);
		goto flush_and_out;
	}

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int copied, res = -EIO;

		if (test_thread_flag(TIF_32BIT)) {
			tmp32 = data;
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 1);
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			tmp64 = data;
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 1);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_succ_return(regs, res);
		goto flush_and_out;
	}

	/* Copy the child's 32-bit register image to the tracer. */
	case PTRACE_GETREGS: {
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		int rval;

		if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
		    __put_user(cregs->tpc, (&pregs->pc)) ||
		    __put_user(cregs->tnpc, (&pregs->npc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		/* %g1..%o7 only; %g0 is hardwired to zero. */
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	/* Copy the child's full 64-bit register image to the tracer. */
	case PTRACE_GETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tpc = cregs->tpc;
		int rval;

		if ((child->thread_info->flags & _TIF_32BIT) != 0)
			tpc &= 0xffffffff;
		if (__put_user(cregs->tstate, (&pregs->tstate)) ||
		    __put_user(tpc, (&pregs->tpc)) ||
		    __put_user(cregs->tnpc, (&pregs->tnpc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_SETREGS: {
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned int psr, pc, npc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(psr, (&pregs->psr)) ||
		    __get_user(pc, (&pregs->pc)) ||
		    __get_user(npc, (&pregs->npc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		cregs->tstate &= ~(TSTATE_ICC);
		cregs->tstate |= psr_to_tstate_icc(psr);
		/* Silently ignore misaligned PC/nPC values. */
		if (!((pc | npc) & 3)) {
			cregs->tpc = pc;
			cregs->tnpc = npc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tstate, tpc, tnpc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(tstate, (&pregs->tstate)) ||
		    __get_user(tpc, (&pregs->tpc)) ||
		    __get_user(tnpc, (&pregs->tnpc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if ((child->thread_info->flags & _TIF_32BIT) != 0) {
			tpc &= 0xffffffff;
			tnpc &= 0xffffffff;
		}
		/* Only the condition codes may be modified. */
		tstate &= (TSTATE_ICC | TSTATE_XCC);
		cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
		cregs->tstate |= tstate;
		if (!((tpc | tnpc) & 3)) {
			cregs->tpc = tpc;
			cregs->tnpc = tnpc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* 32-bit FPU image: %f0-%f31, fsr, and an (always empty)
	 * SunOS-style FP queue.
	 */
	case PTRACE_GETFPREGS: {
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (32 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
		    __put_user(0, (&fps->fpqd)) ||
		    __put_user(0, (&fps->flags)) ||
		    __put_user(0, (&fps->extra)) ||
		    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* 64-bit FPU image: all 64 single-precision slots plus FSR. */
	case PTRACE_GETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (64 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS: {
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;
		unsigned fsr;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (32 * sizeof(unsigned int))) ||
		    __get_user(fsr, (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		/* Only the low 32 bits of the FSR are supplied. */
		child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
		child->thread_info->xfsr[0] |= fsr;
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		/* Mark the lower FP bank as containing live state. */
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (64 * sizeof(unsigned int))) ||
		    __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		/* Both FP banks now contain live state. */
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* Bulk transfers: success only if the full length moved. */
	case PTRACE_READTEXT:
	case PTRACE_READDATA: {
		int res = ptrace_readdata(child, addr,
					  (char __user *)addr2, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		if (res >= 0)
			res = -EIO;	/* partial transfer */
		pt_error_return(regs, -res);
		goto flush_and_out;
	}

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA: {
		int res = ptrace_writedata(child, (char __user *) addr2,
					   addr, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		if (res >= 0)
			res = -EIO;	/* partial transfer */
		pt_error_return(regs, -res);
		goto flush_and_out;
	}
	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
		addr = 1;
		/* fall through: addr == 1 means "don't change the PC" */

	case PTRACE_CONT: { /* restart after signal. */
		if (data > _NSIG) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		if (addr != 1) {
			unsigned long pc_mask = ~0UL;

			if ((child->thread_info->flags & _TIF_32BIT) != 0)
				pc_mask = 0xffffffff;

			if (addr & 3) {
				pt_error_return(regs, EINVAL);
				goto out_tsk;
			}
#ifdef DEBUG_PTRACE
			printk ("Original: %016lx %016lx\n",
				child->thread_info->kregs->tpc,
				child->thread_info->kregs->tnpc);
			printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
			child->thread_info->kregs->tpc = (addr & pc_mask);
			child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
		}

		if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}

		child->exit_code = data;
#ifdef DEBUG_PTRACE
		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
			child->pid, child->exit_code,
			child->thread_info->kregs->tpc,
			child->thread_info->kregs->tnpc);
		       
#endif
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

/*
 * make the child exit.  Best I can do is send it a sigkill. 
 * perhaps it should be put in the status that it wants to 
 * exit.
 */
	case PTRACE_KILL: {
		if (child->exit_state == EXIT_ZOMBIE) {	/* already dead */
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		child->exit_code = SIGKILL;
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
		int error = ptrace_detach(child, data);
		if (error) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* PTRACE_DUMPCORE unsupported... */

	default: {
		int err = ptrace_request(child, request, addr, data);
		if (err)
			pt_error_return(regs, -err);
		else
			pt_succ_return(regs, 0);
		goto out_tsk;
	}
	}
flush_and_out:
	/* Data we just moved into the child may still sit in our
	 * caches; flush the D-cache tags (and I-cache for text
	 * requests on pre-Cheetah) so the child observes it.
	 */
	{
		unsigned long va;

		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			for (va = 0; va < (1 << 16); va += (1 << 5))
				spitfire_put_dcache_tag(va, 0x0);
			/* No need to mess with I-cache on Cheetah. */
		} else {
			for (va =  0; va < L1DCACHE_SIZE; va += 32)
				spitfire_put_dcache_tag(va, 0x0);
			if (request == PTRACE_PEEKTEXT ||
			    request == PTRACE_POKETEXT ||
			    request == PTRACE_READTEXT ||
			    request == PTRACE_WRITETEXT) {
				for (va =  0; va < (PAGE_SIZE << 1); va += 32)
					spitfire_put_icache_tag(va, 0x0);
				__asm__ __volatile__("flush %g6");
			}
		}
	}
out_tsk:
	if (child)
		put_task_struct(child);
out:
	unlock_kernel();
}
620
/* Called on syscall entry/exit when tracing may be active: stop the
 * task with SIGTRAP so the tracer can inspect it, then deliver any
 * signal the tracer asked us to continue with.
 */
asmlinkage void syscall_trace(void)
{
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
#endif
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* OR in 0x80 so a TRACESYSGOOD tracer can distinguish syscall
	 * stops from real SIGTRAPs.
	 */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
#ifdef DEBUG_PTRACE
	printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
		current->pid, current->exit_code);
#endif
	if (current->exit_code) {
		send_sig (current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
new file mode 100644
index 000000000000..0696ed4b9d64
--- /dev/null
+++ b/arch/sparc64/kernel/rtrap.S
@@ -0,0 +1,362 @@
1/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
2 * rtrap.S: Preparing for return from trap on Sparc V9.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/config.h>
9
10#include <asm/asi.h>
11#include <asm/pstate.h>
12#include <asm/ptrace.h>
13#include <asm/spitfire.h>
14#include <asm/head.h>
15#include <asm/visasm.h>
16#include <asm/processor.h>
17
/* Processor-state words used while unwinding: full (IRQs on),
 * IRQ-off, and IRQ-off with the alternate-globals bit set for the
 * final register restore.
 */
#define		RTRAP_PSTATE		(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define		RTRAP_PSTATE_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

		/* Register %l6 keeps track of whether we are returning
		 * from a system call or not.  It is cleared if we call
		 * do_notify_resume, and it must not be otherwise modified
		 * until we fully commit to returning to userspace.
		 */

		.text
		.align			32
		/* Pending softirqs were seen on the way out: run them,
		 * then resume the normal rtrap flow.
		 */
__handle_softirq:
		call			do_softirq
		 nop
		ba,a,pt			%xcc, __handle_softirq_continue
		 nop
		/* Need a reschedule before returning to userspace:
		 * call schedule() with IRQs enabled (delay slot), then
		 * redo the work checks with IRQs off again.
		 */
__handle_preemption:
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		ba,pt			%xcc, __handle_preemption_continue
		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
40
		/* Spilled user register windows are pending (TI_WSAVED):
		 * fault them back into the user stack, then redo the
		 * resched/signal checks that may have been raised while
		 * IRQs were enabled.
		 */
__handle_user_windows:
		call			fault_in_user_windows
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		/* Redo sched+sig checks */
		ldx			[%g6 + TI_FLAGS], %l0
		andcc			%l0, _TIF_NEED_RESCHED, %g0

		be,pt			%xcc, 1f
		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0

1:		andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
		be,pt			%xcc, __handle_user_windows_continue
		 nop
		/* Marshal the do_notify_resume() argument registers. */
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3
		add			%sp, PTREGS_OFF, %o1
		mov			%l0, %o4

		call			do_notify_resume
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6		! no longer a syscall return
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4	! re-extract saved PIL
		ba,pt			%xcc, __handle_user_windows_continue

		 andn			%l1, %l4, %l1
		/* _TIF_PERFCTR was set: call update_perfctrs(), then —
		 * since that ran with IRQs enabled — redo the user
		 * window, resched and signal checks from scratch.
		 */
__handle_perfctrs:
		call			update_perfctrs
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldub			[%g6 + TI_WSAVED], %o2
		brz,pt			%o2, 1f
		 nop
		/* Redo userwin+sched+sig checks */
		call			fault_in_user_windows

		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0
		andcc			%l0, _TIF_NEED_RESCHED, %g0
		be,pt			%xcc, 1f

		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0
1:		andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0

		be,pt			%xcc, __handle_perfctrs_continue
		 sethi			%hi(TSTATE_PEF), %o0
		/* Marshal the do_notify_resume() argument registers. */
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3
		add			%sp, PTREGS_OFF, %o1
		mov			%l0, %o4
		call			do_notify_resume

		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6		! no longer a syscall return
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		andn			%l1, %l4, %l1
		ba,pt			%xcc, __handle_perfctrs_continue

		 sethi			%hi(TSTATE_PEF), %o0
		/* The saved tstate has PEF set: trust the live %fprs —
		 * if FEF is off there, strip TSTATE_PEF before the
		 * return so userspace sees the FPU disabled.
		 */
__handle_userfpu:
		rd			%fprs, %l5
		andcc			%l5, FPRS_FEF, %g0
		sethi			%hi(TSTATE_PEF), %o0
		be,a,pn			%icc, __handle_userfpu_continue
		 andn			%l1, %o0, %l1	! annulled unless taken
		ba,a,pt			%xcc, __handle_userfpu_continue
		/* A signal (or notify-resume work) is pending: hand the
		 * saved state to do_notify_resume() with IRQs enabled,
		 * then continue the return with IRQs off.
		 */
__handle_signal:
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3
		add			%sp, PTREGS_OFF, %o1
		mov			%l0, %o4
		call			do_notify_resume
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6		! no longer a syscall return

		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		ba,pt			%xcc, __handle_signal_continue
		 andn			%l1, %l4, %l1
150
		.align			64
		.globl			rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap_clr_l6:	clr			%l6
		/* Main return-from-trap path: check softirqs, then run
		 * the userspace work loop (resched/signal/windows/FPU)
		 * or take the short kernel-return path.
		 */
rtrap:
		ldub			[%g6 + TI_CPU], %l0
		sethi			%hi(irq_stat), %l2	! &softirq_active
		or			%l2, %lo(irq_stat), %l2	! &softirq_active
irqsz_patchme:	sllx			%l0, 0, %l0	! shift patched at boot
		lduw			[%l2 + %l0], %l1	! softirq_pending
		cmp			%l1, 0

		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
		bne,pn			%icc, __handle_softirq
		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
		sethi			%hi(0xf << 20), %l4
		andcc			%l1, TSTATE_PRIV, %l3	! %l3 != 0: kernel return
		and			%l1, %l4, %l4		! saved PIL field
		bne,pn			%icc, to_kernel
		 andn			%l1, %l4, %l1

		/* We must hold IRQs off and atomically test schedule+signal
		 * state, then hold them off all the way back to userspace.
		 * If we are returning to kernel, none of this matters.
		 *
		 * If we do not do this, there is a window where we would do
		 * the tests, later the signal/resched event arrives but we do
		 * not process it since we are still in kernel mode.  It would
		 * take until the next local IRQ before the signal/resched
		 * event would be handled.
		 *
		 * This also means that if we have to deal with performance
		 * counters or user windows, we have to redo all of these
		 * sched+signal checks with IRQs disabled.
		 */
to_user:	wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		wrpr			0, %pil
__handle_preemption_continue:
		ldx			[%g6 + TI_FLAGS], %l0
		sethi			%hi(_TIF_USER_WORK_MASK), %o0
		or			%o0, %lo(_TIF_USER_WORK_MASK), %o0
		andcc			%l0, %o0, %g0
		sethi			%hi(TSTATE_PEF), %o0
		be,pt			%xcc, user_nowork
		 andcc			%l1, %o0, %g0
		andcc			%l0, _TIF_NEED_RESCHED, %g0
		bne,pn			%xcc, __handle_preemption
		 andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
		bne,pn			%xcc, __handle_signal
__handle_signal_continue:
		ldub			[%g6 + TI_WSAVED], %o2
		brnz,pn			%o2, __handle_user_windows
		 nop
__handle_user_windows_continue:
		ldx			[%g6 + TI_FLAGS], %l5
		andcc			%l5, _TIF_PERFCTR, %g0
		sethi			%hi(TSTATE_PEF), %o0
		bne,pn			%xcc, __handle_perfctrs
__handle_perfctrs_continue:
		 andcc			%l1, %o0, %g0

		/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
		bne,pn			%xcc, __handle_userfpu
		 stb			%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

		/* Common tail: reload every user register from the
		 * pt_regs frame and drop back with retry.
		 */
rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
		ldx			[%sp + PTREGS_OFF + PT_V9_G2], %g2

		ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
		mov			TSB_REG, %g6
		brnz,a,pn		%l3, 1f		! kernel return keeps TSB in %g5
		 ldxa			[%g6] ASI_IMMU, %g5
1:		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1

		ldx			[%sp + PTREGS_OFF + PT_V9_I2], %i2
		ldx			[%sp + PTREGS_OFF + PT_V9_I3], %i3
		ldx			[%sp + PTREGS_OFF + PT_V9_I4], %i4
		ldx			[%sp + PTREGS_OFF + PT_V9_I5], %i5
		ldx			[%sp + PTREGS_OFF + PT_V9_I6], %i6
		ldx			[%sp + PTREGS_OFF + PT_V9_I7], %i7
		ldx			[%sp + PTREGS_OFF + PT_V9_TPC], %l2
		ldx			[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

		ld			[%sp + PTREGS_OFF + PT_V9_Y], %o3
		wr			%o3, %g0, %y
		srl			%l4, 20, %l4	! restore the saved PIL
		wrpr			%l4, 0x0, %pil
		wrpr			%g0, 0x1, %tl
		wrpr			%l1, %g0, %tstate
		wrpr			%l2, %g0, %tpc
		wrpr			%o2, %g0, %tnpc

		brnz,pn			%l3, kern_rtt
		 mov			PRIMARY_CONTEXT, %l7
		ldxa			[%l7 + %l7] ASI_DMMU, %l0
		/* The sethi below is rewritten at boot on Cheetah+ —
		 * see cheetah_plus_patch_rtrap at the end of the file.
		 */
cplus_rtrap_insn_1:
		sethi			%hi(0), %l1
		sllx			%l1, 32, %l1
		or			%l0, %l1, %l0
		stxa			%l0, [%l7] ASI_DMMU
		flush			%g6
		rdpr			%wstate, %l1
		rdpr			%otherwin, %l2
		srl			%l1, 3, %l1

		wrpr			%l2, %g0, %canrestore
		wrpr			%l1, %g0, %wstate
		wrpr			%g0, %g0, %otherwin
		restore
		rdpr			%canrestore, %g1
		wrpr			%g1, 0x0, %cleanwin
		retry
		 nop

kern_rtt:	restore
		retry
		/* Returning to kernel code.  With CONFIG_PREEMPT, take
		 * a preemption point first (only if preempt_count is
		 * zero and the trapped context ran with PIL 0).
		 */
to_kernel:
#ifdef CONFIG_PREEMPT
		ldsw			[%g6 + TI_PRE_COUNT], %l5
		brnz			%l5, kern_fpucheck
		 ldx			[%g6 + TI_FLAGS], %l5
		andcc			%l5, _TIF_NEED_RESCHED, %g0
		be,pt			%xcc, kern_fpucheck
		 srl			%l4, 20, %l5
		cmp			%l5, 0
		bne,pn			%xcc, kern_fpucheck
		 sethi			%hi(PREEMPT_ACTIVE), %l6
		stw			%l6, [%g6 + TI_PRE_COUNT]
		call			schedule
		 nop
		ba,pt			%xcc, rtrap
		 stw			%g0, [%g6 + TI_PRE_COUNT]
#endif
		/* If a nested trap saved FPU state (TI_FPDEPTH != 0),
		 * reload the outer level's FP registers, GSR and FSR
		 * from the thread_info save area before returning.
		 */
kern_fpucheck:	ldub			[%g6 + TI_FPDEPTH], %l5
		brz,pt			%l5, rt_continue
		 srl			%l5, 1, %o0
		add			%g6, TI_FPSAVED, %l6
		ldub			[%l6 + %o0], %l2	! saved FPRS for this level
		sub			%l5, 2, %l5

		add			%g6, TI_GSR, %o1
		andcc			%l2, (FPRS_FEF|FPRS_DU), %g0
		be,pt			%icc, 2f		! nothing saved: just pop
		 and			%l2, FPRS_DL, %l6
		andcc			%l2, FPRS_FEF, %g0
		be,pn			%icc, 5f		! only upper half saved
		 sll			%o0, 3, %o5
		rd			%fprs, %g1

		wr			%g1, FPRS_FEF, %fprs
		ldx			[%o1 + %o5], %g1
		add			%g6, TI_XFSR, %o1
		membar			#StoreLoad | #LoadLoad
		sll			%o0, 8, %o2
		add			%g6, TI_FPREGS, %o3
		brz,pn			%l6, 1f
		 add			%g6, TI_FPREGS+0x40, %o4

		/* Block-load the lower bank (%f0-%f31). */
		ldda			[%o3 + %o2] ASI_BLK_P, %f0
		ldda			[%o4 + %o2] ASI_BLK_P, %f16
1:		andcc			%l2, FPRS_DU, %g0
		be,pn			%icc, 1f
		 wr			%g1, 0, %gsr
		add			%o2, 0x80, %o2
		/* Block-load the upper bank (%f32-%f62). */
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48

1:		membar			#Sync
		ldx			[%o1 + %o5], %fsr
2:		stb			%l5, [%g6 + TI_FPDEPTH]	! pop one save level
		ba,pt			%xcc, rt_continue
		 nop
5:		wr			%g0, FPRS_FEF, %fprs
		membar			#StoreLoad | #LoadLoad
		sll			%o0, 8, %o2

		add			%g6, TI_FPREGS+0x80, %o3
		add			%g6, TI_FPREGS+0xc0, %o4
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48
		membar			#Sync
		wr			%g0, FPRS_DU, %fprs
		ba,pt			%xcc, rt_continue
		 stb			%l5, [%g6 + TI_FPDEPTH]	! pop one save level
345
		/* Replacement instruction installed on Cheetah+: load
		 * the nucleus-context bits instead of zero.
		 */
cplus_rinsn_1:	sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %l1

		.globl			cheetah_plus_patch_rtrap
cheetah_plus_patch_rtrap:
		/* We configure the dTLB512_0 for 4MB pages and the
		 * dTLB512_1 for 8K pages when in context zero.
		 *
		 * Boot-time code patching: copy the instruction word at
		 * cplus_rinsn_1 over the sethi at cplus_rtrap_insn_1
		 * and flush the I-cache line.
		 */
		sethi			%hi(cplus_rinsn_1), %o0
		sethi			%hi(cplus_rtrap_insn_1), %o2
		lduw			[%o0 + %lo(cplus_rinsn_1)], %o1
		or			%o2, %lo(cplus_rtrap_insn_1), %o2
		stw			%o1, [%o2]
		flush			%o2

		retl
		 nop
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
new file mode 100644
index 000000000000..14d9c3a21b9a
--- /dev/null
+++ b/arch/sparc64/kernel/sbus.c
@@ -0,0 +1,1243 @@
1/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
2 * sbus.c: UltraSparc SBUS controller support.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/mm.h>
10#include <linux/spinlock.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14
15#include <asm/page.h>
16#include <asm/sbus.h>
17#include <asm/io.h>
18#include <asm/upa.h>
19#include <asm/cache.h>
20#include <asm/dma.h>
21#include <asm/irq.h>
22#include <asm/starfire.h>
23
24#include "iommu_common.h"
25
26/* These should be allocated on an SMP_CACHE_BYTES
27 * aligned boundary for optimal performance.
28 *
29 * On SYSIO, using an 8K page size we have 1GB of SBUS
30 * DMA space mapped. We divide this space into equally
31 * sized clusters. We allocate a DMA mapping from the
32 * cluster that matches the order of the allocation, or
33 * if the order is greater than the number of clusters,
34 * we try to allocate from the last cluster.
35 */
36
37#define NCLUSTERS 8UL
38#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
39#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
40#define CLUSTER_MASK (CLUSTER_SIZE - 1)
41#define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
42#define MAP_BASE ((u32)0xc0000000)
43
/* Per-bus SYSIO IOMMU state.  The /*0xNN*&#8203;/ comments record each
 * member's byte offset within the structure.
 */
struct sbus_iommu {
/*0x00*/spinlock_t		lock;

/*0x08*/iopte_t			*page_table;
/*0x10*/unsigned long		strbuf_regs;
/*0x18*/unsigned long		iommu_regs;
/*0x20*/unsigned long		sbus_control_reg;

	/* Written by the hardware when a streaming-buffer flush
	 * completes; see strbuf_flush().
	 */
/*0x28*/volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/struct {
		u16	next;
		u16	flush;
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/u32		lowest_consistent_map;
};
69
70/* Offsets from iommu_regs */
71#define SYSIO_IOMMUREG_BASE 0x2400UL
72#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
73#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
74#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
75#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
76#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
77#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
78#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
79#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
80
81#define IOMMU_DRAM_VALID (1UL << 30UL)
82
83static void __iommu_flushall(struct sbus_iommu *iommu)
84{
85 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
86 int entry;
87
88 for (entry = 0; entry < 16; entry++) {
89 upa_writeq(0, tag);
90 tag += 8UL;
91 }
92 upa_readq(iommu->sbus_control_reg);
93
94 for (entry = 0; entry < NCLUSTERS; entry++) {
95 iommu->alloc_info[entry].flush =
96 iommu->alloc_info[entry].next;
97 }
98}
99
100static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
101{
102 while (npages--)
103 upa_writeq(base + (npages << IO_PAGE_SHIFT),
104 iommu->iommu_regs + IOMMU_FLUSH);
105 upa_readq(iommu->sbus_control_reg);
106}
107
108/* Offsets from strbuf_regs */
109#define SYSIO_STRBUFREG_BASE 0x2800UL
110#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
111#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
112#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
113#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
114#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
115#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
116#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
117
118#define STRBUF_TAG_VALID 0x02UL
119
/* Flush the streaming buffer for @npages pages at DVMA @base and
 * wait for the hardware to signal completion.  Caller must hold
 * iommu->lock.
 */
static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	/* The hardware DMA-writes a non-zero value to this flag (via
	 * the physical address programmed into STRBUF_FSYNC below)
	 * once all queued page flushes have drained.
	 */
	iommu->strbuf_flushflag = 0UL;
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* Whoopee cushion! */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	upa_readq(iommu->sbus_control_reg);

	/* Busy-wait for the completion flag to be written. */
	while (iommu->strbuf_flushflag == 0UL)
		membar("#LoadLoad");
}
134
/* Allocate IOPTEs for a streaming DMA mapping of @npages pages.
 * The mapping comes from the cluster whose order matches the
 * allocation size; requests bigger than the largest cluster order
 * take multiple entries from the last cluster.  Returns the first
 * IOPTE of the run, or NULL on exhaustion.  Caller must hold
 * iommu->lock.
 */
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first, *cluster;
	unsigned long cnum, ent, nent, flush_point, found;

	/* cnum = ceil(log2(npages)): index of the matching cluster. */
	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if(cnum >= NCLUSTERS) {
		/* Too big for a single entry of any cluster; take
		 * several entries from the last (largest) cluster.
		 */
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);

	/* Cluster 0 is shared with consistent mappings, which grow
	 * down from its top; never scan past the lowest one.
	 */
	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	/* Resume the scan where the previous allocation left off. */
	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	cluster = NULL;
	found = 0;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			found++;
			if (!cluster)
				cluster = iopte;
		} else {
			/* Used cluster in the way */
			cluster = NULL;
			found = 0;
		}

		if (found == nent)
			break;

		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;

			/* Multiple cluster allocations must not wrap */
			cluster = NULL;
			found = 0;
		}
		/* Crossing the flush point means the IOMMU TLB may
		 * still cache entries we are about to reuse.
		 */
		if (ent == flush_point)
			__iommu_flushall(iommu);
		/* Came full circle without finding room. */
		if (iopte == first)
			goto bad;
	}

	/* ent/iopte points to the last cluster entry we're going to use,
	 * so save our place for the next allocation.
	 */
	if ((iopte + (1 << cnum)) >= limit)
		ent = 0;
	else
		ent = ent + 1;
	iommu->alloc_info[cnum].next = ent;
	if (ent == flush_point)
		__iommu_flushall(iommu);

	/* I've got your streaming cluster right here buddy boy... */
	return cluster;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
211
/* Free the streaming-mapping IOPTE run covering @npages pages at
 * DVMA @base.  Caller must hold iommu->lock.
 */
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent, nent;
	iopte_t *iopte;

	/* Recompute the cluster index and entry count exactly as
	 * alloc_streaming_cluster() did for this size.
	 */
	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if(cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	do {
		iopte_val(*iopte) = 0UL;
		iopte += 1 << cnum;
	} while(--nent);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
241
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	/* Scan downward from the top of cluster 0 looking for a run
	 * of @npages free (invalid) IOPTEs.  Returns the lowest
	 * IOPTE of the run, or NULL.  Caller must hold iommu->lock.
	 */
	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			/* Verify the npages-1 entries below this one
			 * are free as well; tmp reaches 0 on success.
			 */
			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				/* Lower the watermark so streaming
				 * allocations in cluster 0 stop short
				 * of consistent mappings.
				 */
				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
269
270static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
271{
272 iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
273
274 if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
275 iopte_t *walk = iopte + npages;
276 iopte_t *limit;
277
278 limit = iommu->page_table + CLUSTER_NPAGES;
279 while (walk < limit) {
280 if (iopte_val(*walk) != 0UL)
281 break;
282 walk++;
283 }
284 iommu->lowest_consistent_map =
285 (walk - iommu->page_table);
286 }
287
288 while (npages--)
289 *iopte++ = __iopte(0UL);
290}
291
/* Allocate @size bytes of consistent (coherent) DVMA memory for
 * @sdev.  Returns the zeroed CPU virtual address and stores the
 * device-visible DVMA address in *@dvma_addr, or returns NULL on
 * any failure.
 */
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)	/* refuse absurdly large requests */
		return NULL;
	first_page = __get_free_pages(GFP_KERNEL, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	while (npages--) {
		/* Consistent mappings are cacheable and writable and
		 * bypass the streaming buffer (no IOPTE_STBUF).
		 */
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
336
337void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
338{
339 unsigned long order, npages;
340 struct sbus_iommu *iommu;
341
342 if (size <= 0 || sdev == NULL || cpu == NULL)
343 return;
344
345 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
346 iommu = sdev->bus->iommu;
347
348 spin_lock_irq(&iommu->lock);
349 free_consistent_cluster(iommu, dvma, npages);
350 iommu_flush(iommu, dvma, npages);
351 spin_unlock_irq(&iommu->lock);
352
353 order = get_order(size);
354 if (order < 10)
355 free_pages((unsigned long)cpu, order);
356}
357
358dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
359{
360 struct sbus_iommu *iommu = sdev->bus->iommu;
361 unsigned long npages, pbase, flags;
362 iopte_t *iopte;
363 u32 dma_base, offset;
364 unsigned long iopte_bits;
365
366 if (dir == SBUS_DMA_NONE)
367 BUG();
368
369 pbase = (unsigned long) ptr;
370 offset = (u32) (pbase & ~IO_PAGE_MASK);
371 size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
372 pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
373
374 spin_lock_irqsave(&iommu->lock, flags);
375 npages = size >> IO_PAGE_SHIFT;
376 iopte = alloc_streaming_cluster(iommu, npages);
377 if (iopte == NULL)
378 goto bad;
379 dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
380 npages = size >> IO_PAGE_SHIFT;
381 iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
382 if (dir != SBUS_DMA_TODEVICE)
383 iopte_bits |= IOPTE_WRITE;
384 while (npages--) {
385 *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
386 pbase += IO_PAGE_SIZE;
387 }
388 npages = size >> IO_PAGE_SHIFT;
389 spin_unlock_irqrestore(&iommu->lock, flags);
390
391 return (dma_base | offset);
392
393bad:
394 spin_unlock_irqrestore(&iommu->lock, flags);
395 BUG();
396 return 0;
397}
398
399void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
400{
401 struct sbus_iommu *iommu = sdev->bus->iommu;
402 u32 dma_base = dma_addr & IO_PAGE_MASK;
403 unsigned long flags;
404
405 size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
406
407 spin_lock_irqsave(&iommu->lock, flags);
408 free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
409 strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
410 spin_unlock_irqrestore(&iommu->lock, flags);
411}
412
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

/* Populate IOPTEs for a prepared scatterlist.  @nused is the number
 * of resulting DMA segments (entries with dma_length set), @nelems
 * the total number of source entries; @iopte_bits are OR'd into
 * every entry written.
 */
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;	/* ~0UL == "no page in progress" */
		u32 dma_npages;

		/* Number of IO pages this DMA segment spans. */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry starts in a new page. */
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry itself crosses into the
					 * next page; map from there.
					 */
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
487
/* Map a scatterlist for streaming DVMA in direction @dir.  Returns
 * the number of DMA segments actually used (may be fewer than
 * @nents after coalescing).  BUGs on SBUS_DMA_NONE or on IOPTE
 * exhaustion.
 */
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, npages;
	iopte_t *iopte;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_address =
			sbus_map_single(sdev,
					(page_address(sg->page) + sg->offset),
					sg->length, dir);
		sg->dma_length = sg->length;
		return 1;
	}

	/* prepare_sg() is defined in iommu_common.c; it appears to
	 * coalesce entries (filling dma_address/dma_length with
	 * relative offsets) and return the total page count — see
	 * that file for the exact contract.
	 */
	npages = prepare_sg(sg, nents);

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses. */
	sgtmp = sg;
	used = nents;

	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nents - used;	/* count of segments with a mapping */

	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;

	fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
	verify_sglist(sg, nents, iopte, npages);
#endif
	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
547
548void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
549{
550 unsigned long size, flags;
551 struct sbus_iommu *iommu;
552 u32 dvma_base;
553 int i;
554
555 /* Fast path single entry scatterlists. */
556 if (nents == 1) {
557 sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
558 return;
559 }
560
561 dvma_base = sg[0].dma_address & IO_PAGE_MASK;
562 for (i = 0; i < nents; i++) {
563 if (sg[i].dma_length == 0)
564 break;
565 }
566 i--;
567 size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
568
569 iommu = sdev->bus->iommu;
570 spin_lock_irqsave(&iommu->lock, flags);
571 free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
572 strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
573 spin_unlock_irqrestore(&iommu->lock, flags);
574}
575
576void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
577{
578 struct sbus_iommu *iommu = sdev->bus->iommu;
579 unsigned long flags;
580
581 size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
582
583 spin_lock_irqsave(&iommu->lock, flags);
584 strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
585 spin_unlock_irqrestore(&iommu->lock, flags);
586}
587
/* Intentionally a no-op: in this driver only the for_cpu direction
 * requires a streaming-buffer flush (see sbus_dma_sync_single_for_cpu).
 */
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}
591
592void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
593{
594 struct sbus_iommu *iommu = sdev->bus->iommu;
595 unsigned long flags, size;
596 u32 base;
597 int i;
598
599 base = sg[0].dma_address & IO_PAGE_MASK;
600 for (i = 0; i < nents; i++) {
601 if (sg[i].dma_length == 0)
602 break;
603 }
604 i--;
605 size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
606
607 spin_lock_irqsave(&iommu->lock, flags);
608 strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
609 spin_unlock_irqrestore(&iommu->lock, flags);
610}
611
/* Intentionally a no-op: in this driver only the for_cpu direction
 * requires a streaming-buffer flush (see sbus_dma_sync_sg_for_cpu).
 */
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
615
616/* Enable 64-bit DVMA mode for the given device. */
617void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
618{
619 struct sbus_iommu *iommu = sdev->bus->iommu;
620 int slot = sdev->slot;
621 unsigned long cfg_reg;
622 u64 val;
623
624 cfg_reg = iommu->sbus_control_reg;
625 switch (slot) {
626 case 0:
627 cfg_reg += 0x20UL;
628 break;
629 case 1:
630 cfg_reg += 0x28UL;
631 break;
632 case 2:
633 cfg_reg += 0x30UL;
634 break;
635 case 3:
636 cfg_reg += 0x38UL;
637 break;
638 case 13:
639 cfg_reg += 0x40UL;
640 break;
641 case 14:
642 cfg_reg += 0x48UL;
643 break;
644 case 15:
645 cfg_reg += 0x50UL;
646 break;
647
648 default:
649 return;
650 };
651
652 val = upa_readq(cfg_reg);
653 if (val & (1UL << 14UL)) {
654 /* Extended transfer mode already enabled. */
655 return;
656 }
657
658 val |= (1UL << 14UL);
659
660 if (bursts & DMA_BURST8)
661 val |= (1UL << 1UL);
662 if (bursts & DMA_BURST16)
663 val |= (1UL << 2UL);
664 if (bursts & DMA_BURST32)
665 val |= (1UL << 3UL);
666 if (bursts & DMA_BURST64)
667 val |= (1UL << 4UL);
668 upa_writeq(val, cfg_reg);
669}
670
/* SBUS SYSIO INO number to Sparc PIL level.  Indexed by INO; a
 * value of 0 marks an invalid/unused entry ("bogon") — see the
 * check in sbus_build_irq().
 */
static unsigned char sysio_ino_to_pil[] = {
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
	4, /* Onboard SCSI */
	5, /* Onboard Ethernet */
/*XXX*/	8, /* Onboard BPP */
	0, /* Bogon */
	13, /* Audio */
/*XXX*/15, /* PowerFail */
	0, /* Bogon */
	0, /* Bogon */
	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
	11, /* Floppy */
	0, /* Spare Hardware (bogon for now) */
	0, /* Keyboard (bogon for now) */
	0, /* Mouse (bogon for now) */
	0, /* Serial (bogon for now) */
	0, 0, /* Bogon, Bogon */
	10, /* Timer 0 */
	11, /* Timer 1 */
	0, 0, /* Bogon, Bogon */
	15, /* Uncorrectable SBUS Error */
	15, /* Correctable SBUS Error */
	15, /* SBUS Error */
/*XXX*/	0, /* Power Management (bogon for now) */
};
700
701/* INO number to IMAP register offset for SYSIO external IRQ's.
702 * This should conform to both Sunfire/Wildfire server and Fusion
703 * desktop designs.
704 */
705#define SYSIO_IMAP_SLOT0 0x2c04UL
706#define SYSIO_IMAP_SLOT1 0x2c0cUL
707#define SYSIO_IMAP_SLOT2 0x2c14UL
708#define SYSIO_IMAP_SLOT3 0x2c1cUL
709#define SYSIO_IMAP_SCSI 0x3004UL
710#define SYSIO_IMAP_ETH 0x300cUL
711#define SYSIO_IMAP_BPP 0x3014UL
712#define SYSIO_IMAP_AUDIO 0x301cUL
713#define SYSIO_IMAP_PFAIL 0x3024UL
714#define SYSIO_IMAP_KMS 0x302cUL
715#define SYSIO_IMAP_FLPY 0x3034UL
716#define SYSIO_IMAP_SHW 0x303cUL
717#define SYSIO_IMAP_KBD 0x3044UL
718#define SYSIO_IMAP_MS 0x304cUL
719#define SYSIO_IMAP_SER 0x3054UL
720#define SYSIO_IMAP_TIM0 0x3064UL
721#define SYSIO_IMAP_TIM1 0x306cUL
722#define SYSIO_IMAP_UE 0x3074UL
723#define SYSIO_IMAP_CE 0x307cUL
724#define SYSIO_IMAP_SBERR 0x3084UL
725#define SYSIO_IMAP_PMGMT 0x308cUL
726#define SYSIO_IMAP_GFX 0x3094UL
727#define SYSIO_IMAP_EUPA 0x309cUL
728
/* INO-indexed IMAP register offsets; "bogon" (-1) marks INOs with
 * no translation, rejected in sbus_build_irq().
 */
#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
};

#undef bogon
769
770#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
771
772/* Convert Interrupt Mapping register pointer to associated
773 * Interrupt Clear register pointer, SYSIO specific version.
774 */
775#define SYSIO_ICLR_UNUSED0 0x3400UL
776#define SYSIO_ICLR_SLOT0 0x340cUL
777#define SYSIO_ICLR_SLOT1 0x344cUL
778#define SYSIO_ICLR_SLOT2 0x348cUL
779#define SYSIO_ICLR_SLOT3 0x34ccUL
780static unsigned long sysio_imap_to_iclr(unsigned long imap)
781{
782 unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
783 return imap + diff;
784}
785
786unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
787{
788 struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
789 struct sbus_iommu *iommu = sbus->iommu;
790 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
791 unsigned long imap, iclr;
792 int pil, sbus_level = 0;
793
794 pil = sysio_ino_to_pil[ino];
795 if (!pil) {
796 printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
797 panic("Bad SYSIO IRQ translations...");
798 }
799
800 if (PIL_RESERVED(pil))
801 BUG();
802
803 imap = sysio_irq_offsets[ino];
804 if (imap == ((unsigned long)-1)) {
805 prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
806 ino, pil);
807 prom_halt();
808 }
809 imap += reg_base;
810
811 /* SYSIO inconsistency. For external SLOTS, we have to select
812 * the right ICLR register based upon the lower SBUS irq level
813 * bits.
814 */
815 if (ino >= 0x20) {
816 iclr = sysio_imap_to_iclr(imap);
817 } else {
818 int sbus_slot = (ino & 0x18)>>3;
819
820 sbus_level = ino & 0x7;
821
822 switch(sbus_slot) {
823 case 0:
824 iclr = reg_base + SYSIO_ICLR_SLOT0;
825 break;
826 case 1:
827 iclr = reg_base + SYSIO_ICLR_SLOT1;
828 break;
829 case 2:
830 iclr = reg_base + SYSIO_ICLR_SLOT2;
831 break;
832 default:
833 case 3:
834 iclr = reg_base + SYSIO_ICLR_SLOT3;
835 break;
836 };
837
838 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
839 }
840 return build_irq(pil, sbus_level, iclr, imap);
841}
842
843/* Error interrupt handling. */
844#define SYSIO_UE_AFSR 0x0030UL
845#define SYSIO_UE_AFAR 0x0038UL
846#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
847#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
848#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
849#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
850#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
851#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
852#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
853#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
854#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
855#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
856#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO uncorrectable ECC errors.  Latches
 * and clears the UE AFSR/AFAR error state, then logs a decoded
 * report.  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	/* Enumerate any secondary errors noted in the AFSR. */
	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
914
915#define SYSIO_CE_AFSR 0x0040UL
916#define SYSIO_CE_AFAR 0x0048UL
917#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
918#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
919#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
920#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
921#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
922#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
923#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
924#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
925#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
926#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
927#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
928#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO correctable ECC errors.  Latches and
 * clears the CE AFSR/AFAR error state, then logs a decoded report.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_CE_AFSR;
	afar_reg = reg_base + SYSIO_CE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	/* Enumerate any secondary errors noted in the AFSR. */
	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_CEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_CEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
991
992#define SYSIO_SBUS_AFSR 0x2010UL
993#define SYSIO_SBUS_AFAR 0x2018UL
994#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
995#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
996#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
997#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
998#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
999#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
1000#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
1001#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
1002#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
1003#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
1004#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
1005#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SBUS transaction errors (late PIO error,
 * timeout, error ack).  Latches and clears the SBUS AFSR/AFAR
 * state, then logs a decoded report.  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long afsr_reg, afar_reg, reg_base;
	unsigned long afsr, afar, error_bits;
	int reported;

	reg_base = iommu->sbus_control_reg - 0x2000UL;
	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
	afar_reg = reg_base + SYSIO_SBUS_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
	       sbus->portid,
	       (((error_bits & SYSIO_SBAFSR_PLE) ?
		 "Late PIO Error" :
		 ((error_bits & SYSIO_SBAFSR_PTO) ?
		  "Time Out" :
		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
		   "Error Ack" : "???")))),
	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	/* Enumerate any secondary errors noted in the AFSR. */
	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_SBAFSR_SLE) {
		reported++;
		printk("(Late PIO Error)");
	}
	if (afsr & SYSIO_SBAFSR_STO) {
		reported++;
		printk("(Time Out)");
	}
	if (afsr & SYSIO_SBAFSR_SBERR) {
		reported++;
		printk("(Error Ack)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* XXX check iommu/strbuf for further error status XXX */

	return IRQ_HANDLED;
}
1064
1065#define ECC_CONTROL 0x0020UL
1066#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
1067#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
1068#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
1069
1070#define SYSIO_UE_INO 0x34
1071#define SYSIO_CE_INO 0x35
1072#define SYSIO_SBUSERR_INO 0x36
1073
/* Wire up the SYSIO UE, CE and SBUS-error interrupts for @sbus and
 * enable error reporting in the hardware (ECC checking plus the SBUS
 * error interrupt enable bit in the control register).
 *
 * Runs at boot (__init); any request_irq() failure is fatal, so we
 * report through the PROM and halt rather than return an error.
 */
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned int irq;
	u64 control;

	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
	if (request_irq(irq, sysio_ue_handler,
			SA_SHIRQ, "SYSIO UE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
	if (request_irq(irq, sysio_ce_handler,
			SA_SHIRQ, "SYSIO CE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
	if (request_irq(irq, sysio_sbus_error_handler,
			SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	/* Now turn the error interrupts on and also enable ECC checking. */
	upa_writeq((SYSIO_ECNTRL_ECCEN |
		    SYSIO_ECNTRL_UEEN |
		    SYSIO_ECNTRL_CEEN),
		   reg_base + ECC_CONTROL);

	/* Read-modify-write so the other control bits are preserved. */
	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x100UL; /* SBUS Error Interrupt Enable */
	upa_writeq(control, iommu->sbus_control_reg);
}
1115
1116/* Boot time initialization. */
/* Boot time initialization. */
/* Bring up the SYSIO IOMMU and streaming buffer for one SBUS:
 * read the register base from the PROM, allocate and cache-align the
 * software state, allocate the 1MB TSB and hand it to the hardware,
 * scrub stale IOMMU/streaming-buffer state through the diagnostic
 * registers, enable DVMA arbitration, hook up Starfire specifics if
 * needed, and finally register the error interrupt handlers.
 * Every failure here is fatal (prom_halt()).
 */
void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
{
	struct linux_prom64_registers rprop;
	struct sbus_iommu *iommu;
	unsigned long regs, tsb_base;
	u64 control;
	int err, i;

	/* NOTE(review): portid is read from sbus->prom_node while the
	 * "reg" property below uses the prom_node argument — presumably
	 * they name the same node; confirm against the caller.
	 */
	sbus->portid = prom_getintdefault(sbus->prom_node,
					  "upa-portid", -1);

	err = prom_getproperty(prom_node, "reg",
			       (char *)&rprop, sizeof(rprop));
	if (err < 0) {
		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	regs = rprop.phys_addr;

	/* Over-allocate by one cache line so we can hand-align below. */
	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
	if (iommu == NULL) {
		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
		prom_halt();
	}

	/* Align on E$ line boundary.  The original kmalloc pointer is
	 * intentionally dropped; this structure lives for the life of
	 * the system and is never freed.
	 */
	iommu = (struct sbus_iommu *)
		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
		 ~(SMP_CACHE_BYTES - 1UL));

	memset(iommu, 0, sizeof(*iommu));

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map = CLUSTER_NPAGES;

	for (i = 0; i < NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Setup spinlock. */
	spin_lock_init(&iommu->lock);

	/* Init register offsets. */
	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

	/* The SYSIO SBUS control register is used for dummy reads
	 * in order to ensure write completion.
	 */
	iommu->sbus_control_reg = regs + 0x2000UL;

	/* Link into SYSIO software state. */
	sbus->iommu = iommu;

	printk("SYSIO: UPA portID %x, at %016lx\n",
	       sbus->portid, regs);

	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
	/* The value read above is discarded — the control word is
	 * rebuilt from scratch before being written back below.
	 */
	control = ((7UL << 16UL) |
		   (0UL << 2UL) |
		   (1UL << 1UL) |
		   (1UL << 0UL));

	/* Using the above configuration we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
	if (tsb_base == 0UL) {
		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
		prom_halt();
	}

	iommu->page_table = (iopte_t *) tsb_base;
	memset(iommu->page_table, 0, IO_TSB_SIZE);

	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

	/* Clean out any cruft in the IOMMU using
	 * diagnostic accesses.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

		dram += (unsigned long)i * 8UL;
		tag += (unsigned long)i * 8UL;
		upa_writeq(0, dram);
		upa_writeq(0, tag);
	}
	/* Dummy read to flush the diagnostic writes (see comment on
	 * sbus_control_reg above).
	 */
	upa_readq(iommu->sbus_control_reg);

	/* Give the TSB to SYSIO. */
	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

	/* Setup streaming buffer, DE=1 SB_EN=1 */
	control = (1UL << 1UL) | (1UL << 0UL);
	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

	/* Clear out the tags using diagnostics. */
	for (i = 0; i < 16; i++) {
		unsigned long ptag, ltag;

		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
		ptag += (unsigned long)i * 8UL;
		ltag += (unsigned long)i * 8UL;

		upa_writeq(0UL, ptag);
		upa_writeq(0UL, ltag);
	}

	/* Enable DVMA arbitration for all devices/slots. */
	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x3fUL;
	upa_writeq(control, iommu->sbus_control_reg);

	/* Now some Xfire specific grot... */
	if (this_is_starfire)
		sbus->starfire_cookie = starfire_hookup(sbus->portid);
	else
		sbus->starfire_cookie = NULL;

	sysio_register_error_handlers(sbus);
}
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
new file mode 100644
index 000000000000..63496c43fe17
--- /dev/null
+++ b/arch/sparc64/kernel/semaphore.c
@@ -0,0 +1,251 @@
1/* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $
2 * semaphore.c: Sparc64 semaphore implementation.
3 *
4 * This is basically the PPC semaphore scheme ported to use
5 * the sparc64 atomic instructions, so see the PPC code for
6 * credits.
7 */
8
9#include <linux/sched.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * Clamping the old value at zero before adding @incr is what lets a
 * waker "pass the baton": a negative count (contended) is treated as
 * zero so the new value encodes only the increment.  The cas-based
 * loop retries until the compare-and-swap lands; the trailing membar
 * gives acquire-style ordering after a successful update.
 */
static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
{
	int	old_count, tmp;

	__asm__ __volatile__("\n"
"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
"1:	ldsw	[%3], %0\n"
"	mov	%0, %1\n"
"	cmp	%0, 0\n"
"	movl	%%icc, 0, %1\n"
"	add	%1, %4, %1\n"
"	cas	[%3], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%icc, 1b\n"
"	 membar	#StoreLoad | #StoreStore\n"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}
43
/* Slow path of up(): bump the (clamped) count and wake one sleeper.
 * Only reached from up()'s out-of-line branch when waiters exist.
 */
static void __up(struct semaphore *sem)
{
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
49
/* Release the semaphore.  Fast path is a lock-free cas increment;
 * if the pre-increment count was negative (i.e. there are sleepers)
 * control transfers to the out-of-line stub at 3: which sets up a
 * register window and calls __up() to wake a waiter.
 */
void up(struct semaphore *sem)
{
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count + 1;
	 *	sem->count = new_val;
	 *	if (old_val < 0)
	 *		__up(sem);
	 *
	 * The (old_val < 0) test is equivalent to
	 * the more straightforward (new_val <= 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	/* The leading membar is the release barrier: it orders the
	 * critical section's memory operations before the count bump.
	 */
	__asm__ __volatile__("\n"
"	! up sem(%0)\n"
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%0], %%g1\n"
"	add	%%g1, 1, %%g7\n"
"	cas	[%0], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"
"	 addcc	%%g7, 1, %%g0\n"
"	ble,pn	%%icc, 3f\n"
"	 membar	#StoreLoad | #StoreStore\n"
"2:\n"
"	.subsection 2\n"
"3:	mov	%0, %%g1\n"
"	save	%%sp, -160, %%sp\n"
"	call	%1\n"
"	 mov	%%g1, %%o0\n"
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: : "r" (sem), "i" (__up)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
88
/* Uninterruptible sleep path of down().  The task state is set
 * BEFORE the count re-check inside the loop so a concurrent wake-up
 * between __sem_update_count() and schedule() is not lost — do not
 * reorder these statements.  The final wake_up() passes the baton to
 * the next exclusive waiter, if any.
 */
static void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	wake_up(&sem->wait);
}
106
/* Acquire the semaphore, sleeping uninterruptibly if contended.
 * Fast path is a lock-free cas decrement; the out-of-line stub at 3:
 * calls __down() when the pre-decrement count was below 1.  The
 * trailing membar is the acquire barrier on the uncontended path.
 */
void __sched down(struct semaphore *sem)
{
	might_sleep();
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	sem->count = new_val;
	 *	if (old_val < 1)
	 *		__down(sem);
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! down sem(%0)\n"
"1:	lduw	[%0], %%g1\n"
"	sub	%%g1, 1, %%g7\n"
"	cas	[%0], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"
"	 cmp	%%g7, 1\n"
"	bl,pn	%%icc, 3f\n"
"	 membar	#StoreLoad | #StoreStore\n"
"2:\n"
"	.subsection 2\n"
"3:	mov	%0, %%g1\n"
"	save	%%sp, -160, %%sp\n"
"	call	%1\n"
"	 mov	%%g1, %%o0\n"
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: : "r" (sem), "i" (__down)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
145
/* Try to acquire the semaphore without sleeping.
 * Returns 0 on success, 1 if the semaphore was already held
 * (standard kernel down_trylock() convention).
 */
int down_trylock(struct semaphore *sem)
{
	int ret;

	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	if (old_val < 1) {
	 *		ret = 1;
	 *	} else {
	 *		sem->count = new_val;
	 *		ret = 0;
	 *	}
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	/* The failure branch to 2f is taken with ret already set to 1
	 * in the delay slot, skipping the cas entirely — the count is
	 * never written when the acquire fails.
	 */
	__asm__ __volatile__("\n"
"	! down_trylock sem(%1) ret(%0)\n"
"1:	lduw	[%1], %%g1\n"
"	sub	%%g1, 1, %%g7\n"
"	cmp	%%g1, 1\n"
"	bl,pn	%%icc, 2f\n"
"	 mov	1, %0\n"
"	cas	[%1], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"
"	 mov	0, %0\n"
"	membar	#StoreLoad | #StoreStore\n"
"2:\n"
	: "=&r" (ret)
	: "r" (sem)
	: "g1", "g7", "memory", "cc");

	return ret;
}
185
/* Interruptible sleep path of down_interruptible().
 * Identical to __down() except the task sleeps in TASK_INTERRUPTIBLE
 * and a pending signal aborts the wait: __sem_update_count(sem, 0)
 * undoes this waiter's claim (the clamp-at-zero in the helper erases
 * the negative contribution) before returning -EINTR.
 * Returns 0 when the semaphore was acquired.
 */
static int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
209
/* Acquire the semaphore, sleeping interruptibly if contended.
 * Returns 0 on success or -EINTR if a signal aborted the wait
 * (propagated from __down_interruptible() via %o0 into ret).
 */
int __sched down_interruptible(struct semaphore *sem)
{
	int ret = 0;
	
	might_sleep();
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	sem->count = new_val;
	 *	if (old_val < 1)
	 *		ret = __down_interruptible(sem);
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! down_interruptible sem(%2) ret(%0)\n"
"1:	lduw	[%2], %%g1\n"
"	sub	%%g1, 1, %%g7\n"
"	cas	[%2], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"
"	 cmp	%%g7, 1\n"
"	bl,pn	%%icc, 3f\n"
"	 membar	#StoreLoad | #StoreStore\n"
"2:\n"
"	.subsection 2\n"
"3:	mov	%2, %%g1\n"
"	save	%%sp, -160, %%sp\n"
"	call	%3\n"
"	 mov	%%g1, %%o0\n"
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: "=r" (ret)
	: "0" (ret), "r" (sem), "i" (__down_interruptible)
	: "g1", "g2", "g3", "g7", "memory", "cc");
	return ret;
}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
new file mode 100644
index 000000000000..12c3d84b7460
--- /dev/null
+++ b/arch/sparc64/kernel/setup.c
@@ -0,0 +1,731 @@
1/* $Id: setup.c,v 1.72 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/setup.c
3 *
4 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/stddef.h>
13#include <linux/unistd.h>
14#include <linux/ptrace.h>
15#include <linux/slab.h>
16#include <asm/smp.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
19#include <linux/tty.h>
20#include <linux/delay.h>
21#include <linux/config.h>
22#include <linux/fs.h>
23#include <linux/seq_file.h>
24#include <linux/syscalls.h>
25#include <linux/kdev_t.h>
26#include <linux/major.h>
27#include <linux/string.h>
28#include <linux/init.h>
29#include <linux/inet.h>
30#include <linux/console.h>
31#include <linux/root_dev.h>
32#include <linux/interrupt.h>
33#include <linux/cpu.h>
34#include <linux/initrd.h>
35
36#include <asm/segment.h>
37#include <asm/system.h>
38#include <asm/io.h>
39#include <asm/processor.h>
40#include <asm/oplib.h>
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/idprom.h>
44#include <asm/head.h>
45#include <asm/starfire.h>
46#include <asm/mmu_context.h>
47#include <asm/timer.h>
48#include <asm/sections.h>
49#include <asm/setup.h>
50#include <asm/mmu.h>
51
52#ifdef CONFIG_IP_PNP
53#include <net/ipconfig.h>
54#endif
55
/* Static placeholder screen_info: sparc64 has no PC BIOS to report
 * video state, so fixed defaults (128x54 text cells, 16-point font)
 * are supplied for the generic console code.
 */
struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,                      /* orig-video-isVGA */
	16                      /* orig-video-points */
};
67
68/* Typing sync at the prom prompt calls the function pointed to by
69 * the sync callback which I set to the following function.
70 * This should sync all filesystems and return, for now it just
71 * prints out pretty messages and returns.
72 */
73
74void (*prom_palette)(int);
75void (*prom_keyboard)(void);
76
/* Console write hook: hand the buffer straight to the PROM. */
static void prom_console_write(struct console *unused, const char *s,
			       unsigned n)
{
	prom_write(s, n);
}
82
/* Console that writes directly through the PROM.  Swapped in by
 * prom_callback() while servicing an OBP request so kernel output
 * stays visible at the PROM prompt.
 */
static struct console prom_console = {
	.name =		"prom",
	.write =	prom_console_write,
	.flags =	CON_CONSDEV | CON_ENABLED,
	.index =	-1,
};
89
90#define PROM_TRUE -1
91#define PROM_FALSE 0
92
/* Pretty sick eh? */
/* Callback invoked by OBP (installed via "$callback" in
 * register_prom_callbacks()).  args[0] is the command-name string,
 * args[1] the number of input arguments; results are written back
 * into args[] following OBP's convention (args[2] = result count,
 * then a status word followed by the result cells).  Commands:
 *   "sync"        — sync filesystems before dropping back to the PROM
 *   "va>tte-data" — translate (ctx, va) to a TTE value
 *   ".soft1"/".soft2" — pretty-print TTE software bits
 * The registered consoles are temporarily replaced by prom_console so
 * any printk output is visible at the OBP prompt, then restored.
 */
int prom_callback(long *args)
{
	struct console *cons, *saved_console = NULL;
	unsigned long flags;
	char *cmd;
	extern spinlock_t prom_entry_lock;

	if (!args)
		return -1;
	if (!(cmd = (char *)args[0]))
		return -1;

	/*
	 * The callback can be invoked on the cpu that first dropped 
	 * into prom_cmdline after taking the serial interrupt, or on 
	 * a slave processor that was smp_captured() if the 
	 * administrator has done a switch-cpu inside obp. In either 
	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
	 */
	irq_exit();

	/* XXX Revisit the locking here someday.  This is a debugging
	 * XXX feature so it isnt all that critical.  -DaveM
	 */
	local_irq_save(flags);

	spin_unlock(&prom_entry_lock);
	/* Unhook every registered console, stacking them on
	 * saved_console so they can be restored on the way out.
	 * CON_PRINTBUFFER is cleared so re-registering them later
	 * does not replay the whole log buffer.
	 */
	cons = console_drivers;
	while (cons) {
		unregister_console(cons);
		cons->flags &= ~(CON_PRINTBUFFER);
		cons->next = saved_console;
		saved_console = cons;
		cons = console_drivers;
	}
	register_console(&prom_console);
	if (!strcmp(cmd, "sync")) {
		prom_printf("PROM `%s' command...\n", cmd);
		show_free_areas();
		/* pid 0 (idle/very early boot) cannot run sys_sync(). */
		if (current->pid != 0) {
			local_irq_enable();
			sys_sync();
			local_irq_disable();
		}
		args[2] = 0;
		args[args[1] + 3] = -1;
		prom_printf("Returning to PROM\n");
	} else if (!strcmp(cmd, "va>tte-data")) {
		unsigned long ctx, va;
		unsigned long tte = 0;
		long res = PROM_FALSE;

		ctx = args[3];
		va = args[4];
		if (ctx) {
			/*
			 * Find process owning ctx, lookup mapping.
			 */
			struct task_struct *p;
			struct mm_struct *mm = NULL;
			pgd_t *pgdp;
			pud_t *pudp;
			pmd_t *pmdp;
			pte_t *ptep;

			for_each_process(p) {
				mm = p->mm;
				if (CTX_NRBITS(mm->context) == ctx)
					break;
			}
			if (!mm ||
			    CTX_NRBITS(mm->context) != ctx)
				goto done;

			pgdp = pgd_offset(mm, va);
			if (pgd_none(*pgdp))
				goto done;
			pudp = pud_offset(pgdp, va);
			if (pud_none(*pudp))
				goto done;
			pmdp = pmd_offset(pudp, va);
			if (pmd_none(*pmdp))
				goto done;

			/* Preemption implicitly disabled by virtue of
			 * being called from inside OBP.
			 */
			ptep = pte_offset_map(pmdp, va);
			if (pte_present(*ptep)) {
				tte = pte_val(*ptep);
				res = PROM_TRUE;
			}
			pte_unmap(ptep);
			goto done;
		}

		/* ctx 0: kernel address space.  First 4MB of KERNBASE
		 * is covered by a locked TLB entry.
		 */
		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
			unsigned long kernel_pctx = 0;

			if (tlb_type == cheetah_plus)
				kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
						CTX_CHEETAH_PLUS_CTX0);

			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (kernel_pctx),
					       "r" (PRIMARY_CONTEXT),
					       "i" (ASI_DMMU));

			/*
			 * Locked down tlb entry.
			 */

			if (tlb_type == spitfire)
				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);

			res = PROM_TRUE;
			goto done;
		}

		if (va < PGDIR_SIZE) {
			/*
			 * vmalloc or prom_inherited mapping.
			 */
			pgd_t *pgdp;
			pud_t *pudp;
			pmd_t *pmdp;
			pte_t *ptep;
			int error;

			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
				tte = prom_virt_to_phys(va, &error);
				if (!error)
					res = PROM_TRUE;
				goto done;
			}
			pgdp = pgd_offset_k(va);
			if (pgd_none(*pgdp))
				goto done;
			pudp = pud_offset(pgdp, va);
			if (pud_none(*pudp))
				goto done;
			pmdp = pmd_offset(pudp, va);
			if (pmd_none(*pmdp))
				goto done;

			/* Preemption implicitly disabled by virtue of
			 * being called from inside OBP.
			 */
			ptep = pte_offset_kernel(pmdp, va);
			if (pte_present(*ptep)) {
				tte = pte_val(*ptep);
				res = PROM_TRUE;
			}
			goto done;
		}

		if (va < PAGE_OFFSET) {
			/*
			 * No mappings here.
			 */
			goto done;
		}

		if (va & (1UL << 40)) {
			/*
			 * I/O page.
			 */

			tte = (__pa(va) & _PAGE_PADDR) |
			      _PAGE_VALID | _PAGE_SZ4MB |
			      _PAGE_E | _PAGE_P | _PAGE_W;
			res = PROM_TRUE;
			goto done;
		}

		/*
		 * Normal page.
		 */
		tte = (__pa(va) & _PAGE_PADDR) |
		      _PAGE_VALID | _PAGE_SZ4MB |
		      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
		res = PROM_TRUE;

	done:
		if (res == PROM_TRUE) {
			args[2] = 3;
			args[args[1] + 3] = 0;
			args[args[1] + 4] = res;
			args[args[1] + 5] = tte;
		} else {
			args[2] = 2;
			args[args[1] + 3] = 0;
			args[args[1] + 4] = res;
		}
	} else if (!strcmp(cmd, ".soft1")) {
		unsigned long tte;

		tte = args[3];
		prom_printf("%lx:\"%s%s%s%s%s\" ",
			    (tte & _PAGE_SOFT) >> 7,
			    tte & _PAGE_MODIFIED ? "M" : "-",
			    tte & _PAGE_ACCESSED ? "A" : "-",
			    tte & _PAGE_READ     ? "W" : "-",
			    tte & _PAGE_WRITE    ? "R" : "-",
			    tte & _PAGE_PRESENT  ? "P" : "-");

		args[2] = 2;
		args[args[1] + 3] = 0;
		args[args[1] + 4] = PROM_TRUE;
	} else if (!strcmp(cmd, ".soft2")) {
		unsigned long tte;

		tte = args[3];
		prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);

		args[2] = 2;
		args[args[1] + 3] = 0;
		args[args[1] + 4] = PROM_TRUE;
	} else {
		prom_printf("unknown PROM `%s' command...\n", cmd);
	}
	unregister_console(&prom_console);
	while (saved_console) {
		cons = saved_console;
		saved_console = cons->next;
		register_console(cons);
	}
	spin_lock(&prom_entry_lock);
	local_irq_restore(flags);

	/*
	 * Restore in-interrupt status for a resume from obp.
	 */
	irq_enter();
	return 0;
}
335
336unsigned int boot_flags = 0;
337#define BOOTME_DEBUG 0x1
338#define BOOTME_SINGLE 0x2
339
340/* Exported for mm/init.c:paging_init. */
341unsigned long cmdline_memory_size = 0;
342
/* Extra console routed through the PROM, registered only when the
 * "-p" boot switch is given (see process_switch()).
 */
static struct console prom_debug_console = {
	.name =		"debug",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};
349
/* XXX Implement this at some point... */
/* Intentionally an empty stub; kept so callers link. */
void kernel_enter_debugger(void)
{
}
354
355int obp_system_intr(void)
356{
357 if (boot_flags & BOOTME_DEBUG) {
358 printk("OBP: system interrupted\n");
359 prom_halt();
360 return 1;
361 }
362 return 0;
363}
364
/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
/* @c is a single character following a '-' on the boot command line:
 *   d — enable debug mode (BOOTME_DEBUG)
 *   s — boot single-user (BOOTME_SINGLE)
 *   h — halt immediately into the PROM
 *   p — also register the PROM debug console
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
		boot_flags |= BOOTME_DEBUG;
		break;
	case 's':
		boot_flags |= BOOTME_SINGLE;
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		/* Use PROM debug console. */
		register_console(&prom_debug_console);
		break;
	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}
391
/* Parse a "console=..." boot argument (@commands points at the
 * "console=" prefix itself).  Sets the file-global serial_console:
 *   0 — frame-buffer console, 1 — ttya/ttyS0, 2 — ttyb/ttyS1
 * (value is the serial line number plus one; see the ttyS branch).
 */
static void __init process_console(char *commands)
{
	serial_console = 0;
	commands += 8;
	/* Linux-style serial */
	if (!strncmp(commands, "ttyS", 4))
		serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
	else if (!strncmp(commands, "tty", 3)) {
		char c = *(commands + 3);
		/* Solaris-style serial */
		if (c == 'a' || c == 'b') {
			serial_console = c - 'a' + 1;
			prom_printf ("Using /dev/tty%c as console.\n", c);
		}
		/* else Linux-style fbcon, not serial */
	}
#if defined(CONFIG_PROM_CONSOLE)
	if (!strncmp(commands, "prom", 4)) {
		char *p;

		/* Blank out the whole "console=prom" token so later
		 * generic command-line parsing does not see it —
		 * presumably to keep the core console code from
		 * choking on it (NOTE(review): confirm).
		 */
		for (p = commands - 8; *p && *p != ' '; p++)
			*p = ' ';
		conswitchp = &prom_con;
	}
#endif
}
418
/* Walk the raw boot command line, dispatching "-" switch clusters to
 * process_switch(), "console=" to process_console(), and handling
 * "mem=XXX[kKmM]" inline (result left in cmdline_memory_size for
 * paging_init()).  Unrecognized arguments are skipped.
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands && *commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "console=", 8)) {
			process_console(commands);
		} else if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmM]" overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
							     &commands, 0);
			if (*commands == 'K' || *commands == 'k') {
				cmdline_memory_size <<= 10;
				commands++;
			} else if (*commands=='M' || *commands=='m') {
				cmdline_memory_size <<= 20;
				commands++;
			}
		}
		/* Advance past the remainder of this argument. */
		while (*commands && *commands != ' ')
			commands++;
	}
}
456
457extern int prom_probe_memory(void);
458extern unsigned long start, end;
459extern void panic_setup(char *, int *);
460
461extern unsigned short root_flags;
462extern unsigned short root_dev;
463extern unsigned short ram_flags;
464#define RAMDISK_IMAGE_START_MASK 0x07FF
465#define RAMDISK_PROMPT_FLAG 0x8000
466#define RAMDISK_LOAD_FLAG 0x4000
467
468extern int root_mountflags;
469
470char reboot_command[COMMAND_LINE_SIZE];
471
472static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
473
/* Install prom_callback() as OBP's $callback handler and define the
 * forth words that route OBP's va>tte-data/.soft1/.soft2 through it.
 * The quoted command names must match the strcmp() strings inside
 * prom_callback() exactly.
 */
void register_prom_callbacks(void)
{
	prom_setcallback(prom_callback);
	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
		   "' linux-va>tte-data to va>tte-data");
	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
		   "' linux-.soft1 to .soft1");
	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
		   "' linux-.soft2 to .soft2");
}
484
485extern void paging_init(void);
486
/* Main sparc64 architecture setup, called once from start_kernel().
 * Grabs the PROM boot arguments, patches the SMP irq_stat indexing
 * stub, probes memory banks to find phys_base/pfn_base, derives the
 * kernel's locked-TLB physical base per CPU type, decodes root/initrd
 * flags passed in the image header, optionally pulls IP-autoconfig
 * addresses from the PROM, and finishes with paging_init().
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long highest_paddr;
	int i;

	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(saved_command_line, *cmdline_p);

	printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#elif defined(CONFIG_PROM_CONSOLE)
	conswitchp = &prom_con;
#endif

#ifdef CONFIG_SMP
	/* Patch the irqsz_patchme stub with the log2 stride of
	 * irq_stat[]; only a one- or two-cache-line stride is
	 * supported, anything else is a build/config error.
	 */
	i = (unsigned long)&irq_stat[1] - (unsigned long)&irq_stat[0];
	if ((i == SMP_CACHE_BYTES) || (i == (2 * SMP_CACHE_BYTES))) {
		extern unsigned int irqsz_patchme[1];
		irqsz_patchme[0] |= ((i == SMP_CACHE_BYTES) ? SMP_CACHE_BYTES_SHIFT : \
				     SMP_CACHE_BYTES_SHIFT + 1);
		flushi((long)&irqsz_patchme[0]);
	} else {
		prom_printf("Unexpected size of irq_stat[] elements\n");
		prom_halt();
	}
#endif
	/* Work out if we are starfire early on */
	check_if_starfire();

	boot_flags_init(*cmdline_p);

	idprom_init();
	(void) prom_probe_memory();

	/* In paging_init() we tip off this value to see if we need
	 * to change init_mm.pgd to point to the real alias mapping.
	 */
	phys_base = 0xffffffffffffffffUL;
	highest_paddr = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long top;

		if (sp_banks[i].base_addr < phys_base)
			phys_base = sp_banks[i].base_addr;
		top = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		if (highest_paddr < top)
			highest_paddr = top;
	}
	pfn_base = phys_base >> PAGE_SHIFT;

	/* Physical base of the kernel image comes from the locked
	 * instruction-TLB entry; the mask differs per CPU family.
	 */
	switch (tlb_type) {
	default:
	case spitfire:
		kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
		kern_base &= _PAGE_PADDR_SF;
		break;

	case cheetah:
	case cheetah_plus:
		kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
		kern_base &= _PAGE_PADDR;
		break;
	};

	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_INITRD
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	init_task.thread_info->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	/* Seed IP autoconfiguration from PROM /chosen properties when
	 * the user did not configure it on the command line.
	 */
	if (!ic_set_manually) {
		int chosen = prom_finddevice ("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	paging_init();
}
590
/* console_initcall: if the user did not pick a console on the command
 * line (serial_console still -1), derive it from the PROM's stdin and
 * stdout devices.  Inconsistent input/output device pairs are fatal.
 * Returns -EBUSY when already configured, -ENODEV when the screen
 * console is chosen, otherwise the add_preferred_console() result.
 */
static int __init set_preferred_console(void)
{
	int idev, odev;

	/* The user has requested a console so this is already set up. */
	if (serial_console >= 0)
		return -EBUSY;

	idev = prom_query_input_device();
	odev = prom_query_output_device();
	if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
		serial_console = 0;
	} else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
		serial_console = 1;
	} else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
		serial_console = 2;
	} else {
		prom_printf("Inconsistent console: "
			    "input %d, output %d\n",
			    idev, odev);
		prom_halt();
	}

	/* serial_console is the serial line number plus one. */
	if (serial_console)
		return add_preferred_console("ttyS", serial_console - 1, NULL);

	return -ENODEV;
}
console_initcall(set_preferred_console);
620
621/* BUFFER is PAGE_SIZE bytes long. */
622
623extern char *sparc_cpu_type;
624extern char *sparc_fpu_type;
625
626extern void smp_info(struct seq_file *);
627extern void smp_bogo(struct seq_file *);
628extern void mmu_info(struct seq_file *);
629
/* seq_file .show handler for /proc/cpuinfo.  Emits the CPU/FPU type
 * strings, PROM revision, and cpu counts; on UP builds the bogomips
 * and clock-tick lines for cpu 0 are folded into the same format
 * string (note the matching #ifndef in the argument list), while SMP
 * builds emit per-cpu data via smp_bogo()/smp_info().
 */
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
	seq_printf(m, 
		   "cpu\t\t: %s\n"
		   "fpu\t\t: %s\n"
		   "promlib\t\t: Version 3 Revision %d\n"
		   "prom\t\t: %d.%d.%d\n"
		   "type\t\t: sun4u\n"
		   "ncpus probed\t: %ld\n"
		   "ncpus active\t: %ld\n"
#ifndef CONFIG_SMP
		   "Cpu0Bogo\t: %lu.%02lu\n"
		   "Cpu0ClkTck\t: %016lx\n"
#endif
		   ,
		   sparc_cpu_type,
		   sparc_fpu_type,
		   prom_rev,
		   prom_prev >> 16,
		   (prom_prev >> 8) & 0xff,
		   prom_prev & 0xff,
		   (long)num_possible_cpus(),
		   (long)num_online_cpus()
#ifndef CONFIG_SMP
		   , cpu_data(0).udelay_val/(500000/HZ),
		   (cpu_data(0).udelay_val/(5000/HZ)) % 100,
		   cpu_data(0).clock_tick
#endif
		);
#ifdef CONFIG_SMP
	smp_bogo(m);
#endif
	mmu_info(m);
#ifdef CONFIG_SMP
	smp_info(m);
#endif
	return 0;
}
668
669static void *c_start(struct seq_file *m, loff_t *pos)
670{
671 /* The pointer we are returning is arbitrary,
672 * it just has to be non-NULL and not IS_ERR
673 * in the success case.
674 */
675 return *pos == 0 ? &c_start : NULL;
676}
677
678static void *c_next(struct seq_file *m, void *v, loff_t *pos)
679{
680 ++*pos;
681 return c_start(m, pos);
682}
683
/* seq_file .stop: nothing to release — iteration state is *pos only. */
static void c_stop(struct seq_file *m, void *v)
{
}
687
688struct seq_operations cpuinfo_op = {
689 .start =c_start,
690 .next = c_next,
691 .stop = c_stop,
692 .show = show_cpuinfo,
693};
694
695extern int stop_a_enabled;
696
/* Handle a Stop-A / serial BREAK: flush the user register windows to
 * the stack and drop into the PROM command prompt.  A no-op when the
 * administrator has disabled Stop-A via stop_a_enabled.
 */
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
707
/* -1 = not chosen yet (set_preferred_console() will ask the PROM),
 *  0 = screen/keyboard, 1 = ttya/ttyS0, 2 = ttyb/ttyS1
 * (see process_console() and set_preferred_console()).
 */
int serial_console = -1;
int stop_a_enabled = 1;		/* Stop-A drops to PROM unless cleared. */
710
/* subsys_initcall: register a struct cpu with the driver core for
 * every possible CPU.  Returns 0 if at least one CPU was registered,
 * -ENOMEM only when every allocation failed (individual failures are
 * otherwise tolerated silently).
 */
static int __init topology_init(void)
{
	int i, err;

	err = -ENOMEM;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_possible(i)) {
			struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL);

			if (p) {
				memset(p, 0, sizeof(*p));
				register_cpu(p, i, NULL);
				err = 0;
			}
		}
	}

	return err;
}

subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
new file mode 100644
index 000000000000..b27934671c35
--- /dev/null
+++ b/arch/sparc64/kernel/signal.c
@@ -0,0 +1,688 @@
1/* $Id: signal.c,v 1.60 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <linux/config.h>
12#ifdef CONFIG_SPARC32_COMPAT
13#include <linux/compat.h> /* for compat_old_sigset_t */
14#endif
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/wait.h>
20#include <linux/ptrace.h>
21#include <linux/unistd.h>
22#include <linux/mm.h>
23#include <linux/tty.h>
24#include <linux/smp_lock.h>
25#include <linux/binfmts.h>
26#include <linux/bitops.h>
27
28#include <asm/uaccess.h>
29#include <asm/ptrace.h>
30#include <asm/svr4.h>
31#include <asm/pgtable.h>
32#include <asm/fpumacro.h>
33#include <asm/uctx.h>
34#include <asm/siginfo.h>
35#include <asm/visasm.h>
36
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
38
39static int do_signal(sigset_t *oldset, struct pt_regs * regs,
40 unsigned long orig_o0, int ret_from_syscall);
41
42/* {set, get}context() needed for 64-bit SparcLinux userland. */
/* setcontext() for 64-bit SparcLinux userland: install the user
 * supplied struct ucontext (%i0) as the current register state.
 * If %i1 is non-zero the signal mask in the ucontext is installed
 * as well.  Any fault or malformed context raises SIGSEGV.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	/* Refuse if register windows are still buffered in the kernel,
	 * or the ucontext pointer is misaligned or not in user space.
	 */
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	/* PC/NPC must be 4-byte aligned instruction addresses. */
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		/* SIGKILL/SIGSTOP can never be blocked. */
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* Userland may only influence the condition codes and %asi
	 * field of %tstate; all privileged bits are preserved.
	 */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	/* Patch the saved frame pointer and return address into the
	 * register window on the user stack (%sp is biased by
	 * STACK_BIAS on sparc64).
	 */
	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		/* Clearing PEF forces a lazy FPU state reload on the
		 * next FPU use in userland.
		 */
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
140
/* getcontext() for 64-bit SparcLinux userland: snapshot the current
 * register state, signal mask, and (optionally) FPU state into the
 * user supplied struct ucontext (%i0).  Faults raise SIGSEGV.
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	/* clear_user() doubles as the access_ok() check here. */
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	/* Read the frame pointer and return address back out of the
	 * register window on the user stack.
	 */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	/* Dead code while fenab is forced to 0 above, kept for the
	 * alternative policy under the #else branch.
	 */
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
				(sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
231
/* Layout of the 64-bit rt signal frame pushed on the user stack by
 * setup_rt_frame().  'fpu_state' is only meaningful when 'fpu_save'
 * points at it, i.e. when the task had live FPU state at delivery.
 */
struct rt_signal_frame {
	struct sparc_stackf	ss;
	siginfo_t		info;
	struct pt_regs		regs;
	__siginfo_fpu_t __user	*fpu_save;
	stack_t			stack;
	sigset_t		mask;
	__siginfo_fpu_t		fpu_state;
};

/* Align macros: round the frame size up to an 8-byte multiple. */
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
244
245/*
246 * atomically swap in the new signal mask, and wait for a signal.
247 * This is really tricky on the Sparc, watch out...
248 */
/* Shared implementation of sigpause()/sigsuspend(): atomically swap in
 * the new signal mask and sleep until a handled signal arrives, then
 * return -EINTR via the trap return registers.  32-bit tasks are
 * forwarded to the compat variant.
 */
asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

#ifdef CONFIG_SPARC32_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
							  struct pt_regs *);
		_sigpause32_common(set, regs);
		return;
	}
#endif
	set &= _BLOCKABLE;	/* never block SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction so the syscall is not replayed. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&saveset, regs, 0, 0))
			return;
	}
}
294
/* sys_sigpause() entry: mask is passed explicitly as the first arg. */
asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}
299
/* sys_sigsuspend() entry: the mask is taken from user %o0. */
asmlinkage void do_sigsuspend(struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}
304
/* sys_rt_sigsuspend(): like sigsuspend() but takes a full sigset_t
 * from user space.  Errors are reported by setting the carry bits in
 * %tstate and placing the (positive) errno in %o0, per sparc syscall
 * convention.
 */
asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t)) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set, uset, sizeof(set))) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}

	sigdelsetmask(&set, ~_BLOCKABLE);	/* never block SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction so the syscall is not replayed. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&oldset, regs, 0, 0))
			return;
	}
}
354
/* Reload the FPU register file, %fsr and %gsr from the signal frame's
 * saved FPU area.  Returns non-zero on any user-access fault.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	/* Clear PEF so the restored state is lazily reloaded on the
	 * next FPU use in userland.
	 */
	regs->tstate &= ~TSTATE_PEF;
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
				      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
				      (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
376
/* sys_rt_sigreturn(): unwind the rt signal frame that setup_rt_frame()
 * pushed on the user stack, restoring registers, FPU state, the
 * alternate signal stack setting and the blocked-signal mask.  A
 * malformed frame raises SIGSEGV.
 */
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	/* Misaligned PC/NPC are folded into the error accumulator. */
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  The stack_t was copied into
	   kernel memory above, hence the temporary KERNEL_DS. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
444
445/* Checks if the fp is valid */
446static int invalid_frame_pointer(void __user *fp, int fplen)
447{
448 if (((unsigned long) fp) & 7)
449 return 1;
450 return 0;
451}
452
/* Dump the task's saved FPU register file, %fsr, %gsr and %fprs into
 * the signal frame's FPU area.  The FPU registers are read from the
 * save area that immediately follows the pt_regs on the kernel stack.
 * Returns non-zero on any user-access fault.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
473
/* Choose where on the user stack the signal frame of 'framesize'
 * bytes will be built, honoring SA_ONSTACK.
 */
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP] + STACK_BIAS;

	/* This is the X/Open sanctioned signal stack switching.
	 * NOTE(review): the second condition only switches when the
	 * alternate stack's top is 8-byte aligned — presumably to
	 * avoid handing out a misaligned frame; confirm intent.
	 */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(sp) &&
		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}
488
/* Build an rt signal frame on the user stack and redirect the user PC
 * to the handler.  On an unusable stack the task is killed with
 * SIGILL; a fault while writing the frame forces SIGSEGV.
 */
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* Omit the trailing fpu_state member when no FPU state is live. */
	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame __user *)
		get_sigframe(ka, regs, sigframe_size);

	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	/* Duplicate the caller's register window at the base of the
	 * new frame so the unwinder sees a valid stack.
	 */
	err |= copy_in_user((u64 __user *)sf,
			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 5. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 4. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
570
/* Deliver one signal: build its frame, then (unless SA_NOMASK) block
 * the handler's sa_mask plus the signal itself for the handler's
 * duration.
 */
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	setup_rt_frame(ka, regs, signr, oldset,
		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	if (!(ka->sa.sa_flags & SA_NOMASK)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
585
/* Decide, based on the interrupted syscall's error return in %o0 and
 * the handler's SA_RESTART flag, whether to replay the syscall (by
 * backing tpc/tnpc up one instruction) or fail it with EINTR.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
606
607/* Note that 'init' is a special process: it doesn't get signals it doesn't
608 * want to handle. Thus you cannot kill init even with a SIGKILL even by
609 * mistake.
610 */
/* Core signal dispatch, called on the return-to-user path.  Returns 1
 * when a signal was delivered to a handler, 0 otherwise (in which
 * case any interrupted syscall is set up to be replayed as needed).
 */
static int do_signal(sigset_t *oldset, struct pt_regs * regs,
		     unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct signal_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;

	/* The cookie lets ptrace_signal_deliver() perform syscall
	 * restart on our behalf when the tracer intervenes.
	 */
	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern int do_signal32(sigset_t *, struct pt_regs *,
				       unsigned long, int);
		return do_signal32(oldset, regs, orig_i0,
				   cookie.restart_syscall);
	}
#endif

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		if (cookie.restart_syscall)
			syscall_restart(orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}
	/* No handler ran: transparently restart interrupted syscalls. */
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
658
659void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
660 unsigned long orig_i0, int restart_syscall,
661 unsigned long thread_info_flags)
662{
663 if (thread_info_flags & _TIF_SIGPENDING)
664 do_signal(oldset, regs, orig_i0, restart_syscall);
665}
666
/* Called from the generic signal code when a ptraced task is about to
 * have a signal delivered: perform the pending syscall restart (set
 * up by do_signal() in the cookie) before the tracer-modified signal
 * is handled.  The restart flag is cleared so this happens only once.
 */
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	struct signal_deliver_cookie *cp = cookie;

	if (cp->restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cp->orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
	if (cp->restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
new file mode 100644
index 000000000000..859255cf6762
--- /dev/null
+++ b/arch/sparc64/kernel/signal32.c
@@ -0,0 +1,1469 @@
1/* $Id: signal32.c,v 1.74 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/signal32.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h>
15#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/unistd.h>
18#include <linux/mm.h>
19#include <linux/tty.h>
20#include <linux/smp_lock.h>
21#include <linux/binfmts.h>
22#include <linux/compat.h>
23#include <linux/bitops.h>
24
25#include <asm/uaccess.h>
26#include <asm/ptrace.h>
27#include <asm/svr4.h>
28#include <asm/pgtable.h>
29#include <asm/psrcompat.h>
30#include <asm/fpumacro.h>
31#include <asm/visasm.h>
32
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34
35int do_signal32(sigset_t *oldset, struct pt_regs *regs,
36 unsigned long orig_o0, int ret_from_syscall);
37
38/* Signal frames: the original one (compatible with SunOS):
39 *
40 * Set up a signal frame... Make the stack look the way SunOS
41 * expects it to look which is basically:
42 *
43 * ---------------------------------- <-- %sp at signal time
44 * Struct sigcontext
45 * Signal address
46 * Ptr to sigcontext area above
47 * Signal code
48 * The signal number itself
49 * One register window
50 * ---------------------------------- <-- New %sp
51 */
/* SunOS-compatible 32-bit signal frame layout (see the diagram in the
 * comment above).  All pointers are stored as 32-bit user addresses.
 */
struct signal_sframe32 {
	struct reg_window32 sig_window;
	int sig_num;
	int sig_code;
	/* struct sigcontext32 * */ u32 sig_scptr;
	int sig_address;
	struct sigcontext32 sig_context;
	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
};
61
/* This magic should be in g_upper[0] for all upper parts
 * to be valid.
 */
#define SIGINFO_EXTRA_V8PLUS_MAGIC	0x130e269
/* Upper 32 bits of the 64-bit %g and %o registers plus %asi, appended
 * to 32-bit frames for v8plus tasks.
 */
typedef struct {
	unsigned int g_upper[8];
	unsigned int o_upper[8];
	unsigned int asi;
} siginfo_extra_v8plus_t;
71
72/*
73 * And the new one, intended to be used for Linux applications only
74 * (we have enough in there to work with clone).
75 * All the interesting bits are in the info field.
76 */
/* The Linux-native 32-bit signal frame.  'v8plus' is only valid when
 * (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS, and
 * 'fpu_state' only when 'fpu_save' points at it.
 */
struct new_signal_frame32 {
	struct sparc_stackf32 ss;
	__siginfo32_t info;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int insns[2];
	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
	unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t v8plus;
	__siginfo_fpu_t fpu_state;
};
88
/* 32-bit layout of siginfo_t as seen by compat userland: same three
 * leading ints as the 64-bit version, but with 32-bit pids, clocks,
 * sigvals and addresses in the union.
 */
typedef struct compat_siginfo{
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[SI_PAD_SIZE32];

		/* kill() */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			timer_t _tid;			/* timer id */
			int _overrun;			/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			compat_pid_t _pid;		/* which child */
			unsigned int _uid;		/* sender's uid */
			int _status;			/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
		struct {
			u32 _addr; /* faulting insn/memory ref. */
			int _trapno;
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
}compat_siginfo_t;
140
/* The 32-bit rt signal frame.  As with new_signal_frame32, 'v8plus'
 * and 'fpu_state' are conditionally valid (see the inline comments).
 */
struct rt_signal_frame32 {
	struct sparc_stackf32 ss;
	compat_siginfo_t info;
	struct pt_regs32 regs;
	compat_sigset_t mask;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int insns[2];
	stack_t32 stack;
	unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t v8plus;
	__siginfo_fpu_t fpu_state;
};
154
/* Align macros: round each frame size up to an 8-byte multiple. */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe32) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame32) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
159
/* Translate a 64-bit kernel siginfo_t into the 32-bit compat layout in
 * user space.  Returns 0 on success, -EFAULT if the destination is not
 * writable, or the accumulated __put_user error otherwise.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time. */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* deliberate fall through: SIGCHLD also carries
			 * the pid/uid copied by the default case below.
			 */
		default:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_FAULT >> 16:
		case __SI_POLL >> 16:
			err |= __put_user(from->si_trapno, &to->si_trapno);
			/* Faulting address is truncated to 32 bits for
			 * the compat layout.
			 */
			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
			break;
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}
209
210/* CAUTION: This is just a very minimalist implementation for the
211 * sake of compat_sys_rt_sigqueueinfo()
212 */
213int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
214{
215 if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
216 return -EFAULT;
217
218 if (copy_from_user(to, from, 3*sizeof(int)) ||
219 copy_from_user(to->_sifields._pad, from->_sifields._pad,
220 SI_PAD_SIZE))
221 return -EFAULT;
222
223 return 0;
224}
225
226/*
227 * atomically swap in the new signal mask, and wait for a signal.
228 * This is really tricky on the Sparc, watch out...
229 */
/* Compat implementation of sigpause()/sigsuspend() for 32-bit tasks:
 * atomically swap in the new signal mask and sleep until a handled
 * signal arrives, then return -EINTR via the trap return registers.
 */
asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

	set &= _BLOCKABLE;	/* never block SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction so the syscall is not replayed. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal32(&saveset, regs, 0, 0))
			return;
	}
}
266
/* Compat sys_rt_sigsuspend(): reads a compat_sigset_t from the 32-bit
 * user pointer, folds each pair of 32-bit words into the 64-bit
 * sigset, and then behaves like the native rt_sigsuspend.
 */
asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}
	/* Each case deliberately falls through to fill the lower words. */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
		/* fall through */
	case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
		/* fall through */
	case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
		/* fall through */
	case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
	}
	sigdelsetmask(&set, ~_BLOCKABLE);	/* never block SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Step past the trap instruction so the syscall is not replayed. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= TSTATE_ICARRY;
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal32(&oldset, regs, 0, 0))
			return;
	}
}
321
/* Reload the task's saved FPU image from the __siginfo_fpu_t area of a
 * user signal frame.  The live FPU is shut off first (FPRS written to 0,
 * TSTATE_PEF cleared) before the save area in the thread_info is
 * overwritten from user space.  Returns non-zero if any user access
 * faulted.
 */
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	/* Disable the FPU for this thread before touching the image. */
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	/* Lower and upper banks: 32 single-precision words each; the
	 * upper bank starts 16 unsigned longs (128 bytes) into fpregs.
	 */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	/* Remember which banks now hold valid saved state. */
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
340
341void do_new_sigreturn32(struct pt_regs *regs)
342{
343 struct new_signal_frame32 __user *sf;
344 unsigned int psr;
345 unsigned pc, npc, fpu_save;
346 sigset_t set;
347 unsigned seta[_COMPAT_NSIG_WORDS];
348 int err, i;
349
350 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
351 sf = (struct new_signal_frame32 __user *) regs->u_regs[UREG_FP];
352
353 /* 1. Make sure we are not getting garbage from the user */
354 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
355 (((unsigned long) sf) & 3))
356 goto segv;
357
358 get_user(pc, &sf->info.si_regs.pc);
359 __get_user(npc, &sf->info.si_regs.npc);
360
361 if ((pc | npc) & 3)
362 goto segv;
363
364 if (test_thread_flag(TIF_32BIT)) {
365 pc &= 0xffffffff;
366 npc &= 0xffffffff;
367 }
368 regs->tpc = pc;
369 regs->tnpc = npc;
370
371 /* 2. Restore the state */
372 err = __get_user(regs->y, &sf->info.si_regs.y);
373 err |= __get_user(psr, &sf->info.si_regs.psr);
374
375 for (i = UREG_G1; i <= UREG_I7; i++)
376 err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
377 if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
378 err |= __get_user(i, &sf->v8plus.g_upper[0]);
379 if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
380 unsigned long asi;
381
382 for (i = UREG_G1; i <= UREG_I7; i++)
383 err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
384 err |= __get_user(asi, &sf->v8plus.asi);
385 regs->tstate &= ~TSTATE_ASI;
386 regs->tstate |= ((asi & 0xffUL) << 24UL);
387 }
388 }
389
390 /* User can only change condition codes in %tstate. */
391 regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
392 regs->tstate |= psr_to_tstate_icc(psr);
393
394 err |= __get_user(fpu_save, &sf->fpu_save);
395 if (fpu_save)
396 err |= restore_fpu_state32(regs, &sf->fpu_state);
397 err |= __get_user(seta[0], &sf->info.si_mask);
398 err |= copy_from_user(seta+1, &sf->extramask,
399 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
400 if (err)
401 goto segv;
402 switch (_NSIG_WORDS) {
403 case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
404 case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
405 case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
406 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
407 }
408 sigdelsetmask(&set, ~_BLOCKABLE);
409 spin_lock_irq(&current->sighand->siglock);
410 current->blocked = set;
411 recalc_sigpending();
412 spin_unlock_irq(&current->sighand->siglock);
413 return;
414
415segv:
416 force_sig(SIGSEGV, current);
417}
418
/* sigreturn(2) for 32-bit tasks.  Dispatches to do_new_sigreturn32()
 * when TIF_NEWSIGNALS is set; otherwise unwinds an old-style SunOS
 * sigcontext whose 32-bit pointer is passed in %i0.
 */
asmlinkage void do_sigreturn32(struct pt_regs *regs)
{
	struct sigcontext32 __user *scptr;
	unsigned int pc, npc, psr;
	sigset_t set;
	unsigned int seta[_COMPAT_NSIG_WORDS];
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();
	if (test_thread_flag(TIF_NEWSIGNALS)) {
		do_new_sigreturn32(regs);
		return;
	}

	/* Old-style frame: %i0 carries a 32-bit sigcontext pointer. */
	scptr = (struct sigcontext32 __user *)
		(regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
	/* Check sanity of the user arg. */
	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
	    (((unsigned long) scptr) & 3))
		goto segv;

	err = __get_user(pc, &scptr->sigc_pc);
	err |= __get_user(npc, &scptr->sigc_npc);

	/* Handler return addresses must be instruction-aligned. */
	if ((pc | npc) & 3)
		goto segv; /* Nice try. */

	err |= __get_user(seta[0], &scptr->sigc_mask);
	/* Note that scptr + 1 points to extramask */
	err |= copy_from_user(seta+1, scptr + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
	if (err)
		goto segv;
	/* Rebuild the 64-bit sigset from the 32-bit compat halves. */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
	case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
	case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
	case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
	}
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

	/* User can only change condition codes in %tstate. */
	err |= __get_user(psr, &scptr->sigc_psr);
	if (err)
		goto segv;
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);
	return;

segv:
	force_sig(SIGSEGV, current);
}
488
489asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
490{
491 struct rt_signal_frame32 __user *sf;
492 unsigned int psr, pc, npc, fpu_save, u_ss_sp;
493 mm_segment_t old_fs;
494 sigset_t set;
495 compat_sigset_t seta;
496 stack_t st;
497 int err, i;
498
499 /* Always make any pending restarted system calls return -EINTR */
500 current_thread_info()->restart_block.fn = do_no_restart_syscall;
501
502 synchronize_user_stack();
503 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
504 sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
505
506 /* 1. Make sure we are not getting garbage from the user */
507 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
508 (((unsigned long) sf) & 3))
509 goto segv;
510
511 get_user(pc, &sf->regs.pc);
512 __get_user(npc, &sf->regs.npc);
513
514 if ((pc | npc) & 3)
515 goto segv;
516
517 if (test_thread_flag(TIF_32BIT)) {
518 pc &= 0xffffffff;
519 npc &= 0xffffffff;
520 }
521 regs->tpc = pc;
522 regs->tnpc = npc;
523
524 /* 2. Restore the state */
525 err = __get_user(regs->y, &sf->regs.y);
526 err |= __get_user(psr, &sf->regs.psr);
527
528 for (i = UREG_G1; i <= UREG_I7; i++)
529 err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
530 if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
531 err |= __get_user(i, &sf->v8plus.g_upper[0]);
532 if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
533 unsigned long asi;
534
535 for (i = UREG_G1; i <= UREG_I7; i++)
536 err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
537 err |= __get_user(asi, &sf->v8plus.asi);
538 regs->tstate &= ~TSTATE_ASI;
539 regs->tstate |= ((asi & 0xffUL) << 24UL);
540 }
541 }
542
543 /* User can only change condition codes in %tstate. */
544 regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
545 regs->tstate |= psr_to_tstate_icc(psr);
546
547 err |= __get_user(fpu_save, &sf->fpu_save);
548 if (fpu_save)
549 err |= restore_fpu_state32(regs, &sf->fpu_state);
550 err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
551 err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
552 st.ss_sp = compat_ptr(u_ss_sp);
553 err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
554 err |= __get_user(st.ss_size, &sf->stack.ss_size);
555 if (err)
556 goto segv;
557
558 /* It is more difficult to avoid calling this function than to
559 call it and ignore errors. */
560 old_fs = get_fs();
561 set_fs(KERNEL_DS);
562 do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
563 set_fs(old_fs);
564
565 switch (_NSIG_WORDS) {
566 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
567 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
568 case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
569 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
570 }
571 sigdelsetmask(&set, ~_BLOCKABLE);
572 spin_lock_irq(&current->sighand->siglock);
573 current->blocked = set;
574 recalc_sigpending();
575 spin_unlock_irq(&current->sighand->siglock);
576 return;
577segv:
578 force_sig(SIGSEGV, current);
579}
580
581/* Checks if the fp is valid */
582static int invalid_frame_pointer(void __user *fp, int fplen)
583{
584 if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
585 return 1;
586 return 0;
587}
588
589static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
590{
591 unsigned long sp;
592
593 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
594 sp = regs->u_regs[UREG_FP];
595
596 /* This is the X/Open sanctioned signal stack switching. */
597 if (sa->sa_flags & SA_ONSTACK) {
598 if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
599 sp = current->sas_ss_sp + current->sas_ss_size;
600 }
601 return (void __user *)(sp - framesize);
602}
603
/* Build an old-style (SunOS-compatible) signal frame on the user stack
 * and redirect the task to its handler.  Used when neither SA_SIGINFO
 * nor TIF_NEWSIGNALS applies.  On an unusable frame pointer the task is
 * killed with SIGILL; on a fault while writing the frame it gets
 * SIGSEGV forced.
 */
static void
setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
{
	struct signal_sframe32 __user *sframep;
	struct sigcontext32 __user *sc;
	unsigned int seta[_COMPAT_NSIG_WORDS];
	int err = 0;
	void __user *sig_address;
	int sig_code;
	unsigned long pc = regs->tpc;
	unsigned long npc = regs->tnpc;
	unsigned int psr;

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}

	synchronize_user_stack();
	save_and_clear_fpu();

	sframep = (struct signal_sframe32 __user *)
		get_sigframe(sa, regs, SF_ALIGNEDSZ);
	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
		/* Don't change signal code and address, so that
		 * post mortem debuggers can have a look.
		 */
		do_exit(SIGILL);
	}

	sc = &sframep->sig_context;

	/* We've already made sure frame pointer isn't in kernel space... */
	err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
			 &sc->sigc_onstack);

	/* Split each 64-bit sigset word into a pair of 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sc->sigc_mask);
	err |= __copy_to_user(sframep->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
	err |= __put_user(pc, &sc->sigc_pc);
	err |= __put_user(npc, &sc->sigc_npc);
	/* The compat frame wants a v8-style %psr built from %tstate. */
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sc->sigc_psr);
	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
	err |= __put_user(get_thread_wsaved(), &sc->sigc_oswins);

	/* Copy the caller's register window onto the signal stack. */
	err |= copy_in_user((u32 __user *)sframep,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));

	set_thread_wsaved(0); /* So process is allowed to execute. */
	err |= __put_user(signr, &sframep->sig_num);
	sig_address = NULL;
	sig_code = 0;
	/* Translate modern siginfo codes into the legacy SUBSIG_*
	 * values the old frame format carries.
	 */
	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
		sig_address = info->si_addr;
		switch (signr) {
		case SIGSEGV:
			switch (info->si_code) {
			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
			default: sig_code = SUBSIG_PROTECTION; break;
			}
			break;
		case SIGILL:
			switch (info->si_code) {
			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
			default: sig_code = SUBSIG_STACK; break;
			}
			break;
		case SIGFPE:
			switch (info->si_code) {
			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
			default: sig_code = SUBSIG_FPERROR; break;
			}
			break;
		case SIGBUS:
			switch (info->si_code) {
			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
			default: sig_code = SUBSIG_BUSTIMEOUT; break;
			}
			break;
		case SIGEMT:
			switch (info->si_code) {
			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
			}
			break;
		case SIGSYS:
			if (info->si_code == (__SI_FAULT|0x100)) {
				/* See sys_sunos32.c */
				sig_code = info->si_trapno;
				break;
			}
		default:
			sig_address = NULL;
		}
	}
	err |= __put_user(ptr_to_compat(sig_address), &sframep->sig_address);
	err |= __put_user(sig_code, &sframep->sig_code);
	err |= __put_user(ptr_to_compat(sc), &sframep->sig_scptr);
	if (err)
		goto sigsegv;

	/* Redirect execution to the handler. */
	regs->u_regs[UREG_FP] = (unsigned long) sframep;
	regs->tpc = (unsigned long) sa->sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	return;

sigsegv:
	force_sigsegv(signr, current);
}
741
742
743static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
744{
745 unsigned long *fpregs = current_thread_info()->fpregs;
746 unsigned long fprs;
747 int err = 0;
748
749 fprs = current_thread_info()->fpsaved[0];
750 if (fprs & FPRS_DL)
751 err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
752 (sizeof(unsigned int) * 32));
753 if (fprs & FPRS_DU)
754 err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
755 (sizeof(unsigned int) * 32));
756 err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
757 err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
758 err |= __put_user(fprs, &fpu->si_fprs);
759
760 return err;
761}
762
/* Build a "new style" 32-bit signal frame (no siginfo) on the user
 * stack and transfer control to the handler.  Chosen when
 * TIF_NEWSIGNALS is set and the handler was not installed with
 * SA_SIGINFO.  An unusable frame pointer or pending register windows
 * kill the task with SIGILL; a fault while writing the frame forces
 * SIGSEGV.
 */
static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			      int signo, sigset_t *oldset)
{
	struct new_signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	unsigned int seta[_COMPAT_NSIG_WORDS];

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The frame shrinks when there is no FPU state to include. */
	sigframe_size = NF_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct new_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err = put_user(regs->tpc, &sf->info.si_regs.pc);
	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
	err |= __put_user(regs->y, &sf->info.si_regs.y);
	/* Synthesize a v8-style %psr from %tstate for the frame. */
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->info.si_regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
	/* v8plus extra area: magic tag, upper 32 bits of the globals,
	 * and %asi.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	/* FPU state only if the FPU was enabled; else a NULL marker. */
	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Split each 64-bit sigset word into a pair of 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));

	/* Copy the caller's register window onto the signal stack. */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));

	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer) {
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	} else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret; restore" lands on sf->insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
		if (err)
			goto sigsegv;

		/* Flush the freshly written trampoline out of the
		 * I-cache if the page is resident.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(*ptep));

			__asm__ __volatile__(
			"	membar	#StoreStore\n"
			"	flush	%0 + %1"
			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
			: "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
893
/* Setup a Solaris (SVR4) stack frame for a 32-bit task whose
 * personality is PER_SVR4: build an svr4_signal_frame_t with siginfo,
 * ucontext and saved register windows, then redirect execution to the
 * handler.  An unusable frame pointer kills the task with SIGILL; a
 * fault while writing the frame forces SIGSEGV.
 */
static void
setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
		   struct pt_regs *regs, int signr, sigset_t *oldset)
{
	svr4_signal_frame_t __user *sfp;
	svr4_gregset_t __user *gr;
	svr4_siginfo_t __user *si;
	svr4_mcontext_t __user *mc;
	svr4_gwindows_t __user *gw;
	svr4_ucontext_t __user *uc;
	svr4_sigset_t setv;
	unsigned int psr;
	int i, err;

	synchronize_user_stack();
	save_and_clear_fpu();

	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	sfp = (svr4_signal_frame_t __user *)
		get_sigframe(sa, regs,
			     sizeof(struct reg_window32) + SVR4_SF_ALIGNED);

	if (invalid_frame_pointer(sfp, sizeof(*sfp)))
		do_exit(SIGILL);

	/* Start with a clean frame pointer and fill it */
	err = clear_user(sfp, sizeof(*sfp));

	/* Setup convenience variables */
	si = &sfp->si;
	uc = &sfp->uc;
	gw = &sfp->gw;
	mc = &uc->mcontext;
	gr = &mc->greg;

	/* FIXME: where am I supposed to put this?
	 * sc->sigc_onstack = old_status;
	 * anyways, it does not look like it is used for anything at all.
	 */
	/* Old blocked mask in SVR4 sigbits layout (32-bit words). */
	setv.sigbits[0] = oldset->sig[0];
	setv.sigbits[1] = (oldset->sig[0] >> 32);
	if (_NSIG_WORDS >= 2) {
		setv.sigbits[2] = oldset->sig[1];
		setv.sigbits[3] = (oldset->sig[1] >> 32);
		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
	} else
		err |= __copy_to_user(&uc->sigmask, &setv,
				      2 * sizeof(unsigned int));

	/* Store registers */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err |= __put_user(regs->tpc, &((*gr)[SVR4_PC]));
	err |= __put_user(regs->tnpc, &((*gr)[SVR4_NPC]));
	/* Synthesize a v8-style %psr from %tstate. */
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &((*gr)[SVR4_PSR]));
	err |= __put_user(regs->y, &((*gr)[SVR4_Y]));

	/* Copy g[1..7] and o[0..7] registers */
	for (i = 0; i < 7; i++)
		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
	for (i = 0; i < 8; i++)
		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
	err |= __put_user(current->sas_ss_size, &uc->stack.size);

	/* Save the currently window file: */

	/* 1. Link sfp->uc->gwins to our windows */
	err |= __put_user(ptr_to_compat(gw), &mc->gwin);

	/* 2. Number of windows to restore at setcontext (): */
	err |= __put_user(get_thread_wsaved(), &gw->count);

	/* 3. We just pay attention to the gw->count field on setcontext */
	set_thread_wsaved(0); /* So process is allowed to execute. */

	/* Setup the signal information.  Solaris expects a bunch of
	 * information to be passed to the signal handler, we don't provide
	 * that much currently, should use siginfo.
	 */
	err |= __put_user(signr, &si->siginfo.signo);
	err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
	if (err)
		goto sigsegv;

	/* Redirect execution to the handler. */
	regs->u_regs[UREG_FP] = (unsigned long) sfp;
	regs->tpc = (unsigned long) sa->sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* Arguments passed to signal handler */
	if (regs->u_regs[14]){
		struct reg_window32 __user *rw = (struct reg_window32 __user *)
			(regs->u_regs[14] & 0x00000000ffffffffUL);

		err |= __put_user(signr, &rw->ins[0]);
		err |= __put_user((u64)si, &rw->ins[1]);
		err |= __put_user((u64)uc, &rw->ins[2]);
		err |= __put_user((u64)sfp, &rw->ins[6]);	/* frame pointer */
		if (err)
			goto sigsegv;

		regs->u_regs[UREG_I0] = signr;
		regs->u_regs[UREG_I1] = (u32)(u64) si;
		regs->u_regs[UREG_I2] = (u32)(u64) uc;
	}
	return;

sigsegv:
	force_sigsegv(signr, current);
}
1017
/* Solaris getcontext(): dump the current register and signal-mask
 * state into a user-supplied svr4_ucontext_t.  Returns 0 on success,
 * -EFAULT on any user-access fault.  Pending register windows are
 * fatal (SIGSEGV exit) since the context cannot represent them.
 */
asmlinkage int
svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	svr4_mcontext_t __user *mc;
	svr4_sigset_t setv;
	int i, err;
	u32 psr;

	synchronize_user_stack();
	save_and_clear_fpu();

	if (get_thread_wsaved())
		do_exit(SIGSEGV);

	err = clear_user(uc, sizeof(*uc));

	/* Setup convenience variables */
	mc = &uc->mcontext;
	gr = &mc->greg;

	/* Current blocked mask in SVR4 sigbits layout (32-bit words). */
	setv.sigbits[0] = current->blocked.sig[0];
	setv.sigbits[1] = (current->blocked.sig[0] >> 32);
	if (_NSIG_WORDS >= 2) {
		setv.sigbits[2] = current->blocked.sig[1];
		setv.sigbits[3] = (current->blocked.sig[1] >> 32);
		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
	} else
		err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned));

	/* Store registers */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err |= __put_user(regs->tpc, &uc->mcontext.greg[SVR4_PC]);
	err |= __put_user(regs->tnpc, &uc->mcontext.greg[SVR4_NPC]);

	/* Synthesize a v8-style %psr; PSR_EF reflects current FPU use. */
	psr = tstate_to_psr(regs->tstate) & ~PSR_EF;
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &uc->mcontext.greg[SVR4_PSR]);

	err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);

	/* Copy g[1..7] and o[0..7] registers */
	for (i = 0; i < 7; i++)
		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
	for (i = 0; i < 8; i++)
		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
	err |= __put_user(current->sas_ss_size, &uc->stack.size);

	/* The register file is not saved
	 * we have already stuffed all of it with sync_user_stack
	 */
	return (err ? -EFAULT : 0);
}
1079
1080
/* Set the context for a svr4 application, this is Solaris way to sigreturn.
 * Validates the user-supplied ucontext, restores the signal mask,
 * sigaltstack, PC/nPC, %y, condition codes and the g/o registers.
 * Returns -EINTR on success (syscall was interrupted by the handler),
 * -EFAULT on any validation or copy failure.
 */
asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
{
	svr4_gregset_t __user *gr;
	mm_segment_t old_fs;
	u32 pc, npc, psr, u_ss_sp;
	sigset_t set;
	svr4_sigset_t setv;
	int i, err;
	stack_t st;

	/* Fixme: restore windows, or is this already taken care of in
	 * svr4_setup_frame when sync_user_windows is done?
	 */
	flush_user_windows();

	if (get_thread_wsaved())
		goto sigsegv;

	if (((unsigned long) c) & 3){
		printk("Unaligned structure passed\n");
		goto sigsegv;
	}

	if (!__access_ok(c, sizeof(*c))) {
		/* Miguel, add nice debugging msg _here_. ;-) */
		goto sigsegv;
	}

	/* Check for valid PC and nPC */
	gr = &c->mcontext.greg;
	err = __get_user(pc, &((*gr)[SVR4_PC]));
	err |= __get_user(npc, &((*gr)[SVR4_NPC]));
	if ((pc | npc) & 3)
		goto sigsegv;

	/* Retrieve information from passed ucontext */
	/* note that nPC is ored a 1, this is used to inform entry.S */
	/* that we don't want it to mess with our PC and nPC */

	/* Rebuild the 64-bit sigset from the SVR4 32-bit sigbits. */
	err |= copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));
	set.sig[0] = setv.sigbits[0] | (((long)setv.sigbits[1]) << 32);
	if (_NSIG_WORDS >= 2)
		set.sig[1] = setv.sigbits[2] | (((long)setv.sigbits[3]) << 32);

	err |= __get_user(u_ss_sp, &c->stack.sp);
	st.ss_sp = compat_ptr(u_ss_sp);
	err |= __get_user(st.ss_flags, &c->stack.flags);
	err |= __get_user(st.ss_size, &c->stack.size);
	if (err)
		goto sigsegv;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((stack_t __user *) &st, NULL, regs->u_regs[UREG_I6]);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->tpc = pc;
	regs->tnpc = npc | 1;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
	/* User can only change the condition codes, via %psr. */
	err |= __get_user(psr, &((*gr)[SVR4_PSR]));
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);

	/* Restore g[1..7] and o[0..7] registers */
	for (i = 0; i < 7; i++)
		err |= __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
	for (i = 0; i < 8; i++)
		err |= __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
	if (err)
		goto sigsegv;

	return -EINTR;
sigsegv:
	return -EFAULT;
}
1168
/* Build an rt (SA_SIGINFO) 32-bit signal frame on the user stack and
 * transfer control to the handler.  The frame carries full siginfo,
 * sigaltstack settings, the complete compat sigset, optional FPU and
 * v8plus state, and (unless ka_restorer is set) a two-instruction
 * rt_sigreturn trampoline.  An unusable frame pointer or pending
 * register windows kill the task with SIGILL; a fault while writing
 * the frame forces SIGSEGV.
 */
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			     unsigned long signr, sigset_t *oldset,
			     siginfo_t *info)
{
	struct rt_signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	compat_sigset_t seta;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The frame shrinks when there is no FPU state to include. */
	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err = put_user(regs->tpc, &sf->regs.pc);
	err |= __put_user(regs->tnpc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	/* Synthesize a v8-style %psr from %tstate for the frame. */
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
	/* v8plus extra area: magic tag, upper 32 bits of the globals,
	 * and %asi.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	/* FPU state only if the FPU was enabled; else a NULL marker. */
	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Update the siginfo structure. */
	err |= copy_siginfo_to_user32(&sf->info, info);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	/* Split each 64-bit sigset word into a pair of 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
		seta.sig[6] = oldset->sig[3];
	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
		seta.sig[4] = oldset->sig[2];
	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
		seta.sig[2] = oldset->sig[1];
	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
		seta.sig[0] = oldset->sig[0];
	}
	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));

	/* Copy the caller's register window onto the signal stack. */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signr;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret; restore" lands on sf->insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		/* mov __NR_rt_sigreturn, %g1 */
		err |= __put_user(0x82102065, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush the freshly written trampoline out of the
		 * I-cache if the page is resident.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(*ptep));

			__asm__ __volatile__(
			"	membar	#StoreStore\n"
			"	flush	%0 + %1"
			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
			: "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}
1308
/* Deliver one signal to a 32-bit task: build the appropriate user-space
 * signal frame, then update the task's blocked-signal mask.
 *
 * Frame selection (mutually exclusive):
 *   - SVR4 personality        -> setup_svr4_frame32()
 *   - SA_SIGINFO handler      -> setup_rt_frame32()
 *   - TIF_NEWSIGNALS task     -> new_setup_frame32()
 *   - otherwise               -> old-style setup_frame32()
 */
static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
				   siginfo_t *info,
				   sigset_t *oldset, struct pt_regs *regs,
				   int svr4_signal)
{
	if (svr4_signal)
		setup_svr4_frame32(&ka->sa, regs->tpc, regs->tnpc,
				   regs, signr, oldset);
	else {
		if (ka->sa.sa_flags & SA_SIGINFO)
			setup_rt_frame32(ka, regs, signr, oldset, info);
		else if (test_thread_flag(TIF_NEWSIGNALS))
			new_setup_frame32(ka, regs, signr, oldset);
		else
			setup_frame32(&ka->sa, regs, signr, oldset, info);
	}
	/* Unless SA_NOMASK was requested, block sa_mask plus the signal
	 * being delivered while the handler runs; done under siglock.
	 */
	if (!(ka->sa.sa_flags & SA_NOMASK)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
1333
1334static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
1335 struct sigaction *sa)
1336{
1337 switch (regs->u_regs[UREG_I0]) {
1338 case ERESTART_RESTARTBLOCK:
1339 case ERESTARTNOHAND:
1340 no_system_call_restart:
1341 regs->u_regs[UREG_I0] = EINTR;
1342 regs->tstate |= TSTATE_ICARRY;
1343 break;
1344 case ERESTARTSYS:
1345 if (!(sa->sa_flags & SA_RESTART))
1346 goto no_system_call_restart;
1347 /* fallthrough */
1348 case ERESTARTNOINTR:
1349 regs->u_regs[UREG_I0] = orig_i0;
1350 regs->tpc -= 4;
1351 regs->tnpc -= 4;
1352 }
1353}
1354
1355/* Note that 'init' is a special process: it doesn't get signals it doesn't
1356 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1357 * mistake.
1358 */
/* Top-level 32-bit signal dispatch, called on the return-to-userspace
 * path.  Returns 1 if a signal frame was set up, 0 otherwise.
 *
 * If no signal is delivered but the syscall reported an ERESTART*
 * status, the trap PCs are rewound here so the call is replayed
 * (via %g1 = __NR_restart_syscall for ERESTART_RESTARTBLOCK).
 */
int do_signal32(sigset_t *oldset, struct pt_regs * regs,
		unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct signal_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;
	int svr4_signal = current->personality == PER_SVR4;

	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		/* Fix up the interrupted syscall first, then build the
		 * frame for the chosen handler.
		 */
		if (cookie.restart_syscall)
			syscall_restart32(orig_i0, regs, &ka.sa);
		handle_signal32(signr, &ka, &info, oldset,
				regs, svr4_signal);
		return 1;
	}
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		/* Replay via the restart_syscall system call. */
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
1396
/* 32-bit layout of the old SunOS-style struct sigstack. */
struct sigstack32 {
	u32 the_stack;		/* 32-bit user pointer to stack top */
	int cur_status;		/* nonzero if currently on that stack */
};
1401
/* 32-bit compat sigstack(2).  @u_ssptr/@u_ossptr are 32-bit user
 * pointers (0 == NULL); @sp is the task's current stack pointer,
 * used to answer the on_sig_stack() question.
 * Returns 0, -EFAULT on bad user pointers, or -EPERM when asked to
 * change stacks while running on the current alternate stack.
 */
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
	struct sigstack32 __user *ssptr =
		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
	struct sigstack32 __user *ossptr =
		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		/* put_user() validates the pointer; the following
		 * __put_user() can then skip the access check.
		 */
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		u32 ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;

		/* If the current stack was set with sigaltstack, don't
		 * swap stacks while we are on it.
		 */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		 * track onstack-ness, but rather calculate it, we must
		 * presume a size. Ho hum this interface is lossy.
		 */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	ret = 0;
out:
	return ret;
}
1444
/* 32-bit compat sigaltstack(2).  Translates the 32-bit stack_t32
 * from user space into a kernel stack_t, calls the generic
 * do_sigaltstack() under KERNEL_DS (so its user-access macros accept
 * our kernel-space temporaries), and translates the old state back.
 */
asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
{
	stack_t uss, uoss;
	u32 u_ss_sp = 0;
	int ret;
	mm_segment_t old_fs;
	stack_t32 __user *uss32 = compat_ptr(ussa);
	stack_t32 __user *uoss32 = compat_ptr(uossa);

	/* get_user() on ss_sp checks the pointer; the __get_user()s
	 * reuse that check for the adjacent fields.
	 */
	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
		     __get_user(uss.ss_flags, &uss32->ss_flags) ||
		     __get_user(uss.ss_size, &uss32->ss_size)))
		return -EFAULT;
	uss.ss_sp = compat_ptr(u_ss_sp);
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
			     uossa ? (stack_t __user *) &uoss : NULL, sp);
	set_fs(old_fs);
	/* Copy the previous configuration back out if requested. */
	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss32->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss32->ss_size)))
		return -EFAULT;
	return ret;
}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
new file mode 100644
index 000000000000..6dff06a44e76
--- /dev/null
+++ b/arch/sparc64/kernel/smp.c
@@ -0,0 +1,1244 @@
1/* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/pagemap.h>
11#include <linux/threads.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/seq_file.h>
21#include <linux/cache.h>
22#include <linux/jiffies.h>
23#include <linux/profile.h>
24#include <linux/bootmem.h>
25
26#include <asm/head.h>
27#include <asm/ptrace.h>
28#include <asm/atomic.h>
29#include <asm/tlbflush.h>
30#include <asm/mmu_context.h>
31#include <asm/cpudata.h>
32
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/oplib.h>
37#include <asm/uaccess.h>
38#include <asm/timer.h>
39#include <asm/starfire.h>
40#include <asm/tlb.h>
41
extern int linux_num_cpus;
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;	/* set in smp_tick_init() */

/* cpus that have completed smp_callin() and entered the world */
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* secondary cpus spin in smp_callin() until their bit appears here */
static cpumask_t smp_commenced_mask;
/* cpus we have asked to boot, see smp_boot_one_cpu() */
static cpumask_t cpu_callout_map;
52
53void smp_info(struct seq_file *m)
54{
55 int i;
56
57 seq_printf(m, "State:\n");
58 for (i = 0; i < NR_CPUS; i++) {
59 if (cpu_online(i))
60 seq_printf(m,
61 "CPU%d:\t\tonline\n", i);
62 }
63}
64
65void smp_bogo(struct seq_file *m)
66{
67 int i;
68
69 for (i = 0; i < NR_CPUS; i++)
70 if (cpu_online(i))
71 seq_printf(m,
72 "Cpu%dBogo\t: %lu.%02lu\n"
73 "Cpu%dClkTck\t: %016lx\n",
74 i, cpu_data(i).udelay_val / (500000/HZ),
75 (cpu_data(i).udelay_val / (5000/HZ)) % 100,
76 i, cpu_data(i).clock_tick);
77}
78
/* Record per-cpu data for cpu @id: the delay-loop calibration value,
 * the clock-tick frequency read from the cpu's PROM node, and freshly
 * reset page-table cache state.
 */
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	/* Look up this cpu's PROM node to get its clock frequency. */
	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	/* Start with empty per-cpu page table caches. */
	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;
}
97
static void smp_setup_percpu_timer(void);

/* Set by a secondary cpu in smp_callin(); polled by the boot cpu in
 * smp_boot_one_cpu() to learn that the slave came up.  volatile
 * because it is busy-waited on without locks.
 */
static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);
103
/* Install this cpu's per-cpu area offset: put it in %g5 and store it
 * via the IMMU TSB register so trap handlers can recover it.
 */
static inline void cpu_setup_percpu_base(unsigned long cpu_id)
{
	__asm__ __volatile__("mov	%0, %%g5\n\t"
			     "stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (__per_cpu_offset(cpu_id)),
			       "r" (TSB_REG), "i" (ASI_IMMU));
}
113
/* Entry point for a freshly started secondary cpu.  Sets up its MMU
 * state, per-cpu base and timer, calibrates its delay loop, signals
 * the boot cpu via callin_flag, then spins until released through
 * smp_commenced_mask and finally marks itself online.
 */
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	inherit_locked_prom_mappings(0);

	__flush_tlb_all();

	cpu_setup_percpu_base(cpuid);

	smp_setup_percpu_timer();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	/* Tell smp_boot_one_cpu() we made it. */
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	clear_thread_flag(TIF_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Wait for the boot cpu to release us. */
	while (!cpu_isset(cpuid, smp_commenced_mask))
		membar("#LoadLoad");

	cpu_set(cpuid, cpu_online_map);
}
148
/* cpu_idle() must never return; if it does, report the offending cpu
 * and bring the whole machine down.
 */
void cpu_panic(void)
{
	int cpu = smp_processor_id();

	printk("CPU[%d]: Returns from cpu_idle!\n", cpu);
	panic("SMP bolixed\n");
}
154
/* Tick increment programmed into the compare register each timer
 * interrupt; also used when adjusting a slave's tick counter.
 */
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchonization instead of the slave.  -DaveM
 */

/* Indices into go[]; SLAVE sits one cache line away from MASTER. */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
/* Handshake cells shared between master and slave during tick sync. */
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
174
/* One measurement pass of the tick-sync handshake, run on the slave.
 * Performs NUM_ITERS round trips with the master through go[] and
 * keeps the iteration with the smallest round-trip time.  Returns the
 * estimated offset of this cpu's tick midpoint from the master's
 * timestamp; *rt gets the best round-trip time, *master the master's
 * timestamp relative to our start.
 */
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		/* Signal the master, then wait for its timestamp. */
		go[MASTER] = 1;
		membar("#StoreLoad");
		while (!(tm = go[SLAVE]))
			membar("#LoadLoad");
		go[SLAVE] = 0;
		membar("#StoreStore");
		t1 = tick_ops->get_tick();

		/* Keep the tightest (lowest latency) sample. */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
204
205void smp_synchronize_tick_client(void)
206{
207 long i, delta, adj, adjust_latency = 0, done = 0;
208 unsigned long flags, rt, master_time_stamp, bound;
209#if DEBUG_TICK_SYNC
210 struct {
211 long rt; /* roundtrip time */
212 long master; /* master's timestamp */
213 long diff; /* difference between midpoint and master's timestamp */
214 long lat; /* estimate of itc adjustment latency */
215 } t[NUM_ROUNDS];
216#endif
217
218 go[MASTER] = 1;
219
220 while (go[MASTER])
221 membar("#LoadLoad");
222
223 local_irq_save(flags);
224 {
225 for (i = 0; i < NUM_ROUNDS; i++) {
226 delta = get_delta(&rt, &master_time_stamp);
227 if (delta == 0) {
228 done = 1; /* let's lock on to this... */
229 bound = rt;
230 }
231
232 if (!done) {
233 if (i > 0) {
234 adjust_latency += -delta;
235 adj = -delta + adjust_latency/4;
236 } else
237 adj = -delta;
238
239 tick_ops->add_tick(adj, current_tick_offset);
240 }
241#if DEBUG_TICK_SYNC
242 t[i].rt = rt;
243 t[i].master = master_time_stamp;
244 t[i].diff = delta;
245 t[i].lat = adjust_latency/4;
246#endif
247 }
248 }
249 local_irq_restore(flags);
250
251#if DEBUG_TICK_SYNC
252 for (i = 0; i < NUM_ROUNDS; i++)
253 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
254 t[i].rt, t[i].master, t[i].diff, t[i].lat);
255#endif
256
257 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
258 "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
259}
260
261static void smp_start_sync_tick_client(int cpu);
262
/* Master side of tick synchronization with one slave @cpu.  Starts
 * the client via cross call, handshakes readiness through go[MASTER],
 * then answers NUM_ROUNDS*NUM_ITERS timestamp requests, publishing
 * each tick value in go[SLAVE].
 */
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		membar("#LoadLoad");

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			/* Wait for the slave's request, then answer
			 * with our current tick value.
			 */
			while (!go[MASTER])
				membar("#LoadLoad");
			go[MASTER] = 0;
			membar("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
292
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
/* thread_info of the idle task handed to the cpu being booted. */
static struct thread_info *cpu_new_thread = NULL;
300
/* Boot secondary cpu @cpu: fork its idle task, publish its thread_info
 * through cpu_new_thread, start it via the PROM, then poll callin_flag
 * (set by smp_callin()) for up to ~500ms.  Returns 0 on success,
 * -ENODEV if the cpu never checked in.
 */
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	/* NOTE(review): fork_idle() can fail; its return value is used
	 * unchecked here — confirm against the kernel version in use.
	 */
	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = p->thread_info;
	cpu_set(cpu, cpu_callout_map);

	/* Hand the cpu's PROM node the startup entry point. */
	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	/* 5000000 * 100us polls = 500 seconds worst case upper bound;
	 * in practice the slave sets callin_flag almost immediately.
	 */
	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
334
/* Deliver one 3-word mondo (data0/1/2) to @cpu on Spitfire.  Writes
 * the interrupt dispatch registers with interrupts disabled, then
 * polls the dispatch status register, retrying (after a short delay)
 * while the NACK bit is seen, up to a bounded number of attempts.
 */
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			/* Dispatch accepted; re-enable interrupts. */
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		/* Got NACKed; back off briefly and retry. */
		udelay(2);
		goto again;
	}
}
400
/* Deliver the mondo to every cpu in @mask, one at a time, via
 * spitfire_xcall_helper() (Spitfire can only dispatch serially).
 */
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
410
411/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
412 * packet, but we have no use for that. However we do take advantage of
413 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
414 */
415static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
416{
417 u64 pstate, ver;
418 int nack_busy_id, is_jalapeno;
419
420 if (cpus_empty(mask))
421 return;
422
423 /* Unfortunately, someone at Sun had the brilliant idea to make the
424 * busy/nack fields hard-coded by ITID number for this Ultra-III
425 * derivative processor.
426 */
427 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
428 is_jalapeno = ((ver >> 32) == 0x003e0016);
429
430 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
431
432retry:
433 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
434 : : "r" (pstate), "i" (PSTATE_IE));
435
436 /* Setup the dispatch data registers. */
437 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
438 "stxa %1, [%4] %6\n\t"
439 "stxa %2, [%5] %6\n\t"
440 "membar #Sync\n\t"
441 : /* no outputs */
442 : "r" (data0), "r" (data1), "r" (data2),
443 "r" (0x40), "r" (0x50), "r" (0x60),
444 "i" (ASI_INTR_W));
445
446 nack_busy_id = 0;
447 {
448 int i;
449
450 for_each_cpu_mask(i, mask) {
451 u64 target = (i << 14) | 0x70;
452
453 if (!is_jalapeno)
454 target |= (nack_busy_id << 24);
455 __asm__ __volatile__(
456 "stxa %%g0, [%0] %1\n\t"
457 "membar #Sync\n\t"
458 : /* no outputs */
459 : "r" (target), "i" (ASI_INTR_W));
460 nack_busy_id++;
461 }
462 }
463
464 /* Now, poll for completion. */
465 {
466 u64 dispatch_stat;
467 long stuck;
468
469 stuck = 100000 * nack_busy_id;
470 do {
471 __asm__ __volatile__("ldxa [%%g0] %1, %0"
472 : "=r" (dispatch_stat)
473 : "i" (ASI_INTR_DISPATCH_STAT));
474 if (dispatch_stat == 0UL) {
475 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
476 : : "r" (pstate));
477 return;
478 }
479 if (!--stuck)
480 break;
481 } while (dispatch_stat & 0x5555555555555555UL);
482
483 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
484 : : "r" (pstate));
485
486 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
487 /* Busy bits will not clear, continue instead
488 * of freezing up on this cpu.
489 */
490 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
491 smp_processor_id(), dispatch_stat);
492 } else {
493 int i, this_busy_nack = 0;
494
495 /* Delay some random time with interrupts enabled
496 * to prevent deadlock.
497 */
498 udelay(2 * nack_busy_id);
499
500 /* Clear out the mask bits for cpus which did not
501 * NACK us.
502 */
503 for_each_cpu_mask(i, mask) {
504 u64 check_mask;
505
506 if (is_jalapeno)
507 check_mask = (0x2UL << (2*i));
508 else
509 check_mask = (0x2UL <<
510 this_busy_nack);
511 if ((dispatch_stat & check_mask) == 0)
512 cpu_clear(i, mask);
513 this_busy_nack += 2;
514 }
515
516 goto retry;
517 }
518 }
519}
520
521/* Send cross call to all processors mentioned in MASK
522 * except self.
523 */
/* Send cross call to all processors mentioned in MASK
 * except self.
 *
 * data0 encodes the trap context in the high 32 bits and the low 32
 * bits of the handler address in the low 32 bits; delivery is routed
 * to the cpu-type-specific mondo path.  The caller is expected to run
 * the local copy itself (see note below).
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	/* Restrict to online cpus and never target ourselves. */
	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
540
541extern unsigned long xcall_sync_tick;
542
543static void smp_start_sync_tick_client(int cpu)
544{
545 cpumask_t mask = cpumask_of_cpu(cpu);
546
547 smp_cross_call_masked(&xcall_sync_tick,
548 0, 0, 0, mask);
549}
550
551/* Send cross call to all processors except self. */
552#define smp_cross_call(func, ctx, data1, data2) \
553 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
554
/* Shared descriptor for one smp_call_function() invocation. */
struct call_data_struct {
	void (*func) (void *info);	/* function each cpu runs */
	void *info;			/* its argument */
	atomic_t finished;		/* cpus that have checked in */
	int wait;			/* run func before acking? */
};

/* Serializes smp_call_function() initiators. */
static DEFINE_SPINLOCK(call_lock);
/* Current invocation, read by smp_call_function_client(). */
static struct call_data_struct *call_data;
564
565extern unsigned long xcall_call_function;
566
567/*
568 * You must not call this function with disabled interrupts or from a
569 * hardware interrupt handler or from a bottom half handler.
570 */
/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 *
 * Runs @func(@info) on every other online cpu via cross call, then
 * busy-waits (bounded) for all of them to bump data.finished.  On
 * timeout a diagnostic is printed; the return value is 0 either way.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	/* Publish the descriptor for the receiving cpus. */
	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
618
/* Cross-call receiver for smp_call_function().  Snapshots func/info
 * from call_data, then either runs the function before acknowledging
 * (wait mode) or acknowledges first so the initiator can proceed.
 */
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
635
636extern unsigned long xcall_flush_tlb_mm;
637extern unsigned long xcall_flush_tlb_pending;
638extern unsigned long xcall_flush_tlb_kernel_range;
639extern unsigned long xcall_flush_tlb_all_spitfire;
640extern unsigned long xcall_flush_tlb_all_cheetah;
641extern unsigned long xcall_report_regs;
642extern unsigned long xcall_receive_signal;
643
644#ifdef DCACHE_ALIASING_POSSIBLE
645extern unsigned long xcall_flush_dcache_page_cheetah;
646#endif
647extern unsigned long xcall_flush_dcache_page_spitfire;
648
649#ifdef CONFIG_DEBUG_DCFLUSH
650extern atomic_t dcpage_flushes;
651extern atomic_t dcpage_flushes_xcall;
652#endif
653
/* Flush one page from this cpu's D-cache (and, on Spitfire with a
 * mapped page, the I-cache), using the alias-aware variant when
 * D-cache aliasing is possible on this configuration.
 */
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
666
/* Flush @page from the D-cache of a single cpu: locally if @cpu is
 * us, otherwise by targeted cross call using the cpu-type-specific
 * flush stub.  Bit 32 of data0 tells the Spitfire stub whether the
 * page also needs an I-cache flush (it is mapped).
 */
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
706
/* Flush @page from the D-cache of every online cpu: cross call all
 * remote cpus with the cpu-type-specific flush stub, then flush
 * locally.  As above, bit 32 of data0 requests an I-cache flush on
 * Spitfire when the page is mapped.
 */
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
745
746void smp_receive_signal(int cpu)
747{
748 cpumask_t mask = cpumask_of_cpu(cpu);
749
750 if (cpu_online(cpu)) {
751 u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
752
753 if (tlb_type == spitfire)
754 spitfire_xcall_deliver(data0, 0, 0, mask);
755 else
756 cheetah_xcall_deliver(data0, 0, 0, mask);
757 }
758}
759
/* Receiver for smp_receive_signal(): nothing to do beyond clearing
 * the softint — rtrap handles any pending work on the way out.
 */
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}
765
/* Ask every other cpu to dump its registers (debug aid). */
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
770
771void smp_flush_tlb_all(void)
772{
773 if (tlb_type == spitfire)
774 smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
775 else
776 smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
777 __flush_tlb_all();
778}
779
780/* We know that the window frames of the user have been flushed
781 * to the stack before we get here because all callers of us
782 * are flush_tlb_*() routines, and these run after flush_cache_*()
783 * which performs the flushw.
784 *
785 * The SMP TLB coherency scheme we use works as follows:
786 *
787 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
788 * space has (potentially) executed on, this is the heuristic
789 * we use to avoid doing cross calls.
790 *
791 * Also, for flushing from kswapd and also for clones, we
792 * use cpu_vm_mask as the list of cpus to make run the TLB.
793 *
794 * 2) TLB context numbers are shared globally across all processors
795 * in the system, this allows us to play several games to avoid
796 * cross calls.
797 *
798 * One invariant is that when a cpu switches to a process, and
 * that process's tsk->active_mm->cpu_vm_mask does not have the
800 * current cpu's bit set, that tlb context is flushed locally.
801 *
802 * If the address space is non-shared (ie. mm->count == 1) we avoid
803 * cross calls when we want to flush the currently running process's
804 * tlb state. This is done by clearing all cpu bits except the current
805 * processor's in current->active_mm->cpu_vm_mask and performing the
806 * flush locally only. This will force any subsequent cpus which run
807 * this task to flush the context from the local tlb if the process
808 * migrates to another cpu (again).
809 *
810 * 3) For shared address spaces (threads) and swapping we bite the
811 * bullet for most cases and perform the cross call (but only to
812 * the cpus listed in cpu_vm_mask).
813 *
814 * The performance gain from "optimizing" away the cross call for threads is
815 * questionable (in theory the big win for threads is the massive sharing of
816 * address space state across processors).
817 */
/* Flush all TLB entries for @mm on every cpu that may hold them.
 * See the SMP TLB coherency comment above for the overall scheme.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * This code is called from two places, dup_mmap and exit_mmap. In the
         * former case, we really need a flush. In the later case, the callers
         * are single threaded exec_mmap (really need a flush), multithreaded
         * exec_mmap case (do not need to flush, since the caller gets a new
         * context via activate_mm), and all other callers of mmput() whence
         * the flush can be optimized since the associated threads are dead and
         * the mm is being torn down (__exit_mm and other mmput callers) or the
         * owning thread is dissociating itself from the mm. The
         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
         * for single thread exec and dup_mmap cases. An alternate check might
         * have been (current->mm != mm).
         *                                              Kanoj Sarcar
         */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = get_cpu();

		/* Sole user: shrink cpu_vm_mask to just us and skip
		 * the cross call entirely.
		 */
		if (atomic_read(&mm->mm_users) == 1) {
			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_mm,
				      ctx, 0, 0,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

		put_cpu();
	}
}
856
/* Flush @nr specific virtual addresses (@vaddrs) of @mm's context
 * from the TLBs of every cpu in mm->cpu_vm_mask, then locally.
 */
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	/* Sole user running on this cpu: shrink cpu_vm_mask and skip
	 * the cross call.
	 */
	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	} else {
		/* This optimization is not valid.  Normally
		 * we will be holding the page_table_lock, but
		 * there is an exception which is copy_page_range()
		 * when forking.  The lock is held during the individual
		 * page table updates in the parent, but not at the
		 * top level, which is where we are invoked.
		 */
		if (0) {
			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

			/* By virtue of running under the mm->page_table_lock,
			 * and mmu_context.h:switch_mm doing the same, the
			 * following operation is safe.
			 */
			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
				goto local_flush_and_out;
		}
	}

	smp_cross_call_masked(&xcall_flush_tlb_pending,
			      ctx, nr, (unsigned long) vaddrs,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
894
895void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
896{
897 start &= PAGE_MASK;
898 end = PAGE_ALIGN(end);
899 if (start != end) {
900 smp_cross_call(&xcall_flush_tlb_kernel_range,
901 0, start, end);
902
903 __flush_tlb_kernel_range(start, end);
904 }
905}
906
907/* CPU capture. */
908/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

/* Nesting depth of smp_capture() callers; only the 0->1 transition
 * actually captures, only the 1->0 transition releases.
 */
static atomic_t smp_capture_depth = ATOMIC_INIT(0);
/* Count of cpus currently parked in smp_penguin_jailcell(). */
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
/* Nonzero while captured cpus must keep spinning in the jailcell. */
static unsigned long penguins_are_doing_time;
914
/* Park every other cpu in smp_penguin_jailcell() (e.g. before PROM
 * entry).  Nestable: only the outermost call does the work, then
 * spins until every online cpu has registered in the jail.
 */
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar("#StoreStore | #LoadStore");
		/* Count ourselves, then cross-call the rest. */
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			membar("#LoadLoad");
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
937
/* Undo one level of smp_capture(); the outermost release lets the
 * jailed cpus out by clearing penguins_are_doing_time.
 */
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar("#StoreStore | #StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
951
952/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
953 * can service tlb flush xcalls...
954 */
955extern void prom_world(int);
956extern void save_alternate_globals(unsigned long *);
957extern void restore_alternate_globals(unsigned long *);
/* Cross-call receiver for smp_capture(): register ourselves, switch
 * to the PROM register world, and spin until smp_release() clears
 * penguins_are_doing_time, then restore our globals and check out.
 */
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		membar("#LoadLoad");
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
979
extern unsigned long xcall_promstop;

/* Tell every other cpu to stop itself via the PROM. */
void smp_promstop_others(void)
{
	smp_cross_call(&xcall_promstop, 0, 0, 0);
}
986
987#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
988#define prof_counter(__cpu) cpu_data(__cpu).counter
989
/* Per-cpu timer trap handler.  If the pending softint is not our tick
 * mask this was really a level-14 device interrupt, so hand it to
 * handler_irq().  Otherwise run profiling/process accounting (and the
 * global timer tick on the boot cpu), then re-arm the compare
 * register, looping in case we fell behind.
 */
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			/* Only the boot cpu drives the global tick. */
			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
1045
/* Initialize this cpu's tick hardware for the per-cpu timer, with
 * interrupts disabled across the programming sequence.
 */
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	/* Start with multiplier 1: one profile event per timer tick. */
	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}
1068
/* Early-boot tick setup: record the boot cpu's id and the timer
 * offset, mark the boot cpu online, and seed its profiling counters.
 */
void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
1077
1078/* /proc/profile writes can call this, don't __init it please. */
1079static DEFINE_SPINLOCK(prof_setup_lock);
1080
1081int setup_profiling_timer(unsigned int multiplier)
1082{
1083 unsigned long flags;
1084 int i;
1085
1086 if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1087 return -EINVAL;
1088
1089 spin_lock_irqsave(&prof_setup_lock, flags);
1090 for (i = 0; i < NR_CPUS; i++)
1091 prof_multiplier(i) = multiplier;
1092 current_tick_offset = (timer_tick_offset / multiplier);
1093 spin_unlock_irqrestore(&prof_setup_lock, flags);
1094
1095 return 0;
1096}
1097
/* Build phys_cpu_present_map from the PROM's cpu enumeration,
 * trimmed to at most max_cpus (always keeping the boot cpu).
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int instance, mid;

	/* NOTE(review): this admits cpus whose hardware mid is below
	 * max_cpus, not the first max_cpus cpus found -- confirm that
	 * is intended on sparse mid layouts.
	 */
	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < max_cpus)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}

	/* Still over the limit: strip non-boot cpus until it fits. */
	if (num_possible_cpus() > max_cpus) {
		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}
1123
/* Validate the boot cpu's hardware id and mark the boot cpu online
 * and physically present before secondary cpus are brought up.
 * Halts in the PROM if the id does not fit in NR_CPUS.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	if (hard_smp_processor_id() >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = hard_smp_processor_id();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
1136
/* Bring one secondary cpu online.  Returns 0 on success or the
 * error from smp_boot_one_cpu().
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		/* Wait (potentially forever) for the new cpu to
		 * appear in cpu_online_map.
		 */
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		/* NOTE(review): this branch is unreachable -- the loop
		 * above only terminates once the cpu is online, so the
		 * -ENODEV path can never be taken.  A bounded wait in
		 * the loop would be needed for it to matter.
		 */
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
1153
1154void __init smp_cpus_done(unsigned int max_cpus)
1155{
1156 unsigned long bogosum = 0;
1157 int i;
1158
1159 for (i = 0; i < NR_CPUS; i++) {
1160 if (cpu_online(i))
1161 bogosum += cpu_data(i).udelay_val;
1162 }
1163 printk("Total of %ld processors activated "
1164 "(%lu.%02lu BogoMIPS).\n",
1165 (long) num_online_cpus(),
1166 bogosum/(500000/HZ),
1167 (bogosum/(5000/HZ))%100);
1168}
1169
/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 * delivery case, we detect that by just seeing
 * if we are trying to send this to an idler or not.
 */
void smp_send_reschedule(int cpu)
{
	/* idle_volume == 0 is treated as "not an idler" here --
	 * TODO confirm against the idle loop's bookkeeping.
	 */
	if (cpu_data(cpu).idle_volume == 0)
		smp_receive_signal(cpu);
}
1185
/* This is a nop because we capture all other cpus
 * anyways when making the PROM active (see smp_capture()/
 * smp_release() above).
 */
void smp_send_stop(void)
{
}
1192
1193unsigned long __per_cpu_base;
1194unsigned long __per_cpu_shift;
1195
1196EXPORT_SYMBOL(__per_cpu_base);
1197EXPORT_SYMBOL(__per_cpu_shift);
1198
/* Allocate and populate the per-cpu data areas: each cpu receives a
 * power-of-two-strided copy of the linker's per-cpu section.  The
 * stride's log2 is kept in __per_cpu_shift and the offset from
 * __per_cpu_start in __per_cpu_base.
 */
void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;
	/* Created by linker magic */
	extern char __per_cpu_start[], __per_cpu_end[];

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);

#ifdef CONFIG_MODULES
	/* Leave room for modules' per-cpu data too. */
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	/* Round the stride up to a power of two, recording its log2. */
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	/* Make sure the resulting __per_cpu_base value
	 * will fit in the 43-bit sign extended IMMU
	 * TSB register.
	 */
	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
			      (unsigned long) __per_cpu_start);

	__per_cpu_base = ptr - __per_cpu_start;

	/* Layout sanity checks.  NOTE(review): the "<< 20 >> 20"
	 * test sign-extends from bit 43, i.e. it checks a 44-bit
	 * signed value on a 64-bit long -- confirm this matches the
	 * "43-bit" requirement stated above.
	 */
	if ((__per_cpu_shift < PAGE_SHIFT) ||
	    (__per_cpu_base & ~PAGE_MASK) ||
	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
		prom_printf("PER_CPU: Invalid layout, "
			    "ptr[%p] shift[%lx] base[%lx]\n",
			    ptr, __per_cpu_shift, __per_cpu_base);
		prom_halt();
	}

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Finally, load in the boot cpu's base value.
	 * We abuse the IMMU TSB register for trap handler
	 * entry and exit loading of %g5.  That is why it
	 * has to be page aligned.
	 */
	cpu_setup_percpu_base(hard_smp_processor_id());
}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
new file mode 100644
index 000000000000..cad5a1122800
--- /dev/null
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -0,0 +1,432 @@
1/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9/* Tell string.h we don't want memcpy etc. as cpp defines */
10#define EXPORT_SYMTAB_STROPS
11#define PROMLIB_INTERNAL
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/in6.h>
19#include <linux/pci.h>
20#include <linux/interrupt.h>
21#include <linux/fs_struct.h>
22#include <linux/fs.h>
23#include <linux/mm.h>
24#include <linux/socket.h>
25#include <linux/syscalls.h>
26#include <linux/percpu.h>
27#include <linux/init.h>
28#include <net/compat.h>
29
30#include <asm/oplib.h>
31#include <asm/delay.h>
32#include <asm/system.h>
33#include <asm/auxio.h>
34#include <asm/pgtable.h>
35#include <asm/io.h>
36#include <asm/irq.h>
37#include <asm/idprom.h>
38#include <asm/svr4.h>
39#include <asm/elf.h>
40#include <asm/head.h>
41#include <asm/smp.h>
42#include <asm/mostek.h>
43#include <asm/ptrace.h>
44#include <asm/user.h>
45#include <asm/uaccess.h>
46#include <asm/checksum.h>
47#include <asm/fpumacro.h>
48#include <asm/pgalloc.h>
49#include <asm/cacheflush.h>
50#ifdef CONFIG_SBUS
51#include <asm/sbus.h>
52#include <asm/dma.h>
53#endif
54#ifdef CONFIG_PCI
55#include <asm/ebus.h>
56#include <asm/isa.h>
57#endif
58#include <asm/a.out.h>
59#include <asm/ns87303.h>
60#include <asm/timer.h>
61#include <asm/cpudata.h>
62#include <asm/rwsem.h>
63
/* Mirrors the userland struct pollfd layout (fd/events/revents).
 * NOTE(review): not referenced elsewhere in the visible part of
 * this file -- confirm it is still needed before removing.
 */
struct poll {
	int fd;
	short events;
	short revents;
};
69
70extern void die_if_kernel(char *str, struct pt_regs *regs);
71extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
72void _sigpause_common (unsigned int set, struct pt_regs *);
73extern void *__bzero(void *, size_t);
74extern void *__memscan_zero(void *, size_t);
75extern void *__memscan_generic(void *, int, size_t);
76extern int __memcmp(const void *, const void *, __kernel_size_t);
77extern __kernel_size_t strlen(const char *);
78extern void linux_sparc_syscall(void);
79extern void rtrap(void);
80extern void show_regs(struct pt_regs *);
81extern void solaris_syscall(void);
82extern void syscall_trace(void);
83extern u32 sunos_sys_table[], sys_call_table32[];
84extern void tl0_solaris(void);
85extern void sys_sigsuspend(void);
86extern int svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
87extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
88extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
89extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
90extern long sparc32_open(const char __user * filename, int flags, int mode);
91extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
92 unsigned long offset, unsigned long size, pgprot_t prot, int space);
93extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
94 unsigned long pfn, unsigned long size, pgprot_t prot);
95extern void (*prom_palette)(int);
96
97extern int __ashrdi3(int, int);
98
99extern void dump_thread(struct pt_regs *, struct user *);
100extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
101
102#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
103extern void _do_spin_lock (spinlock_t *lock, char *str);
104extern void _do_spin_unlock (spinlock_t *lock);
105extern int _spin_trylock (spinlock_t *lock);
106extern void _do_read_lock(rwlock_t *rw, char *str);
107extern void _do_read_unlock(rwlock_t *rw, char *str);
108extern void _do_write_lock(rwlock_t *rw, char *str);
109extern void _do_write_unlock(rwlock_t *rw);
110extern int _do_write_trylock(rwlock_t *rw, char *str);
111#endif
112
113extern unsigned long phys_base;
114extern unsigned long pfn_base;
115
116extern unsigned int sys_call_table[];
117
118extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
119extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
120 unsigned long *);
121extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
122 unsigned long *, unsigned long *);
123extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
124 unsigned long *, unsigned long *, unsigned long *);
125
126/* Per-CPU information table */
127EXPORT_PER_CPU_SYMBOL(__cpu_data);
128
129/* used by various drivers */
130#ifdef CONFIG_SMP
131#ifndef CONFIG_DEBUG_SPINLOCK
132/* Out of line rw-locking implementation. */
133EXPORT_SYMBOL(__read_lock);
134EXPORT_SYMBOL(__read_unlock);
135EXPORT_SYMBOL(__write_lock);
136EXPORT_SYMBOL(__write_unlock);
137EXPORT_SYMBOL(__write_trylock);
138/* Out of line spin-locking implementation. */
139EXPORT_SYMBOL(_raw_spin_lock);
140EXPORT_SYMBOL(_raw_spin_lock_flags);
141#endif
142
143/* Hard IRQ locking */
144EXPORT_SYMBOL(synchronize_irq);
145
146#if defined(CONFIG_MCOUNT)
147extern void _mcount(void);
148EXPORT_SYMBOL(_mcount);
149#endif
150
151/* CPU online map and active count. */
152EXPORT_SYMBOL(cpu_online_map);
153EXPORT_SYMBOL(phys_cpu_present_map);
154
155/* Spinlock debugging library, optional. */
156#ifdef CONFIG_DEBUG_SPINLOCK
157EXPORT_SYMBOL(_do_spin_lock);
158EXPORT_SYMBOL(_do_spin_unlock);
159EXPORT_SYMBOL(_spin_trylock);
160EXPORT_SYMBOL(_do_read_lock);
161EXPORT_SYMBOL(_do_read_unlock);
162EXPORT_SYMBOL(_do_write_lock);
163EXPORT_SYMBOL(_do_write_unlock);
164EXPORT_SYMBOL(_do_write_trylock);
165#endif
166
167EXPORT_SYMBOL(smp_call_function);
168#endif /* CONFIG_SMP */
169
170EXPORT_SYMBOL(sparc64_get_clock_tick);
171
172/* semaphores */
173EXPORT_SYMBOL(down);
174EXPORT_SYMBOL(down_trylock);
175EXPORT_SYMBOL(down_interruptible);
176EXPORT_SYMBOL(up);
177
178/* RW semaphores */
179EXPORT_SYMBOL(__down_read);
180EXPORT_SYMBOL(__down_read_trylock);
181EXPORT_SYMBOL(__down_write);
182EXPORT_SYMBOL(__down_write_trylock);
183EXPORT_SYMBOL(__up_read);
184EXPORT_SYMBOL(__up_write);
185EXPORT_SYMBOL(__downgrade_write);
186
187/* Atomic counter implementation. */
188EXPORT_SYMBOL(atomic_add);
189EXPORT_SYMBOL(atomic_add_ret);
190EXPORT_SYMBOL(atomic_sub);
191EXPORT_SYMBOL(atomic_sub_ret);
192EXPORT_SYMBOL(atomic64_add);
193EXPORT_SYMBOL(atomic64_add_ret);
194EXPORT_SYMBOL(atomic64_sub);
195EXPORT_SYMBOL(atomic64_sub_ret);
196#ifdef CONFIG_SMP
197EXPORT_SYMBOL(_atomic_dec_and_lock);
198#endif
199
200/* Atomic bit operations. */
201EXPORT_SYMBOL(test_and_set_bit);
202EXPORT_SYMBOL(test_and_clear_bit);
203EXPORT_SYMBOL(test_and_change_bit);
204EXPORT_SYMBOL(set_bit);
205EXPORT_SYMBOL(clear_bit);
206EXPORT_SYMBOL(change_bit);
207
208/* Bit searching */
209EXPORT_SYMBOL(find_next_bit);
210EXPORT_SYMBOL(find_next_zero_bit);
211EXPORT_SYMBOL(find_next_zero_le_bit);
212
213EXPORT_SYMBOL(ivector_table);
214EXPORT_SYMBOL(enable_irq);
215EXPORT_SYMBOL(disable_irq);
216
217EXPORT_SYMBOL(__flushw_user);
218
219EXPORT_SYMBOL(tlb_type);
220EXPORT_SYMBOL(get_fb_unmapped_area);
221EXPORT_SYMBOL(flush_icache_range);
222
223EXPORT_SYMBOL(flush_dcache_page);
224#ifdef DCACHE_ALIASING_POSSIBLE
225EXPORT_SYMBOL(__flush_dcache_range);
226#endif
227
228EXPORT_SYMBOL(mostek_lock);
229EXPORT_SYMBOL(mstk48t02_regs);
230EXPORT_SYMBOL(request_fast_irq);
231#ifdef CONFIG_SUN_AUXIO
232EXPORT_SYMBOL(auxio_set_led);
233EXPORT_SYMBOL(auxio_set_lte);
234#endif
235#ifdef CONFIG_SBUS
236EXPORT_SYMBOL(sbus_root);
237EXPORT_SYMBOL(dma_chain);
238EXPORT_SYMBOL(sbus_set_sbus64);
239EXPORT_SYMBOL(sbus_alloc_consistent);
240EXPORT_SYMBOL(sbus_free_consistent);
241EXPORT_SYMBOL(sbus_map_single);
242EXPORT_SYMBOL(sbus_unmap_single);
243EXPORT_SYMBOL(sbus_map_sg);
244EXPORT_SYMBOL(sbus_unmap_sg);
245EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
246EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
247EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
248EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
249#endif
250EXPORT_SYMBOL(outsb);
251EXPORT_SYMBOL(outsw);
252EXPORT_SYMBOL(outsl);
253EXPORT_SYMBOL(insb);
254EXPORT_SYMBOL(insw);
255EXPORT_SYMBOL(insl);
256#ifdef CONFIG_PCI
257EXPORT_SYMBOL(ebus_chain);
258EXPORT_SYMBOL(isa_chain);
259EXPORT_SYMBOL(pci_memspace_mask);
260EXPORT_SYMBOL(pci_alloc_consistent);
261EXPORT_SYMBOL(pci_free_consistent);
262EXPORT_SYMBOL(pci_map_single);
263EXPORT_SYMBOL(pci_unmap_single);
264EXPORT_SYMBOL(pci_map_sg);
265EXPORT_SYMBOL(pci_unmap_sg);
266EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
267EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
268EXPORT_SYMBOL(pci_dma_supported);
269#endif
270
271/* I/O device mmaping on Sparc64. */
272EXPORT_SYMBOL(io_remap_page_range);
273EXPORT_SYMBOL(io_remap_pfn_range);
274
275/* Solaris/SunOS binary compatibility */
276EXPORT_SYMBOL(_sigpause_common);
277EXPORT_SYMBOL(verify_compat_iovec);
278
279EXPORT_SYMBOL(dump_thread);
280EXPORT_SYMBOL(dump_fpu);
281EXPORT_SYMBOL(__pte_alloc_one_kernel);
282#ifndef CONFIG_SMP
283EXPORT_SYMBOL(pgt_quicklists);
284#endif
285EXPORT_SYMBOL(put_fs_struct);
286
287/* math-emu wants this */
288EXPORT_SYMBOL(die_if_kernel);
289
290/* Kernel thread creation. */
291EXPORT_SYMBOL(kernel_thread);
292
293/* prom symbols */
294EXPORT_SYMBOL(idprom);
295EXPORT_SYMBOL(prom_root_node);
296EXPORT_SYMBOL(prom_getchild);
297EXPORT_SYMBOL(prom_getsibling);
298EXPORT_SYMBOL(prom_searchsiblings);
299EXPORT_SYMBOL(prom_firstprop);
300EXPORT_SYMBOL(prom_nextprop);
301EXPORT_SYMBOL(prom_getproplen);
302EXPORT_SYMBOL(prom_getproperty);
303EXPORT_SYMBOL(prom_node_has_property);
304EXPORT_SYMBOL(prom_setprop);
305EXPORT_SYMBOL(saved_command_line);
306EXPORT_SYMBOL(prom_getname);
307EXPORT_SYMBOL(prom_finddevice);
308EXPORT_SYMBOL(prom_feval);
309EXPORT_SYMBOL(prom_getbool);
310EXPORT_SYMBOL(prom_getstring);
311EXPORT_SYMBOL(prom_getint);
312EXPORT_SYMBOL(prom_getintdefault);
313EXPORT_SYMBOL(__prom_getchild);
314EXPORT_SYMBOL(__prom_getsibling);
315
316/* sparc library symbols */
317EXPORT_SYMBOL(strlen);
318EXPORT_SYMBOL(strnlen);
319EXPORT_SYMBOL(__strlen_user);
320EXPORT_SYMBOL(__strnlen_user);
321EXPORT_SYMBOL(strcpy);
322EXPORT_SYMBOL(strncpy);
323EXPORT_SYMBOL(strcat);
324EXPORT_SYMBOL(strncat);
325EXPORT_SYMBOL(strcmp);
326EXPORT_SYMBOL(strchr);
327EXPORT_SYMBOL(strrchr);
328EXPORT_SYMBOL(strpbrk);
329EXPORT_SYMBOL(strstr);
330
331#ifdef CONFIG_SOLARIS_EMUL_MODULE
332EXPORT_SYMBOL(linux_sparc_syscall);
333EXPORT_SYMBOL(rtrap);
334EXPORT_SYMBOL(show_regs);
335EXPORT_SYMBOL(solaris_syscall);
336EXPORT_SYMBOL(syscall_trace);
337EXPORT_SYMBOL(sunos_sys_table);
338EXPORT_SYMBOL(sys_call_table32);
339EXPORT_SYMBOL(tl0_solaris);
340EXPORT_SYMBOL(sys_sigsuspend);
341EXPORT_SYMBOL(sys_getppid);
342EXPORT_SYMBOL(sys_getpid);
343EXPORT_SYMBOL(sys_geteuid);
344EXPORT_SYMBOL(sys_getuid);
345EXPORT_SYMBOL(sys_getegid);
346EXPORT_SYMBOL(sys_getgid);
347EXPORT_SYMBOL(svr4_getcontext);
348EXPORT_SYMBOL(svr4_setcontext);
349EXPORT_SYMBOL(compat_sys_ioctl);
350EXPORT_SYMBOL(sparc32_open);
351EXPORT_SYMBOL(sys_close);
352#endif
353
354/* Special internal versions of library functions. */
355EXPORT_SYMBOL(_clear_page);
356EXPORT_SYMBOL(clear_user_page);
357EXPORT_SYMBOL(copy_user_page);
358EXPORT_SYMBOL(__bzero);
359EXPORT_SYMBOL(__memscan_zero);
360EXPORT_SYMBOL(__memscan_generic);
361EXPORT_SYMBOL(__memcmp);
362EXPORT_SYMBOL(__memset);
363EXPORT_SYMBOL(memchr);
364
365EXPORT_SYMBOL(csum_partial);
366EXPORT_SYMBOL(csum_partial_copy_nocheck);
367EXPORT_SYMBOL(__csum_partial_copy_from_user);
368EXPORT_SYMBOL(__csum_partial_copy_to_user);
369EXPORT_SYMBOL(ip_fast_csum);
370
371/* Moving data to/from/in userspace. */
372EXPORT_SYMBOL(___copy_to_user);
373EXPORT_SYMBOL(___copy_from_user);
374EXPORT_SYMBOL(___copy_in_user);
375EXPORT_SYMBOL(copy_to_user_fixup);
376EXPORT_SYMBOL(copy_from_user_fixup);
377EXPORT_SYMBOL(copy_in_user_fixup);
378EXPORT_SYMBOL(__strncpy_from_user);
379EXPORT_SYMBOL(__bzero_noasi);
380
381/* Various address conversion macros use this. */
382EXPORT_SYMBOL(phys_base);
383EXPORT_SYMBOL(pfn_base);
384EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
385EXPORT_SYMBOL(page_to_pfn);
386EXPORT_SYMBOL(pfn_to_page);
387
388/* No version information on this, heavily used in inline asm,
389 * and will always be 'void __ret_efault(void)'.
390 */
391EXPORT_SYMBOL(__ret_efault);
392
393/* No version information on these, as gcc produces such symbols. */
394EXPORT_SYMBOL(memcmp);
395EXPORT_SYMBOL(memcpy);
396EXPORT_SYMBOL(memset);
397EXPORT_SYMBOL(memmove);
398EXPORT_SYMBOL(strncmp);
399
400/* Delay routines. */
401EXPORT_SYMBOL(__udelay);
402EXPORT_SYMBOL(__ndelay);
403EXPORT_SYMBOL(__const_udelay);
404EXPORT_SYMBOL(__delay);
405
406void VISenter(void);
407/* RAID code needs this */
408EXPORT_SYMBOL(VISenter);
409
410/* for input/keybdev */
411EXPORT_SYMBOL(sun_do_break);
412EXPORT_SYMBOL(serial_console);
413EXPORT_SYMBOL(stop_a_enabled);
414
415#ifdef CONFIG_DEBUG_BUGVERBOSE
416EXPORT_SYMBOL(do_BUG);
417#endif
418
419/* for ns8703 */
420EXPORT_SYMBOL(ns87303_lock);
421
422/* for solaris compat module */
423EXPORT_SYMBOL_GPL(sys_call_table);
424
425EXPORT_SYMBOL(tick_ops);
426
427EXPORT_SYMBOL(xor_vis_2);
428EXPORT_SYMBOL(xor_vis_3);
429EXPORT_SYMBOL(xor_vis_4);
430EXPORT_SYMBOL(xor_vis_5);
431
432EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
new file mode 100644
index 000000000000..ae859d40771e
--- /dev/null
+++ b/arch/sparc64/kernel/starfire.c
@@ -0,0 +1,123 @@
1/* $Id: starfire.c,v 1.10 2001/04/14 21:13:45 davem Exp $
2 * starfire.c: Starfire/E10000 support.
3 *
4 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10
11#include <asm/page.h>
12#include <asm/oplib.h>
13#include <asm/smp.h>
14#include <asm/upa.h>
15#include <asm/starfire.h>
16
/*
 * A few places around the kernel check this to see if
 * they need to call us to do things in a Starfire specific
 * way.
 */
int this_is_starfire = 0;

/* Probe the PROM device tree for the "/ssp-serial" node, which is
 * present only on Starfire systems, and latch the result into
 * this_is_starfire.
 */
void check_if_starfire(void)
{
	int node;

	node = prom_finddevice("/ssp-serial");
	if (node == 0 || node == -1)
		return;
	this_is_starfire = 1;
}
30
/* Per-cpu Starfire setup hook; intentionally empty for now. */
void starfire_cpu_setup(void)
{
	/* Currently, nothing to do. */
}
35
/* Read this cpu's hardware id from a fixed Starfire UPA register.
 * The magic physical address is Starfire-specific -- see
 * asm/starfire.h for context.
 */
int starfire_hard_smp_processor_id(void)
{
	return upa_readl(0x1fff40000d0UL);
}
40
/*
 * Each Starfire board has 32 registers which perform translation
 * and delivery of traditional interrupt packets into the extended
 * Starfire hardware format.  Essentially UPAID's now have 2 more
 * bits than in all previous Sun5 systems.
 */
struct starfire_irqinfo {
	unsigned long imap_slots[32];	/* imap assigned to each slot; 0 == free */
	unsigned long tregs[32];	/* physical addr of each translation reg */
	struct starfire_irqinfo *next;	/* link in the global sflist chain */
	int upaid, hwmid;		/* board UPA id and swizzled hardware mid */
};

/* Singly-linked list of all boards registered via starfire_hookup(). */
static struct starfire_irqinfo *sflist = NULL;
55
/* Beam me up Scott(McNeil)y... */
/* Allocate and register translation-register bookkeeping for the
 * Starfire board with the given UPA id, linking it onto sflist and
 * returning it.  Halts in the PROM on allocation failure (this runs
 * too early to recover gracefully).
 */
void *starfire_hookup(int upaid)
{
	struct starfire_irqinfo *p;
	unsigned long treg_base, hwmid, i;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		prom_printf("starfire_hookup: No memory, this is insane.\n");
		prom_halt();
	}
	treg_base = 0x100fc000000UL;
	/* Swizzle the UPA id into the hardware mid; the identical bit
	 * rearrangement appears in starfire_translate() -- keep them
	 * in sync.
	 */
	hwmid = ((upaid & 0x3c) << 1) |
		((upaid & 0x40) >> 4) |
		(upaid & 0x3);
	p->hwmid = hwmid;
	/* The board's 32 translation registers sit at a fixed offset
	 * from its hwmid-indexed base, 0x10 bytes apart.
	 */
	treg_base += (hwmid << 33UL);
	treg_base += 0x200UL;
	for (i = 0; i < 32; i++) {
		p->imap_slots[i] = 0UL;
		p->tregs[i] = treg_base + (i * 0x10UL);
		/* Lets play it safe and not overwrite existing mappings */
		if (upa_readl(p->tregs[i]) != 0)
			p->imap_slots[i] = 0xdeadbeaf; /* non-zero sentinel: slot taken */
	}
	p->upaid = upaid;
	p->next = sflist;
	sflist = p;

	return (void *) p;
}
87
/* Program a Starfire translation register so interrupts described
 * by 'imap' are delivered to the cpu with the given UPA id, and
 * return the slot index used.  Halts/panics if the board is unknown
 * or all 32 slots are occupied.
 */
unsigned int starfire_translate(unsigned long imap,
				unsigned int upaid)
{
	struct starfire_irqinfo *p;
	unsigned int bus_hwmid;
	unsigned int i;

	/* Bits 33.. of the imap physical address carry the board's
	 * hwmid (mirrors the base computation in starfire_hookup()).
	 */
	bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
	for (p = sflist; p != NULL; p = p->next)
		if (p->hwmid == bus_hwmid)
			break;
	if (p == NULL) {
		prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
			    ((unsigned long)imap));
		prom_halt();
	}
	/* Reuse a slot already holding this imap, else take the
	 * first free (zero) slot.
	 */
	for (i = 0; i < 32; i++) {
		if (p->imap_slots[i] == imap ||
		    p->imap_slots[i] == 0UL)
			break;
	}
	if (i == 32) {
		printk("starfire_translate: Are you kidding me?\n");
		panic("Lucy in the sky....");
	}
	p->imap_slots[i] = imap;

	/* map to real upaid */
	upaid = (((upaid & 0x3c) << 1) |
		 ((upaid & 0x40) >> 4) |
		 (upaid & 0x3));

	upa_writel(upaid, p->tregs[i]);

	return i;
}
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
new file mode 100644
index 000000000000..87c1aeb02220
--- /dev/null
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -0,0 +1,275 @@
1/* $Id: sunos_ioctl32.c,v 1.11 2000/07/30 23:12:24 davem Exp $
2 * sunos_ioctl32.c: SunOS ioctl compatibility on sparc64.
3 *
4 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
5 * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/uaccess.h>
9
10#include <linux/sched.h>
11#include <linux/errno.h>
12#include <linux/string.h>
13#include <linux/termios.h>
14#include <linux/ioctl.h>
15#include <linux/route.h>
16#include <linux/sockios.h>
17#include <linux/if.h>
18#include <linux/netdevice.h>
19#include <linux/if_arp.h>
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/syscalls.h>
26#include <linux/compat.h>
27#include <asm/kbio.h>
28
/* SunOS per-process fd limit emulated by sunos_ioctl(). */
#define SUNOS_NR_OPEN	256

/* 32-bit layout of struct rtentry as passed by compat callers. */
struct rtentry32 {
	u32   		rt_pad1;
	struct sockaddr rt_dst;         /* target address               */
	struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
	struct sockaddr rt_genmask;     /* target network mask (IP)     */
	unsigned short	rt_flags;
	short		rt_pad2;
	u32		rt_pad3;
	unsigned char	rt_tos;
	unsigned char	rt_class;
	short		rt_pad4;
	short		rt_metric;      /* +1 for binary compatibility! */
	/* char * */ u32 rt_dev;        /* forcing the device at add    */
	u32		rt_mtu;         /* per route MTU/Window         */
	u32		rt_window;      /* Window clamping              */
	unsigned short  rt_irtt;        /* Initial RTT                  */

};

/* 32-bit layout of struct ifmap (addresses shrunk to u32). */
struct ifmap32 {
	u32 mem_start;
	u32 mem_end;
	unsigned short base_addr;
	unsigned char irq;
	unsigned char dma;
	unsigned char port;
};

/* 32-bit layout of struct ifreq; pointer members become
 * compat_caddr_t.
 */
struct ifreq32 {
#define IFHWADDRLEN     6
#define IFNAMSIZ        16
        union {
                char    ifrn_name[IFNAMSIZ];	/* if name, e.g. "en0" */
        } ifr_ifrn;
        union {
                struct  sockaddr ifru_addr;
                struct  sockaddr ifru_dstaddr;
                struct  sockaddr ifru_broadaddr;
                struct  sockaddr ifru_netmask;
                struct  sockaddr ifru_hwaddr;
                short   ifru_flags;
                int     ifru_ivalue;
                int     ifru_mtu;
                struct  ifmap32 ifru_map;
                char    ifru_slave[IFNAMSIZ];   /* Just fits the size */
		compat_caddr_t ifru_data;
        } ifr_ifru;
};

/* 32-bit layout of struct ifconf (buffer pointer is compat_caddr_t). */
struct ifconf32 {
	int     ifc_len;                        /* size of buffer       */
	compat_caddr_t  ifcbuf;
};
84
extern asmlinkage int compat_sys_ioctl(unsigned int, unsigned int, u32);

/* SunOS-compatible ioctl entry point for 32-bit tasks.  Remaps the
 * SunOS ioctl number space onto Linux SIOC*/TIOC* requests, routing
 * through compat_sys_ioctl() for structures whose 32-bit layout
 * differs.  Unknown requests fall through to compat_sys_ioctl(),
 * with -EINVAL rewritten to -EOPNOTSUPP as SunOS expects.
 */
asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
{
	int ret = -EBADF;

	if(fd >= SUNOS_NR_OPEN)
		goto out;
	if(!fcheck(fd))
		goto out;

	/* TIOCSETD with ldisc 2: SunOS "new" line discipline maps to
	 * N_TTY, fed to sys_ioctl from kernel space via set_fs().
	 */
	if(cmd == TIOCSETD) {
		mm_segment_t old_fs = get_fs();
		int __user *p;
		int ntty = N_TTY;
		int tmp;

		p = (int __user *) (unsigned long) arg;
		ret = -EFAULT;
		if(get_user(tmp, p))
			goto out;
		if(tmp == 2) {
			set_fs(KERNEL_DS);
			ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
			set_fs(old_fs);
			ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
			goto out;
		}
	}
	if(cmd == TIOCNOTTY) {
		ret = sys_setsid();
		goto out;
	}
	switch(cmd) {
	case _IOW('r', 10, struct rtentry32):
		ret = compat_sys_ioctl(fd, SIOCADDRT, arg);
		goto out;
	case _IOW('r', 11, struct rtentry32):
		ret = compat_sys_ioctl(fd, SIOCDELRT, arg);
		goto out;

	case _IOW('i', 12, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFADDR, arg);
		goto out;
	case _IOWR('i', 13, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFADDR, arg);
		goto out;
	case _IOW('i', 14, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
		goto out;
	case _IOWR('i', 15, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
		goto out;
	case _IOW('i', 16, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
		goto out;
	case _IOWR('i', 17, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
		goto out;
	case _IOW('i', 18, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFMEM, arg);
		goto out;
	case _IOWR('i', 19, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFMEM, arg);
		goto out;

	case _IOWR('i', 20, struct ifconf32):
		ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg);
		goto out;

	/* MTU ioctls use plain struct ifreq and go straight to
	 * sys_ioctl (no 32-bit layout conversion needed).
	 */
	case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
		ret = sys_ioctl(fd, SIOCSIFMTU, arg);
		goto out;
	case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
		ret = sys_ioctl(fd, SIOCGIFMTU, arg);
		goto out;

	case _IOWR('i', 23, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
		goto out;
	case _IOW('i', 24, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
		goto out;
	case _IOWR('i', 25, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
		goto out;
	case _IOW('i', 26, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
		goto out;
	case _IOWR('i', 27, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
		goto out;
	case _IOW('i', 28, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
		goto out;

	case _IOW('i', 30, struct arpreq):
		ret = compat_sys_ioctl(fd, SIOCSARP, arg);
		goto out;
	case _IOWR('i', 31, struct arpreq):
		ret = compat_sys_ioctl(fd, SIOCGARP, arg);
		goto out;
	case _IOW('i', 32, struct arpreq):
		ret = compat_sys_ioctl(fd, SIOCDARP, arg);
		goto out;

	case _IOW('i', 40, struct ifreq32): /* SIOCUPPER */
	case _IOW('i', 41, struct ifreq32): /* SIOCLOWER */
	case _IOW('i', 44, struct ifreq32): /* SIOCSETSYNC */
	case _IOW('i', 45, struct ifreq32): /* SIOCGETSYNC */
	case _IOW('i', 46, struct ifreq32): /* SIOCSSDSTATS */
	case _IOW('i', 47, struct ifreq32): /* SIOCSSESTATS */
	case _IOW('i', 48, struct ifreq32): /* SIOCSPROMISC */
		ret = -EOPNOTSUPP;
		goto out;

	case _IOW('i', 49, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCADDMULTI, arg);
		goto out;
	case _IOW('i', 50, struct ifreq32):
		ret = compat_sys_ioctl(fd, SIOCDELMULTI, arg);
		goto out;

	/* FDDI interface ioctls, unsupported. */

	case _IOW('i', 51, struct ifreq32): /* SIOCFDRESET */
	case _IOW('i', 52, struct ifreq32): /* SIOCFDSLEEP */
	case _IOW('i', 53, struct ifreq32): /* SIOCSTRTFMWAR */
	case _IOW('i', 54, struct ifreq32): /* SIOCLDNSTRTFW */
	case _IOW('i', 55, struct ifreq32): /* SIOCGETFDSTAT */
	case _IOW('i', 56, struct ifreq32): /* SIOCFDNMIINT */
	case _IOW('i', 57, struct ifreq32): /* SIOCFDEXUSER */
	case _IOW('i', 58, struct ifreq32): /* SIOCFDGNETMAP */
	case _IOW('i', 59, struct ifreq32): /* SIOCFDGIOCTL */
		printk("FDDI ioctl, returning EOPNOTSUPP\n");
		ret = -EOPNOTSUPP;
		goto out;

	case _IOW('t', 125, int):
		/* More stupid tty sunos ioctls, just
		 * say it worked.
		 */
		ret = 0;
		goto out;

	/* Non posix grp */
	case _IOW('t', 118, int): {
		int oldval, newval, __user *ptr;

		cmd = TIOCSPGRP;
		ptr = (int __user *) (unsigned long) arg;
		ret = -EFAULT;
		if(get_user(oldval, ptr))
			goto out;
		ret = compat_sys_ioctl(fd, cmd, arg);
		/* NOTE(review): the __get_user/__put_user return
		 * values below are ignored -- a fault here goes
		 * undetected.
		 */
		__get_user(newval, ptr);
		if(newval == -1) {
			__put_user(oldval, ptr);
			ret = -EIO;
		}
		if(ret == -ENOTTY)
			ret = -EIO;
		goto out;
	}

	case _IOR('t', 119, int): {
		int oldval, newval, __user *ptr;

		cmd = TIOCGPGRP;
		ptr = (int __user *) (unsigned long) arg;
		ret = -EFAULT;
		if(get_user(oldval, ptr))
			goto out;
		ret = compat_sys_ioctl(fd, cmd, arg);
		/* NOTE(review): __get_user/__put_user results ignored
		 * here as well.
		 */
		__get_user(newval, ptr);
		if(newval == -1) {
			__put_user(oldval, ptr);
			ret = -EIO;
		}
		if(ret == -ENOTTY)
			ret = -EIO;
		goto out;
	}
	};

	ret = compat_sys_ioctl(fd, cmd, arg);
	/* so stupid... */
	ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
out:
	return ret;
}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
new file mode 100644
index 000000000000..5a95e98c5317
--- /dev/null
+++ b/arch/sparc64/kernel/sys32.S
@@ -0,0 +1,327 @@
1/* $Id: sys32.S,v 1.12 2000/03/24 04:17:37 davem Exp $
2 * sys32.S: I-cache tricks for 32-bit compatibility layer simple
3 * conversions.
4 *
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <asm/errno.h>
11
12/* NOTE: call as jump breaks return stack, we have to avoid that */
13
14 .text
15
/* Each SIGNn macro emits a 32-byte-aligned, globally visible stub that
 * sign-extends n 32-bit syscall arguments (sra REG, 0, REG) and then
 * tail-calls the 64-bit handler via jmpl; the last sra sits in the
 * jmpl delay slot so it still executes before the target.  jmpl with
 * %g0 as the link register discards the return address, so the callee
 * returns straight to the stub's own caller.
 */
16#define SIGN1(STUB,SYSCALL,REG1) \
17 .align 32; \
18 .globl STUB; \
19STUB: sethi %hi(SYSCALL), %g1; \
20 jmpl %g1 + %lo(SYSCALL), %g0; \
21 sra REG1, 0, REG1
22
23#define SIGN2(STUB,SYSCALL,REG1,REG2) \
24 .align 32; \
25 .globl STUB; \
26STUB: sethi %hi(SYSCALL), %g1; \
27 sra REG1, 0, REG1; \
28 jmpl %g1 + %lo(SYSCALL), %g0; \
29 sra REG2, 0, REG2
30
31#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
32 .align 32; \
33 .globl STUB; \
34STUB: sra REG1, 0, REG1; \
35 sethi %hi(SYSCALL), %g1; \
36 sra REG2, 0, REG2; \
37 jmpl %g1 + %lo(SYSCALL), %g0; \
38 sra REG3, 0, REG3
39
40#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
41 .align 32; \
42 .globl STUB; \
43STUB: sra REG1, 0, REG1; \
44 sethi %hi(SYSCALL), %g1; \
45 sra REG2, 0, REG2; \
46 sra REG3, 0, REG3; \
47 jmpl %g1 + %lo(SYSCALL), %g0; \
48 sra REG4, 0, REG4
49
/* One stub per compat syscall whose arguments need sign extension
 * before reaching the native 64-bit implementation.  The registers
 * listed are the argument registers (%o0..%o5) holding signed 32-bit
 * values from the 32-bit caller.
 */
50SIGN1(sys32_exit, sparc_exit, %o0)
51SIGN1(sys32_exit_group, sys_exit_group, %o0)
52SIGN1(sys32_wait4, compat_sys_wait4, %o2)
53SIGN1(sys32_creat, sys_creat, %o1)
54SIGN1(sys32_mknod, sys_mknod, %o1)
55SIGN1(sys32_perfctr, sys_perfctr, %o0)
56SIGN1(sys32_umount, sys_umount, %o1)
57SIGN1(sys32_signal, sys_signal, %o0)
58SIGN1(sys32_access, sys_access, %o1)
59SIGN1(sys32_msync, sys_msync, %o2)
60SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
61SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
62SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
63SIGN1(sys32_sethostname, sys_sethostname, %o1)
64SIGN1(sys32_swapon, sys_swapon, %o1)
65SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
66SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
67SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
68SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
69SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
70SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
71SIGN1(sys32_setxattr, sys_setxattr, %o4)
72SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
73SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
74SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
75SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
76SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
77SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
78SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
79SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
80SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
81SIGN1(sys32_readahead, compat_sys_readahead, %o0)
82SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
83SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
84SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
85SIGN1(sys32_mlockall, sys_mlockall, %o0)
86SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
87SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
88SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
89SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
90SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
91SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
92SIGN1(sys32_select, compat_sys_select, %o0)
93SIGN1(sys32_mkdir, sys_mkdir, %o1)
94SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
95SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
96SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3)
97SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
98SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
99SIGN1(sys32_prctl, sys_prctl, %o0)
100SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
101SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
102SIGN1(sys32_getgroups, sys_getgroups, %o0)
103SIGN1(sys32_getpgid, sys_getpgid, %o0)
104SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
105SIGN1(sys32_getsid, sys_getsid, %o0)
106SIGN2(sys32_kill, sys_kill, %o0, %o1)
107SIGN1(sys32_nice, sys_nice, %o0)
108SIGN1(sys32_lseek, sys_lseek, %o1)
109SIGN2(sys32_open, sparc32_open, %o1, %o2)
110SIGN1(sys32_readlink, sys_readlink, %o2)
111SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
112SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
113SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
114SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
115SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
116SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
117SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
118SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
119SIGN1(sys32_setgroups, sys_setgroups, %o0)
120SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
121SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
122SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
123SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
124SIGN1(sys32_umask, sys_umask, %o0)
125SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
126SIGN1(sys32_sendto, sys_sendto, %o0)
127SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
128SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
129SIGN2(sys32_connect, sys_connect, %o0, %o2)
130SIGN2(sys32_bind, sys_bind, %o0, %o2)
131SIGN2(sys32_listen, sys_listen, %o0, %o1)
132SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
133SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
134SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
135SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
136SIGN1(sys32_getpeername, sys_getpeername, %o0)
137SIGN1(sys32_getsockname, sys_getsockname, %o0)
138
/* 32-bit mmap2: the compat ABI passes the file offset in units of
 * 4096-byte pages in %o5; convert it to a byte offset (sllx by 12)
 * in the delay slot and tail-call the native sys_mmap.
 */
139 .globl sys32_mmap2
140sys32_mmap2:
141 sethi %hi(sys_mmap), %g1
142 jmpl %g1 + %lo(sys_mmap), %g0
143 sllx %o5, 12, %o5
144
/* 32-bit socketcall demultiplexer.  Valid call numbers are 1..17;
 * anything else goes to do_einval.  Each handler below is exactly
 * 32 bytes long, so (call - 1) << 5 indexes straight into
 * __socketcall_table_begin.
 */
145 .align 32
146 .globl sys32_socketcall
147sys32_socketcall: /* %o0=call, %o1=args */
148 cmp %o0, 1
149 bl,pn %xcc, do_einval
150 cmp %o0, 17
151 bg,pn %xcc, do_einval
152 sub %o0, 1, %o0
153 sllx %o0, 5, %o0
154 sethi %hi(__socketcall_table_begin), %g2
155 or %g2, %lo(__socketcall_table_begin), %g2
156 jmpl %g2 + %o0, %g0
157 nop
158
/* Handler table for sys32_socketcall.  Every entry MUST remain exactly
 * 32 bytes (8 instructions, nop-padded) because the dispatcher indexes
 * by (call - 1) << 5.  Arguments are fetched from the 32-bit user args
 * array at %o1 using alternate-space loads through %asi: ldswa
 * sign-extends signed ints, lduwa zero-extends pointers/unsigned
 * values.  Each entry then tail-calls the native or compat handler,
 * loading the final argument in the jmpl delay slot.
 */
159 /* Each entry is exactly 32 bytes. */
160 .align 32
161__socketcall_table_begin:
162do_sys_socket: /* sys_socket(int, int, int) */
163 ldswa [%o1 + 0x0] %asi, %o0
164 sethi %hi(sys_socket), %g1
165 ldswa [%o1 + 0x8] %asi, %o2
166 jmpl %g1 + %lo(sys_socket), %g0
167 ldswa [%o1 + 0x4] %asi, %o1
168 nop
169 nop
170 nop
171do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
172 ldswa [%o1 + 0x0] %asi, %o0
173 sethi %hi(sys_bind), %g1
174 ldswa [%o1 + 0x8] %asi, %o2
175 jmpl %g1 + %lo(sys_bind), %g0
176 lduwa [%o1 + 0x4] %asi, %o1
177 nop
178 nop
179 nop
180do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
181 ldswa [%o1 + 0x0] %asi, %o0
182 sethi %hi(sys_connect), %g1
183 ldswa [%o1 + 0x8] %asi, %o2
184 jmpl %g1 + %lo(sys_connect), %g0
185 lduwa [%o1 + 0x4] %asi, %o1
186 nop
187 nop
188 nop
189do_sys_listen: /* sys_listen(int, int) */
190 ldswa [%o1 + 0x0] %asi, %o0
191 sethi %hi(sys_listen), %g1
192 jmpl %g1 + %lo(sys_listen), %g0
193 ldswa [%o1 + 0x4] %asi, %o1
194 nop
195 nop
196 nop
197 nop
198do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
199 ldswa [%o1 + 0x0] %asi, %o0
200 sethi %hi(sys_accept), %g1
201 lduwa [%o1 + 0x8] %asi, %o2
202 jmpl %g1 + %lo(sys_accept), %g0
203 lduwa [%o1 + 0x4] %asi, %o1
204 nop
205 nop
206 nop
207do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
208 ldswa [%o1 + 0x0] %asi, %o0
209 sethi %hi(sys_getsockname), %g1
210 lduwa [%o1 + 0x8] %asi, %o2
211 jmpl %g1 + %lo(sys_getsockname), %g0
212 lduwa [%o1 + 0x4] %asi, %o1
213 nop
214 nop
215 nop
216do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
217 ldswa [%o1 + 0x0] %asi, %o0
218 sethi %hi(sys_getpeername), %g1
219 lduwa [%o1 + 0x8] %asi, %o2
220 jmpl %g1 + %lo(sys_getpeername), %g0
221 lduwa [%o1 + 0x4] %asi, %o1
222 nop
223 nop
224 nop
225do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
226 ldswa [%o1 + 0x0] %asi, %o0
227 sethi %hi(sys_socketpair), %g1
228 ldswa [%o1 + 0x8] %asi, %o2
229 lduwa [%o1 + 0xc] %asi, %o3
230 jmpl %g1 + %lo(sys_socketpair), %g0
231 ldswa [%o1 + 0x4] %asi, %o1
232 nop
233 nop
234do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
235 ldswa [%o1 + 0x0] %asi, %o0
236 sethi %hi(sys_send), %g1
237 lduwa [%o1 + 0x8] %asi, %o2
238 lduwa [%o1 + 0xc] %asi, %o3
239 jmpl %g1 + %lo(sys_send), %g0
240 lduwa [%o1 + 0x4] %asi, %o1
241 nop
242 nop
243do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
244 ldswa [%o1 + 0x0] %asi, %o0
245 sethi %hi(sys_recv), %g1
246 lduwa [%o1 + 0x8] %asi, %o2
247 lduwa [%o1 + 0xc] %asi, %o3
248 jmpl %g1 + %lo(sys_recv), %g0
249 lduwa [%o1 + 0x4] %asi, %o1
250 nop
251 nop
252do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
253 ldswa [%o1 + 0x0] %asi, %o0
254 sethi %hi(sys_sendto), %g1
255 lduwa [%o1 + 0x8] %asi, %o2
256 lduwa [%o1 + 0xc] %asi, %o3
257 lduwa [%o1 + 0x10] %asi, %o4
258 ldswa [%o1 + 0x14] %asi, %o5
259 jmpl %g1 + %lo(sys_sendto), %g0
260 lduwa [%o1 + 0x4] %asi, %o1
261do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
262 ldswa [%o1 + 0x0] %asi, %o0
263 sethi %hi(sys_recvfrom), %g1
264 lduwa [%o1 + 0x8] %asi, %o2
265 lduwa [%o1 + 0xc] %asi, %o3
266 lduwa [%o1 + 0x10] %asi, %o4
267 lduwa [%o1 + 0x14] %asi, %o5
268 jmpl %g1 + %lo(sys_recvfrom), %g0
269 lduwa [%o1 + 0x4] %asi, %o1
270do_sys_shutdown: /* sys_shutdown(int, int) */
271 ldswa [%o1 + 0x0] %asi, %o0
272 sethi %hi(sys_shutdown), %g1
273 jmpl %g1 + %lo(sys_shutdown), %g0
274 ldswa [%o1 + 0x4] %asi, %o1
275 nop
276 nop
277 nop
278 nop
279do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
280 ldswa [%o1 + 0x0] %asi, %o0
281 sethi %hi(compat_sys_setsockopt), %g1
282 ldswa [%o1 + 0x8] %asi, %o2
283 lduwa [%o1 + 0xc] %asi, %o3
284 ldswa [%o1 + 0x10] %asi, %o4
285 jmpl %g1 + %lo(compat_sys_setsockopt), %g0
286 ldswa [%o1 + 0x4] %asi, %o1
287 nop
288do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
289 ldswa [%o1 + 0x0] %asi, %o0
290 sethi %hi(compat_sys_getsockopt), %g1
291 ldswa [%o1 + 0x8] %asi, %o2
292 lduwa [%o1 + 0xc] %asi, %o3
293 lduwa [%o1 + 0x10] %asi, %o4
294 jmpl %g1 + %lo(compat_sys_getsockopt), %g0
295 ldswa [%o1 + 0x4] %asi, %o1
296 nop
297do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
298 ldswa [%o1 + 0x0] %asi, %o0
299 sethi %hi(compat_sys_sendmsg), %g1
300 lduwa [%o1 + 0x8] %asi, %o2
301 jmpl %g1 + %lo(compat_sys_sendmsg), %g0
302 lduwa [%o1 + 0x4] %asi, %o1
303 nop
304 nop
305 nop
306do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
307 ldswa [%o1 + 0x0] %asi, %o0
308 sethi %hi(compat_sys_recvmsg), %g1
309 lduwa [%o1 + 0x8] %asi, %o2
310 jmpl %g1 + %lo(compat_sys_recvmsg), %g0
311 lduwa [%o1 + 0x4] %asi, %o1
312 nop
313 nop
314 nop
315__socketcall_table_end:
316
/* Shared error returns for the dispatcher and the exception table. */
317do_einval:
318 retl
319 mov -EINVAL, %o0
320do_efault:
321 retl
322 mov -EFAULT, %o0
323
/* Any user-access fault taken between __socketcall_table_begin and
 * __socketcall_table_end (the ldswa/lduwa loads above) resumes at
 * do_efault, returning -EFAULT to the caller.
 */
324 .section __ex_table
325 .align 4
326 .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault
327 .previous
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
new file mode 100644
index 000000000000..0077f02f4b37
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -0,0 +1,723 @@
1/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/sys_sparc.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
6 * platform.
7 */
8
9#include <linux/config.h>
10#include <linux/errno.h>
11#include <linux/types.h>
12#include <linux/sched.h>
13#include <linux/fs.h>
14#include <linux/file.h>
15#include <linux/mm.h>
16#include <linux/sem.h>
17#include <linux/msg.h>
18#include <linux/shm.h>
19#include <linux/stat.h>
20#include <linux/mman.h>
21#include <linux/utsname.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/slab.h>
25#include <linux/syscalls.h>
26#include <linux/ipc.h>
27#include <linux/personality.h>
28
29#include <asm/uaccess.h>
30#include <asm/ipc.h>
31#include <asm/utrap.h>
32#include <asm/perfctr.h>
33
34/* #define DEBUG_UNIMP_SYSCALL */
35
36/* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
38 */
39asmlinkage unsigned long sys_getpagesize(void)
40{
41 return PAGE_SIZE;
42}
43
/* Round 'addr' up to the next SHMLBA boundary, then add the colour
 * offset implied by 'pgoff' so shared mappings of the same file offset
 * land on compatible cache colours (see the MAP_SHARED aliasing check
 * below).
 */
44#define COLOUR_ALIGN(addr,pgoff) \
45 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
46 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
47
/* Find a free virtual address range of 'len' bytes for an mmap.
 * Honors MAP_FIXED (after validating aliasing constraints), applies
 * cache-colour alignment for file-backed or shared mappings, and
 * skips the sparc64 VA hole between -PAGE_OFFSET and PAGE_OFFSET.
 * Returns the address, -EINVAL, or -ENOMEM.
 */
48unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
49{
50 struct mm_struct *mm = current->mm;
51 struct vm_area_struct * vma;
52 unsigned long task_size = TASK_SIZE;
53 unsigned long start_addr;
54 int do_color_align;
55
56 if (flags & MAP_FIXED) {
57 /* We do not accept a shared mapping if it would violate
58 * cache aliasing constraints.
59 */
60 if ((flags & MAP_SHARED) &&
61 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
62 return -EINVAL;
63 return addr;
64 }
65
 /* 32-bit tasks are confined below 0xf0000000. */
66 if (test_thread_flag(TIF_32BIT))
67 task_size = 0xf0000000UL;
68 if (len > task_size || len > -PAGE_OFFSET)
69 return -ENOMEM;
70
 /* Colour-align file-backed and shared mappings. */
71 do_color_align = 0;
72 if (filp || (flags & MAP_SHARED))
73 do_color_align = 1;
74
 /* First try the caller's hint, suitably aligned. */
75 if (addr) {
76 if (do_color_align)
77 addr = COLOUR_ALIGN(addr, pgoff);
78 else
79 addr = PAGE_ALIGN(addr);
80
81 vma = find_vma(mm, addr);
82 if (task_size - len >= addr &&
83 (!vma || addr + len <= vma->vm_start))
84 return addr;
85 }
86
 /* Linear search from the cached starting point. */
87 start_addr = addr = mm->free_area_cache;
88
89 task_size -= len;
90
91full_search:
92 if (do_color_align)
93 addr = COLOUR_ALIGN(addr, pgoff);
94 else
95 addr = PAGE_ALIGN(addr);
96
97 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
98 /* At this point: (!vma || addr < vma->vm_end). */
 /* Jump over the unmappable VA hole. */
99 if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
100 addr = PAGE_OFFSET;
101 vma = find_vma(mm, PAGE_OFFSET);
102 }
 /* Ran off the end: retry once from TASK_UNMAPPED_BASE
 * in case the cached start skipped usable space.
 */
103 if (task_size < addr) {
104 if (start_addr != TASK_UNMAPPED_BASE) {
105 start_addr = addr = TASK_UNMAPPED_BASE;
106 goto full_search;
107 }
108 return -ENOMEM;
109 }
110 if (!vma || addr + len <= vma->vm_start) {
111 /*
112 * Remember the place where we stopped the search:
113 */
114 mm->free_area_cache = addr + len;
115 return addr;
116 }
117 addr = vma->vm_end;
118 if (do_color_align)
119 addr = COLOUR_ALIGN(addr, pgoff);
120 }
121}
122
123/* Try to align mapping such that we align it as much as possible. */
124unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
125{
126 unsigned long align_goal, addr = -ENOMEM;
127
128 if (flags & MAP_FIXED) {
129 /* Ok, don't mess with it. */
130 return get_unmapped_area(NULL, addr, len, pgoff, flags);
131 }
132 flags &= ~MAP_SHARED;
133
134 align_goal = PAGE_SIZE;
135 if (len >= (4UL * 1024 * 1024))
136 align_goal = (4UL * 1024 * 1024);
137 else if (len >= (512UL * 1024))
138 align_goal = (512UL * 1024);
139 else if (len >= (64UL * 1024))
140 align_goal = (64UL * 1024);
141
142 do {
143 addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
144 if (!(addr & ~PAGE_MASK)) {
145 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
146 break;
147 }
148
149 if (align_goal == (4UL * 1024 * 1024))
150 align_goal = (512UL * 1024);
151 else if (align_goal == (512UL * 1024))
152 align_goal = (64UL * 1024);
153 else
154 align_goal = PAGE_SIZE;
155 } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
156
157 /* Mapping is smaller than 64K or larger areas could not
158 * be obtained.
159 */
160 if (addr & ~PAGE_MASK)
161 addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
162
163 return addr;
164}
165
166asmlinkage unsigned long sparc_brk(unsigned long brk)
167{
168 /* People could try to be nasty and use ta 0x6d in 32bit programs */
169 if (test_thread_flag(TIF_32BIT) &&
170 brk >= 0xf0000000UL)
171 return current->mm->brk;
172
173 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
174 return current->mm->brk;
175 return sys_brk(brk);
176}
177
178/*
179 * sys_pipe() is the normal C calling standard for creating
180 * a pipe. It's not the way unix traditionally does this, though.
181 */
182asmlinkage long sparc_pipe(struct pt_regs *regs)
183{
184 int fd[2];
185 int error;
186
187 error = do_pipe(fd);
188 if (error)
189 goto out;
190 regs->u_regs[UREG_I1] = fd[1];
191 error = fd[0];
192out:
193 return error;
194}
195
196/*
197 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
198 *
199 * This is really horribly ugly.
200 */
201
202asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
203 unsigned long third, void __user *ptr, long fifth)
204{
205 int err;
206
207 /* No need for backward compatibility. We can start fresh... */
208 if (call <= SEMCTL) {
209 switch (call) {
210 case SEMOP:
211 err = sys_semtimedop(first, ptr,
212 (unsigned)second, NULL);
213 goto out;
214 case SEMTIMEDOP:
215 err = sys_semtimedop(first, ptr, (unsigned)second,
216 (const struct timespec __user *) fifth);
217 goto out;
218 case SEMGET:
219 err = sys_semget(first, (int)second, (int)third);
220 goto out;
221 case SEMCTL: {
222 union semun fourth;
223 err = -EINVAL;
224 if (!ptr)
225 goto out;
226 err = -EFAULT;
227 if (get_user(fourth.__pad,
228 (void __user * __user *) ptr))
229 goto out;
230 err = sys_semctl(first, (int)second | IPC_64,
231 (int)third, fourth);
232 goto out;
233 }
234 default:
235 err = -ENOSYS;
236 goto out;
237 };
238 }
239 if (call <= MSGCTL) {
240 switch (call) {
241 case MSGSND:
242 err = sys_msgsnd(first, ptr, (size_t)second,
243 (int)third);
244 goto out;
245 case MSGRCV:
246 err = sys_msgrcv(first, ptr, (size_t)second, fifth,
247 (int)third);
248 goto out;
249 case MSGGET:
250 err = sys_msgget((key_t)first, (int)second);
251 goto out;
252 case MSGCTL:
253 err = sys_msgctl(first, (int)second | IPC_64, ptr);
254 goto out;
255 default:
256 err = -ENOSYS;
257 goto out;
258 };
259 }
260 if (call <= SHMCTL) {
261 switch (call) {
262 case SHMAT: {
263 ulong raddr;
264 err = do_shmat(first, ptr, (int)second, &raddr);
265 if (!err) {
266 if (put_user(raddr,
267 (ulong __user *) third))
268 err = -EFAULT;
269 }
270 goto out;
271 }
272 case SHMDT:
273 err = sys_shmdt(ptr);
274 goto out;
275 case SHMGET:
276 err = sys_shmget(first, (size_t)second, (int)third);
277 goto out;
278 case SHMCTL:
279 err = sys_shmctl(first, (int)second | IPC_64, ptr);
280 goto out;
281 default:
282 err = -ENOSYS;
283 goto out;
284 };
285 } else {
286 err = -ENOSYS;
287 }
288out:
289 return err;
290}
291
292asmlinkage long sparc64_newuname(struct new_utsname __user *name)
293{
294 int ret = sys_newuname(name);
295
296 if (current->personality == PER_LINUX32 && !ret) {
297 ret = (copy_to_user(name->machine, "sparc\0\0", 8)
298 ? -EFAULT : 0);
299 }
300 return ret;
301}
302
303asmlinkage long sparc64_personality(unsigned long personality)
304{
305 int ret;
306
307 if (current->personality == PER_LINUX32 &&
308 personality == PER_LINUX)
309 personality = PER_LINUX32;
310 ret = sys_personality(personality);
311 if (ret == PER_LINUX32)
312 ret = PER_LINUX;
313
314 return ret;
315}
316
317/* Linux version of mmap */
318asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
319 unsigned long prot, unsigned long flags, unsigned long fd,
320 unsigned long off)
321{
322 struct file * file = NULL;
323 unsigned long retval = -EBADF;
324
325 if (!(flags & MAP_ANONYMOUS)) {
326 file = fget(fd);
327 if (!file)
328 goto out;
329 }
330 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
331 len = PAGE_ALIGN(len);
332 retval = -EINVAL;
333
334 if (test_thread_flag(TIF_32BIT)) {
335 if (len > 0xf0000000UL ||
336 ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
337 goto out_putf;
338 } else {
339 if (len > -PAGE_OFFSET ||
340 ((flags & MAP_FIXED) &&
341 addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
342 goto out_putf;
343 }
344
345 down_write(&current->mm->mmap_sem);
346 retval = do_mmap(file, addr, len, prot, flags, off);
347 up_write(&current->mm->mmap_sem);
348
349out_putf:
350 if (file)
351 fput(file);
352out:
353 return retval;
354}
355
356asmlinkage long sys64_munmap(unsigned long addr, size_t len)
357{
358 long ret;
359
360 if (len > -PAGE_OFFSET ||
361 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
362 return -EINVAL;
363 down_write(&current->mm->mmap_sem);
364 ret = do_munmap(current->mm, addr, len);
365 up_write(&current->mm->mmap_sem);
366 return ret;
367}
368
369extern unsigned long do_mremap(unsigned long addr,
370 unsigned long old_len, unsigned long new_len,
371 unsigned long flags, unsigned long new_addr);
372
/* mremap(2), 64-bit tasks only (32-bit tasks get -EINVAL).  Validates
 * that neither the old nor the new range can touch the sparc64 VA
 * hole; when growing in place would cross into the hole and the
 * caller allows moving, picks a fresh target address itself and turns
 * the request into a MREMAP_FIXED move before calling do_mremap().
 */
373asmlinkage unsigned long sys64_mremap(unsigned long addr,
374 unsigned long old_len, unsigned long new_len,
375 unsigned long flags, unsigned long new_addr)
376{
377 struct vm_area_struct *vma;
378 unsigned long ret = -EINVAL;
379 if (test_thread_flag(TIF_32BIT))
380 goto out;
381 if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
382 goto out;
383 if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
384 goto out;
385 down_write(&current->mm->mmap_sem);
386 if (flags & MREMAP_FIXED) {
 /* Caller chose the target; just verify it avoids the hole. */
387 if (new_addr < PAGE_OFFSET &&
388 new_addr + new_len > -PAGE_OFFSET)
389 goto out_sem;
390 } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
 /* Growing in place would enter the hole: find a new spot. */
391 unsigned long map_flags = 0;
392 struct file *file = NULL;
393
394 ret = -ENOMEM;
395 if (!(flags & MREMAP_MAYMOVE))
396 goto out_sem;
397
 /* Mirror the existing vma's sharing and backing file so the
 * replacement area satisfies the same alignment constraints.
 */
398 vma = find_vma(current->mm, addr);
399 if (vma) {
400 if (vma->vm_flags & VM_SHARED)
401 map_flags |= MAP_SHARED;
402 file = vma->vm_file;
403 }
404
405 /* MREMAP_FIXED checked above. */
406 new_addr = get_unmapped_area(file, addr, new_len,
407 vma ? vma->vm_pgoff : 0,
408 map_flags);
409 ret = new_addr;
410 if (new_addr & ~PAGE_MASK)
411 goto out_sem;
412 flags |= MREMAP_FIXED;
413 }
414 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
415out_sem:
416 up_write(&current->mm->mmap_sem);
417out:
418 return ret;
419}
420
421/* we come to here via sys_nis_syscall so it can setup the regs argument */
422asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
423{
424 static int count;
425
426 /* Don't make the system unusable, if someone goes stuck */
427 if (count++ > 5)
428 return -ENOSYS;
429
430 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
431#ifdef DEBUG_UNIMP_SYSCALL
432 show_regs (regs);
433#endif
434
435 return -ENOSYS;
436}
437
438/* #define DEBUG_SPARC_BREAKPOINT */
439
440asmlinkage void sparc_breakpoint(struct pt_regs *regs)
441{
442 siginfo_t info;
443
444 if (test_thread_flag(TIF_32BIT)) {
445 regs->tpc &= 0xffffffff;
446 regs->tnpc &= 0xffffffff;
447 }
448#ifdef DEBUG_SPARC_BREAKPOINT
449 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
450#endif
451 info.si_signo = SIGTRAP;
452 info.si_errno = 0;
453 info.si_code = TRAP_BRKPT;
454 info.si_addr = (void __user *)regs->tpc;
455 info.si_trapno = 0;
456 force_sig_info(SIGTRAP, &info, current);
457#ifdef DEBUG_SPARC_BREAKPOINT
458 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
459#endif
460}
461
462extern void check_pending(int signum);
463
464asmlinkage long sys_getdomainname(char __user *name, int len)
465{
466 int nlen;
467 int err = -EFAULT;
468
469 down_read(&uts_sem);
470
471 nlen = strlen(system_utsname.domainname) + 1;
472
473 if (nlen < len)
474 len = nlen;
475 if (len > __NEW_UTS_LEN)
476 goto done;
477 if (copy_to_user(name, system_utsname.domainname, len))
478 goto done;
479 err = 0;
480done:
481 up_read(&uts_sem);
482 return err;
483}
484
/* Fallback entry for Solaris-compatibility traps when the solaris
 * emulation module is not loaded: step the saved PCs past the trap
 * instruction, warn (rate-limited to 5 messages), and kill the caller
 * with SIGSEGV.
 */
485asmlinkage long solaris_syscall(struct pt_regs *regs)
486{
487 static int count;
488
 /* Advance past the trapping instruction. */
489 regs->tpc = regs->tnpc;
490 regs->tnpc += 4;
491 if (test_thread_flag(TIF_32BIT)) {
 /* 32-bit tasks only have 32-bit program counters. */
492 regs->tpc &= 0xffffffff;
493 regs->tnpc &= 0xffffffff;
494 }
495 if (++count <= 5) {
496 printk ("For Solaris binary emulation you need solaris module loaded\n");
497 show_regs (regs);
498 }
499 send_sig(SIGSEGV, current, 1);
500
501 return -ENOSYS;
502}
503
504#ifndef CONFIG_SUNOS_EMUL
/* Stub used when SunOS emulation is not built in: skip the trap
 * instruction, warn (rate-limited to 20 messages), and deliver
 * SIGSEGV to the caller.
 */
505asmlinkage long sunos_syscall(struct pt_regs *regs)
506{
507 static int count;
508
 /* Advance past the trapping instruction. */
509 regs->tpc = regs->tnpc;
510 regs->tnpc += 4;
511 if (test_thread_flag(TIF_32BIT)) {
 /* 32-bit tasks only have 32-bit program counters. */
512 regs->tpc &= 0xffffffff;
513 regs->tnpc &= 0xffffffff;
514 }
515 if (++count <= 20)
516 printk ("SunOS binary emulation not compiled in\n");
517 force_sig(SIGSEGV, current);
518
519 return -ENOSYS;
520}
521#endif
522
/* Install or query a per-thread user trap handler (SPARC V9 utraps).
 * 'type' selects the trap slot; UTH_NOCHANGE in new_p means query
 * only.  The previous precise handler is reported through old_p and
 * (always NULL here — deferred handlers are unsupported) through
 * old_d.  The utraps table uses element [0] as a reference count; a
 * table with count > 1 is copied before modification so the change
 * stays private to this thread (presumably shared on clone — the
 * sharing site is not visible in this file).
 */
523asmlinkage long sys_utrap_install(utrap_entry_t type,
524 utrap_handler_t new_p,
525 utrap_handler_t new_d,
526 utrap_handler_t __user *old_p,
527 utrap_handler_t __user *old_d)
528{
529 if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
530 return -EINVAL;
 /* Query-only path: report current handlers, change nothing. */
531 if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
532 if (old_p) {
533 if (!current_thread_info()->utraps) {
534 if (put_user(NULL, old_p))
535 return -EFAULT;
536 } else {
537 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
538 return -EFAULT;
539 }
540 }
541 if (old_d) {
542 if (put_user(NULL, old_d))
543 return -EFAULT;
544 }
545 return 0;
546 }
 /* First installation: allocate a fresh table, refcount = 1. */
547 if (!current_thread_info()->utraps) {
548 current_thread_info()->utraps =
549 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
550 if (!current_thread_info()->utraps)
551 return -ENOMEM;
552 current_thread_info()->utraps[0] = 1;
553 memset(current_thread_info()->utraps+1, 0,
554 UT_TRAP_INSTRUCTION_31*sizeof(long));
555 } else {
 /* Copy-on-write: if the table is shared (refcount > 1) and
 * this install would actually change it, clone it first.
 */
556 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
557 current_thread_info()->utraps[0] > 1) {
558 long *p = current_thread_info()->utraps;
559
560 current_thread_info()->utraps =
561 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
562 GFP_KERNEL);
563 if (!current_thread_info()->utraps) {
 /* Allocation failed: keep using the shared table. */
564 current_thread_info()->utraps = p;
565 return -ENOMEM;
566 }
567 p[0]--;
568 current_thread_info()->utraps[0] = 1;
569 memcpy(current_thread_info()->utraps+1, p+1,
570 UT_TRAP_INSTRUCTION_31*sizeof(long));
571 }
572 }
 /* Report the handler being replaced, then install the new one. */
573 if (old_p) {
574 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
575 return -EFAULT;
576 }
577 if (old_d) {
578 if (put_user(NULL, old_d))
579 return -EFAULT;
580 }
581 current_thread_info()->utraps[type] = (long)new_p;
582
583 return 0;
584}
585
586long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
587{
588 if (model >= 3)
589 return -EINVAL;
590 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
591 return 0;
592}
593
594asmlinkage long sys_rt_sigaction(int sig,
595 const struct sigaction __user *act,
596 struct sigaction __user *oact,
597 void __user *restorer,
598 size_t sigsetsize)
599{
600 struct k_sigaction new_ka, old_ka;
601 int ret;
602
603 /* XXX: Don't preclude handling different sized sigset_t's. */
604 if (sigsetsize != sizeof(sigset_t))
605 return -EINVAL;
606
607 if (act) {
608 new_ka.ka_restorer = restorer;
609 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
610 return -EFAULT;
611 }
612
613 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
614
615 if (!ret && oact) {
616 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
617 return -EFAULT;
618 }
619
620 return ret;
621}
622
623/* Invoked by rtrap code to update performance counters in
624 * user space.
625 */
626asmlinkage void update_perfctrs(void)
627{
628 unsigned long pic, tmp;
629
630 read_pic(pic);
 /* Low 32 bits of %pic accumulate into counter 0, high 32 bits
 * into counter 1; the running totals are mirrored to the user
 * buffers registered via sys_perfctr(PERFCTR_ON).  __put_user
 * failures are deliberately ignored here (void context).
 */
631 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
632 __put_user(tmp, current_thread_info()->user_cntd0);
633 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
634 __put_user(tmp, current_thread_info()->user_cntd1);
635 reset_pic();
636}
637
/* Control interface for the UltraSPARC performance counters.
 * PERFCTR_ON registers two user u64 buffers and a %pcr value and
 * starts counting; all other opcodes require counting to be active
 * (TIF_PERFCTR set) and return -EINVAL otherwise.  Returns 0 on
 * success, -EINVAL for bad opcodes/state; __put_user/__get_user
 * failures are OR-ed into the return value.
 */
638asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
639{
640 int err = 0;
641
642 switch(opcode) {
643 case PERFCTR_ON:
 /* arg2 = %pcr value, arg0/arg1 = user result buffers. */
644 current_thread_info()->pcr_reg = arg2;
645 current_thread_info()->user_cntd0 = (u64 __user *) arg0;
646 current_thread_info()->user_cntd1 = (u64 __user *) arg1;
647 current_thread_info()->kernel_cntd0 =
648 current_thread_info()->kernel_cntd1 = 0;
649 write_pcr(arg2);
650 reset_pic();
651 set_thread_flag(TIF_PERFCTR);
652 break;
653
654 case PERFCTR_OFF:
655 err = -EINVAL;
656 if (test_thread_flag(TIF_PERFCTR)) {
657 current_thread_info()->user_cntd0 =
658 current_thread_info()->user_cntd1 = NULL;
659 current_thread_info()->pcr_reg = 0;
660 write_pcr(0);
661 clear_thread_flag(TIF_PERFCTR);
662 err = 0;
663 }
664 break;
665
666 case PERFCTR_READ: {
 /* Fold the current %pic deltas into the running totals and
 * push them to the registered user buffers (low 32 bits ->
 * counter 0, high 32 bits -> counter 1), then clear %pic.
 */
667 unsigned long pic, tmp;
668
669 if (!test_thread_flag(TIF_PERFCTR)) {
670 err = -EINVAL;
671 break;
672 }
673 read_pic(pic);
674 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
675 err |= __put_user(tmp, current_thread_info()->user_cntd0);
676 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
677 err |= __put_user(tmp, current_thread_info()->user_cntd1);
678 reset_pic();
679 break;
680 }
681
682 case PERFCTR_CLRPIC:
 /* Zero the accumulated totals and the hardware %pic. */
683 if (!test_thread_flag(TIF_PERFCTR)) {
684 err = -EINVAL;
685 break;
686 }
687 current_thread_info()->kernel_cntd0 =
688 current_thread_info()->kernel_cntd1 = 0;
689 reset_pic();
690 break;
691
692 case PERFCTR_SETPCR: {
 /* Load a new %pcr value from user memory and restart counting
 * from zero.
 */
693 u64 __user *user_pcr = (u64 __user *)arg0;
694
695 if (!test_thread_flag(TIF_PERFCTR)) {
696 err = -EINVAL;
697 break;
698 }
699 err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
700 write_pcr(current_thread_info()->pcr_reg);
701 current_thread_info()->kernel_cntd0 =
702 current_thread_info()->kernel_cntd1 = 0;
703 reset_pic();
704 break;
705 }
706
707 case PERFCTR_GETPCR: {
 /* Report the cached %pcr value to user memory. */
708 u64 __user *user_pcr = (u64 __user *)arg0;
709
710 if (!test_thread_flag(TIF_PERFCTR)) {
711 err = -EINVAL;
712 break;
713 }
714 err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
715 break;
716 }
717
718 default:
719 err = -EINVAL;
720 break;
721 };
722 return err;
723}
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
new file mode 100644
index 000000000000..567c91c77b20
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -0,0 +1,1118 @@
1/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
2 * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * These routines maintain argument size conversion between 32bit and 64bit
8 * environment.
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/file.h>
17#include <linux/signal.h>
18#include <linux/resource.h>
19#include <linux/times.h>
20#include <linux/utsname.h>
21#include <linux/timex.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/sem.h>
25#include <linux/msg.h>
26#include <linux/shm.h>
27#include <linux/slab.h>
28#include <linux/uio.h>
29#include <linux/nfs_fs.h>
30#include <linux/quota.h>
31#include <linux/module.h>
32#include <linux/sunrpc/svc.h>
33#include <linux/nfsd/nfsd.h>
34#include <linux/nfsd/cache.h>
35#include <linux/nfsd/xdr.h>
36#include <linux/nfsd/syscall.h>
37#include <linux/poll.h>
38#include <linux/personality.h>
39#include <linux/stat.h>
40#include <linux/filter.h>
41#include <linux/highmem.h>
42#include <linux/highuid.h>
43#include <linux/mman.h>
44#include <linux/ipv6.h>
45#include <linux/in.h>
46#include <linux/icmpv6.h>
47#include <linux/syscalls.h>
48#include <linux/sysctl.h>
49#include <linux/binfmts.h>
50#include <linux/dnotify.h>
51#include <linux/security.h>
52#include <linux/compat.h>
53#include <linux/vfs.h>
54#include <linux/netfilter_ipv4/ip_tables.h>
55#include <linux/ptrace.h>
56#include <linux/highuid.h>
57
58#include <asm/types.h>
59#include <asm/ipc.h>
60#include <asm/uaccess.h>
61#include <asm/fpumacro.h>
62#include <asm/semaphore.h>
63#include <asm/mmu_context.h>
64
65asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
66{
67 return sys_chown(filename, low2highuid(user), low2highgid(group));
68}
69
70asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
71{
72 return sys_lchown(filename, low2highuid(user), low2highgid(group));
73}
74
75asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
76{
77 return sys_fchown(fd, low2highuid(user), low2highgid(group));
78}
79
80asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
81{
82 return sys_setregid(low2highgid(rgid), low2highgid(egid));
83}
84
85asmlinkage long sys32_setgid16(u16 gid)
86{
87 return sys_setgid((gid_t)gid);
88}
89
90asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
91{
92 return sys_setreuid(low2highuid(ruid), low2highuid(euid));
93}
94
95asmlinkage long sys32_setuid16(u16 uid)
96{
97 return sys_setuid((uid_t)uid);
98}
99
100asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
101{
102 return sys_setresuid(low2highuid(ruid), low2highuid(euid),
103 low2highuid(suid));
104}
105
106asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
107{
108 int retval;
109
110 if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
111 !(retval = put_user(high2lowuid(current->euid), euid)))
112 retval = put_user(high2lowuid(current->suid), suid);
113
114 return retval;
115}
116
117asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
118{
119 return sys_setresgid(low2highgid(rgid), low2highgid(egid),
120 low2highgid(sgid));
121}
122
123asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
124{
125 int retval;
126
127 if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
128 !(retval = put_user(high2lowgid(current->egid), egid)))
129 retval = put_user(high2lowgid(current->sgid), sgid);
130
131 return retval;
132}
133
134asmlinkage long sys32_setfsuid16(u16 uid)
135{
136 return sys_setfsuid((uid_t)uid);
137}
138
139asmlinkage long sys32_setfsgid16(u16 gid)
140{
141 return sys_setfsgid((gid_t)gid);
142}
143
144static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
145{
146 int i;
147 u16 group;
148
149 for (i = 0; i < group_info->ngroups; i++) {
150 group = (u16)GROUP_AT(group_info, i);
151 if (put_user(group, grouplist+i))
152 return -EFAULT;
153 }
154
155 return 0;
156}
157
158static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
159{
160 int i;
161 u16 group;
162
163 for (i = 0; i < group_info->ngroups; i++) {
164 if (get_user(group, grouplist+i))
165 return -EFAULT;
166 GROUP_AT(group_info, i) = (gid_t)group;
167 }
168
169 return 0;
170}
171
172asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
173{
174 int i;
175
176 if (gidsetsize < 0)
177 return -EINVAL;
178
179 get_group_info(current->group_info);
180 i = current->group_info->ngroups;
181 if (gidsetsize) {
182 if (i > gidsetsize) {
183 i = -EINVAL;
184 goto out;
185 }
186 if (groups16_to_user(grouplist, current->group_info)) {
187 i = -EFAULT;
188 goto out;
189 }
190 }
191out:
192 put_group_info(current->group_info);
193 return i;
194}
195
196asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
197{
198 struct group_info *group_info;
199 int retval;
200
201 if (!capable(CAP_SETGID))
202 return -EPERM;
203 if ((unsigned)gidsetsize > NGROUPS_MAX)
204 return -EINVAL;
205
206 group_info = groups_alloc(gidsetsize);
207 if (!group_info)
208 return -ENOMEM;
209 retval = groups16_from_user(group_info, grouplist);
210 if (retval) {
211 put_group_info(group_info);
212 return retval;
213 }
214
215 retval = set_current_groups(group_info);
216 put_group_info(group_info);
217
218 return retval;
219}
220
221asmlinkage long sys32_getuid16(void)
222{
223 return high2lowuid(current->uid);
224}
225
226asmlinkage long sys32_geteuid16(void)
227{
228 return high2lowuid(current->euid);
229}
230
231asmlinkage long sys32_getgid16(void)
232{
233 return high2lowgid(current->gid);
234}
235
236asmlinkage long sys32_getegid16(void)
237{
238 return high2lowgid(current->egid);
239}
240
241/* 32-bit timeval and related flotsam. */
242
/* Read a 32-bit userspace timeval into a kernel struct timeval.
 * Returns non-zero on bad pointer or fault.  Note the bitwise '|':
 * both __get_user() calls are always performed, deliberately avoiding
 * short-circuit so the access pattern is uniform. */
static long get_tv32(struct timeval *o, struct compat_timeval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) |
		 __get_user(o->tv_usec, &i->tv_usec)));
}
249
/* Write a kernel struct timeval out as a 32-bit userspace timeval.
 * Returns non-zero on bad pointer or fault.  As in get_tv32(), the
 * bitwise '|' intentionally performs both stores' fault checks. */
static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) |
		 __put_user(i->tv_usec, &o->tv_usec)));
}
256
257#ifdef CONFIG_SYSVIPC
/* 32-bit ipc(2) multiplexer for the SysV IPC family.  ids, keys and
 * message types lost their sign extension crossing the 32-bit ABI, so
 * each case re-sign-extends them ((int) casts) before calling the
 * native or compat handler.  The high 16 bits of 'call' carry an
 * interface version.  Returns the callee's result, or -ENOSYS for an
 * unknown sub-call. */
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid */
			return compat_sys_semtimedop((int)first,
						     compat_ptr(ptr), second,
						     compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign extend semid, semnum */
		return compat_sys_semctl((int)first, (int)second, third,
					 compat_ptr(ptr));

	case MSGSND:
		/* sign extend msqid */
		return compat_sys_msgsnd((int)first, (int)second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		/* sign extend msqid, msgtyp */
		return compat_sys_msgrcv((int)first, second, (int)fifth,
					 third, version, compat_ptr(ptr));
	case MSGGET:
		/* sign extend key */
		return sys_msgget((int)first, second);
	case MSGCTL:
		/* sign extend msqid */
		return compat_sys_msgctl((int)first, second, compat_ptr(ptr));

	case SHMAT:
		/* sign extend shmid */
		return compat_sys_shmat((int)first, second, third, version,
					compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		/* sign extend key_t */
		return sys_shmget((int)first, second, third);
	case SHMCTL:
		/* sign extend shmid */
		return compat_sys_shmctl((int)first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	};

	return -ENOSYS;
}
320#endif
321
322asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
323{
324 if ((int)high < 0)
325 return -EINVAL;
326 else
327 return sys_truncate(path, (high << 32) | low);
328}
329
330asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
331{
332 if ((int)high < 0)
333 return -EINVAL;
334 else
335 return sys_ftruncate(fd, (high << 32) | low);
336}
337
/* Fill a 32-bit userspace 'struct compat_stat' from a kernel kstat.
 * Fails with -EOVERFLOW when the file size needs large-file support
 * or a device number does not fit the old encoding; otherwise returns
 * the accumulated put_user() error (non-zero on fault). */
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	int err;

	if (stat->size > MAX_NON_LFS || !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(stat->ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	/* uids/gids are truncated to 16 bits in the compat layout */
	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= put_user(stat->size, &statbuf->st_size);
	/* only whole-second timestamps; the __unusedN slots are zeroed */
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(0, &statbuf->__unused1);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(0, &statbuf->__unused2);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(0, &statbuf->__unused3);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	err |= put_user(0, &statbuf->__unused4[0]);
	err |= put_user(0, &statbuf->__unused4[1]);

	return err;
}
367
368asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
369{
370 return sys_sysfs(option, arg1, arg2);
371}
372
/* 32-bit layout of struct sysinfo as seen by compat userspace; must
 * mirror the native struct field-for-field with 32-bit slots. */
struct sysinfo32 {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        unsigned short procs;
        unsigned short pad;
        u32 totalhigh;
        u32 freehigh;
        u32 mem_unit;
        char _f[20-2*sizeof(int)-sizeof(int)];	/* historical padding */
};
389
/* 32-bit sysinfo(2).  Calls the native handler into a kernel buffer
 * (under set_fs(KERNEL_DS) so its copy_to_user targets kernel space),
 * then narrows each field into the userspace sysinfo32.  When a
 * memory count would overflow 32 bits, all counts are rescaled by
 * growing mem_unit, preserving byte totals. */
asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
{
	struct sysinfo s;
	int ret, err;
	int bitcount = 0;
	mm_segment_t old_fs = get_fs ();

	set_fs(KERNEL_DS);
	ret = sys_sysinfo((struct sysinfo __user *) &s);
	set_fs(old_fs);
	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}
		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	/* first access verifies the whole struct via put_user; the
	 * rest can use the cheaper __put_user */
	err = put_user (s.uptime, &info->uptime);
	err |= __put_user (s.loads[0], &info->loads[0]);
	err |= __put_user (s.loads[1], &info->loads[1]);
	err |= __put_user (s.loads[2], &info->loads[2]);
	err |= __put_user (s.totalram, &info->totalram);
	err |= __put_user (s.freeram, &info->freeram);
	err |= __put_user (s.sharedram, &info->sharedram);
	err |= __put_user (s.bufferram, &info->bufferram);
	err |= __put_user (s.totalswap, &info->totalswap);
	err |= __put_user (s.freeswap, &info->freeswap);
	err |= __put_user (s.procs, &info->procs);
	err |= __put_user (s.totalhigh, &info->totalhigh);
	err |= __put_user (s.freehigh, &info->freehigh);
	err |= __put_user (s.mem_unit, &info->mem_unit);
	if (err)
		return -EFAULT;
	return ret;
}
436
/* 32-bit sched_rr_get_interval(2): run the native call into a kernel
 * timespec (under KERNEL_DS), then convert to the compat layout. */
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
450
/* 32-bit rt_sigprocmask(2).  The compat sigset stores each 64-bit
 * sigset word as two 32-bit halves; the switch ladders below
 * deliberately fall through to assemble/split all _NSIG_WORDS words.
 * The native call runs under KERNEL_DS against the local sigset. */
asmlinkage long compat_sys_rt_sigprocmask(int how,
					  compat_sigset_t __user *set,
					  compat_sigset_t __user *oset,
					  compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		/* fallthrough on every case: assemble words high to low */
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __user *) &s : NULL,
				 oset ? (sigset_t __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		/* fallthrough: split each 64-bit word into two halves */
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
490
/* 32-bit rt_sigpending(2): run the native call into a kernel sigset
 * (under KERNEL_DS), then split each 64-bit word into two 32-bit
 * halves for the compat layout (fallthrough switch is intentional). */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				    compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
514
/* 32-bit rt_sigqueueinfo(2): translate the compat siginfo to the
 * native layout, then invoke the native call under KERNEL_DS. */
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
					   struct compat_siginfo __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
530
/* 32-bit old-style sigaction(2).  A negative signal number is the
 * sparc convention for "use new-style signals": it sets
 * TIF_NEWSIGNALS and is then negated back.  Handler/restorer arrive
 * as 32-bit user pointers and are widened via compat_ptr(). */
asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
				     struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (sig < 0) {
		set_thread_flag(TIF_NEWSIGNALS);
		sig = -sig;
	}

	if (act) {
		compat_old_sigset_t mask;
		u32 u_handler, u_restorer;

		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(u_handler);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(mask, &act->sa_mask);
		if (ret)
			return ret;
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* old action reported back in narrowed (32-bit) form */
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
569
/* 32-bit rt_sigaction(2).  Converts the compat sigaction (split
 * sigset words, 32-bit handler/restorer pointers) to the native
 * k_sigaction and back.  The extra 'restorer' argument is the
 * sparc-specific trampoline recorded in ka_restorer.  Fallthrough
 * switches assemble/split all _NSIG_WORDS sigset words. */
asmlinkage long compat_sys_rt_sigaction(int sig,
					struct sigaction32 __user *act,
					struct sigaction32 __user *oact,
					void __user *restorer,
					compat_size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	/* All tasks which use RT signals (effectively) use
	 * new style signals.
	 */
	set_thread_flag(TIF_NEWSIGNALS);

	if (act) {
		u32 u_handler, u_restorer;

		new_ka.ka_restorer = restorer;
		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(u_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
                if (ret)
                	return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		switch (_NSIG_WORDS) {
		case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
		case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
		case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
		case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		if (ret)
			ret = -EFAULT;
	}

	return ret;
}
628
629/*
630 * sparc32_execve() executes a new program after the asm stub has set
631 * things up for us. This should basically do what I want it to.
632 */
/* execve(2) entry for 32-bit tasks, called from the asm stub with the
 * trap-time register window.  A zero %g1 marks the indirect-syscall
 * calling convention, which shifts the argument registers up by one.
 * On success, the FPU state is reset and single-step tracing cleared. */
asmlinkage long sparc32_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if ((u32)regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = compat_do_execve(filename,
				 compat_ptr(regs->u_regs[base + UREG_I1]),
				 compat_ptr(regs->u_regs[base + UREG_I2]), regs);

	putname(filename);

	if (!error) {
		/* fresh image: drop FPU enable and saved FP state */
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
out:
	return error;
}
667
#ifdef CONFIG_MODULES

/* 32-bit init_module(2): umod points at the module image in user
 * memory; args are unmodified and passed straight through. */
asmlinkage long sys32_init_module(void __user *umod, u32 len,
				  const char __user *uargs)
{
	return sys_init_module(umod, len, uargs);
}

/* 32-bit delete_module(2): straight pass-through. */
asmlinkage long sys32_delete_module(const char __user *name_user,
				    unsigned int flags)
{
	return sys_delete_module(name_user, flags);
}

#else /* CONFIG_MODULES */

/* NOTE(review): these -ENOSYS stubs carry different signatures from
 * the CONFIG_MODULES versions above; harmless since the asm syscall
 * table does not type-check, but worth unifying. */
asmlinkage long sys32_init_module(const char __user *name_user,
				  struct module __user *mod_user)
{
	return -ENOSYS;
}

asmlinkage long sys32_delete_module(const char __user *name_user)
{
	return -ENOSYS;
}

#endif /* CONFIG_MODULES */
696
697/* Translations due to time_t size differences. Which affects all
698 sorts of things, like timeval and itimerval. */
699
700extern struct timezone sys_tz;
701
702asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
703 struct timezone __user *tz)
704{
705 if (tv) {
706 struct timeval ktv;
707 do_gettimeofday(&ktv);
708 if (put_tv32(tv, &ktv))
709 return -EFAULT;
710 }
711 if (tz) {
712 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
713 return -EFAULT;
714 }
715 return 0;
716}
717
718static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
719{
720 long usec;
721
722 if (!access_ok(VERIFY_READ, i, sizeof(*i)))
723 return -EFAULT;
724 if (__get_user(o->tv_sec, &i->tv_sec))
725 return -EFAULT;
726 if (__get_user(usec, &i->tv_usec))
727 return -EFAULT;
728 o->tv_nsec = usec * 1000;
729 return 0;
730}
731
732asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
733 struct timezone __user *tz)
734{
735 struct timespec kts;
736 struct timezone ktz;
737
738 if (tv) {
739 if (get_ts32(&kts, tv))
740 return -EFAULT;
741 }
742 if (tz) {
743 if (copy_from_user(&ktz, tz, sizeof(ktz)))
744 return -EFAULT;
745 }
746
747 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
748}
749
750asmlinkage long sys32_utimes(char __user *filename,
751 struct compat_timeval __user *tvs)
752{
753 struct timeval ktvs[2];
754
755 if (tvs) {
756 if (get_tv32(&ktvs[0], tvs) ||
757 get_tv32(&ktvs[1], 1+tvs))
758 return -EFAULT;
759 }
760
761 return do_utimes(filename, (tvs ? &ktvs[0] : NULL));
762}
763
/* These are here just in case some old sparc32 binary calls it. */
/* Classic pause(2): sleep interruptibly until a signal arrives; the
 * -ERESTARTNOHAND return makes signal delivery, not restart, resume
 * the task. */
asmlinkage long sys32_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
771
772asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
773 char __user *ubuf,
774 compat_size_t count,
775 unsigned long poshi,
776 unsigned long poslo)
777{
778 return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
779}
780
781asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
782 char __user *ubuf,
783 compat_size_t count,
784 unsigned long poshi,
785 unsigned long poslo)
786{
787 return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
788}
789
790asmlinkage long compat_sys_readahead(int fd,
791 unsigned long offhi,
792 unsigned long offlo,
793 compat_size_t count)
794{
795 return sys_readahead(fd, (offhi << 32) | offlo, count);
796}
797
798long compat_sys_fadvise64(int fd,
799 unsigned long offhi,
800 unsigned long offlo,
801 compat_size_t len, int advice)
802{
803 return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
804}
805
806long compat_sys_fadvise64_64(int fd,
807 unsigned long offhi, unsigned long offlo,
808 unsigned long lenhi, unsigned long lenlo,
809 int advice)
810{
811 return sys_fadvise64_64(fd,
812 (offhi << 32) | offlo,
813 (lenhi << 32) | lenlo,
814 advice);
815}
816
/* 32-bit sendfile(2).  The 32-bit offset is staged in a kernel off_t
 * and the native call runs under KERNEL_DS so its user accesses hit
 * the kernel copy; the (possibly advanced) offset is written back to
 * userspace afterwards. */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
				    compat_off_t __user *offset,
				    compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __user *) &of : NULL,
			   count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
839
/* 32-bit sendfile64(2): same KERNEL_DS staging as compat_sys_sendfile
 * but with a 64-bit loff_t offset. */
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
				      compat_loff_t __user *offset,
				      compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
862
863/* Handle adjtimex compatibility. */
864
/* 32-bit layout of struct timex for adjtimex(2); the unnamed ':32'
 * bitfields reserve the trailing padding words of the native struct. */
struct timex32 {
	u32 modes;
	s32 offset, freq, maxerror, esterror;
	s32 status, constant, precision, tolerance;
	struct compat_timeval time;
	s32 tick;
	s32 ppsfreq, jitter, shift, stabil;
	s32 jitcnt, calcnt, errcnt, stbcnt;
	s32 :32; s32 :32; s32 :32; s32 :32;
	s32 :32; s32 :32; s32 :32; s32 :32;
	s32 :32; s32 :32; s32 :32; s32 :32;
};
877
878extern int do_adjtimex(struct timex *);
879
/* 32-bit adjtimex(2): widen every timex32 field into a zeroed kernel
 * struct timex, run do_adjtimex(), then narrow the (updated) fields
 * back out.  The struct is read-write for userspace: results are
 * always written back, even on clock-state returns. */
asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
{
	struct timex txc;
	int ret;

	memset(&txc, 0, sizeof(struct timex));

	/* first get_user validates the pointer; the rest use __get_user */
	if (get_user(txc.modes, &utp->modes) ||
	    __get_user(txc.offset, &utp->offset) ||
	    __get_user(txc.freq, &utp->freq) ||
	    __get_user(txc.maxerror, &utp->maxerror) ||
	    __get_user(txc.esterror, &utp->esterror) ||
	    __get_user(txc.status, &utp->status) ||
	    __get_user(txc.constant, &utp->constant) ||
	    __get_user(txc.precision, &utp->precision) ||
	    __get_user(txc.tolerance, &utp->tolerance) ||
	    __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	    __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	    __get_user(txc.tick, &utp->tick) ||
	    __get_user(txc.ppsfreq, &utp->ppsfreq) ||
	    __get_user(txc.jitter, &utp->jitter) ||
	    __get_user(txc.shift, &utp->shift) ||
	    __get_user(txc.stabil, &utp->stabil) ||
	    __get_user(txc.jitcnt, &utp->jitcnt) ||
	    __get_user(txc.calcnt, &utp->calcnt) ||
	    __get_user(txc.errcnt, &utp->errcnt) ||
	    __get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	if (put_user(txc.modes, &utp->modes) ||
	    __put_user(txc.offset, &utp->offset) ||
	    __put_user(txc.freq, &utp->freq) ||
	    __put_user(txc.maxerror, &utp->maxerror) ||
	    __put_user(txc.esterror, &utp->esterror) ||
	    __put_user(txc.status, &utp->status) ||
	    __put_user(txc.constant, &utp->constant) ||
	    __put_user(txc.precision, &utp->precision) ||
	    __put_user(txc.tolerance, &utp->tolerance) ||
	    __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
	    __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
	    __put_user(txc.tick, &utp->tick) ||
	    __put_user(txc.ppsfreq, &utp->ppsfreq) ||
	    __put_user(txc.jitter, &utp->jitter) ||
	    __put_user(txc.shift, &utp->shift) ||
	    __put_user(txc.stabil, &utp->stabil) ||
	    __put_user(txc.jitcnt, &utp->jitcnt) ||
	    __put_user(txc.calcnt, &utp->calcnt) ||
	    __put_user(txc.errcnt, &utp->errcnt) ||
	    __put_user(txc.stbcnt, &utp->stbcnt))
		ret = -EFAULT;

	return ret;
}
935
936/* This is just a version for 32-bit applications which does
937 * not force O_LARGEFILE on.
938 */
939
/* open(2) for 32-bit tasks: identical to the native path except that
 * O_LARGEFILE is not forced on.  On filp_open failure the reserved fd
 * is released and the error code returned; note the 'out' label sits
 * inside the success block so the error path still reaches putname(). */
asmlinkage long sparc32_open(const char __user *filename,
			     int flags, int mode)
{
	char * tmp;
	int fd, error;

	tmp = getname(filename);
	fd = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {
		fd = get_unused_fd();
		if (fd >= 0) {
			struct file * f = filp_open(tmp, flags, mode);
			error = PTR_ERR(f);
			if (IS_ERR(f))
				goto out_error;
			fd_install(fd, f);
		}
out:
		putname(tmp);
	}
	return fd;

out_error:
	put_unused_fd(fd);
	fd = error;
	goto out;
}
967
968extern unsigned long do_mremap(unsigned long addr,
969 unsigned long old_len, unsigned long new_len,
970 unsigned long flags, unsigned long new_addr);
971
/* mremap(2) for 32-bit tasks.  Enforces the 32-bit task address-space
 * limit (0xf0000000, the bound used throughout this file's checks) on
 * the old, new and fixed target ranges.  If growing in place would
 * cross the limit and MREMAP_MAYMOVE is set, a new area below the
 * limit is picked with get_unmapped_area() and the move is forced by
 * adding MREMAP_FIXED.  Runs under mmap_sem held for writing. */
asmlinkage unsigned long sys32_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, u32 __new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long new_addr = __new_addr;

	if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
		goto out;
	if (addr > 0xf0000000UL - old_len)
		goto out;
	down_write(&current->mm->mmap_sem);
	if (flags & MREMAP_FIXED) {
		if (new_addr > 0xf0000000UL - new_len)
			goto out_sem;
	} else if (addr > 0xf0000000UL - new_len) {
		unsigned long map_flags = 0;
		struct file *file = NULL;

		ret = -ENOMEM;
		if (!(flags & MREMAP_MAYMOVE))
			goto out_sem;

		/* mirror the old vma's sharing mode in the search */
		vma = find_vma(current->mm, addr);
		if (vma) {
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;
			file = vma->vm_file;
		}

		/* MREMAP_FIXED checked above. */
		new_addr = get_unmapped_area(file, addr, new_len,
					     vma ? vma->vm_pgoff : 0,
					     map_flags);
		ret = new_addr;
		if (new_addr & ~PAGE_MASK)
			goto out_sem;
		flags |= MREMAP_FIXED;
	}
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
1018
/* 32-bit layout of the sysctl(2) argument block: every pointer is a
 * 32-bit user address.  __unused doubles as scratch space the compat
 * wrapper borrows for a 64-bit oldlen (see sys32_sysctl). */
struct __sysctl_args32 {
	u32 name;
	int nlen;
	u32 oldval;
	u32 oldlenp;
	u32 newval;
	u32 newlen;
	u32 __unused[4];
};
1028
/* 32-bit _sysctl(2).  do_sysctl() wants a userspace size_t *oldlenp,
 * but the compat caller supplies a u32 *.  The wrapper stages a
 * 64-bit length in the (8-byte-aligned) __unused area of the caller's
 * own argument struct — userspace memory, so do_sysctl's uaccess
 * works — and copies the length in and out around the call. */
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
{
#ifndef CONFIG_SYSCTL
	return -ENOSYS;
#else
	struct __sysctl_args32 tmp;
	int error;
	size_t oldlen, __user *oldlenp = NULL;
	/* 8-byte-align a slot inside args->__unused for the size_t */
	unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	if (tmp.oldval && tmp.oldlenp) {
		/* Duh, this is ugly and might not work if sysctl_args
		   is in read-only memory, but do_sysctl does indirectly
		   a lot of uaccess in both directions and we'd have to
		   basically copy the whole sysctl.c here, and
		   glibc's __sysctl uses rw memory for the structure
		   anyway. */
		if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
		    put_user(oldlen, (size_t __user *)addr))
			return -EFAULT;
		oldlenp = (size_t __user *)addr;
	}

	lock_kernel();
	error = do_sysctl((int __user *)(unsigned long) tmp.name,
			  tmp.nlen,
			  (void __user *)(unsigned long) tmp.oldval,
			  oldlenp,
			  (void __user *)(unsigned long) tmp.newval,
			  tmp.newlen);
	unlock_kernel();
	if (oldlenp) {
		if (!error) {
			/* narrow the resulting length back to the u32 slot */
			if (get_user(oldlen, (size_t __user *)addr) ||
			    put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
				error = -EFAULT;
		}
		/* restore the scratch area we borrowed */
		if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
			error = -EFAULT;
	}
	return error;
#endif
}
1075
1076long sys32_lookup_dcookie(unsigned long cookie_high,
1077 unsigned long cookie_low,
1078 char __user *buf, size_t len)
1079{
1080 return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
1081 buf, len);
1082}
1083
1084extern asmlinkage long
1085sys_timer_create(clockid_t which_clock,
1086 struct sigevent __user *timer_event_spec,
1087 timer_t __user *created_timer_id);
1088
/* 32-bit timer_create(2).  With no sigevent the native call is used
 * directly; otherwise the compat sigevent is widened and the native
 * call runs under KERNEL_DS against kernel copies, with the resulting
 * timer id copied back to the (pre-verified) user pointer. */
long
sys32_timer_create(u32 clock, struct compat_sigevent __user *se32,
		   timer_t __user *timer_id)
{
	struct sigevent se;
	mm_segment_t oldfs;
	timer_t t;
	long err;

	if (se32 == NULL)
		return sys_timer_create(clock, NULL, timer_id);

	if (get_compat_sigevent(&se, se32))
		return -EFAULT;

	/* verify the write target up front so __put_user is safe below */
	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_create(clock,
			       (struct sigevent __user *) &se,
			       (timer_t __user *) &t);
	set_fs(oldfs);

	if (!err)
		err = __put_user (t, timer_id);

	return err;
}
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
new file mode 100644
index 000000000000..d0592ed54ea5
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -0,0 +1,1343 @@
1/* $Id: sys_sunos32.c,v 1.64 2002/02/09 19:49:31 davem Exp $
2 * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
3 *
4 * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 *
7 * Based upon preliminary work which is:
8 *
9 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/types.h>
15#include <linux/compat.h>
16#include <linux/mman.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/resource.h>
22#include <linux/ipc.h>
23#include <linux/shm.h>
24#include <linux/msg.h>
25#include <linux/sem.h>
26#include <linux/signal.h>
27#include <linux/uio.h>
28#include <linux/utsname.h>
29#include <linux/major.h>
30#include <linux/stat.h>
31#include <linux/slab.h>
32#include <linux/pagemap.h>
33#include <linux/errno.h>
34#include <linux/smp.h>
35#include <linux/smp_lock.h>
36#include <linux/syscalls.h>
37
38#include <asm/uaccess.h>
39#include <asm/page.h>
40#include <asm/pgtable.h>
41#include <asm/pconf.h>
42#include <asm/idprom.h> /* for gethostid() */
43#include <asm/unistd.h>
44#include <asm/system.h>
45
46/* For the nfs mount emulation */
47#include <linux/socket.h>
48#include <linux/in.h>
49#include <linux/nfs.h>
50#include <linux/nfs2.h>
51#include <linux/nfs_mount.h>
52
53/* for sunos_select */
54#include <linux/time.h>
55#include <linux/personality.h>
56
57/* For SOCKET_I */
58#include <linux/socket.h>
59#include <net/sock.h>
60#include <net/compat.h>
61
62#define SUNOS_NR_OPEN 256
63
/* SunOS mmap() emulation.  All arguments are 32-bit; the result is the
 * mapped address (or 0 for old-style non-_MAP_NEW calls that succeed).
 */
asmlinkage u32 sunos_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
{
	struct file *file = NULL;
	unsigned long retval, ret_type;

	/* MAP_NORESERVE is not emulated; warn the first few times and
	 * drop the flag.
	 */
	if (flags & MAP_NORESERVE) {
		static int cnt;
		if (cnt++ < 10)
			printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
			       current->comm);
		flags &= ~MAP_NORESERVE;
	}
	retval = -EBADF;
	if (!(flags & MAP_ANONYMOUS)) {
		struct inode * inode;
		if (fd >= SUNOS_NR_OPEN)
			goto out;
		file = fget(fd);
		if (!file)
			goto out;
		inode = file->f_dentry->d_inode;
		/* Mapping /dev/zero (MEM_MAJOR, minor 5) is the SunOS
		 * way of asking for anonymous memory.
		 */
		if (imajor(inode) == MEM_MAJOR && iminor(inode) == 5) {
			flags |= MAP_ANONYMOUS;
			fput(file);
			file = NULL;
		}
	}

	retval = -EINVAL;
	if (!(flags & MAP_FIXED))
		addr = 0;
	else if (len > 0xf0000000 || addr > 0xf0000000 - len)
		goto out_putf;	/* keep fixed mappings below the 32-bit task limit */
	ret_type = flags & _MAP_NEW;
	flags &= ~_MAP_NEW;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	down_write(&current->mm->mmap_sem);
	retval = do_mmap(file,
			 (unsigned long) addr, (unsigned long) len,
			 (unsigned long) prot, (unsigned long) flags,
			 (unsigned long) off);
	up_write(&current->mm->mmap_sem);
	/* Old-style (non-_MAP_NEW) mmap returns 0 on success, not the
	 * mapped address.
	 */
	if (!ret_type)
		retval = ((retval < 0xf0000000) ? 0 : retval);
out_putf:
	if (file)
		fput(file);
out:
	return (u32) retval;
}
115
/* SunOS mctl() (memory control hints): accepted but ignored, always
 * reports success.
 */
asmlinkage int sunos_mctl(u32 addr, u32 len, int function, u32 arg)
{
	return 0;
}
120
/* SunOS brk(): move the break to @baddr.  Returns 0 on success or a
 * negative errno.  Unlike Linux brk() this performs its own rlimit,
 * overlap and free-memory checks under mmap_sem.
 */
asmlinkage int sunos_brk(u32 baddr)
{
	int freepages, retval = -ENOMEM;
	unsigned long rlim;
	unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;

	down_write(&current->mm->mmap_sem);
	if (brk < current->mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(current->mm->brk);
	retval = 0;
	/* Same page: just record the new break value. */
	if (oldbrk == newbrk) {
		current->mm->brk = brk;
		goto out;
	}
	/* Always allow shrinking brk. */
	if (brk <= current->mm->brk) {
		current->mm->brk = brk;
		do_munmap(current->mm, newbrk, oldbrk-newbrk);
		goto out;
	}
	/* Check against rlimit and stack.. */
	retval = -ENOMEM;
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (brk - current->mm->end_code > rlim)
		goto out;
	/* Check against existing mmap mappings. */
	if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;
	/* stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	freepages = get_page_cache_size();
	freepages >>= 1;
	freepages += nr_free_pages();
	freepages += nr_swap_pages;
	freepages -= num_physpages >> 4;
	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
	if (freepages < 0)
		goto out;
	/* Ok, we have probably got enough memory - let it rip. */
	current->mm->brk = brk;
	do_brk(oldbrk, newbrk-oldbrk);
	retval = 0;
out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
173
174asmlinkage u32 sunos_sbrk(int increment)
175{
176 int error, oldbrk;
177
178 /* This should do it hopefully... */
179 oldbrk = (int)current->mm->brk;
180 error = sunos_brk(((int) current->mm->brk) + increment);
181 if (!error)
182 error = oldbrk;
183 return error;
184}
185
/* SunOS sstk() (explicit stack growth): unsupported; log the attempt
 * and return failure.
 */
asmlinkage u32 sunos_sstk(int increment)
{
	printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
	       current->comm, increment);

	return (u32)-1;
}
193
194/* Give hints to the kernel as to what paging strategy to use...
195 * Completely bogus, don't remind me.
196 */
197#define VA_NORMAL 0 /* Normal vm usage expected */
198#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
199#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
200#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
/* Printable names for the VA_* strategies above, indexed by value. */
static char *vstrings[] = {
	"VA_NORMAL",
	"VA_ABNORMAL",
	"VA_SEQUENTIAL",
	"VA_INVALIDATE",
};
207
208asmlinkage void sunos_vadvise(u32 strategy)
209{
210 static int count;
211
212 /* I wanna see who uses this... */
213 if (count++ < 5)
214 printk("%s: Advises us to use %s paging strategy\n",
215 current->comm,
216 strategy <= 3 ? vstrings[strategy] : "BOGUS");
217}
218
219/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
220 * resource limit and is for backwards compatibility with older sunos
221 * revs.
222 */
asmlinkage int sunos_getdtablesize(void)
{
	/* Fixed SunOS limit, deliberately independent of RLIMIT_NOFILE. */
	return SUNOS_NR_OPEN;
}
227
228
229#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
230
231asmlinkage u32 sunos_sigblock(u32 blk_mask)
232{
233 u32 old;
234
235 spin_lock_irq(&current->sighand->siglock);
236 old = (u32) current->blocked.sig[0];
237 current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
238 recalc_sigpending();
239 spin_unlock_irq(&current->sighand->siglock);
240 return old;
241}
242
243asmlinkage u32 sunos_sigsetmask(u32 newmask)
244{
245 u32 retval;
246
247 spin_lock_irq(&current->sighand->siglock);
248 retval = (u32) current->blocked.sig[0];
249 current->blocked.sig[0] = (newmask & _BLOCKABLE);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252 return retval;
253}
254
255/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
256/* getdents system call, the format of the structure just has a different */
257/* layout (d_off+d_ino instead of d_ino+d_off) */
/* SunOS on-disk dirent layout as seen by userspace. */
struct sunos_dirent {
	s32		d_off;		/* offset of the NEXT entry */
	u32		d_ino;
	u16		d_reclen;	/* total record length, padded */
	u16		d_namlen;
	char		d_name[1];	/* NUL-terminated name follows */
};

/* Cursor state threaded through sunos_filldir() via vfs_readdir(). */
struct sunos_dirent_callback {
	struct sunos_dirent __user *curr;	/* where the next record goes */
	struct sunos_dirent __user *previous;	/* last record, to patch d_off */
	int count;				/* bytes left in the user buffer */
	int error;				/* sticky error for the caller */
};

/* Byte offset of d_name within a record / round up to s32 alignment. */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
#define ROUND_UP(x) (((x)+sizeof(s32)-1) & ~(sizeof(s32)-1))
275
276static int sunos_filldir(void * __buf, const char * name, int namlen,
277 loff_t offset, ino_t ino, unsigned int d_type)
278{
279 struct sunos_dirent __user *dirent;
280 struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
281 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
282
283 buf->error = -EINVAL; /* only used if we fail.. */
284 if (reclen > buf->count)
285 return -EINVAL;
286 dirent = buf->previous;
287 if (dirent)
288 put_user(offset, &dirent->d_off);
289 dirent = buf->curr;
290 buf->previous = dirent;
291 put_user(ino, &dirent->d_ino);
292 put_user(namlen, &dirent->d_namlen);
293 put_user(reclen, &dirent->d_reclen);
294 if (copy_to_user(dirent->d_name, name, namlen))
295 return -EFAULT;
296 put_user(0, dirent->d_name + namlen);
297 dirent = (void __user *) dirent + reclen;
298 buf->curr = dirent;
299 buf->count -= reclen;
300 return 0;
301}
302
/* SunOS getdents(): read directory entries from @fd into the user
 * buffer @dirent (capacity @cnt bytes).  Returns the number of bytes
 * written, 0 at EOF, or a negative errno.
 */
asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
{
	struct file * file;
	struct sunos_dirent __user *lastdirent;
	struct sunos_dirent_callback buf;
	int error = -EBADF;

	if (fd >= SUNOS_NR_OPEN)
		goto out;

	file = fget(fd);
	if (!file)
		goto out;

	/* Require room for at least one maximal-name entry. */
	error = -EINVAL;
	if (cnt < (sizeof(struct sunos_dirent) + 255))
		goto out_putf;

	buf.curr = (struct sunos_dirent __user *) dirent;
	buf.previous = NULL;
	buf.count = cnt;
	buf.error = 0;

	error = vfs_readdir(file, sunos_filldir, &buf);
	if (error < 0)
		goto out_putf;

	/* If anything was emitted, terminate the chain with the current
	 * file position and report the bytes consumed.
	 */
	lastdirent = buf.previous;
	error = buf.error;
	if (lastdirent) {
		put_user(file->f_pos, &lastdirent->d_off);
		error = cnt - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
342
343/* Old sunos getdirentries, severely broken compatibility stuff here. */
/* Old sunos getdirentries() record: like sunos_dirent but without the
 * per-record d_off field.
 */
struct sunos_direntry {
	u32		d_ino;
	u16		d_reclen;	/* total record length, padded */
	u16		d_namlen;
	char		d_name[1];	/* NUL-terminated name follows */
};

/* Cursor state threaded through sunos_filldirentry(). */
struct sunos_direntry_callback {
	struct sunos_direntry __user	*curr;		/* next record slot */
	struct sunos_direntry __user	*previous;	/* last emitted record */
	int count;					/* bytes remaining */
	int error;					/* sticky error */
};
357
358static int sunos_filldirentry(void * __buf, const char * name, int namlen,
359 loff_t offset, ino_t ino, unsigned int d_type)
360{
361 struct sunos_direntry __user *dirent;
362 struct sunos_direntry_callback * buf =
363 (struct sunos_direntry_callback *) __buf;
364 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
365
366 buf->error = -EINVAL; /* only used if we fail.. */
367 if (reclen > buf->count)
368 return -EINVAL;
369 dirent = buf->previous;
370 dirent = buf->curr;
371 buf->previous = dirent;
372 put_user(ino, &dirent->d_ino);
373 put_user(namlen, &dirent->d_namlen);
374 put_user(reclen, &dirent->d_reclen);
375 if (copy_to_user(dirent->d_name, name, namlen))
376 return -EFAULT;
377 put_user(0, dirent->d_name + namlen);
378 dirent = (void __user *) dirent + reclen;
379 buf->curr = dirent;
380 buf->count -= reclen;
381 return 0;
382}
383
/* SunOS getdirentries(): like sunos_getdents() but using the older
 * record format; the resulting file position is stored through @basep.
 * Returns bytes written, 0 at EOF, or a negative errno.
 */
asmlinkage int sunos_getdirentries(unsigned int fd,
				   void __user *dirent,
				   int cnt,
				   unsigned int __user *basep)
{
	struct file * file;
	struct sunos_direntry __user *lastdirent;
	int error = -EBADF;
	struct sunos_direntry_callback buf;

	if (fd >= SUNOS_NR_OPEN)
		goto out;

	file = fget(fd);
	if (!file)
		goto out;

	/* Require room for at least one maximal-name entry. */
	error = -EINVAL;
	if (cnt < (sizeof(struct sunos_direntry) + 255))
		goto out_putf;

	buf.curr = (struct sunos_direntry __user *) dirent;
	buf.previous = NULL;
	buf.count = cnt;
	buf.error = 0;

	error = vfs_readdir(file, sunos_filldirentry, &buf);
	if (error < 0)
		goto out_putf;

	lastdirent = buf.previous;
	error = buf.error;
	if (lastdirent) {
		/* Report the new position and the bytes consumed. */
		put_user(file->f_pos, basep);
		error = cnt - buf.count;
	}

out_putf:
	fput(file);
out:
	return error;
}
426
/* SunOS uname() result layout (fixed-width fields). */
struct sunos_utsname {
	char sname[9];	/* sysname */
	char nname[9];	/* nodename, truncated to 8 chars + NUL */
	char nnext[56];	/* nodename continuation */
	char rel[9];	/* release */
	char ver[9];	/* version */
	char mach[9];	/* machine */
};
435
/* SunOS uname(): copy the system utsname into the caller's fixed-width
 * sunos_utsname under uts_sem.  Note only nname is explicitly
 * NUL-terminated here; the other fields get a NUL only if the source
 * string fits within the 8 copied bytes — presumably always true for
 * these utsname fields, but worth confirming.
 */
asmlinkage int sunos_uname(struct sunos_utsname __user *name)
{
	int ret;

	down_read(&uts_sem);
	ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0],
			   sizeof(name->sname) - 1);
	ret |= copy_to_user(&name->nname[0], &system_utsname.nodename[0],
			    sizeof(name->nname) - 1);
	ret |= put_user('\0', &name->nname[8]);
	ret |= copy_to_user(&name->rel[0], &system_utsname.release[0],
			    sizeof(name->rel) - 1);
	ret |= copy_to_user(&name->ver[0], &system_utsname.version[0],
			    sizeof(name->ver) - 1);
	ret |= copy_to_user(&name->mach[0], &system_utsname.machine[0],
			    sizeof(name->mach) - 1);
	up_read(&uts_sem);
	return (ret ? -EFAULT : 0);
}
455
/* Handler for unimplemented SunOS syscalls: raise SIGSYS against the
 * caller (syscall number carried in si_trapno) and return -ENOSYS.
 * The first few offenders get a register dump in the log.
 */
asmlinkage int sunos_nosys(void)
{
	struct pt_regs *regs;
	siginfo_t info;
	static int cnt;

	regs = current_thread_info()->kregs;
	/* Compat task: only the low 32 bits of the PCs are meaningful. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSYS;
	info.si_errno = 0;
	info.si_code = __SI_FAULT|0x100;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = regs->u_regs[UREG_G1];	/* the syscall number */
	send_sig_info(SIGSYS, &info, current);
	if (cnt++ < 4) {
		printk("Process makes ni_syscall number %d, register dump:\n",
		       (int) regs->u_regs[UREG_G1]);
		show_regs(regs);
	}
	return -ENOSYS;
}
480
481/* This is not a real and complete implementation yet, just to keep
482 * the easy SunOS binaries happy.
483 */
484asmlinkage int sunos_fpathconf(int fd, int name)
485{
486 int ret;
487
488 switch(name) {
489 case _PCONF_LINK:
490 ret = LINK_MAX;
491 break;
492 case _PCONF_CANON:
493 ret = MAX_CANON;
494 break;
495 case _PCONF_INPUT:
496 ret = MAX_INPUT;
497 break;
498 case _PCONF_NAME:
499 ret = NAME_MAX;
500 break;
501 case _PCONF_PATH:
502 ret = PATH_MAX;
503 break;
504 case _PCONF_PIPE:
505 ret = PIPE_BUF;
506 break;
507 case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
508 ret = 1;
509 break;
510 case _PCONF_NOTRUNC: /* XXX Investigate XXX */
511 case _PCONF_VDISABLE:
512 ret = 0;
513 break;
514 default:
515 ret = -EINVAL;
516 break;
517 }
518 return ret;
519}
520
521asmlinkage int sunos_pathconf(u32 u_path, int name)
522{
523 int ret;
524
525 ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
526 return ret;
527}
528
/* SunOS select() wrapper around the compat implementation. */
asmlinkage int sunos_select(int width, u32 inp, u32 outp, u32 exp, u32 tvp_x)
{
	int ret;

	/* SunOS binaries expect that select won't change the tvp contents */
	ret = compat_sys_select(width, compat_ptr(inp), compat_ptr(outp),
				compat_ptr(exp), compat_ptr(tvp_x));
	/* A zero-timeout poll interrupted by a signal reports 0 ready
	 * descriptors under SunOS, not -EINTR.
	 */
	if (ret == -EINTR && tvp_x) {
		struct compat_timeval __user *tvp = compat_ptr(tvp_x);
		time_t sec, usec;

		__get_user(sec, &tvp->tv_sec);
		__get_user(usec, &tvp->tv_usec);
		if (sec == 0 && usec == 0)
			ret = 0;
	}
	return ret;
}
547
548asmlinkage void sunos_nop(void)
549{
550 return;
551}
552
553#if 0 /* This code doesn't translate user pointers correctly,
554 * disable for now. -DaveM
555 */
556
557/* XXXXXXXXXX SunOS mount/umount. XXXXXXXXXXX */
558#define SMNT_RDONLY 1
559#define SMNT_NOSUID 2
560#define SMNT_NEWTYPE 4
561#define SMNT_GRPID 8
562#define SMNT_REMOUNT 16
563#define SMNT_NOSUB 32
564#define SMNT_MULTI 64
565#define SMNT_SYS5 128
566
567struct sunos_fh_t {
568 char fh_data [NFS_FHSIZE];
569};
570
571struct sunos_nfs_mount_args {
572 struct sockaddr_in *addr; /* file server address */
573 struct nfs_fh *fh; /* File handle to be mounted */
574 int flags; /* flags */
575 int wsize; /* write size in bytes */
576 int rsize; /* read size in bytes */
577 int timeo; /* initial timeout in .1 secs */
578 int retrans; /* times to retry send */
579 char *hostname; /* server's hostname */
580 int acregmin; /* attr cache file min secs */
581 int acregmax; /* attr cache file max secs */
582 int acdirmin; /* attr cache dir min secs */
583 int acdirmax; /* attr cache dir max secs */
584 char *netname; /* server's netname */
585};
586
587
588/* Bind the socket on a local reserved port and connect it to the
589 * remote server. This on Linux/i386 is done by the mount program,
590 * not by the kernel.
591 */
592/* XXXXXXXXXXXXXXXXXXXX */
/* Bind @fd's socket to a reserved local port (searching downward from
 * 1023) and connect it to the NFS server at @addr.  Returns 1 on
 * success, 0 on any failure.  (Dead code: inside #if 0.)
 */
static int
sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
{
	struct sockaddr_in local;
	struct sockaddr_in server;
	int    try_port;
	int    ret;
	struct socket *socket;
	struct inode  *inode;
	struct file   *file;

	file = fget(fd);
	if (!file)
		return 0;

	inode = file->f_dentry->d_inode;

	socket = SOCKET_I(inode);
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = INADDR_ANY;

	/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
	try_port = 1024;
	do {
		local.sin_port = htons (--try_port);
		ret = socket->ops->bind(socket, (struct sockaddr*)&local,
					sizeof(local));
	} while (ret && try_port > (1024 / 2));

	if (ret) {
		fput(file);
		return 0;
	}

	server.sin_family = AF_INET;
	server.sin_addr = addr->sin_addr;
	/* NOTE(review): port assigned without htons(), unlike the bind
	 * above — verify byte order before ever re-enabling this code.
	 */
	server.sin_port = NFS_PORT;

	/* Call sys_connect */
	ret = socket->ops->connect (socket, (struct sockaddr *) &server,
				    sizeof (server), file->f_flags);
	fput(file);
	if (ret < 0)
		return 0;
	return 1;
}
639
640/* XXXXXXXXXXXXXXXXXXXX */
/* Return @value, or @def_value when @value is zero (unset). */
static int get_default(int value, int def_value)
{
	return value ? value : def_value;
}
648
649/* XXXXXXXXXXXXXXXXXXXX */
/* Translate a SunOS NFS mount request into a Linux nfs_mount_data and
 * perform the mount.  (Dead code: inside #if 0.)
 */
static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
{
	int  server_fd, err;
	char *the_name, *mount_page;
	struct nfs_mount_data linux_nfs_mount;
	struct sunos_nfs_mount_args sunos_mount;

	/* Ok, here comes the fun part: Linux's nfs mount needs a
	 * socket connection to the server, but SunOS mount does not
	 * require this, so we use the information on the destination
	 * address to create a socket and bind it to a reserved
	 * port on this system
	 */
	if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
		return -EFAULT;

	server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (server_fd < 0)
		return -ENXIO;

	if (copy_from_user(&linux_nfs_mount.addr, sunos_mount.addr,
			   sizeof(*sunos_mount.addr)) ||
	    copy_from_user(&linux_nfs_mount.root, sunos_mount.fh,
			   sizeof(*sunos_mount.fh))) {
		sys_close (server_fd);
		return -EFAULT;
	}

	if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
		sys_close (server_fd);
		return -ENXIO;
	}

	/* Now, bind it to a locally reserved port */
	linux_nfs_mount.version  = NFS_MOUNT_VERSION;
	linux_nfs_mount.flags    = sunos_mount.flags;
	linux_nfs_mount.fd       = server_fd;

	linux_nfs_mount.rsize    = get_default (sunos_mount.rsize, 8192);
	linux_nfs_mount.wsize    = get_default (sunos_mount.wsize, 8192);
	linux_nfs_mount.timeo    = get_default (sunos_mount.timeo, 10);
	linux_nfs_mount.retrans  = sunos_mount.retrans;

	linux_nfs_mount.acregmin = sunos_mount.acregmin;
	linux_nfs_mount.acregmax = sunos_mount.acregmax;
	linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
	linux_nfs_mount.acdirmax = sunos_mount.acdirmax;

	/* NOTE(review): from here on, error returns leave server_fd
	 * open — the getname/get_zeroed_page failure paths below never
	 * sys_close() it.  Fix before re-enabling this code.
	 */
	the_name = getname(sunos_mount.hostname);
	if (IS_ERR(the_name))
		return PTR_ERR(the_name);

	strlcpy(linux_nfs_mount.hostname, the_name,
		sizeof(linux_nfs_mount.hostname));
	putname (the_name);
	
	mount_page = (char *) get_zeroed_page(GFP_KERNEL);
	if (!mount_page)
		return -ENOMEM;

	memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));

	err = do_mount("", dir_name, "nfs", linux_flags, mount_page);

	free_page((unsigned long) mount_page);
	return err;
}
717
718/* XXXXXXXXXXXXXXXXXXXX */
/* SunOS mount(): translate SMNT_* flags to MS_* and dispatch by
 * filesystem type string.  (Dead code: inside #if 0.)
 */
asmlinkage int
sunos_mount(char *type, char *dir, int flags, void *data)
{
	int linux_flags = 0;
	int ret = -EINVAL;
	char *dev_fname = 0;
	char *dir_page, *type_page;

	if (!capable (CAP_SYS_ADMIN))
		return -EPERM;

	/* We don't handle the integer fs type */
	if ((flags & SMNT_NEWTYPE) == 0)
		goto out;

	/* Do not allow for those flags we don't support */
	if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
		goto out;

	if (flags & SMNT_REMOUNT)
		linux_flags |= MS_REMOUNT;
	if (flags & SMNT_RDONLY)
		linux_flags |= MS_RDONLY;
	if (flags & SMNT_NOSUID)
		linux_flags |= MS_NOSUID;

	dir_page = getname(dir);
	ret = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out;

	type_page = getname(type);
	ret = PTR_ERR(type_page);
	if (IS_ERR(type_page))
		goto out1;

	/* For block filesystems, @data is the device path; NFS gets its
	 * own translation path and UFS is rejected outright.  Anything
	 * other than the types below (or "proc") is -ENODEV.
	 */
	if (strcmp(type_page, "ext2") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "iso9660") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "minix") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "nfs") == 0) {
		ret = sunos_nfs_mount (dir_page, flags, data);
		goto out2;
        } else if (strcmp(type_page, "ufs") == 0) {
		printk("Warning: UFS filesystem mounts unsupported.\n");
		ret = -ENODEV;
		goto out2;
	} else if (strcmp(type_page, "proc")) {
		ret = -ENODEV;
		goto out2;
	}
	ret = PTR_ERR(dev_fname);
	if (IS_ERR(dev_fname))
		goto out2;
	lock_kernel();
	ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
	unlock_kernel();
	if (dev_fname)
		putname(dev_fname);
out2:
	putname(type_page);
out1:
	putname(dir_page);
out:
	return ret;
}
787#endif
788
789asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
790{
791 int ret;
792
793 /* So stupid... */
794 if ((!pid || pid == current->pid) &&
795 !pgid) {
796 sys_setsid();
797 ret = 0;
798 } else {
799 ret = sys_setpgid(pid, pgid);
800 }
801 return ret;
802}
803
804/* So stupid... */
805extern long compat_sys_wait4(compat_pid_t, compat_uint_t __user *, int,
806 struct compat_rusage __user *);
807
808asmlinkage int sunos_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru)
809{
810 int ret;
811
812 ret = compat_sys_wait4((pid ? pid : ((compat_pid_t)-1)),
813 stat_addr, options, ru);
814 return ret;
815}
816
817extern int kill_pg(int, int, int);
818asmlinkage int sunos_killpg(int pgrp, int sig)
819{
820 return kill_pg(pgrp, sig, 0);
821}
822
/* SunOS audit(): unimplemented; log the attempt and fail. */
asmlinkage int sunos_audit(void)
{
	printk ("sys_audit\n");
	return -1;
}
828
829asmlinkage u32 sunos_gethostid(void)
830{
831 u32 ret;
832
833 ret = (((u32)idprom->id_machtype << 24) | ((u32)idprom->id_sernum));
834
835 return ret;
836}
837
838/* sysconf options, for SunOS compatibility */
839#define _SC_ARG_MAX 1
840#define _SC_CHILD_MAX 2
841#define _SC_CLK_TCK 3
842#define _SC_NGROUPS_MAX 4
843#define _SC_OPEN_MAX 5
844#define _SC_JOB_CONTROL 6
845#define _SC_SAVED_IDS 7
846#define _SC_VERSION 8
847
848asmlinkage s32 sunos_sysconf (int name)
849{
850 s32 ret;
851
852 switch (name){
853 case _SC_ARG_MAX:
854 ret = ARG_MAX;
855 break;
856 case _SC_CHILD_MAX:
857 ret = CHILD_MAX;
858 break;
859 case _SC_CLK_TCK:
860 ret = HZ;
861 break;
862 case _SC_NGROUPS_MAX:
863 ret = NGROUPS_MAX;
864 break;
865 case _SC_OPEN_MAX:
866 ret = OPEN_MAX;
867 break;
868 case _SC_JOB_CONTROL:
869 ret = 1; /* yes, we do support job control */
870 break;
871 case _SC_SAVED_IDS:
872 ret = 1; /* yes, we do support saved uids */
873 break;
874 case _SC_VERSION:
875 /* mhm, POSIX_VERSION is in /usr/include/unistd.h
876 * should it go on /usr/include/linux?
877 */
878 ret = 199009;
879 break;
880 default:
881 ret = -1;
882 break;
883 };
884 return ret;
885}
886
/* SunOS semsys() multiplexor: op 0 = semctl (with the SunOS command
 * numbers remapped to Linux GET*/SET* values), op 1 = semget,
 * op 2 = semop.
 */
asmlinkage int sunos_semsys(int op, u32 arg1, u32 arg2, u32 arg3, void __user *ptr)
{
	union semun arg4;
	int ret;

	switch (op) {
	case 0:
		/* Most arguments match on a 1:1 basis but cmd doesn't */
		switch(arg3) {
		case 4:
			arg3=GETPID; break;
		case 5:
			arg3=GETVAL; break;
		case 6:
			arg3=GETALL; break;
		case 3:
			arg3=GETNCNT; break;
		case 7:
			arg3=GETZCNT; break;
		case 8:
			arg3=SETVAL; break;
		case 9:
			arg3=SETALL; break;
		}
		/* sys_semctl(): */
		/* value to modify semaphore to */
		arg4.__pad = ptr;
		ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4);
		break;
	case 1:
		/* sys_semget(): */
		ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
		break;
	case 2:
		/* sys_semop(): */
		ret = sys_semop((int)arg1, (struct sembuf __user *)(unsigned long)arg2,
				(unsigned int) arg3);
		break;
	default:
		ret = -EINVAL;
		break;
	};
	return ret;
}
931
/* 32-bit layout of struct msgbuf as seen by SunOS binaries. */
struct msgbuf32 {
	s32 mtype;
	char mtext[1];	/* message payload follows */
};

/* 32-bit IPC permission block shared by the msg/shm compat structs. */
struct ipc_perm32
{
	key_t    	  key;
        compat_uid_t  uid;
        compat_gid_t  gid;
        compat_uid_t  cuid;
        compat_gid_t  cgid;
        compat_mode_t mode;
        unsigned short  seq;
};

/* 32-bit layout of struct msqid_ds as seen by SunOS binaries. */
struct msqid_ds32
{
        struct ipc_perm32 msg_perm;
        u32 msg_first;		/* queue head pointer (opaque to userland) */
        u32 msg_last;		/* queue tail pointer (opaque to userland) */
        compat_time_t msg_stime;
        compat_time_t msg_rtime;
        compat_time_t msg_ctime;
        u32 wwait;
        u32 rwait;
        unsigned short msg_cbytes;
        unsigned short msg_qnum;
        unsigned short msg_qbytes;
        compat_ipc_pid_t msg_lspid;
        compat_ipc_pid_t msg_lrpid;
};
964
965static inline int sunos_msqid_get(struct msqid_ds32 __user *user,
966 struct msqid_ds *kern)
967{
968 if (get_user(kern->msg_perm.key, &user->msg_perm.key) ||
969 __get_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
970 __get_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
971 __get_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
972 __get_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
973 __get_user(kern->msg_stime, &user->msg_stime) ||
974 __get_user(kern->msg_rtime, &user->msg_rtime) ||
975 __get_user(kern->msg_ctime, &user->msg_ctime) ||
976 __get_user(kern->msg_ctime, &user->msg_cbytes) ||
977 __get_user(kern->msg_ctime, &user->msg_qnum) ||
978 __get_user(kern->msg_ctime, &user->msg_qbytes) ||
979 __get_user(kern->msg_ctime, &user->msg_lspid) ||
980 __get_user(kern->msg_ctime, &user->msg_lrpid))
981 return -EFAULT;
982 return 0;
983}
984
985static inline int sunos_msqid_put(struct msqid_ds32 __user *user,
986 struct msqid_ds *kern)
987{
988 if (put_user(kern->msg_perm.key, &user->msg_perm.key) ||
989 __put_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
990 __put_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
991 __put_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
992 __put_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
993 __put_user(kern->msg_stime, &user->msg_stime) ||
994 __put_user(kern->msg_rtime, &user->msg_rtime) ||
995 __put_user(kern->msg_ctime, &user->msg_ctime) ||
996 __put_user(kern->msg_ctime, &user->msg_cbytes) ||
997 __put_user(kern->msg_ctime, &user->msg_qnum) ||
998 __put_user(kern->msg_ctime, &user->msg_qbytes) ||
999 __put_user(kern->msg_ctime, &user->msg_lspid) ||
1000 __put_user(kern->msg_ctime, &user->msg_lrpid))
1001 return -EFAULT;
1002 return 0;
1003}
1004
/* Copy a 32-bit msgbuf (type + @len payload bytes) in from userspace.
 * Returns 0 on success, -EFAULT on fault.
 */
static inline int sunos_msgbuf_get(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
{
	if (get_user(kern->mtype, &user->mtype) ||
	    __copy_from_user(kern->mtext, &user->mtext, len))
		return -EFAULT;
	return 0;
}
1012
/* Copy a kernel msgbuf (type + @len payload bytes) out to a 32-bit
 * msgbuf in userspace.  Returns 0 on success, -EFAULT on fault.
 */
static inline int sunos_msgbuf_put(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
{
	if (put_user(kern->mtype, &user->mtype) ||
	    __copy_to_user(user->mtext, kern->mtext, len))
		return -EFAULT;
	return 0;
}
1020
1021asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1022{
1023 struct sparc_stackf32 __user *sp;
1024 struct msqid_ds kds;
1025 struct msgbuf *kmbuf;
1026 mm_segment_t old_fs = get_fs();
1027 u32 arg5;
1028 int rval;
1029
1030 switch(op) {
1031 case 0:
1032 rval = sys_msgget((key_t)arg1, (int)arg2);
1033 break;
1034 case 1:
1035 if (!sunos_msqid_get((struct msqid_ds32 __user *)(unsigned long)arg3, &kds)) {
1036 set_fs(KERNEL_DS);
1037 rval = sys_msgctl((int)arg1, (int)arg2,
1038 (struct msqid_ds __user *)(unsigned long)arg3);
1039 set_fs(old_fs);
1040 if (!rval)
1041 rval = sunos_msqid_put((struct msqid_ds32 __user *)(unsigned long)arg3,
1042 &kds);
1043 } else
1044 rval = -EFAULT;
1045 break;
1046 case 2:
1047 rval = -EFAULT;
1048 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
1049 GFP_KERNEL);
1050 if (!kmbuf)
1051 break;
1052 sp = (struct sparc_stackf32 __user *)
1053 (current_thread_info()->kregs->u_regs[UREG_FP] & 0xffffffffUL);
1054 if (get_user(arg5, &sp->xxargs[0])) {
1055 rval = -EFAULT;
1056 kfree(kmbuf);
1057 break;
1058 }
1059 set_fs(KERNEL_DS);
1060 rval = sys_msgrcv((int)arg1, (struct msgbuf __user *) kmbuf,
1061 (size_t)arg3,
1062 (long)arg4, (int)arg5);
1063 set_fs(old_fs);
1064 if (!rval)
1065 rval = sunos_msgbuf_put((struct msgbuf32 __user *)(unsigned long)arg2,
1066 kmbuf, arg3);
1067 kfree(kmbuf);
1068 break;
1069 case 3:
1070 rval = -EFAULT;
1071 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
1072 GFP_KERNEL);
1073 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
1074 kmbuf, arg3))
1075 break;
1076 set_fs(KERNEL_DS);
1077 rval = sys_msgsnd((int)arg1, (struct msgbuf __user *) kmbuf,
1078 (size_t)arg3, (int)arg4);
1079 set_fs(old_fs);
1080 kfree(kmbuf);
1081 break;
1082 default:
1083 rval = -EINVAL;
1084 break;
1085 }
1086 return rval;
1087}
1088
/* 32-bit layout of struct shmid_ds as seen by SunOS binaries. */
struct shmid_ds32 {
	struct ipc_perm32       shm_perm;
	int                     shm_segsz;
	compat_time_t         shm_atime;
	compat_time_t         shm_dtime;
	compat_time_t         shm_ctime;
	compat_ipc_pid_t    shm_cpid; 
	compat_ipc_pid_t    shm_lpid; 
	unsigned short          shm_nattch;
};
1099
/* Copy a 32-bit shmid_ds in from userspace into the kernel's native
 * struct shmid_ds, field by field.  Returns 0 on success, -EFAULT.
 */
static inline int sunos_shmid_get(struct shmid_ds32 __user *user,
				  struct shmid_ds *kern)
{
	if (get_user(kern->shm_perm.key, &user->shm_perm.key) ||
	    __get_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
	    __get_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
	    __get_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
	    __get_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
	    __get_user(kern->shm_segsz, &user->shm_segsz) ||
	    __get_user(kern->shm_atime, &user->shm_atime) ||
	    __get_user(kern->shm_dtime, &user->shm_dtime) ||
	    __get_user(kern->shm_ctime, &user->shm_ctime) ||
	    __get_user(kern->shm_cpid, &user->shm_cpid) ||
	    __get_user(kern->shm_lpid, &user->shm_lpid) ||
	    __get_user(kern->shm_nattch, &user->shm_nattch))
		return -EFAULT;
	return 0;
}
1118
/* Copy a kernel struct shmid_ds out to a 32-bit shmid_ds in
 * userspace, field by field.  Returns 0 on success, -EFAULT.
 */
static inline int sunos_shmid_put(struct shmid_ds32 __user *user,
				  struct shmid_ds *kern)
{
	if (put_user(kern->shm_perm.key, &user->shm_perm.key) ||
	    __put_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
	    __put_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
	    __put_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
	    __put_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
	    __put_user(kern->shm_segsz, &user->shm_segsz) ||
	    __put_user(kern->shm_atime, &user->shm_atime) ||
	    __put_user(kern->shm_dtime, &user->shm_dtime) ||
	    __put_user(kern->shm_ctime, &user->shm_ctime) ||
	    __put_user(kern->shm_cpid, &user->shm_cpid) ||
	    __put_user(kern->shm_lpid, &user->shm_lpid) ||
	    __put_user(kern->shm_nattch, &user->shm_nattch))
		return -EFAULT;
	return 0;
}
1137
/* SunOS shmsys() multiplexor: op 0 = shmat, op 1 = shmctl (with 32-bit
 * shmid_ds translation), op 2 = shmdt, op 3 = shmget.
 */
asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
{
	struct shmid_ds ksds;
	unsigned long raddr;
	mm_segment_t old_fs = get_fs();
	int rval;

	switch(op) {
	case 0:
		/* do_shmat(): attach a shared memory area */
		rval = do_shmat((int)arg1,(char __user *)(unsigned long)arg2,(int)arg3,&raddr);
		if (!rval)
			rval = (int) raddr;	/* success: return attach address */
		break;
	case 1:
		/* sys_shmctl(): modify shared memory area attr. */
		/* Translate in, run natively on the kernel copy under
		 * KERNEL_DS, translate back out.
		 */
		if (!sunos_shmid_get((struct shmid_ds32 __user *)(unsigned long)arg3, &ksds)) {
			set_fs(KERNEL_DS);
			rval = sys_shmctl((int) arg1,(int) arg2,
					  (struct shmid_ds __user *) &ksds);
			set_fs(old_fs);
			if (!rval)
				rval = sunos_shmid_put((struct shmid_ds32 __user *)(unsigned long)arg3,
						       &ksds);
		} else
			rval = -EFAULT;
		break;
	case 2:
		/* sys_shmdt(): detach a shared memory area */
		rval = sys_shmdt((char __user *)(unsigned long)arg1);
		break;
	case 3:
		/* sys_shmget(): get a shared memory area */
		rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
		break;
	default:
		rval = -EINVAL;
		break;
	};
	return rval;
}
1179
1180extern asmlinkage long sparc32_open(const char __user * filename, int flags, int mode);
1181
1182asmlinkage int sunos_open(u32 fname, int flags, int mode)
1183{
1184 const char __user *filename = compat_ptr(fname);
1185
1186 return sparc32_open(filename, flags, mode);
1187}
1188
1189#define SUNOS_EWOULDBLOCK 35
1190
/* See the SunOS man page read(2v) for an explanation of this
   garbage.  We use O_NDELAY to mark file descriptors that have
   been set non-blocking using 4.2BSD style calls.  (tridge) */
1195
1196static inline int check_nonblock(int ret, int fd)
1197{
1198 if (ret == -EAGAIN) {
1199 struct file * file = fget(fd);
1200 if (file) {
1201 if (file->f_flags & O_NDELAY)
1202 ret = -SUNOS_EWOULDBLOCK;
1203 fput(file);
1204 }
1205 }
1206 return ret;
1207}
1208
1209asmlinkage int sunos_read(unsigned int fd, char __user *buf, u32 count)
1210{
1211 int ret;
1212
1213 ret = check_nonblock(sys_read(fd, buf, count), fd);
1214 return ret;
1215}
1216
1217asmlinkage int sunos_readv(u32 fd, void __user *vector, s32 count)
1218{
1219 int ret;
1220
1221 ret = check_nonblock(compat_sys_readv(fd, vector, count), fd);
1222 return ret;
1223}
1224
1225asmlinkage int sunos_write(unsigned int fd, char __user *buf, u32 count)
1226{
1227 int ret;
1228
1229 ret = check_nonblock(sys_write(fd, buf, count), fd);
1230 return ret;
1231}
1232
1233asmlinkage int sunos_writev(u32 fd, void __user *vector, s32 count)
1234{
1235 int ret;
1236
1237 ret = check_nonblock(compat_sys_writev(fd, vector, count), fd);
1238 return ret;
1239}
1240
1241asmlinkage int sunos_recv(u32 __fd, void __user *ubuf, int size, unsigned flags)
1242{
1243 int ret, fd = (int) __fd;
1244
1245 ret = check_nonblock(sys_recv(fd, ubuf, size, flags), fd);
1246 return ret;
1247}
1248
1249asmlinkage int sunos_send(u32 __fd, void __user *buff, int len, unsigned flags)
1250{
1251 int ret, fd = (int) __fd;
1252
1253 ret = check_nonblock(sys_send(fd, buff, len, flags), fd);
1254 return ret;
1255}
1256
1257asmlinkage int sunos_accept(u32 __fd, struct sockaddr __user *sa, int __user *addrlen)
1258{
1259 int ret, fd = (int) __fd;
1260
1261 while (1) {
1262 ret = check_nonblock(sys_accept(fd, sa, addrlen), fd);
1263 if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
1264 break;
1265 }
1266 return ret;
1267}
1268
1269#define SUNOS_SV_INTERRUPT 2
1270
1271asmlinkage int sunos_sigaction (int sig,
1272 struct old_sigaction32 __user *act,
1273 struct old_sigaction32 __user *oact)
1274{
1275 struct k_sigaction new_ka, old_ka;
1276 int ret;
1277
1278 if (act) {
1279 compat_old_sigset_t mask;
1280 u32 u_handler;
1281
1282 if (get_user(u_handler, &act->sa_handler) ||
1283 __get_user(new_ka.sa.sa_flags, &act->sa_flags))
1284 return -EFAULT;
1285 new_ka.sa.sa_handler = compat_ptr(u_handler);
1286 __get_user(mask, &act->sa_mask);
1287 new_ka.sa.sa_restorer = NULL;
1288 new_ka.ka_restorer = NULL;
1289 siginitset(&new_ka.sa.sa_mask, mask);
1290 new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
1291 }
1292
1293 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
1294
1295 if (!ret && oact) {
1296 old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
1297 if (put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
1298 __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
1299 return -EFAULT;
1300 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
1301 }
1302
1303 return ret;
1304}
1305
1306asmlinkage int sunos_setsockopt(u32 __fd, u32 __level, u32 __optname,
1307 char __user *optval, u32 __optlen)
1308{
1309 int fd = (int) __fd;
1310 int level = (int) __level;
1311 int optname = (int) __optname;
1312 int optlen = (int) __optlen;
1313 int tr_opt = optname;
1314 int ret;
1315
1316 if (level == SOL_IP) {
1317 /* Multicast socketopts (ttl, membership) */
1318 if (tr_opt >=2 && tr_opt <= 6)
1319 tr_opt += 30;
1320 }
1321 ret = sys_setsockopt(fd, level, tr_opt,
1322 optval, optlen);
1323 return ret;
1324}
1325
1326asmlinkage int sunos_getsockopt(u32 __fd, u32 __level, u32 __optname,
1327 char __user *optval, int __user *optlen)
1328{
1329 int fd = (int) __fd;
1330 int level = (int) __level;
1331 int optname = (int) __optname;
1332 int tr_opt = optname;
1333 int ret;
1334
1335 if (level == SOL_IP) {
1336 /* Multicast socketopts (ttl, membership) */
1337 if (tr_opt >=2 && tr_opt <= 6)
1338 tr_opt += 30;
1339 }
1340 ret = compat_sys_getsockopt(fd, level, tr_opt,
1341 optval, optlen);
1342 return ret;
1343}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
new file mode 100644
index 000000000000..48170f77fff1
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.S
@@ -0,0 +1,251 @@
1/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also.
4 *
5 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 *
8 * Based upon preliminary work which is:
9 *
10 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
11 */
12
13#include <linux/config.h>
14
15 .text
16 .align 4
17
18#ifdef CONFIG_COMPAT
19 /* First, the 32-bit Linux native syscall table. */
20
21 .globl sys_call_table32
22sys_call_table32:
23/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
24/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
25/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
26/*15*/ .word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
27/*20*/ .word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
28/*25*/ .word compat_sys_time, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
29/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
30 .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
31/*40*/ .word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
32 .word sys32_umount, sys32_setgid16, sys32_getgid16, sys32_signal, sys32_geteuid16
33/*50*/ .word sys32_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
34 .word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
35/*60*/ .word sys32_umask, sys_chroot, compat_sys_newfstat, sys_fstat64, sys_getpagesize
36 .word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid
37/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
38 .word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys32_getgroups16
39/*80*/ .word sys32_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64
40 .word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid
41/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
42 .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
43/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
44 .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
45/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
46 .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
47/*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
48 .word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
49/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
50 .word sys_nis_syscall, sys32_mkdir, sys_rmdir, sys32_utimes, sys_stat64
51/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
52 .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
53/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
54 .word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
55/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
56 .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
57/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
58 .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
59/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
60 .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
61/*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
62 .word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
63/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
64 .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
65/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
66 .word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
67/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
68 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
69/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
70 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
71/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
72 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
73/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
74 .word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
75/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
76 .word sys_timer_delete, sys32_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
77/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
78 .word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
79/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
80
81#endif /* CONFIG_COMPAT */
82
83 /* Now the 64-bit native Linux syscall table. */
84
85 .align 4
86 .globl sys_call_table64, sys_call_table
87sys_call_table64:
88sys_call_table:
89/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
90/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
91/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
92/*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
93/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
94/*25*/ .word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
95/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
96 .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
97/*40*/ .word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
98 .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
99/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
100 .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
101/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize
102 .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
103/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
104 .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
105/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
106 .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
107/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
108 .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
109/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
110 .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
111/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
112 .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
113/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
114 .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
115/*130*/ .word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
116 .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
117/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
118 .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
119/*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
120 .word sys_nis_syscall, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
121/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
122 .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
123/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
124 .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
125/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
126 .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname
127/*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
128 .word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
129/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
130 .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
131/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
132 .word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
133/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
134 .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
135/*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64
136 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
137/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
138 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
139/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
140 .word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
141/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
142 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
143/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
144 .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
145/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
146
147#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
148 defined(CONFIG_SOLARIS_EMUL_MODULE)
149 /* Now the 32-bit SunOS syscall table. */
150
151 .align 4
152 .globl sunos_sys_table
153sunos_sys_table:
154/*0*/ .word sunos_indir, sys32_exit, sys_fork
155 .word sunos_read, sunos_write, sunos_open
156 .word sys_close, sunos_wait4, sys_creat
157 .word sys_link, sys_unlink, sunos_execv
158 .word sys_chdir, sunos_nosys, sys32_mknod
159 .word sys_chmod, sys32_lchown16, sunos_brk
160 .word sunos_nosys, sys32_lseek, sunos_getpid
161 .word sunos_nosys, sunos_nosys, sunos_nosys
162 .word sunos_getuid, sunos_nosys, sys_ptrace
163 .word sunos_nosys, sunos_nosys, sunos_nosys
164 .word sunos_nosys, sunos_nosys, sunos_nosys
165 .word sys_access, sunos_nosys, sunos_nosys
166 .word sys_sync, sys_kill, compat_sys_newstat
167 .word sunos_nosys, compat_sys_newlstat, sys_dup
168 .word sys_pipe, sunos_nosys, sunos_nosys
169 .word sunos_nosys, sunos_nosys, sunos_getgid
170 .word sunos_nosys, sunos_nosys
171/*50*/ .word sunos_nosys, sys_acct, sunos_nosys
172 .word sunos_mctl, sunos_ioctl, sys_reboot
173 .word sunos_nosys, sys_symlink, sys_readlink
174 .word sys32_execve, sys_umask, sys_chroot
175 .word compat_sys_newfstat, sunos_nosys, sys_getpagesize
176 .word sys_msync, sys_vfork, sunos_nosys
177 .word sunos_nosys, sunos_sbrk, sunos_sstk
178 .word sunos_mmap, sunos_vadvise, sys_munmap
179 .word sys_mprotect, sys_madvise, sys_vhangup
180 .word sunos_nosys, sys_mincore, sys32_getgroups16
181 .word sys32_setgroups16, sys_getpgrp, sunos_setpgrp
182 .word compat_sys_setitimer, sunos_nosys, sys_swapon
183 .word compat_sys_getitimer, sys_gethostname, sys_sethostname
184 .word sunos_getdtablesize, sys_dup2, sunos_nop
185 .word compat_sys_fcntl, sunos_select, sunos_nop
186 .word sys_fsync, sys32_setpriority, sys32_socket
187 .word sys32_connect, sunos_accept
188/*100*/ .word sys_getpriority, sunos_send, sunos_recv
189 .word sunos_nosys, sys32_bind, sunos_setsockopt
190 .word sys32_listen, sunos_nosys, sunos_sigaction
191 .word sunos_sigblock, sunos_sigsetmask, sys_sigpause
192 .word sys32_sigstack, sys32_recvmsg, sys32_sendmsg
193 .word sunos_nosys, sys32_gettimeofday, compat_sys_getrusage
194 .word sunos_getsockopt, sunos_nosys, sunos_readv
195 .word sunos_writev, sys32_settimeofday, sys32_fchown16
196 .word sys_fchmod, sys32_recvfrom, sys32_setreuid16
197 .word sys32_setregid16, sys_rename, sys_truncate
198 .word sys_ftruncate, sys_flock, sunos_nosys
199 .word sys32_sendto, sys32_shutdown, sys32_socketpair
200 .word sys_mkdir, sys_rmdir, sys32_utimes
201 .word sys32_sigreturn, sunos_nosys, sys32_getpeername
202 .word sunos_gethostid, sunos_nosys, compat_sys_getrlimit
203 .word compat_sys_setrlimit, sunos_killpg, sunos_nosys
204 .word sunos_nosys, sunos_nosys
205/*150*/ .word sys32_getsockname, sunos_nosys, sunos_nosys
206 .word sys_poll, sunos_nosys, sunos_nosys
207 .word sunos_getdirentries, compat_sys_statfs, compat_sys_fstatfs
208 .word sys_oldumount, sunos_nosys, sunos_nosys
209 .word sys_getdomainname, sys_setdomainname
210 .word sunos_nosys, sys_quotactl, sunos_nosys
211 .word sunos_nosys, sys_ustat, sunos_semsys
212 .word sunos_nosys, sunos_shmsys, sunos_audit
213 .word sunos_nosys, sunos_getdents, sys_setsid
214 .word sys_fchdir, sunos_nosys, sunos_nosys
215 .word sunos_nosys, sunos_nosys, sunos_nosys
216 .word sunos_nosys, compat_sys_sigpending, sunos_nosys
217 .word sys_setpgid, sunos_pathconf, sunos_fpathconf
218 .word sunos_sysconf, sunos_uname, sunos_nosys
219 .word sunos_nosys, sunos_nosys, sunos_nosys
220 .word sunos_nosys, sunos_nosys, sunos_nosys
221 .word sunos_nosys, sunos_nosys, sunos_nosys
222/*200*/ .word sunos_nosys, sunos_nosys, sunos_nosys
223 .word sunos_nosys, sunos_nosys, sunos_nosys
224 .word sunos_nosys, sunos_nosys, sunos_nosys
225 .word sunos_nosys, sunos_nosys, sunos_nosys
226 .word sunos_nosys, sunos_nosys, sunos_nosys
227 .word sunos_nosys, sunos_nosys, sunos_nosys
228 .word sunos_nosys, sunos_nosys, sunos_nosys
229 .word sunos_nosys, sunos_nosys, sunos_nosys
230 .word sunos_nosys, sunos_nosys, sunos_nosys
231 .word sunos_nosys, sunos_nosys, sunos_nosys
232 .word sunos_nosys, sunos_nosys, sunos_nosys
233 .word sunos_nosys, sunos_nosys, sunos_nosys
234 .word sunos_nosys, sunos_nosys, sunos_nosys
235 .word sunos_nosys, sunos_nosys, sunos_nosys
236 .word sunos_nosys, sunos_nosys, sunos_nosys
237 .word sunos_nosys, sunos_nosys, sunos_nosys
238 .word sunos_nosys, sunos_nosys
239/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys
240 .word sunos_nosys, sunos_nosys, sunos_nosys
241 .word sunos_nosys, sunos_nosys, sunos_nosys
242 .word sunos_nosys, sunos_nosys, sunos_nosys
243 .word sunos_nosys, sunos_nosys, sunos_nosys
244 .word sunos_nosys, sunos_nosys, sunos_nosys
245 .word sunos_nosys, sunos_nosys, sunos_nosys
246 .word sunos_nosys, sunos_nosys, sunos_nosys
247 .word sunos_nosys, sunos_nosys, sunos_nosys
248 .word sunos_nosys, sunos_nosys, sunos_nosys
249 .word sunos_nosys, sunos_nosys, sunos_nosys
250 .word sunos_nosys
251#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
new file mode 100644
index 000000000000..6a717d4d2bc5
--- /dev/null
+++ b/arch/sparc64/kernel/time.c
@@ -0,0 +1,1195 @@
1/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
2 * time.c: UltraSparc timer and TOD clock support.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 *
7 * Based largely on code which is:
8 *
9 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/param.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/interrupt.h>
21#include <linux/time.h>
22#include <linux/timex.h>
23#include <linux/init.h>
24#include <linux/ioport.h>
25#include <linux/mc146818rtc.h>
26#include <linux/delay.h>
27#include <linux/profile.h>
28#include <linux/bcd.h>
29#include <linux/jiffies.h>
30#include <linux/cpufreq.h>
31#include <linux/percpu.h>
32#include <linux/profile.h>
33
34#include <asm/oplib.h>
35#include <asm/mostek.h>
36#include <asm/timer.h>
37#include <asm/irq.h>
38#include <asm/io.h>
39#include <asm/sbus.h>
40#include <asm/fhc.h>
41#include <asm/pbm.h>
42#include <asm/ebus.h>
43#include <asm/isa.h>
44#include <asm/starfire.h>
45#include <asm/smp.h>
46#include <asm/sections.h>
47#include <asm/cpudata.h>
48
49DEFINE_SPINLOCK(mostek_lock);
50DEFINE_SPINLOCK(rtc_lock);
51unsigned long mstk48t02_regs = 0UL;
52#ifdef CONFIG_PCI
53unsigned long ds1287_regs = 0UL;
54#endif
55
56extern unsigned long wall_jiffies;
57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62static unsigned long mstk48t08_regs = 0UL;
63static unsigned long mstk48t59_regs = 0UL;
64
65static int set_rtc_mmss(unsigned long);
66
67static __init unsigned long dummy_get_tick(void)
68{
69 return 0;
70}
71
72static __initdata struct sparc64_tick_ops dummy_tick_ops = {
73 .get_tick = dummy_get_tick,
74};
75
76struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
77
78#define TICK_PRIV_BIT (1UL << 63)
79
80#ifdef CONFIG_SMP
81unsigned long profile_pc(struct pt_regs *regs)
82{
83 unsigned long pc = instruction_pointer(regs);
84
85 if (in_lock_functions(pc))
86 return regs->u_regs[UREG_RETPC];
87 return pc;
88}
89EXPORT_SYMBOL(profile_pc);
90#endif
91
92static void tick_disable_protection(void)
93{
94 /* Set things up so user can access tick register for profiling
95 * purposes. Also workaround BB_ERRATA_1 by doing a dummy
96 * read back of %tick after writing it.
97 */
98 __asm__ __volatile__(
99 " ba,pt %%xcc, 1f\n"
100 " nop\n"
101 " .align 64\n"
102 "1: rd %%tick, %%g2\n"
103 " add %%g2, 6, %%g2\n"
104 " andn %%g2, %0, %%g2\n"
105 " wrpr %%g2, 0, %%tick\n"
106 " rdpr %%tick, %%g0"
107 : /* no outputs */
108 : "r" (TICK_PRIV_BIT)
109 : "g2");
110}
111
112static void tick_init_tick(unsigned long offset)
113{
114 tick_disable_protection();
115
116 __asm__ __volatile__(
117 " rd %%tick, %%g1\n"
118 " andn %%g1, %1, %%g1\n"
119 " ba,pt %%xcc, 1f\n"
120 " add %%g1, %0, %%g1\n"
121 " .align 64\n"
122 "1: wr %%g1, 0x0, %%tick_cmpr\n"
123 " rd %%tick_cmpr, %%g0"
124 : /* no outputs */
125 : "r" (offset), "r" (TICK_PRIV_BIT)
126 : "g1");
127}
128
129static unsigned long tick_get_tick(void)
130{
131 unsigned long ret;
132
133 __asm__ __volatile__("rd %%tick, %0\n\t"
134 "mov %0, %0"
135 : "=r" (ret));
136
137 return ret & ~TICK_PRIV_BIT;
138}
139
/* Read the current %tick_cmpr value.  The dummy mov is retained
 * unchanged from the original read sequence.
 */
static unsigned long tick_get_compare(void)
{
	unsigned long val;

	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
			     "mov %0, %0"
			     : "=r" (val));

	return val;
}
150
151static unsigned long tick_add_compare(unsigned long adj)
152{
153 unsigned long new_compare;
154
155 /* Workaround for Spitfire Errata (#54 I think??), I discovered
156 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
157 * number 103640.
158 *
159 * On Blackbird writes to %tick_cmpr can fail, the
160 * workaround seems to be to execute the wr instruction
161 * at the start of an I-cache line, and perform a dummy
162 * read back from %tick_cmpr right after writing to it. -DaveM
163 */
164 __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
165 "ba,pt %%xcc, 1f\n\t"
166 " add %0, %1, %0\n\t"
167 ".align 64\n"
168 "1:\n\t"
169 "wr %0, 0, %%tick_cmpr\n\t"
170 "rd %%tick_cmpr, %%g0"
171 : "=&r" (new_compare)
172 : "r" (adj));
173
174 return new_compare;
175}
176
177static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
178{
179 unsigned long new_tick, tmp;
180
181 /* Also need to handle Blackbird bug here too. */
182 __asm__ __volatile__("rd %%tick, %0\n\t"
183 "add %0, %2, %0\n\t"
184 "wrpr %0, 0, %%tick\n\t"
185 "andn %0, %4, %1\n\t"
186 "ba,pt %%xcc, 1f\n\t"
187 " add %1, %3, %1\n\t"
188 ".align 64\n"
189 "1:\n\t"
190 "wr %1, 0, %%tick_cmpr\n\t"
191 "rd %%tick_cmpr, %%g0"
192 : "=&r" (new_tick), "=&r" (tmp)
193 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
194
195 return new_tick;
196}
197
198static struct sparc64_tick_ops tick_operations = {
199 .init_tick = tick_init_tick,
200 .get_tick = tick_get_tick,
201 .get_compare = tick_get_compare,
202 .add_tick = tick_add_tick,
203 .add_compare = tick_add_compare,
204 .softint_mask = 1UL << 0,
205};
206
207static void stick_init_tick(unsigned long offset)
208{
209 tick_disable_protection();
210
211 /* Let the user get at STICK too. */
212 __asm__ __volatile__(
213 " rd %%asr24, %%g2\n"
214 " andn %%g2, %0, %%g2\n"
215 " wr %%g2, 0, %%asr24"
216 : /* no outputs */
217 : "r" (TICK_PRIV_BIT)
218 : "g1", "g2");
219
220 __asm__ __volatile__(
221 " rd %%asr24, %%g1\n"
222 " andn %%g1, %1, %%g1\n"
223 " add %%g1, %0, %%g1\n"
224 " wr %%g1, 0x0, %%asr25"
225 : /* no outputs */
226 : "r" (offset), "r" (TICK_PRIV_BIT)
227 : "g1");
228}
229
230static unsigned long stick_get_tick(void)
231{
232 unsigned long ret;
233
234 __asm__ __volatile__("rd %%asr24, %0"
235 : "=r" (ret));
236
237 return ret & ~TICK_PRIV_BIT;
238}
239
240static unsigned long stick_get_compare(void)
241{
242 unsigned long ret;
243
244 __asm__ __volatile__("rd %%asr25, %0"
245 : "=r" (ret));
246
247 return ret;
248}
249
250static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
251{
252 unsigned long new_tick, tmp;
253
254 __asm__ __volatile__("rd %%asr24, %0\n\t"
255 "add %0, %2, %0\n\t"
256 "wr %0, 0, %%asr24\n\t"
257 "andn %0, %4, %1\n\t"
258 "add %1, %3, %1\n\t"
259 "wr %1, 0, %%asr25"
260 : "=&r" (new_tick), "=&r" (tmp)
261 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
262
263 return new_tick;
264}
265
266static unsigned long stick_add_compare(unsigned long adj)
267{
268 unsigned long new_compare;
269
270 __asm__ __volatile__("rd %%asr25, %0\n\t"
271 "add %0, %1, %0\n\t"
272 "wr %0, 0, %%asr25"
273 : "=&r" (new_compare)
274 : "r" (adj));
275
276 return new_compare;
277}
278
279static struct sparc64_tick_ops stick_operations = {
280 .init_tick = stick_init_tick,
281 .get_tick = stick_get_tick,
282 .get_compare = stick_get_compare,
283 .add_tick = stick_add_tick,
284 .add_compare = stick_add_compare,
285 .softint_mask = 1UL << 16,
286};
287
288/* On Hummingbird the STICK/STICK_CMPR register is implemented
289 * in I/O space. There are two 64-bit registers each, the
290 * first holds the low 32-bits of the value and the second holds
291 * the high 32-bits.
292 *
293 * Since STICK is constantly updating, we have to access it carefully.
294 *
295 * The sequence we use to read is:
296 * 1) read low
297 * 2) read high
298 * 3) read low again, if it rolled over increment high by 1
299 *
300 * Writing STICK safely is also tricky:
301 * 1) write low to zero
302 * 2) write high
303 * 3) write low
304 */
305#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
306#define HBIRD_STICK_ADDR 0x1fe0000f070UL
307
308static unsigned long __hbird_read_stick(void)
309{
310 unsigned long ret, tmp1, tmp2, tmp3;
311 unsigned long addr = HBIRD_STICK_ADDR;
312
313 __asm__ __volatile__("ldxa [%1] %5, %2\n\t"
314 "add %1, 0x8, %1\n\t"
315 "ldxa [%1] %5, %3\n\t"
316 "sub %1, 0x8, %1\n\t"
317 "ldxa [%1] %5, %4\n\t"
318 "cmp %4, %2\n\t"
319 "blu,a,pn %%xcc, 1f\n\t"
320 " add %3, 1, %3\n"
321 "1:\n\t"
322 "sllx %3, 32, %3\n\t"
323 "or %3, %4, %0\n\t"
324 : "=&r" (ret), "=&r" (addr),
325 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
326 : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
327
328 return ret;
329}
330
331static unsigned long __hbird_read_compare(void)
332{
333 unsigned long low, high;
334 unsigned long addr = HBIRD_STICKCMP_ADDR;
335
336 __asm__ __volatile__("ldxa [%2] %3, %0\n\t"
337 "add %2, 0x8, %2\n\t"
338 "ldxa [%2] %3, %1"
339 : "=&r" (low), "=&r" (high), "=&r" (addr)
340 : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
341
342 return (high << 32UL) | low;
343}
344
345static void __hbird_write_stick(unsigned long val)
346{
347 unsigned long low = (val & 0xffffffffUL);
348 unsigned long high = (val >> 32UL);
349 unsigned long addr = HBIRD_STICK_ADDR;
350
351 __asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
352 "add %0, 0x8, %0\n\t"
353 "stxa %3, [%0] %4\n\t"
354 "sub %0, 0x8, %0\n\t"
355 "stxa %2, [%0] %4"
356 : "=&r" (addr)
357 : "0" (addr), "r" (low), "r" (high),
358 "i" (ASI_PHYS_BYPASS_EC_E));
359}
360
361static void __hbird_write_compare(unsigned long val)
362{
363 unsigned long low = (val & 0xffffffffUL);
364 unsigned long high = (val >> 32UL);
365 unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;
366
367 __asm__ __volatile__("stxa %3, [%0] %4\n\t"
368 "sub %0, 0x8, %0\n\t"
369 "stxa %2, [%0] %4"
370 : "=&r" (addr)
371 : "0" (addr), "r" (low), "r" (high),
372 "i" (ASI_PHYS_BYPASS_EC_E));
373}
374
375static void hbtick_init_tick(unsigned long offset)
376{
377 unsigned long val;
378
379 tick_disable_protection();
380
381 /* XXX This seems to be necessary to 'jumpstart' Hummingbird
382 * XXX into actually sending STICK interrupts. I think because
383 * XXX of how we store %tick_cmpr in head.S this somehow resets the
384 * XXX {TICK + STICK} interrupt mux. -DaveM
385 */
386 __hbird_write_stick(__hbird_read_stick());
387
388 val = __hbird_read_stick() & ~TICK_PRIV_BIT;
389 __hbird_write_compare(val + offset);
390}
391
392static unsigned long hbtick_get_tick(void)
393{
394 return __hbird_read_stick() & ~TICK_PRIV_BIT;
395}
396
/* Hummingbird STICK compare-register read. */
static unsigned long hbtick_get_compare(void)
{
	return __hbird_read_compare();
}
401
402static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
403{
404 unsigned long val;
405
406 val = __hbird_read_stick() + adj;
407 __hbird_write_stick(val);
408
409 val &= ~TICK_PRIV_BIT;
410 __hbird_write_compare(val + offset);
411
412 return val;
413}
414
415static unsigned long hbtick_add_compare(unsigned long adj)
416{
417 unsigned long val = __hbird_read_compare() + adj;
418
419 val &= ~TICK_PRIV_BIT;
420 __hbird_write_compare(val);
421
422 return val;
423}
424
425static struct sparc64_tick_ops hbtick_operations = {
426 .init_tick = hbtick_init_tick,
427 .get_tick = hbtick_get_tick,
428 .get_compare = hbtick_get_compare,
429 .add_tick = hbtick_add_tick,
430 .add_compare = hbtick_add_compare,
431 .softint_mask = 1UL << 0,
432};
433
/* timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 *
 * NOTE: On SUN5 systems the ticker interrupt comes in using 2
 * interrupts, one at level14 and one with softint bit 0.
 */
unsigned long timer_tick_offset;	/* tick counts per jiffy (clock / HZ) */
unsigned long timer_tick_compare;	/* last value programmed into the compare reg */

/* Fixed-point ticks->nsec conversion factor, computed in time_init(),
 * consumed by sched_clock().
 */
static unsigned long timer_ticks_per_nsec_quotient;

#define TICK_SIZE (tick_nsec / 1000)	/* one tick, in microseconds */
446
/* Periodically write the wall clock back to the hardware RTC.
 *
 * Called from the timer interrupt with xtime_lock held.  When NTP is
 * synchronized (STA_UNSYNC clear), the RTC is updated at most once
 * every ~11 minutes (660 s), and only when we are close to a half
 * minute boundary so the second hand lands correctly.  On a failed
 * update, last_rtc_update is backdated by 600 s so the next attempt
 * becomes eligible 60 s later.
 */
static inline void timer_check_rtc(void)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update;

	/* Determine when to update the Mostek clock. */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
			/* do it again in 60 s */
	}
}
464
/* Level-14 tick interrupt handler.
 *
 * Under xtime_lock, it repeatedly performs the per-tick work and pushes
 * the compare register one tick period forward, looping until the new
 * compare value is ahead of the current tick counter -- this way no
 * jiffy is lost even if interrupt delivery was delayed past one or
 * more compare points.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	unsigned long ticks, pstate;

	write_seqlock(&xtime_lock);

	do {
#ifndef CONFIG_SMP
		/* On SMP these run from the per-cpu tick path instead. */
		profile_tick(CPU_PROFILING, regs);
		update_process_times(user_mode(regs));
#endif
		do_timer(regs);

		/* Guarantee that the following sequences execute
		 * uninterrupted: disable interrupts via PSTATE_IE so
		 * add_compare/get_tick are not split by another trap.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
		ticks = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(ticks, timer_tick_compare));

	timer_check_rtc();

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
501
#ifdef CONFIG_SMP
/* SMP boot-cpu tick path: advance jiffies and the software copy of the
 * compare value, but do NOT touch the hardware compare register (the
 * per-cpu tick code owns it).  Called with interrupts disabled.
 */
void timer_tick_interrupt(struct pt_regs *regs)
{
	write_seqlock(&xtime_lock);

	do_timer(regs);

	/*
	 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
	 */
	timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;

	timer_check_rtc();

	write_sequnlock(&xtime_lock);
}
#endif
519
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ).
 *
 * Sequence: set the oscillator STOP bit off and the KICK_START bit on
 * (each register write bracketed by MSTK_CREG_WRITE), wait a few
 * seconds-register transitions so the oscillator spins up, then clear
 * KICK_START and program an arbitrary but valid date (1 Aug 1996).
 * Finally, loop until the KICK_START bit reads back clear.
 */
static void __init kick_start_clock(void)
{
	unsigned long regs = mstk48t02_regs;
	u8 sec, tmp;
	int i, count;

	prom_printf("CLOCK: Clock was stopped. Kick start ");

	spin_lock_irq(&mostek_lock);

	/* Turn on the kick start bit to start the oscillator. */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_SEC);
	tmp &= ~MSTK_STOP;
	mostek_write(regs + MOSTEK_SEC, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp |= MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Delay to allow the clock oscillator to start: wait for the
	 * seconds register to change three times, busy-waiting between
	 * polls.
	 */
	sec = MSTK_REG_SEC(regs);
	for (i = 0; i < 3; i++) {
		while (sec == MSTK_REG_SEC(regs))
			for (count = 0; count < 100000; count++)
				/* nothing */ ;
		prom_printf(".");
		sec = MSTK_REG_SEC(regs);
	}
	prom_printf("\n");

	spin_lock_irq(&mostek_lock);

	/* Turn off kick start and set a "valid" time and date. */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp &= ~MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	MSTK_SET_REG_SEC(regs,0);
	MSTK_SET_REG_MIN(regs,0);
	MSTK_SET_REG_HOUR(regs,0);
	MSTK_SET_REG_DOW(regs,5);
	MSTK_SET_REG_DOM(regs,1);
	MSTK_SET_REG_MONTH(regs,8);
	MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Ensure the kick start bit is off. If it isn't, turn it off. */
	while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
		prom_printf("CLOCK: Kick start still on!\n");

		spin_lock_irq(&mostek_lock);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp |= MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		tmp = mostek_read(regs + MOSTEK_HOUR);
		tmp &= ~MSTK_KICK_START;
		mostek_write(regs + MOSTEK_HOUR, tmp);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}

	prom_printf("CLOCK: Kick start procedure successful.\n");
}
603
/* Return nonzero if the clock chip battery is low.
 *
 * Probes by writing the complement of an EEPROM byte and reading it
 * back: if the value reads back unchanged, the write was blocked
 * (presumably the chip's low-battery write protection -- see the
 * MK48T59 datasheet), and we report a low battery.  The original byte
 * is restored either way.
 */
static int __init has_low_battery(void)
{
	unsigned long regs = mstk48t02_regs;
	u8 data1, data2;

	spin_lock_irq(&mostek_lock);

	data1 = mostek_read(regs + MOSTEK_EEPROM);	/* Read some data. */
	mostek_write(regs + MOSTEK_EEPROM, ~data1);	/* Write back the complement. */
	data2 = mostek_read(regs + MOSTEK_EEPROM);	/* Read back the complement. */
	mostek_write(regs + MOSTEK_EEPROM, data1);	/* Restore original value. */

	spin_unlock_irq(&mostek_lock);

	return (data1 == data2);	/* Was the write blocked? */
}
621
/* Read the current time-of-day from the RTC and initialize xtime and
 * wall_to_monotonic from it.
 *
 * Uses the Mostek chip when its registers are mapped (mstk48t02_regs),
 * otherwise falls back to the Dallas DS12887 via the CMOS_READ port
 * interface (PCI configs only).  Halts at the PROM if neither register
 * set has been mapped by clock_probe().
 */
static void __init set_system_time(void)
{
	unsigned int year, mon, day, hour, min, sec;
	unsigned long mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	u8 tmp;

	if (!mregs && !dregs) {
		prom_printf("Something wrong, clock regs not mapped yet.\n");
		prom_halt();
	}

	if (mregs) {
		/* Lock is held across the whole read so the latched
		 * registers are consistent; released at the bottom of
		 * this function after MSTK_CREG_READ is cleared.
		 */
		spin_lock_irq(&mostek_lock);

		/* Traditional Mostek chip. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		sec = MSTK_REG_SEC(mregs);
		min = MSTK_REG_MIN(mregs);
		hour = MSTK_REG_HOUR(mregs);
		day = MSTK_REG_DOM(mregs);
		mon = MSTK_REG_MONTH(mregs);
		year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
	} else {
		int i;

		/* Dallas 12887 RTC chip. */

		/* Stolen from arch/i386/kernel/time.c, see there for
		 * credits and descriptive comments.
		 */
		for (i = 0; i < 1000000; i++) {
			if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
				break;
			udelay(10);
		}
		for (i = 0; i < 1000000; i++) {
			if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
				break;
			udelay(10);
		}
		/* Re-read until a full pass sees a stable seconds value. */
		do {
			sec  = CMOS_READ(RTC_SECONDS);
			min  = CMOS_READ(RTC_MINUTES);
			hour = CMOS_READ(RTC_HOURS);
			day  = CMOS_READ(RTC_DAY_OF_MONTH);
			mon  = CMOS_READ(RTC_MONTH);
			year = CMOS_READ(RTC_YEAR);
		} while (sec != CMOS_READ(RTC_SECONDS));
		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			BCD_TO_BIN(sec);
			BCD_TO_BIN(min);
			BCD_TO_BIN(hour);
			BCD_TO_BIN(day);
			BCD_TO_BIN(mon);
			BCD_TO_BIN(year);
		}
		/* Two-digit year: values below 70 are 20xx. */
		if ((year += 1900) < 1970)
			year += 100;
	}

	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
	set_normalized_timespec(&wall_to_monotonic,
 	                        -xtime.tv_sec, -xtime.tv_nsec);

	if (mregs) {
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}
}
704
705void __init clock_probe(void)
706{
707 struct linux_prom_registers clk_reg[2];
708 char model[128];
709 int node, busnd = -1, err;
710 unsigned long flags;
711 struct linux_central *cbus;
712#ifdef CONFIG_PCI
713 struct linux_ebus *ebus = NULL;
714 struct sparc_isa_bridge *isa_br = NULL;
715#endif
716 static int invoked;
717
718 if (invoked)
719 return;
720 invoked = 1;
721
722
723 if (this_is_starfire) {
724 /* davem suggests we keep this within the 4M locked kernel image */
725 static char obp_gettod[256];
726 static u32 unix_tod;
727
728 sprintf(obp_gettod, "h# %08x unix-gettod",
729 (unsigned int) (long) &unix_tod);
730 prom_feval(obp_gettod);
731 xtime.tv_sec = unix_tod;
732 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
733 set_normalized_timespec(&wall_to_monotonic,
734 -xtime.tv_sec, -xtime.tv_nsec);
735 return;
736 }
737
738 local_irq_save(flags);
739
740 cbus = central_bus;
741 if (cbus != NULL)
742 busnd = central_bus->child->prom_node;
743
744 /* Check FHC Central then EBUSs then ISA bridges then SBUSs.
745 * That way we handle the presence of multiple properly.
746 *
747 * As a special case, machines with Central must provide the
748 * timer chip there.
749 */
750#ifdef CONFIG_PCI
751 if (ebus_chain != NULL) {
752 ebus = ebus_chain;
753 if (busnd == -1)
754 busnd = ebus->prom_node;
755 }
756 if (isa_chain != NULL) {
757 isa_br = isa_chain;
758 if (busnd == -1)
759 busnd = isa_br->prom_node;
760 }
761#endif
762 if (sbus_root != NULL && busnd == -1)
763 busnd = sbus_root->prom_node;
764
765 if (busnd == -1) {
766 prom_printf("clock_probe: problem, cannot find bus to search.\n");
767 prom_halt();
768 }
769
770 node = prom_getchild(busnd);
771
772 while (1) {
773 if (!node)
774 model[0] = 0;
775 else
776 prom_getstring(node, "model", model, sizeof(model));
777 if (strcmp(model, "mk48t02") &&
778 strcmp(model, "mk48t08") &&
779 strcmp(model, "mk48t59") &&
780 strcmp(model, "m5819") &&
781 strcmp(model, "m5819p") &&
782 strcmp(model, "m5823") &&
783 strcmp(model, "ds1287")) {
784 if (cbus != NULL) {
785 prom_printf("clock_probe: Central bus lacks timer chip.\n");
786 prom_halt();
787 }
788
789 if (node != 0)
790 node = prom_getsibling(node);
791#ifdef CONFIG_PCI
792 while ((node == 0) && ebus != NULL) {
793 ebus = ebus->next;
794 if (ebus != NULL) {
795 busnd = ebus->prom_node;
796 node = prom_getchild(busnd);
797 }
798 }
799 while ((node == 0) && isa_br != NULL) {
800 isa_br = isa_br->next;
801 if (isa_br != NULL) {
802 busnd = isa_br->prom_node;
803 node = prom_getchild(busnd);
804 }
805 }
806#endif
807 if (node == 0) {
808 prom_printf("clock_probe: Cannot find timer chip\n");
809 prom_halt();
810 }
811 continue;
812 }
813
814 err = prom_getproperty(node, "reg", (char *)clk_reg,
815 sizeof(clk_reg));
816 if(err == -1) {
817 prom_printf("clock_probe: Cannot get Mostek reg property\n");
818 prom_halt();
819 }
820
821 if (cbus != NULL) {
822 apply_fhc_ranges(central_bus->child, clk_reg, 1);
823 apply_central_ranges(central_bus, clk_reg, 1);
824 }
825#ifdef CONFIG_PCI
826 else if (ebus != NULL) {
827 struct linux_ebus_device *edev;
828
829 for_each_ebusdev(edev, ebus)
830 if (edev->prom_node == node)
831 break;
832 if (edev == NULL) {
833 if (isa_chain != NULL)
834 goto try_isa_clock;
835 prom_printf("%s: Mostek not probed by EBUS\n",
836 __FUNCTION__);
837 prom_halt();
838 }
839
840 if (!strcmp(model, "ds1287") ||
841 !strcmp(model, "m5819") ||
842 !strcmp(model, "m5819p") ||
843 !strcmp(model, "m5823")) {
844 ds1287_regs = edev->resource[0].start;
845 } else {
846 mstk48t59_regs = edev->resource[0].start;
847 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
848 }
849 break;
850 }
851 else if (isa_br != NULL) {
852 struct sparc_isa_device *isadev;
853
854try_isa_clock:
855 for_each_isadev(isadev, isa_br)
856 if (isadev->prom_node == node)
857 break;
858 if (isadev == NULL) {
859 prom_printf("%s: Mostek not probed by ISA\n");
860 prom_halt();
861 }
862 if (!strcmp(model, "ds1287") ||
863 !strcmp(model, "m5819") ||
864 !strcmp(model, "m5819p") ||
865 !strcmp(model, "m5823")) {
866 ds1287_regs = isadev->resource.start;
867 } else {
868 mstk48t59_regs = isadev->resource.start;
869 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
870 }
871 break;
872 }
873#endif
874 else {
875 if (sbus_root->num_sbus_ranges) {
876 int nranges = sbus_root->num_sbus_ranges;
877 int rngc;
878
879 for (rngc = 0; rngc < nranges; rngc++)
880 if (clk_reg[0].which_io ==
881 sbus_root->sbus_ranges[rngc].ot_child_space)
882 break;
883 if (rngc == nranges) {
884 prom_printf("clock_probe: Cannot find ranges for "
885 "clock regs.\n");
886 prom_halt();
887 }
888 clk_reg[0].which_io =
889 sbus_root->sbus_ranges[rngc].ot_parent_space;
890 clk_reg[0].phys_addr +=
891 sbus_root->sbus_ranges[rngc].ot_parent_base;
892 }
893 }
894
895 if(model[5] == '0' && model[6] == '2') {
896 mstk48t02_regs = (((u64)clk_reg[0].phys_addr) |
897 (((u64)clk_reg[0].which_io)<<32UL));
898 } else if(model[5] == '0' && model[6] == '8') {
899 mstk48t08_regs = (((u64)clk_reg[0].phys_addr) |
900 (((u64)clk_reg[0].which_io)<<32UL));
901 mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
902 } else {
903 mstk48t59_regs = (((u64)clk_reg[0].phys_addr) |
904 (((u64)clk_reg[0].which_io)<<32UL));
905 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
906 }
907 break;
908 }
909
910 if (mstk48t02_regs != 0UL) {
911 /* Report a low battery voltage condition. */
912 if (has_low_battery())
913 prom_printf("NVRAM: Low battery voltage!\n");
914
915 /* Kick start the clock if it is completely stopped. */
916 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
917 kick_start_clock();
918 }
919
920 set_system_time();
921
922 local_irq_restore(flags);
923}
924
/* This gets the master TICK_INT timer going.
 *
 * Selects the tick_ops implementation from the CPU type: Hummingbird
 * (spitfire TLB type, manuf 0x17 / impl 0x13 in %ver) uses the STICK
 * via hbtick_operations, other spitfire-class CPUs use %tick, and
 * everything else uses the system STICK.  Reads the corresponding
 * clock frequency from OBP, sets timer_tick_offset = clock / HZ, and
 * returns the clock frequency in Hz.
 */
static unsigned long sparc64_init_timers(void)
{
	unsigned long clock;
	int node;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	if (tlb_type == spitfire) {
		unsigned long ver, manuf, impl;

		__asm__ __volatile__ ("rdpr %%ver, %0"
				      : "=&r" (ver));
		manuf = ((ver >> 48) & 0xffff);
		impl = ((ver >> 32) & 0xffff);
		if (manuf == 0x17 && impl == 0x13) {
			/* Hummingbird, aka Ultra-IIe */
			tick_ops = &hbtick_operations;
			node = prom_root_node;
			clock = prom_getint(node, "stick-frequency");
		} else {
			tick_ops = &tick_operations;
			cpu_find_by_instance(0, &node, NULL);
			clock = prom_getint(node, "clock-frequency");
		}
	} else {
		tick_ops = &stick_operations;
		node = prom_root_node;
		clock = prom_getint(node, "stick-frequency");
	}
	timer_tick_offset = clock / HZ;

#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	return clock;
}
964
/* Register CFUNC as the timer interrupt handler and arm the first tick
 * via tick_ops->init_tick(), with interrupts masked (PSTATE_IE) around
 * the arming sequence.  Halts at the PROM if the IRQ cannot be
 * registered.  Enables local interrupts before returning.
 */
static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
{
	unsigned long pstate;
	int err;

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
			  "timer", NULL);

	if (err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(timer_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	local_irq_enable();
}
996
/* Per-cpu reference values captured at the first cpufreq transition,
 * used to rescale udelay_val and clock_tick as the frequency changes.
 */
struct freq_table {
	unsigned long udelay_val_ref;	/* udelay_val at ref_freq */
	unsigned long clock_tick_ref;	/* clock_tick at ref_freq */
	unsigned int ref_freq;		/* frequency the refs were captured at; 0 = unset */
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
1003
/* Return CPU's reference clock_tick when cpufreq has captured one
 * (so scaling does not skew it), else the live cpu_data value.
 */
unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (ft->clock_tick_ref)
		return ft->clock_tick_ref;
	return cpu_data(cpu).clock_tick;
}
1012
#ifdef CONFIG_CPU_FREQ

/* cpufreq transition notifier: on the first transition, snapshot the
 * cpu's udelay_val/clock_tick as references; afterwards rescale both
 * to the new frequency.  Rescaling happens before an increase
 * (PRECHANGE, old < new), after a decrease (POSTCHANGE, old > new),
 * and on RESUMECHANGE -- so udelay never under-delays mid-transition.
 */
static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				    void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned int cpu = freq->cpu;
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->udelay_val_ref = cpu_data(cpu).udelay_val;
		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		cpu_data(cpu).udelay_val =
			cpufreq_scale(ft->udelay_val_ref,
				      ft->ref_freq,
				      freq->new);
		cpu_data(cpu).clock_tick =
			cpufreq_scale(ft->clock_tick_ref,
				      ft->ref_freq,
				      freq->new);
	}

	return 0;
}

static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call = sparc64_cpufreq_notifier
};

#endif /* CONFIG_CPU_FREQ */
1048
/* CPU tick based time interpolator; .frequency is filled in from the
 * probed clock rate in time_init() before registration.
 */
static struct time_interpolator sparc64_cpu_interpolator = {
	.source		=	TIME_SOURCE_CPU,
	.shift		=	16,
	.mask		=	0xffffffffffffffffLL
};
1054
/* The quotient formula is taken from the IA64 port. */
#define SPARC64_NSEC_PER_CYC_SHIFT	30UL

/* Boot-time clock setup: probe the tick source and clock rate,
 * register the time interpolator, start the timer interrupt, compute
 * the rounded ticks->nsec quotient for sched_clock(), and (optionally)
 * hook the cpufreq transition notifier.
 */
void __init time_init(void)
{
	unsigned long clock = sparc64_init_timers();

	sparc64_cpu_interpolator.frequency = clock;
	register_time_interpolator(&sparc64_cpu_interpolator);

	/* Now that the interpolator is registered, it is
	 * safe to start the timer ticking.
	 */
	sparc64_start_timers(timer_interrupt);

	/* (nsec-per-sec << shift) / clock, rounded to nearest. */
	timer_ticks_per_nsec_quotient =
		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
		  (clock / 2)) / clock);

#ifdef CONFIG_CPU_FREQ
	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif
}
1078
/* Scheduler clock: current tick count converted to nanoseconds using
 * the fixed-point quotient computed in time_init().
 */
unsigned long long sched_clock(void)
{
	unsigned long ticks = tick_ops->get_tick();

	return (ticks * timer_ticks_per_nsec_quotient)
		>> SPARC64_NSEC_PER_CYC_SHIFT;
}
1086
/* Write the minutes and seconds of NOWTIME (seconds since epoch) back
 * to the hardware RTC -- Mostek when mapped, else the Dallas chip via
 * CMOS ports.  Only min/sec are written so unknown time zones and hour
 * handling are left alone; this requires the RTC to be within 15
 * minutes of the true time.  Returns 0 on success, -1 when no chip is
 * mapped (e.g. starfire) or the drift is too large to fix this way.
 */
static int set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, chip_minutes;
	unsigned long mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	unsigned long flags;
	u8 tmp;

	/*
	 * Not having a register set can lead to trouble.
	 * Also starfire doesn't have a tod clock.
	 */
	if (!mregs && !dregs)
		return -1;

	if (mregs) {
		spin_lock_irqsave(&mostek_lock, flags);

		/* Read the current RTC minutes. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		chip_minutes = MSTK_REG_MIN(mregs);

		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		/*
		 * since we're only adjusting minutes and seconds,
		 * don't interfere with hour overflow. This avoids
		 * messing with unknown time zones but requires your
		 * RTC not to be off by more than 15 minutes
		 */
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;	/* correct for half hour time zone */
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			/* Write bracketed by MSTK_CREG_WRITE set/clear. */
			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp |= MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			MSTK_SET_REG_SEC(mregs,real_seconds);
			MSTK_SET_REG_MIN(mregs,real_minutes);

			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp &= ~MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			spin_unlock_irqrestore(&mostek_lock, flags);

			return 0;
		} else {
			spin_unlock_irqrestore(&mostek_lock, flags);

			return -1;
		}
	} else {
		int retval = 0;
		unsigned char save_control, save_freq_select;

		/* Stolen from arch/i386/kernel/time.c, see there for
		 * credits and descriptive comments.
		 */
		spin_lock_irqsave(&rtc_lock, flags);
		save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
		CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

		save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

		chip_minutes = CMOS_READ(RTC_MINUTES);
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
			BCD_TO_BIN(chip_minutes);
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
				BIN_TO_BCD(real_seconds);
				BIN_TO_BCD(real_minutes);
			}
			CMOS_WRITE(real_seconds,RTC_SECONDS);
			CMOS_WRITE(real_minutes,RTC_MINUTES);
		} else {
			printk(KERN_WARNING
			       "set_rtc_mmss: can't update from %d to %d\n",
			       chip_minutes, real_minutes);
			retval = -1;
		}

		/* Restore control/prescaler state regardless of outcome. */
		CMOS_WRITE(save_control, RTC_CONTROL);
		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		return retval;
	}
}
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
new file mode 100644
index 000000000000..2c8f9344b4ee
--- /dev/null
+++ b/arch/sparc64/kernel/trampoline.S
@@ -0,0 +1,368 @@
1/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
2 * trampoline.S: Jump start slave processors on sparc64.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/head.h>
8#include <asm/asi.h>
9#include <asm/lsu.h>
10#include <asm/dcr.h>
11#include <asm/dcu.h>
12#include <asm/pstate.h>
13#include <asm/page.h>
14#include <asm/pgtable.h>
15#include <asm/spitfire.h>
16#include <asm/processor.h>
17#include <asm/thread_info.h>
18#include <asm/mmu.h>
19
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	.text
	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
/* Secondary CPU entry point.  On entry %o0 holds a value handed to us
 * by the master (loaded at do_unlock below; presumably a pointer to
 * this cpu's thread_info -- confirm against the smp boot code).
 * Dispatch to per-CPU-type cache/MMU setup, then fall into
 * startup_continue.
 */
sparc64_cpu_startup:
	flushw

	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	/* Program dispatch control and D-cache unit registers. */
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync

cheetah_generic_startup:
	/* Clear all TSB extension registers. */
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	/* Disable STICK_INT interrupts. */
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
	/* Enable I/D caches and I/D MMUs via the LSU control register. */
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
	wrpr	%g0, 15, %pil

	/* Set bit 63 of %tick_cmpr (disables TICK compare interrupts). */
	sethi	%hi(0x80000000), %g2
	sllx	%g2, 32, %g2
	wr	%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 2 consequetive entries if we are 'bigkernel'.
	 */
	mov	%o0, %l0

	/* Spin on prom_entry_lock -- only one cpu may call into OBP
	 * at a time.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 membar	#StoreLoad | #StoreStore

	/* Switch to the PROM trampoline stack kept at p1275buf + 0x10,
	 * saving our own %sp in %l1.
	 */
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x10], %l2
	mov	%sp, %l1
	add	%l2, -(192 + 128), %sp
	flushw

	/* Build the client-interface argument array on the stack:
	 * "call-method", 5 ins, 1 out, "SUNW,itlb-load", mmu ihandle,
	 * vaddr KERNBASE, the locked TTE data, and the TLB entry index
	 * (15 for cheetah, 63 otherwise).
	 */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]
	/* Call the prom entry stored at p1275buf + 0x08. */
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* bigkernel: also lock a second 4MB ITLB entry covering
	 * KERNBASE + 0x400000.
	 */
	sethi	%hi(bigkernel), %g2
	lduw	[%g2 + %lo(bigkernel)], %g2
	cmp	%g2, 0
	be,pt	%icc, do_dtlb
	 nop

	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE + 0x400000), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi	%hi(0x400000), %g1
	add	%g2, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	62, %g2
1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

do_dtlb:
	/* Same argument array as above, but "SUNW,dtlb-load" to lock
	 * the kernel mapping into the data TLB.
	 */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	sethi	%hi(bigkernel), %g2
	lduw	[%g2 + %lo(bigkernel)], %g2
	cmp	%g2, 0
	be,pt	%icc, do_unlock
	 nop

	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE + 0x400000), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi	%hi(0x400000), %g1
	add	%g2, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	62, %g2
1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

do_unlock:
	/* Release prom_entry_lock and restore our own stack. */
	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]
	membar	#StoreStore | #StoreLoad

	mov	%l1, %sp
	flushw

	mov	%l0, %o0

	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	/* XXX Buggy PROM... */
	srl	%o0, 0, %o0
	ldx	[%o0], %g6

	wr	%g0, ASI_P, %asi

	/* Zero both MMU contexts. */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync
	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	/* Point %sp at the top of the thread stack (%g6 + thread size
	 * minus frame and bias).
	 */
	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0, %tl

	/* Setup the trap globals, then we can resurface. */
	rdpr	%pstate, %o1
	mov	%g6, %o2
	wrpr	%o1, PSTATE_AG, %pstate
	sethi	%hi(sparc64_ttable_tl0), %g5
	wrpr	%g5, %tba
	mov	%o2, %g6

	wrpr	%o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov	TSB_REG, %g1
	stxa	%g0, [%g1] ASI_DMMU
	membar	#Sync
	mov	TLB_SFSR, %g1
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2

	/* Pick the per-CPU-type VPTE base into %g3. */
	BRANCH_IF_ANY_CHEETAH(g3,g7,9f)

	ba,pt	%xcc, 1f
	 nop

9:
	sethi	%uhi(VPTE_BASE_CHEETAH), %g3
	or	%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt	%xcc, 2f
	 sllx	%g3, 32, %g3
1:
	sethi	%uhi(VPTE_BASE_SPITFIRE), %g3
	or	%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx	%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	wrpr	%o1, 0x0, %pstate
	ldx	[%g6 + TI_TASK], %g4

	wrpr	%g0, 0, %wstate

	call	init_irqwork_curcpu
	 nop

	/* Cheetah-plus and later: switch the context register to the
	 * proper page size encodings before going live.
	 */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
	ba,pt	%xcc, 2f
	 nop

1:	/* Start using proper page size encodings in ctx register.  */
	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
	mov	PRIMARY_CONTEXT, %g1
	sllx	%g3, 32, %g3
	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
	or	%g3, %g2, %g3
	stxa	%g3, [%g1] ASI_DMMU
	membar	#Sync

2:
	/* Enable interrupts, hand the trap table to the PROM, and
	 * enter the scheduler; cpu_panic if cpu_idle ever returns.
	 */
	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0

	call	smp_callin
	 nop
	call	cpu_idle
	 mov	0, %o0
	call	cpu_panic
	 nop
1:	b,a,pt	%xcc, 1b

	.align		8
sparc64_cpu_startup_end:
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
new file mode 100644
index 000000000000..56b203a2af69
--- /dev/null
+++ b/arch/sparc64/kernel/traps.c
@@ -0,0 +1,2118 @@
1/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/sched.h> /* for jiffies */
15#include <linux/kernel.h>
16#include <linux/kallsyms.h>
17#include <linux/signal.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/mm.h>
21#include <linux/init.h>
22
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
36#include <asm/psrcompat.h>
37#include <asm/processor.h>
38#include <asm/timer.h>
39#include <asm/kdebug.h>
40#ifdef CONFIG_KMOD
41#include <linux/kmod.h>
42#endif
43
44struct notifier_block *sparc64die_chain;
45static DEFINE_SPINLOCK(die_notifier_lock);
46
47int register_die_notifier(struct notifier_block *nb)
48{
49 int err = 0;
50 unsigned long flags;
51 spin_lock_irqsave(&die_notifier_lock, flags);
52 err = notifier_chain_register(&sparc64die_chain, nb);
53 spin_unlock_irqrestore(&die_notifier_lock, flags);
54 return err;
55}
56
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved TSTATE at this trap level */
		unsigned long tpc;	/* saved TPC */
		unsigned long tnpc;	/* saved TNPC */
		unsigned long tt;	/* saved trap type (TT) register */
	} trapstack[4];			/* one snapshot per trap level */
	unsigned long tl;		/* trap level at which the error hit */
};
71
72static void dump_tl1_traplog(struct tl1_traplog *p)
73{
74 int i;
75
76 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
77 p->tl);
78 for (i = 0; i < 4; i++) {
79 printk(KERN_CRIT
80 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
81 "TNPC[%016lx] TT[%lx]\n",
82 i + 1,
83 p->trapstack[i].tstate, p->trapstack[i].tpc,
84 p->trapstack[i].tnpc, p->trapstack[i].tt);
85 }
86}
87
88void do_call_debug(struct pt_regs *regs)
89{
90 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
91}
92
93void bad_trap(struct pt_regs *regs, long lvl)
94{
95 char buffer[32];
96 siginfo_t info;
97
98 if (notify_die(DIE_TRAP, "bad trap", regs,
99 0, lvl, SIGTRAP) == NOTIFY_STOP)
100 return;
101
102 if (lvl < 0x100) {
103 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
104 die_if_kernel(buffer, regs);
105 }
106
107 lvl -= 0x100;
108 if (regs->tstate & TSTATE_PRIV) {
109 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
110 die_if_kernel(buffer, regs);
111 }
112 if (test_thread_flag(TIF_32BIT)) {
113 regs->tpc &= 0xffffffff;
114 regs->tnpc &= 0xffffffff;
115 }
116 info.si_signo = SIGILL;
117 info.si_errno = 0;
118 info.si_code = ILL_ILLTRP;
119 info.si_addr = (void __user *)regs->tpc;
120 info.si_trapno = lvl;
121 force_sig_info(SIGILL, &info, current);
122}
123
124void bad_trap_tl1(struct pt_regs *regs, long lvl)
125{
126 char buffer[32];
127
128 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
129 0, lvl, SIGTRAP) == NOTIFY_STOP)
130 return;
131
132 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
133
134 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
135 die_if_kernel (buffer, regs);
136}
137
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the source location of a BUG() site.  bust_spinlocks(1)
 * runs first so the message can reach the console.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
145
146void instruction_access_exception(struct pt_regs *regs,
147 unsigned long sfsr, unsigned long sfar)
148{
149 siginfo_t info;
150
151 if (notify_die(DIE_TRAP, "instruction access exception", regs,
152 0, 0x8, SIGTRAP) == NOTIFY_STOP)
153 return;
154
155 if (regs->tstate & TSTATE_PRIV) {
156 printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
157 sfsr, sfar);
158 die_if_kernel("Iax", regs);
159 }
160 if (test_thread_flag(TIF_32BIT)) {
161 regs->tpc &= 0xffffffff;
162 regs->tnpc &= 0xffffffff;
163 }
164 info.si_signo = SIGSEGV;
165 info.si_errno = 0;
166 info.si_code = SEGV_MAPERR;
167 info.si_addr = (void __user *)regs->tpc;
168 info.si_trapno = 0;
169 force_sig_info(SIGSEGV, &info, current);
170}
171
172void instruction_access_exception_tl1(struct pt_regs *regs,
173 unsigned long sfsr, unsigned long sfar)
174{
175 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP)
177 return;
178
179 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
180 instruction_access_exception(regs, sfsr, sfar);
181}
182
/* TL==0 data access exception.
 *
 * Kernel-mode faults are first checked against the exception tables
 * so uaccess helpers can recover; any other privileged fault is
 * fatal.  User-mode faults get SIGSEGV with the address from SFAR.
 */
void data_access_exception(struct pt_regs *regs,
			   unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Redirect execution to the fixup handler;
			 * search_extables_range may also have updated g2.
			 */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* No fixup entry: a privileged fault here is fatal. */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		       sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
222
223#ifdef CONFIG_PCI
/* Fault-poke state shared with the PCI controller probe routines. */
225extern volatile int pci_poke_in_progress;
226extern volatile int pci_poke_cpu;
227extern volatile int pci_poke_faulted;
228#endif
229
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* Only valid on the spitfire TLB family. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em: zero every I-cache and D-cache tag, walking the
	 * index space in 32-byte line steps over 2 * PAGE_SIZE.
	 */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU: set the IC/DC (cache) and IM/DM (MMU)
	 * enable bits in the LSU control register.
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
255
256void do_iae(struct pt_regs *regs)
257{
258 siginfo_t info;
259
260 spitfire_clean_and_reenable_l1_caches();
261
262 if (notify_die(DIE_TRAP, "instruction access exception", regs,
263 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 return;
265
266 info.si_signo = SIGBUS;
267 info.si_errno = 0;
268 info.si_code = BUS_OBJERR;
269 info.si_addr = (void *)0;
270 info.si_trapno = 0;
271 force_sig_info(SIGBUS, &info, current);
272}
273
274void do_dae(struct pt_regs *regs)
275{
276 siginfo_t info;
277
278#ifdef CONFIG_PCI
279 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
280 spitfire_clean_and_reenable_l1_caches();
281
282 pci_poke_faulted = 1;
283
284 /* Why the fuck did they have to change this? */
285 if (tlb_type == cheetah || tlb_type == cheetah_plus)
286 regs->tpc += 4;
287
288 regs->tnpc = regs->tpc + 4;
289 return;
290 }
291#endif
292 spitfire_clean_and_reenable_l1_caches();
293
294 if (notify_die(DIE_TRAP, "data access exception", regs,
295 0, 0x30, SIGTRAP) == NOTIFY_STOP)
296 return;
297
298 info.si_signo = SIGBUS;
299 info.si_errno = 0;
300 info.si_code = BUS_OBJERR;
301 info.si_addr = (void *)0;
302 info.si_trapno = 0;
303 force_sig_info(SIGBUS, &info, current);
304}
305
/* Maps the low 8 syndrome bits of a UDB status register to the code
 * that cee_log() hands to prom_getunumber() for memory-module lookup.
 * Read-only, hence const.
 */
static const char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
340
341/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
342 * in the following format. The AFAR is left as is, with
343 * reserved bits cleared, and is a raw 40-bit physical
344 * address.
345 */
346#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
347#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
348#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
349#define CE_STATUS_UDBH_SHIFT 43
350#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
351#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
352#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
353#define CE_STATUS_UDBL_SHIFT 33
354#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
355#define CE_STATUS_AFSR_ME (1UL << 32)
356#define CE_STATUS_AFSR_PRIV (1UL << 31)
357#define CE_STATUS_AFSR_ISAP (1UL << 30)
358#define CE_STATUS_AFSR_ETP (1UL << 29)
359#define CE_STATUS_AFSR_IVUE (1UL << 28)
360#define CE_STATUS_AFSR_TO (1UL << 27)
361#define CE_STATUS_AFSR_BERR (1UL << 26)
362#define CE_STATUS_AFSR_LDP (1UL << 25)
363#define CE_STATUS_AFSR_CP (1UL << 24)
364#define CE_STATUS_AFSR_WP (1UL << 23)
365#define CE_STATUS_AFSR_EDP (1UL << 22)
366#define CE_STATUS_AFSR_UE (1UL << 21)
367#define CE_STATUS_AFSR_CE (1UL << 20)
368#define CE_STATUS_AFSR_ETS (0xfUL << 16)
369#define CE_STATUS_AFSR_ETS_SHIFT 16
370#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
371#define CE_STATUS_AFSR_PSYND_SHIFT 0
372
373/* Layout of Ecache TAG Parity Syndrome of AFSR */
374#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
375#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
376#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
377#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
378
379static char *syndrome_unknown = "<Unknown>";
380
381asmlinkage void cee_log(unsigned long ce_status,
382 unsigned long afar,
383 struct pt_regs *regs)
384{
385 char memmod_str[64];
386 char *p;
387 unsigned short scode, udb_reg;
388
389 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
390 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
391 smp_processor_id(),
392 (ce_status & CE_STATUS_AFSR_MASK),
393 afar,
394 ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
395 ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
396
397 udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
398 if (udb_reg & (1 << 8)) {
399 scode = ecc_syndrome_table[udb_reg & 0xff];
400 if (prom_getunumber(scode, afar,
401 memmod_str, sizeof(memmod_str)) == -1)
402 p = syndrome_unknown;
403 else
404 p = memmod_str;
405 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
406 "Memory Module \"%s\"\n",
407 smp_processor_id(), scode, p);
408 }
409
410 udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
411 if (udb_reg & (1 << 8)) {
412 scode = ecc_syndrome_table[udb_reg & 0xff];
413 if (prom_getunumber(scode, afar,
414 memmod_str, sizeof(memmod_str)) == -1)
415 p = syndrome_unknown;
416 else
417 p = memmod_str;
418 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
419 "Memory Module \"%s\"\n",
420 smp_processor_id(), scode, p);
421 }
422}
423
424/* Cheetah error trap handling. */
425static unsigned long ecache_flush_physbase;
426static unsigned long ecache_flush_linesize;
427static unsigned long ecache_flush_size;
428
/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;		/* Async Fault Status Register */
/*0x08*/u64 afar;		/* Async Fault Address Register */

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data */
/*0x30*/u64 dcache_index;	/* D-cache index */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
/*0x40*/u64 dcache_utag;	/* D-cache microtag */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index */
/*0x98*/u64 icache_tag;		/* I-cache phys tag */
/*0xa0*/u64 icache_utag;	/* I-cache microtag */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */

	/* Pad the structure to 0x100 bytes (32 u64 slots in total). */
/*0xf0*/u64 __pad[32 - 30];
};
/* Sentinel stored in ->afsr to mark a log slot as unused. */
#define CHAFSR_INVALID		((u64)-1L)
463
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* human-readable description */
};
472
473static const char CHAFSR_PERR_msg[] =
474 "System interface protocol error";
475static const char CHAFSR_IERR_msg[] =
476 "Internal processor error";
/* Fix: "addresss" typo in the diagnostic string. */
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
479static const char CHAFSR_UCU_msg[] =
480 "Uncorrectable E-cache ECC error for ifetch/data";
481static const char CHAFSR_UCC_msg[] =
482 "SW Correctable E-cache ECC error for ifetch/data";
483static const char CHAFSR_UE_msg[] =
484 "Uncorrectable system bus data ECC error for read";
485static const char CHAFSR_EDU_msg[] =
486 "Uncorrectable E-cache ECC error for stmerge/blkld";
487static const char CHAFSR_EMU_msg[] =
488 "Uncorrectable system bus MTAG error";
489static const char CHAFSR_WDU_msg[] =
490 "Uncorrectable E-cache ECC error for writeback";
491static const char CHAFSR_CPU_msg[] =
492 "Uncorrectable ECC error for copyout";
493static const char CHAFSR_CE_msg[] =
494 "HW corrected system bus data ECC error for read";
495static const char CHAFSR_EDC_msg[] =
496 "HW corrected E-cache ECC error for stmerge/blkld";
497static const char CHAFSR_EMC_msg[] =
498 "HW corrected system bus MTAG ECC error";
499static const char CHAFSR_WDC_msg[] =
500 "HW corrected E-cache ECC error for writeback";
501static const char CHAFSR_CPC_msg[] =
502 "HW corrected ECC error for copyout";
503static const char CHAFSR_TO_msg[] =
504 "Unmapped error from system bus";
505static const char CHAFSR_BERR_msg[] =
506 "Bus error response from system bus";
507static const char CHAFSR_IVC_msg[] =
508 "HW corrected system bus data ECC error for ivec read";
509static const char CHAFSR_IVU_msg[] =
510 "Uncorrectable system bus data ECC error for ivec read";
511static struct afsr_error_table __cheetah_error_table[] = {
512 { CHAFSR_PERR, CHAFSR_PERR_msg },
513 { CHAFSR_IERR, CHAFSR_IERR_msg },
514 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
515 { CHAFSR_UCU, CHAFSR_UCU_msg },
516 { CHAFSR_UCC, CHAFSR_UCC_msg },
517 { CHAFSR_UE, CHAFSR_UE_msg },
518 { CHAFSR_EDU, CHAFSR_EDU_msg },
519 { CHAFSR_EMU, CHAFSR_EMU_msg },
520 { CHAFSR_WDU, CHAFSR_WDU_msg },
521 { CHAFSR_CPU, CHAFSR_CPU_msg },
522 { CHAFSR_CE, CHAFSR_CE_msg },
523 { CHAFSR_EDC, CHAFSR_EDC_msg },
524 { CHAFSR_EMC, CHAFSR_EMC_msg },
525 { CHAFSR_WDC, CHAFSR_WDC_msg },
526 { CHAFSR_CPC, CHAFSR_CPC_msg },
527 { CHAFSR_TO, CHAFSR_TO_msg },
528 { CHAFSR_BERR, CHAFSR_BERR_msg },
529 /* These two do not update the AFAR. */
530 { CHAFSR_IVC, CHAFSR_IVC_msg },
531 { CHAFSR_IVU, CHAFSR_IVU_msg },
532 { 0, NULL },
533};
534static const char CHPAFSR_DTO_msg[] =
535 "System bus unmapped error for prefetch/storequeue-read";
536static const char CHPAFSR_DBERR_msg[] =
537 "System bus error for prefetch/storequeue-read";
538static const char CHPAFSR_THCE_msg[] =
539 "Hardware corrected E-cache Tag ECC error";
540static const char CHPAFSR_TSCE_msg[] =
541 "SW handled correctable E-cache Tag ECC error";
542static const char CHPAFSR_TUE_msg[] =
543 "Uncorrectable E-cache Tag ECC error";
544static const char CHPAFSR_DUE_msg[] =
545 "System bus uncorrectable data ECC error due to prefetch/store-fill";
546static struct afsr_error_table __cheetah_plus_error_table[] = {
547 { CHAFSR_PERR, CHAFSR_PERR_msg },
548 { CHAFSR_IERR, CHAFSR_IERR_msg },
549 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
550 { CHAFSR_UCU, CHAFSR_UCU_msg },
551 { CHAFSR_UCC, CHAFSR_UCC_msg },
552 { CHAFSR_UE, CHAFSR_UE_msg },
553 { CHAFSR_EDU, CHAFSR_EDU_msg },
554 { CHAFSR_EMU, CHAFSR_EMU_msg },
555 { CHAFSR_WDU, CHAFSR_WDU_msg },
556 { CHAFSR_CPU, CHAFSR_CPU_msg },
557 { CHAFSR_CE, CHAFSR_CE_msg },
558 { CHAFSR_EDC, CHAFSR_EDC_msg },
559 { CHAFSR_EMC, CHAFSR_EMC_msg },
560 { CHAFSR_WDC, CHAFSR_WDC_msg },
561 { CHAFSR_CPC, CHAFSR_CPC_msg },
562 { CHAFSR_TO, CHAFSR_TO_msg },
563 { CHAFSR_BERR, CHAFSR_BERR_msg },
564 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
565 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
566 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
567 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
568 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
569 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
570 /* These two do not update the AFAR. */
571 { CHAFSR_IVC, CHAFSR_IVC_msg },
572 { CHAFSR_IVU, CHAFSR_IVU_msg },
573 { 0, NULL },
574};
575static const char JPAFSR_JETO_msg[] =
576 "System interface protocol error, hw timeout caused";
577static const char JPAFSR_SCE_msg[] =
578 "Parity error on system snoop results";
579static const char JPAFSR_JEIC_msg[] =
580 "System interface protocol error, illegal command detected";
581static const char JPAFSR_JEIT_msg[] =
582 "System interface protocol error, illegal ADTYPE detected";
583static const char JPAFSR_OM_msg[] =
584 "Out of range memory error has occurred";
585static const char JPAFSR_ETP_msg[] =
586 "Parity error on L2 cache tag SRAM";
587static const char JPAFSR_UMS_msg[] =
588 "Error due to unsupported store";
589static const char JPAFSR_RUE_msg[] =
590 "Uncorrectable ECC error from remote cache/memory";
591static const char JPAFSR_RCE_msg[] =
592 "Correctable ECC error from remote cache/memory";
593static const char JPAFSR_BP_msg[] =
594 "JBUS parity error on returned read data";
595static const char JPAFSR_WBP_msg[] =
596 "JBUS parity error on data for writeback or block store";
597static const char JPAFSR_FRC_msg[] =
598 "Foreign read to DRAM incurring correctable ECC error";
599static const char JPAFSR_FRU_msg[] =
600 "Foreign read to DRAM incurring uncorrectable ECC error";
601static struct afsr_error_table __jalapeno_error_table[] = {
602 { JPAFSR_JETO, JPAFSR_JETO_msg },
603 { JPAFSR_SCE, JPAFSR_SCE_msg },
604 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
605 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
606 { CHAFSR_PERR, CHAFSR_PERR_msg },
607 { CHAFSR_IERR, CHAFSR_IERR_msg },
608 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
609 { CHAFSR_UCU, CHAFSR_UCU_msg },
610 { CHAFSR_UCC, CHAFSR_UCC_msg },
611 { CHAFSR_UE, CHAFSR_UE_msg },
612 { CHAFSR_EDU, CHAFSR_EDU_msg },
613 { JPAFSR_OM, JPAFSR_OM_msg },
614 { CHAFSR_WDU, CHAFSR_WDU_msg },
615 { CHAFSR_CPU, CHAFSR_CPU_msg },
616 { CHAFSR_CE, CHAFSR_CE_msg },
617 { CHAFSR_EDC, CHAFSR_EDC_msg },
618 { JPAFSR_ETP, JPAFSR_ETP_msg },
619 { CHAFSR_WDC, CHAFSR_WDC_msg },
620 { CHAFSR_CPC, CHAFSR_CPC_msg },
621 { CHAFSR_TO, CHAFSR_TO_msg },
622 { CHAFSR_BERR, CHAFSR_BERR_msg },
623 { JPAFSR_UMS, JPAFSR_UMS_msg },
624 { JPAFSR_RUE, JPAFSR_RUE_msg },
625 { JPAFSR_RCE, JPAFSR_RCE_msg },
626 { JPAFSR_BP, JPAFSR_BP_msg },
627 { JPAFSR_WBP, JPAFSR_WBP_msg },
628 { JPAFSR_FRC, JPAFSR_FRC_msg },
629 { JPAFSR_FRU, JPAFSR_FRU_msg },
630 /* These two do not update the AFAR. */
631 { CHAFSR_IVU, CHAFSR_IVU_msg },
632 { 0, NULL },
633};
634static struct afsr_error_table *cheetah_error_table;
635static unsigned long cheetah_afsr_errors;
636
637/* This is allocated at boot time based upon the largest hardware
638 * cpu ID in the system. We allocate two entries per cpu, one for
639 * TL==0 logging and one for TL >= 1 logging.
640 */
641struct cheetah_err_info *cheetah_error_log;
642
643static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
644{
645 struct cheetah_err_info *p;
646 int cpu = smp_processor_id();
647
648 if (!cheetah_error_log)
649 return NULL;
650
651 p = cheetah_error_log + (cpu * 2);
652 if ((afsr & CHAFSR_TL1) != 0UL)
653 p++;
654
655 return p;
656}
657
658extern unsigned int tl0_icpe[], tl1_icpe[];
659extern unsigned int tl0_dcpe[], tl1_dcpe[];
660extern unsigned int tl0_fecc[], tl1_fecc[];
661extern unsigned int tl0_cee[], tl1_cee[];
662extern unsigned int tl0_iae[], tl1_iae[];
663extern unsigned int tl0_dae[], tl1_dae[];
664extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
665extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
666extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
667extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
668extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
669
670void __init cheetah_ecache_flush_init(void)
671{
672 unsigned long largest_size, smallest_linesize, order, ver;
673 int node, i, instance;
674
675 /* Scan all cpu device tree nodes, note two values:
676 * 1) largest E-cache size
677 * 2) smallest E-cache line size
678 */
679 largest_size = 0UL;
680 smallest_linesize = ~0UL;
681
682 instance = 0;
683 while (!cpu_find_by_instance(instance, &node, NULL)) {
684 unsigned long val;
685
686 val = prom_getintdefault(node, "ecache-size",
687 (2 * 1024 * 1024));
688 if (val > largest_size)
689 largest_size = val;
690 val = prom_getintdefault(node, "ecache-line-size", 64);
691 if (val < smallest_linesize)
692 smallest_linesize = val;
693 instance++;
694 }
695
696 if (largest_size == 0UL || smallest_linesize == ~0UL) {
697 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
698 "parameters.\n");
699 prom_halt();
700 }
701
702 ecache_flush_size = (2 * largest_size);
703 ecache_flush_linesize = smallest_linesize;
704
705 /* Discover a physically contiguous chunk of physical
706 * memory in 'sp_banks' of size ecache_flush_size calculated
707 * above. Store the physical base of this area at
708 * ecache_flush_physbase.
709 */
710 for (node = 0; ; node++) {
711 if (sp_banks[node].num_bytes == 0)
712 break;
713 if (sp_banks[node].num_bytes >= ecache_flush_size) {
714 ecache_flush_physbase = sp_banks[node].base_addr;
715 break;
716 }
717 }
718
719 /* Note: Zero would be a valid value of ecache_flush_physbase so
720 * don't use that as the success test. :-)
721 */
722 if (sp_banks[node].num_bytes == 0) {
723 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
724 "contiguous physical memory.\n", ecache_flush_size);
725 prom_halt();
726 }
727
728 /* Now allocate error trap reporting scoreboard. */
729 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
730 for (order = 0; order < MAX_ORDER; order++) {
731 if ((PAGE_SIZE << order) >= node)
732 break;
733 }
734 cheetah_error_log = (struct cheetah_err_info *)
735 __get_free_pages(GFP_KERNEL, order);
736 if (!cheetah_error_log) {
737 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
738 "error logging scoreboard (%d bytes).\n", node);
739 prom_halt();
740 }
741 memset(cheetah_error_log, 0, PAGE_SIZE << order);
742
743 /* Mark all AFSRs as invalid so that the trap handler will
744 * log new new information there.
745 */
746 for (i = 0; i < 2 * NR_CPUS; i++)
747 cheetah_error_log[i].afsr = CHAFSR_INVALID;
748
749 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
750 if ((ver >> 32) == 0x003e0016) {
751 cheetah_error_table = &__jalapeno_error_table[0];
752 cheetah_afsr_errors = JPAFSR_ERRORS;
753 } else if ((ver >> 32) == 0x003e0015) {
754 cheetah_error_table = &__cheetah_plus_error_table[0];
755 cheetah_afsr_errors = CHPAFSR_ERRORS;
756 } else {
757 cheetah_error_table = &__cheetah_error_table[0];
758 cheetah_afsr_errors = CHAFSR_ERRORS;
759 }
760
761 /* Now patch trap tables. */
762 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
763 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
764 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
765 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
766 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
767 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
768 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
769 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
770 if (tlb_type == cheetah_plus) {
771 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
772 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
773 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
774 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
775 }
776 flushi(PAGE_OFFSET);
777}
778
/* Displace the entire E-cache by reading through a private physically
 * contiguous region sized at twice the largest E-cache (set up in
 * cheetah_ecache_flush_init) via ASI_PHYS_USE_EC.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Decrement the offset by one line size per iteration and load
	 * (in the branch delay slot) from flush_base + offset until the
	 * offset reaches zero.
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
792
/* Displace the single E-cache line holding @physaddr: load the flush
 * region address that indexes to the same line (physaddr modulo half
 * the flush region) and its alias in the region's upper half.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
808
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags: walk the 32K index
	 * space in 32-byte line steps, storing zero via ASI_IC_TAG.
	 * NOTE(review): (2 << 3) presumably selects the tag-valid
	 * field in the diagnostic address — confirm against the
	 * UltraSPARC-III manual.
	 */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}
826
/* Flush the I-cache with it turned off (see comment above), then
 * restore the previous DCU control register contents.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache.
	 * NOTE(review): the 'or' SETS the DCU_IC bit while the comment
	 * says "disable" — verify the bit polarity against the
	 * UltraSPARC DCU control register description.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
848
/* Invalidate the D-cache by zeroing every tag: walk the 64K index
 * space in 32-byte line steps, storing zero via ASI_DCACHE_TAG.
 */
static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}
860
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	/* Walk the 64K D-cache index space one 32-byte line at a time. */
	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		unsigned long tag = (i >> 14);	/* utag value for this line */
		unsigned long j;

		/* Write the microtag for the line... */
		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero all 32 bytes of line data, 8 at a time. */
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
888
889/* Conversion tables used to frob Cheetah AFSR syndrome values into
890 * something palatable to the memory controller driver get_unumber
891 * routine.
892 */
893#define MT0 137
894#define MT1 138
895#define MT2 139
896#define NONE 254
897#define MTC0 140
898#define MTC1 141
899#define MTC2 142
900#define MTC3 143
901#define C0 128
902#define C1 129
903#define C2 130
904#define C3 131
905#define C4 132
906#define C5 133
907#define C6 134
908#define C7 135
909#define C8 136
910#define M2 144
911#define M3 145
912#define M4 146
913#define M 147
914static unsigned char cheetah_ecc_syntab[] = {
915/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
916/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
917/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
918/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
919/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
920/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
921/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
922/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
923/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
924/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
925/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
926/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
927/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
928/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
929/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
930/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
931/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
932/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
933/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
934/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
935/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
936/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
937/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
938/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
939/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
940/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
941/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
942/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
943/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
944/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
945/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
946/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
947};
/* M-tag syndrome decode table, indexed by the 4-bit M_SYND field of
 * the AFSR (16 entries).  Each entry names the single M-tag data or
 * check bit in error (MT0-MT2, MTC0-MTC3), or NONE when the syndrome
 * does not correspond to a correctable single-bit M-tag error.
 */
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
958
959/* Return the highest priority error conditon mentioned. */
960static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
961{
962 unsigned long tmp = 0;
963 int i;
964
965 for (i = 0; cheetah_error_table[i].mask; i++) {
966 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
967 return tmp;
968 }
969 return tmp;
970}
971
972static const char *cheetah_get_string(unsigned long bit)
973{
974 int i;
975
976 for (i = 0; cheetah_error_table[i].mask; i++) {
977 if ((bit & cheetah_error_table[i].mask) != 0UL)
978 return cheetah_error_table[i].name;
979 }
980 return "???";
981}
982
983extern int chmc_getunumber(int, unsigned long, char *, int);
984
/* Log everything we know about a Cheetah error trap: the raw AFSR/AFAR,
 * trap PC/NPC/state, decoded syndromes, the highest-priority error name,
 * the D/I/E-cache snapshots captured by the trap handler, and any
 * additional error bits beyond the primary one.  All output uses
 * KERN_WARNING when the condition is recoverable, KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant.  For E-syndrome (data ECC) errors
	 * we decode the syndrome through cheetah_ecc_syntab, for M-syndrome
	 * (M-tag) errors through cheetah_mtag_syntab, then ask the memory
	 * controller driver to translate (syndrome, AFAR) into a DIMM
	 * unit-number string.
	 */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report each remaining error bit (beyond the primary one already
	 * printed), highest priority first.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1099
/* Re-read the AFSR to see whether any new error bits have been latched.
 * Returns 1 (and, if @logp is non-NULL, records the fresh AFSR/AFAR into
 * it) when new error bits are present, else 0.  In either case the bits
 * just read are written back to the AFSR, which clears them (W1C
 * semantics), so the next trap starts from a clean register.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Write the observed bits back to clear them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1124
/* Handle a Fast-ECC (E-cache ECC) error trap.  The low-level trap code
 * has already captured cache/register snapshots into the per-cpu error
 * log and disabled the caches; our job is to flush, re-enable, decide
 * recoverability, and log.  Panics if the error is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	/* No log slot means the error hit before logging was set up;
	 * nothing sane to do but report via the PROM and halt.
	 */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1210
1211/* Try to fix a correctable error by pushing the line out from
1212 * the E-cache. Recheck error reporting registers to see if the
1213 * problem is intermittent.
1214 */
/* Attempt to scrub a correctable error at @physaddr.
 * Returns 0 if no error recurred (intermittent problem), 1 if a retry
 * cleared it, 2 if the error persisted even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled.
	 * The original enable mask is saved in orig_estate and
	 * restored at the end.
	 */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align for the ldxa/casxa */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1275
1276/* Return non-zero if PADDR is a valid physical memory address. */
1277static int cheetah_check_main_memory(unsigned long paddr)
1278{
1279 int i;
1280
1281 for (i = 0; ; i++) {
1282 if (sp_banks[i].num_bytes == 0)
1283 break;
1284 if (paddr >= sp_banks[i].base_addr &&
1285 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1286 return 1;
1287 }
1288 return 0;
1289}
1290
/* Handle a Correctable-ECC error trap.  If the AFAR points at real
 * memory and a CE is indicated, try to scrub the line; then flush the
 * affected caches, re-enable the I-cache and correctable-error traps,
 * and log.  Panics only on PERR/IERR/ISAP (corrupted processor state).
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	/* No log slot means the error hit before logging was set up. */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If the only error present is an E-cache data parity
		 * (EDC) or copyout parity (CPC) error, flushing just the
		 * affected line is enough; with other errors mixed in we
		 * flush the whole E-cache to be safe.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1390
/* Handle a deferred asynchronous error trap (e.g. uncorrectable data
 * errors, bus errors).  Special-cases the PCI "poke" probe sequence,
 * then flushes/re-enables caches and error reporting, logs, and
 * attempts user-visible recovery by patching the trapping PC to an
 * exception-table fixup when one exists.  Panics otherwise.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.  A deliberate config
	 * probe may fault; flag it and skip the faulting instruction
	 * instead of treating it as a real error.
	 */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear any latched error state. */
		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		/* Skip the instruction which faulted. */
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	/* No log slot means the error hit before logging was set up. */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* If the sole error is EDU or BERR, a single-line E-cache
		 * flush suffices; mixed errors get a full flush.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_extables_range(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never handed
				 * out again; if the pfn is bogus we give up.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1578
1579/* Handle a D/I cache parity error trap. TYPE is encoded as:
1580 *
1581 * Bit0: 0=dcache,1=icache
1582 * Bit1: 0=recoverable,1=unrecoverable
1583 *
1584 * The hardware has disabled both the I-cache and D-cache in
1585 * the %dcr register.
1586 */
/* See the comment above: Bit0 of type selects I-cache vs D-cache,
 * Bit1 selects unrecoverable vs recoverable.  Flush/zap the affected
 * cache, re-enable both caches, then either panic or just warn.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable: report and die. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	/* Recoverable: just warn. */
	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1618
/* Common FPU-exception disposition.  A kernel-mode FP trap is simply
 * skipped (advance past the faulting instruction); a user-mode trap
 * decodes the saved FSR into an si_code and delivers SIGFPE.
 */
void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel mode: step over the faulting instruction. */
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
		/* FSR.ftt == 1 (IEEE exception): refine si_code from the
		 * current exception (cexc) bits, highest severity first.
		 */
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}
1652
/* IEEE FP exception trap (ttype 0x24): notify debuggers, then
 * fall into the common FP exception disposition.
 */
void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}
1661
1662extern int do_mathemu(struct pt_regs *, struct fpustate *);
1663
/* "FP other" exception trap (ttype 0x25).  For unfinished or
 * unimplemented FPops we try software emulation first; only if that
 * fails (or for other ftt values) do we fall back to the common
 * signal-delivery path.
 */
void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	/* Dispatch on FSR.ftt (bits 16:14). */
	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}
1683
/* Tagged arithmetic overflow trap (ttype 0x26): fatal in kernel mode,
 * otherwise deliver SIGEMT/EMT_TAGOVF to the user process.
 */
void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		/* Narrow PCs for 32-bit tasks. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}
1705
/* Integer divide-by-zero trap (ttype 0x28): fatal in kernel mode,
 * otherwise deliver SIGFPE/FPE_INTDIV to the user process.
 */
void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		/* Narrow PCs for 32-bit tasks. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}
1727
/* Print the nine instruction words around a kernel PC (three before,
 * the faulting word bracketed with '<'/'>', five after).  Silently
 * does nothing for a misaligned PC.
 */
void instruction_dump (unsigned int *pc)
{
	int off;

	/* A PC that is not 4-byte aligned cannot be dereferenced sanely. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		char lmark = (off == 0) ? '<' : ' ';
		char rmark = (off == 0) ? '>' : ' ';

		printk("%c%08x%c", lmark, pc[off], rmark);
	}
	printk("\n");
}
1740
1741static void user_instruction_dump (unsigned int __user *pc)
1742{
1743 int i;
1744 unsigned int buf[9];
1745
1746 if ((((unsigned long) pc) & 3))
1747 return;
1748
1749 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1750 return;
1751
1752 printk("Instruction DUMP:");
1753 for (i = 0; i < 9; i++)
1754 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
1755 printk("\n");
1756}
1757
/* Print a call trace for @tsk starting at kernel stack pointer @_ksp.
 * Walks saved register windows via the frame pointer chain, stopping
 * at the first frame that falls outside the task's thread area or
 * after 16 frames.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp, thread_base, ksp;
	struct thread_info *tp = tsk->thread_info;
	struct reg_window *rw;
	int count = 0;

	ksp = (unsigned long) _ksp;

	/* For the current task, spill live register windows to the
	 * stack first so the frame chain is complete.
	 */
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	do {
		/* Bogus frame pointer? */
		if (fp < (thread_base + sizeof(struct thread_info)) ||
		    fp >= (thread_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *)fp;
		pc = rw->ins[7];	/* saved return address (%i7) */
		printk(" [%016lx] ", pc);
		print_symbol("%s\n", pc);
		fp = rw->ins[6] + STACK_BIAS;	/* caller's frame (%i6/%fp) */
	} while (++count < 16);
#ifndef CONFIG_KALLSYMS
	printk("\n");
#endif
}
1792
/* Dump the current task's call trace, using the live %fp register
 * as the starting stack pointer.
 */
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov %%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}
1801
1802EXPORT_SYMBOL(dump_stack);
1803
1804static inline int is_kernel_stack(struct task_struct *task,
1805 struct reg_window *rw)
1806{
1807 unsigned long rw_addr = (unsigned long) rw;
1808 unsigned long thread_base, thread_end;
1809
1810 if (rw_addr < PAGE_OFFSET) {
1811 if (task != &init_task)
1812 return 0;
1813 }
1814
1815 thread_base = (unsigned long) task->thread_info;
1816 thread_end = thread_base + sizeof(union thread_union);
1817 if (rw_addr >= thread_base &&
1818 rw_addr < thread_end &&
1819 !(rw_addr & 0x7UL))
1820 return 1;
1821
1822 return 0;
1823}
1824
1825static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1826{
1827 unsigned long fp = rw->ins[6];
1828
1829 if (!fp)
1830 return NULL;
1831
1832 return (struct reg_window *) (fp + STACK_BIAS);
1833}
1834
/* Print an oops banner, the registers, a backtrace and an instruction
 * dump, then terminate the current task: SIGKILL if we died in kernel
 * mode, SIGSEGV otherwise.  Despite the name this always exits; the
 * "if kernel" part only affects how much is dumped and the exit signal.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill register windows so the frame chain on the stack is valid. */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1885
1886extern int handle_popc(u32 insn, struct pt_regs *regs);
1887extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1888
/* Illegal instruction trap (ttype 0x10).  Fatal in kernel mode.
 * For user mode we peek at the faulting instruction and emulate
 * POPC and LDQ/STQ in software if possible; otherwise deliver
 * SIGILL/ILL_ILLOPC.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;	/* narrow PC for 32-bit tasks */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
1920
/* Memory-address-not-aligned trap (ttype 0x34).  Kernel-mode faults
 * are handed to the unaligned-access fixup code (which emulates the
 * access); user-mode faults get SIGBUS/BUS_ADRALN with the faulting
 * address (SFAR) reported.
 */
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		extern void kernel_unaligned_trap(struct pt_regs *regs,
						  unsigned int insn,
						  unsigned long sfar,
						  unsigned long sfsr);

		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
				      sfar, sfsr);
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
1946
/* Privileged-opcode trap (ttype 0x11) from user mode: deliver
 * SIGILL/ILL_PRVOPC.
 */
void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		/* Narrow PCs for 32-bit tasks. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
1966
/* Privileged-action trap: same user-visible disposition as a
 * privileged-opcode trap.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
1971
/* Trap level 1 stuff or other traps we should never see...
 * Each of these indicates a condition the kernel cannot recover from
 * (most are traps taken while already at trap level 1).  The TL1
 * variants first dump the saved trap-level-1 register log, then all
 * of them terminate via die_if_kernel().
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2071
/* SunOS/SVR4 compatibility trap: return a fake 32-bit PSR (derived
 * from TSTATE) in %o0, then advance past the trap instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		/* Narrow PCs for 32-bit tasks. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2082
2083extern void thread_info_offsets_are_bolixed_dave(void);
2084
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check: the TI_* offsets are hard-coded in
	 * assembly (entry.S etc.) and must match the C layout of struct
	 * thread_info exactly; TI_FPREGS additionally needs 64-byte
	 * alignment for the FP register save/restore code.  Any mismatch
	 * calls an undefined function, failing the link.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
new file mode 100644
index 000000000000..491bb3681f9d
--- /dev/null
+++ b/arch/sparc64/kernel/ttable.S
@@ -0,0 +1,280 @@
1/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
2 * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
3 *
4 * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <linux/config.h>
8
9 .globl sparc64_ttable_tl0, sparc64_ttable_tl1
10 .globl tl0_icpe, tl1_icpe
11 .globl tl0_dcpe, tl1_dcpe
12 .globl tl0_fecc, tl1_fecc
13 .globl tl0_cee, tl1_cee
14 .globl tl0_iae, tl1_iae
15 .globl tl0_dae, tl1_dae
16
17sparc64_ttable_tl0:
18tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
19tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
20tl0_iax: membar #Sync
21 TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
22tl0_resv009: BTRAP(0x9)
23tl0_iae: TRAP(do_iae)
24tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
25tl0_ill: membar #Sync
26 TRAP_7INSNS(do_illegal_instruction)
27tl0_privop: TRAP(do_privop)
28tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
29tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
30tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
31tl0_fpdis: TRAP_NOSAVE(do_fpdis)
32tl0_fpieee: TRAP_SAVEFPU(do_fpieee)
33tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos)
34tl0_tof: TRAP(do_tof)
35tl0_cwin: CLEAN_WINDOW
36tl0_div0: TRAP(do_div0)
37tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
38tl0_resv02f: BTRAP(0x2f)
39tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
40tl0_resv031: BTRAP(0x31)
41tl0_dae: TRAP(do_dae)
42tl0_resv033: BTRAP(0x33)
43tl0_mna: TRAP_NOSAVE(do_mna)
44tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
45tl0_stdfmna: TRAP_NOSAVE(do_stdfmna)
46tl0_privact: TRAP_NOSAVE(__do_privact)
47tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
48tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
49#ifdef CONFIG_SMP
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53#else
54tl0_irq1: BTRAP(0x41)
55tl0_irq2: BTRAP(0x42)
56tl0_irq3: BTRAP(0x43)
57#endif
58tl0_irq4: TRAP_IRQ(handler_irq, 4)
59tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
60tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
61tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
62tl0_irq11: TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
63tl0_irq13: TRAP_IRQ(handler_irq, 13)
64#ifndef CONFIG_SMP
65tl0_irq14: TRAP_IRQ(handler_irq, 14)
66#else
67tl0_irq14: TICK_SMP_IRQ
68#endif
69tl0_irq15: TRAP_IRQ(handler_irq, 15)
70tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
71tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
72tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
73tl0_ivec: TRAP_IVEC
74tl0_paw: TRAP(do_paw)
75tl0_vaw: TRAP(do_vaw)
76tl0_cee: TRAP_NOSAVE(cee_trap)
77tl0_iamiss:
78#include "itlb_base.S"
79tl0_damiss:
80#include "dtlb_base.S"
81tl0_daprot:
82#include "dtlb_prot.S"
83tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */
84tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
85tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
86tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
87tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
88tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
89tl0_s0n: SPILL_0_NORMAL
90tl0_s1n: SPILL_1_NORMAL
91tl0_s2n: SPILL_2_NORMAL
92tl0_s3n: SPILL_3_NORMAL
93tl0_s4n: SPILL_4_NORMAL
94tl0_s5n: SPILL_5_NORMAL
95tl0_s6n: SPILL_6_NORMAL
96tl0_s7n: SPILL_7_NORMAL
97tl0_s0o: SPILL_0_OTHER
98tl0_s1o: SPILL_1_OTHER
99tl0_s2o: SPILL_2_OTHER
100tl0_s3o: SPILL_3_OTHER
101tl0_s4o: SPILL_4_OTHER
102tl0_s5o: SPILL_5_OTHER
103tl0_s6o: SPILL_6_OTHER
104tl0_s7o: SPILL_7_OTHER
105tl0_f0n: FILL_0_NORMAL
106tl0_f1n: FILL_1_NORMAL
107tl0_f2n: FILL_2_NORMAL
108tl0_f3n: FILL_3_NORMAL
109tl0_f4n: FILL_4_NORMAL
110tl0_f5n: FILL_5_NORMAL
111tl0_f6n: FILL_6_NORMAL
112tl0_f7n: FILL_7_NORMAL
113tl0_f0o: FILL_0_OTHER
114tl0_f1o: FILL_1_OTHER
115tl0_f2o: FILL_2_OTHER
116tl0_f3o: FILL_3_OTHER
117tl0_f4o: FILL_4_OTHER
118tl0_f5o: FILL_5_OTHER
119tl0_f6o: FILL_6_OTHER
120tl0_f7o: FILL_7_OTHER
121tl0_sunos: SUNOS_SYSCALL_TRAP
122tl0_bkpt: BREAKPOINT_TRAP
123tl0_divz: TRAP(do_div0)
124tl0_flushw: FLUSH_WINDOW_TRAP
125tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
126 .globl tl0_solaris
127tl0_solaris: SOLARIS_SYSCALL_TRAP
128tl0_netbsd: NETBSD_SYSCALL_TRAP
129tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
130tl0_resv10f: BTRAP(0x10f)
131tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
132tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP
133tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113)
134tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115)
135tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117)
136tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119)
137tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b)
138tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d)
139tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
140tl0_getcc: GETCC_TRAP
141tl0_setcc: SETCC_TRAP
142tl0_getpsr: TRAP(do_getpsr)
143tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
144tl0_solindir: INDIRECT_SOLARIS_SYSCALL(156)
145tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
146tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
147tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
148tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
149tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
150tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
151tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
152tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
153tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
154tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
155tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
156tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
157tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
158tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
159tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
160tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
161tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) BTRAP(0x172)
162tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
163tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
164tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
165#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
166tl0_resv180: BTRAPS(0x180) BTRAPS(0x188)
167tl0_resv190: BTRAPS(0x190) BTRAPS(0x198)
168tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8)
169tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8)
170tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8)
171tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8)
172tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8)
173tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
174
175sparc64_ttable_tl1:
176tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
177tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
178tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
179tl1_resv009: BTRAPTL1(0x9)
180tl1_iae: TRAPTL1(do_iae_tl1)
181tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
182tl1_ill: TRAPTL1(do_ill_tl1)
183tl1_privop: BTRAPTL1(0x11)
184tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
185tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
186tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
187tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
188tl1_fpdis: TRAP_NOSAVE(do_fpdis)
189tl1_fpieee: TRAPTL1(do_fpieee_tl1)
190tl1_fpother: TRAPTL1(do_fpother_tl1)
191tl1_tof: TRAPTL1(do_tof_tl1)
192tl1_cwin: CLEAN_WINDOW
193tl1_div0: TRAPTL1(do_div0_tl1)
194tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
195tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
196tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
197tl1_resv031: BTRAPTL1(0x31)
198tl1_dae: TRAPTL1(do_dae_tl1)
199tl1_resv033: BTRAPTL1(0x33)
200tl1_mna: TRAP_NOSAVE(do_mna)
201tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
202tl1_stdfmna: TRAPTL1(do_stdfmna_tl1)
203tl1_privact: BTRAPTL1(0x37)
204tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
205tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
206tl1_resv040: BTRAPTL1(0x40)
207tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3)
208tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6)
209tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9)
210tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
211tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
212tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
213tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
214tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
215tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
216tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
217tl1_ivec: TRAP_IVEC
218tl1_paw: TRAPTL1(do_paw_tl1)
219tl1_vaw: TRAPTL1(do_vaw_tl1)
220
	/* The grotty trick to save %g1 into current->thread.cee_stuff
	 * is because when we take this trap we could be interrupting trap
	 * code already using the trap alternate global registers.
	 *
	 * We cross our fingers and pray that this store/load does
	 * not cause yet another CEE trap.
	 */
tl1_cee:	membar	#Sync
		stx	%g1, [%g6 + TI_CEE_STUFF]	/* stash %g1 in thread_info (%g6) */
		ldxa	[%g0] ASI_AFSR, %g1		/* read the AFSR */
		membar	#Sync
		stxa	%g1, [%g0] ASI_AFSR		/* write it back to clear the logged error bits */
		membar	#Sync
		ldx	[%g6 + TI_CEE_STUFF], %g1	/* restore %g1 */
		retry					/* re-execute the interrupted instruction */
236
237tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
238tl1_damiss:
239#include "dtlb_backend.S"
240tl1_daprot:
241#include "dtlb_prot.S"
242tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */
243tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */
244tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */
245tl1_resv073: BTRAPTL1(0x73)
246tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
247tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
248tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
249tl1_s0n: SPILL_0_NORMAL
250tl1_s1n: SPILL_1_NORMAL
251tl1_s2n: SPILL_2_NORMAL
252tl1_s3n: SPILL_3_NORMAL
253tl1_s4n: SPILL_4_NORMAL
254tl1_s5n: SPILL_5_NORMAL
255tl1_s6n: SPILL_6_NORMAL
256tl1_s7n: SPILL_7_NORMAL
257tl1_s0o: SPILL_0_OTHER
258tl1_s1o: SPILL_1_OTHER
259tl1_s2o: SPILL_2_OTHER
260tl1_s3o: SPILL_3_OTHER
261tl1_s4o: SPILL_4_OTHER
262tl1_s5o: SPILL_5_OTHER
263tl1_s6o: SPILL_6_OTHER
264tl1_s7o: SPILL_7_OTHER
265tl1_f0n: FILL_0_NORMAL
266tl1_f1n: FILL_1_NORMAL
267tl1_f2n: FILL_2_NORMAL
268tl1_f3n: FILL_3_NORMAL
269tl1_f4n: FILL_4_NORMAL
270tl1_f5n: FILL_5_NORMAL
271tl1_f6n: FILL_6_NORMAL
272tl1_f7n: FILL_7_NORMAL
273tl1_f0o: FILL_0_OTHER
274tl1_f1o: FILL_1_OTHER
275tl1_f2o: FILL_2_OTHER
276tl1_f3o: FILL_3_OTHER
277tl1_f4o: FILL_4_OTHER
278tl1_f5o: FILL_5_OTHER
279tl1_f6o: FILL_6_OTHER
280tl1_f7o: FILL_7_OTHER
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
new file mode 100644
index 000000000000..4372bf32ecf6
--- /dev/null
+++ b/arch/sparc64/kernel/unaligned.c
@@ -0,0 +1,729 @@
1/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
2 * unaligned.c: Unaligned load/store trap handling with special
3 * cases for the kernel to do them more quickly.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <asm/asi.h>
15#include <asm/ptrace.h>
16#include <asm/pstate.h>
17#include <asm/processor.h>
18#include <asm/system.h>
19#include <asm/uaccess.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/bitops.h>
23#include <asm/fpumacro.h>
24
25/* #define DEBUG_MNA */
26
/* Classification of an unaligned access by what it does to memory. */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif

/* Classify an instruction word as a load, a store, or a read-modify-write
 * memory operation based on its op3 field.
 */
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned int op3 = (insn >> 19) & 0xf;

	/* Bit 21 clear means a plain load. */
	if (!((insn >> 21) & 1))
		return load;

	/* op3 low nibble 15 is the swap* family; everything else stores. */
	return (op3 == 15) ? both : store;
}
57
58/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
59static inline int decode_access_size(unsigned int insn)
60{
61 unsigned int tmp;
62
63 tmp = ((insn >> 19) & 0xf);
64 if (tmp == 11 || tmp == 14) /* ldx/stx */
65 return 8;
66 tmp &= 3;
67 if (!tmp)
68 return 4;
69 else if (tmp == 3)
70 return 16; /* ldd/std - Although it is actually 8 */
71 else if (tmp == 2)
72 return 2;
73 else {
74 printk("Impossible unaligned trap. insn=%08x\n", insn);
75 die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
76
77 /* GCC should never warn that control reaches the end
78 * of this function without returning a value because
79 * die_if_kernel() is marked with attribute 'noreturn'.
80 * Alas, some versions do...
81 */
82
83 return 0;
84 }
85}
86
87static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
88{
89 if (insn & 0x800000) {
90 if (insn & 0x2000)
91 return (unsigned char)(regs->tstate >> 24); /* %asi */
92 else
93 return (unsigned char)(insn >> 5); /* imm_asi */
94 } else
95 return ASI_P;
96}
97
98/* 0x400000 = signed, 0 = unsigned */
/* Non-zero (the raw bit) when the load sign-extends, zero otherwise. */
static inline int decode_signedness(unsigned int insn)
{
	return insn & 0x400000;
}
103
/* If any register involved in the emulated access is a window register
 * (%l0-%l7/%i0-%i7, i.e. number >= 16), flush the register windows to
 * the stack first so fetch_reg()/fetch_reg_addr() can reach it through
 * the saved frame.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}
114
/* Sign-extend the low 13 bits of 'imm' (a simm13 instruction field) to
 * a full long; all higher input bits are ignored.
 *
 * Rewritten from "imm << 51 >> 51": left-shifting a negative signed
 * value is undefined behavior in C, and shifting a set bit into the
 * sign position is implementation-defined.  The mask/subtract form
 * below computes the identical result portably: if sign bit 12 is set,
 * subtracting 0x1000 converts the low 12 bits into the proper negative
 * value.
 */
static inline long sign_extend_imm13(long imm)
{
	return (imm & 0xfff) - (imm & 0x1000);
}
119
/* Read integer register 'reg' for the trapping context in 'regs'.
 * %g0 always reads as zero; globals/outs (reg < 16) live in pt_regs;
 * locals/ins (reg >= 16) are read from the register window saved on
 * the stack (maybe_flush_windows() must have run first).
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel context: window is on the kernel stack at the
		 * biased frame pointer.
		 */
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit task: narrow window format, no stack bias, FP
		 * truncated to 32 bits.
		 * NOTE(review): the get_user() result is ignored; on a
		 * fault 'value' is returned uninitialized — presumably the
		 * user stack is known-good by this point.  TODO confirm.
		 */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		/* 64-bit user task: full window at the biased frame pointer. */
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
141
/* Return the address where integer register 'reg' is stored for the
 * trapping context.  For reg >= 16 in user contexts this is a pointer
 * into userspace (callers such as handle_ld_nf() access it with
 * put_user()); for kernel contexts and reg < 16 it is a plain kernel
 * pointer.  Note %g0 is not special-cased here — the caller must not
 * write through the returned address for reg == 0.
 */
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit task: window entries are 32-bit; cast widens the
		 * pointer type, callers handle the width difference.
		 */
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
160
/* Decode the memory address referenced by a load/store instruction:
 * rs1 + sign-extended simm13 for the immediate form (bit 13 set),
 * otherwise rs1 + rs2.  Register windows are flushed first so window
 * registers read their current values.
 */
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		/* Immediate form: rs1 + sext(simm13). */
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		/* Register form: rs1 + rs2. */
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}
176
/* Thin wrapper around die_if_kernel() for fatal unaligned accesses.
 * This is just to make gcc think die_if_kernel does return...
 */
static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
182
/* Emulate an unaligned integer load byte-by-byte with lduba, assembling
 * the value in %l1/%g7 and storing the result through 'dest_reg'.
 * Dispatch: size >= 8 at label 9 (8/16 bytes), size 4 at label 6,
 * otherwise the 2-byte path.  'is_signed' selects sign extension on the
 * 2- and 4-byte paths; a size of 16 stores two 64-bit halves at
 * [dest_reg] and [dest_reg + 8].  Each lduba is registered in
 * __ex_table with 'errh' as its fault fixup, and %asi is restored to
 * ASI_AIUS on exit.
 */
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({	\
__asm__ __volatile__ (							\
	"wr %4, 0, %%asi\n\t"						\
	"cmp %1, 8\n\t"							\
	"bge,pn %%icc, 9f\n\t"						\
	" cmp %1, 4\n\t"						\
	"be,pt %%icc, 6f\n"						\
"4:\t"	" lduba [%2] %%asi, %%l1\n"					\
"5:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t"				\
	"sll %%l1, 8, %%l1\n\t"						\
	"brz,pt %3, 3f\n\t"						\
	" add %%l1, %%l2, %%l1\n\t"					\
	"sllx %%l1, 48, %%l1\n\t"					\
	"srax %%l1, 48, %%l1\n"						\
"3:\t"	"ba,pt %%xcc, 0f\n\t"						\
	" stx %%l1, [%0]\n"						\
"6:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t"				\
	"sll %%l1, 24, %%l1\n"						\
"7:\t"	"lduba [%2 + 2] %%asi, %%g7\n\t"				\
	"sll %%l2, 16, %%l2\n"						\
"8:\t"	"lduba [%2 + 3] %%asi, %%g1\n\t"				\
	"sll %%g7, 8, %%g7\n\t"						\
	"or %%l1, %%l2, %%l1\n\t"					\
	"or %%g7, %%g1, %%g7\n\t"					\
	"or %%l1, %%g7, %%l1\n\t"					\
	"brnz,a,pt %3, 3f\n\t"						\
	" sra %%l1, 0, %%l1\n"						\
"3:\t"	"ba,pt %%xcc, 0f\n\t"						\
	" stx %%l1, [%0]\n"						\
"9:\t"	"lduba [%2] %%asi, %%l1\n"					\
"10:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t"				\
	"sllx %%l1, 56, %%l1\n"						\
"11:\t"	"lduba [%2 + 2] %%asi, %%g7\n\t"				\
	"sllx %%l2, 48, %%l2\n"						\
"12:\t"	"lduba [%2 + 3] %%asi, %%g1\n\t"				\
	"sllx %%g7, 40, %%g7\n\t"					\
	"sllx %%g1, 32, %%g1\n\t"					\
	"or %%l1, %%l2, %%l1\n\t"					\
	"or %%g7, %%g1, %%g7\n"						\
"13:\t"	"lduba [%2 + 4] %%asi, %%l2\n\t"				\
	"or %%l1, %%g7, %%g7\n"						\
"14:\t"	"lduba [%2 + 5] %%asi, %%g1\n\t"				\
	"sllx %%l2, 24, %%l2\n"						\
"15:\t"	"lduba [%2 + 6] %%asi, %%l1\n\t"				\
	"sllx %%g1, 16, %%g1\n\t"					\
	"or %%g7, %%l2, %%g7\n"						\
"16:\t"	"lduba [%2 + 7] %%asi, %%l2\n\t"				\
	"sllx %%l1, 8, %%l1\n\t"					\
	"or %%g7, %%g1, %%g7\n\t"					\
	"or %%l1, %%l2, %%l1\n\t"					\
	"or %%g7, %%l1, %%g7\n\t"					\
	"cmp %1, 8\n\t"							\
	"be,a,pt %%icc, 0f\n\t"						\
	" stx %%g7, [%0]\n\t"						\
	"srlx %%g7, 32, %%l1\n\t"					\
	"sra %%g7, 0, %%g7\n\t"						\
	"stx %%l1, [%0]\n\t"						\
	"stx %%g7, [%0 + 8]\n"						\
"0:\n\t"								\
	"wr %%g0, %5, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word 4b, " #errh "\n\t"					\
	".word 5b, " #errh "\n\t"					\
	".word 6b, " #errh "\n\t"					\
	".word 7b, " #errh "\n\t"					\
	".word 8b, " #errh "\n\t"					\
	".word 9b, " #errh "\n\t"					\
	".word 10b, " #errh "\n\t"					\
	".word 11b, " #errh "\n\t"					\
	".word 12b, " #errh "\n\t"					\
	".word 13b, " #errh "\n\t"					\
	".word 14b, " #errh "\n\t"					\
	".word 15b, " #errh "\n\t"					\
	".word 16b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed),	\
	    "r" (asi), "i" (ASI_AIUS)					\
	: "l1", "l2", "g7", "g1", "cc");				\
})
262
/* Emulate an unaligned integer store byte-by-byte with stba.  Loads the
 * source value from memory at 'src_val', then dispatches on 'size':
 * label 2 for 2 bytes, label 1 for 4 bytes, fall-through for 8 bytes.
 * Each stba is registered in __ex_table with 'errh' as its fault fixup,
 * and %asi is restored to ASI_AIUS on exit.
 */
#define store_common(dst_addr, size, src_val, asi, errh) ({		\
__asm__ __volatile__ (							\
	"wr %3, 0, %%asi\n\t"						\
	"ldx [%2], %%l1\n"						\
	"cmp %1, 2\n\t"							\
	"be,pn %%icc, 2f\n\t"						\
	" cmp %1, 4\n\t"						\
	"be,pt %%icc, 1f\n\t"						\
	" srlx %%l1, 24, %%l2\n\t"					\
	"srlx %%l1, 56, %%g1\n\t"					\
	"srlx %%l1, 48, %%g7\n"						\
"4:\t"	"stba %%g1, [%0] %%asi\n\t"					\
	"srlx %%l1, 40, %%g1\n"						\
"5:\t"	"stba %%g7, [%0 + 1] %%asi\n\t"					\
	"srlx %%l1, 32, %%g7\n"						\
"6:\t"	"stba %%g1, [%0 + 2] %%asi\n"					\
"7:\t"	"stba %%g7, [%0 + 3] %%asi\n\t"					\
	"srlx %%l1, 16, %%g1\n"						\
"8:\t"	"stba %%l2, [%0 + 4] %%asi\n\t"					\
	"srlx %%l1, 8, %%g7\n"						\
"9:\t"	"stba %%g1, [%0 + 5] %%asi\n"					\
"10:\t"	"stba %%g7, [%0 + 6] %%asi\n\t"					\
	"ba,pt %%xcc, 0f\n"						\
"11:\t"	" stba %%l1, [%0 + 7] %%asi\n"					\
"1:\t"	"srl %%l1, 16, %%g7\n"						\
"12:\t"	"stba %%l2, [%0] %%asi\n\t"					\
	"srl %%l1, 8, %%l2\n"						\
"13:\t"	"stba %%g7, [%0 + 1] %%asi\n"					\
"14:\t"	"stba %%l2, [%0 + 2] %%asi\n\t"					\
	"ba,pt %%xcc, 0f\n"						\
"15:\t"	" stba %%l1, [%0 + 3] %%asi\n"					\
"2:\t"	"srl %%l1, 8, %%l2\n"						\
"16:\t"	"stba %%l2, [%0] %%asi\n"					\
"17:\t"	"stba %%l1, [%0 + 1] %%asi\n"					\
"0:\n\t"								\
	"wr %%g0, %4, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word 4b, " #errh "\n\t"					\
	".word 5b, " #errh "\n\t"					\
	".word 6b, " #errh "\n\t"					\
	".word 7b, " #errh "\n\t"					\
	".word 8b, " #errh "\n\t"					\
	".word 9b, " #errh "\n\t"					\
	".word 10b, " #errh "\n\t"					\
	".word 11b, " #errh "\n\t"					\
	".word 12b, " #errh "\n\t"					\
	".word 13b, " #errh "\n\t"					\
	".word 14b, " #errh "\n\t"					\
	".word 15b, " #errh "\n\t"					\
	".word 16b, " #errh "\n\t"					\
	".word 17b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
	: "l1", "l2", "g7", "g1", "cc");				\
})
318
/* Emulate an unaligned integer store of register 'reg_num'.  Size 16
 * denotes std: the even/odd 32-bit register pair is packed into one
 * 64-bit value and stored as 8 bytes.  %g0 as source stores zero (the
 * 'zero' local doubles as the std packing buffer).
 */
#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({	\
	unsigned long zero = 0;						\
	unsigned long *src_val = &zero;					\
									\
	if (size == 16) {						\
		size = 8;						\
		zero = (((long)(reg_num ?				\
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
			(unsigned)fetch_reg(reg_num + 1, regs);		\
	} else if (reg_num) src_val = fetch_reg_addr(reg_num, regs);	\
	store_common(dst_addr, size, src_val, asi, errh);		\
})
331
extern void smp_capture(void);
extern void smp_release(void);

/* Emulate an unaligned atomic (swap-style) access by quiescing the
 * other cpus (smp_capture) and disabling interrupts around a separate
 * load and store.
 *
 * NOTE(review): both invocations below pass too few arguments —
 * do_integer_load() takes 6 parameters (the 'asi' argument is missing
 * here) and store_common() takes 5.  This macro would fail to expand
 * if ever used; its only call site is under "#if 0" in
 * kernel_unaligned_trap().  Fix the argument lists before enabling.
 */
#define do_atomic(srcdest_reg, mem, errh) ({				\
	unsigned long flags, tmp;					\
									\
	smp_capture();							\
	local_irq_save(flags);						\
	tmp = *srcdest_reg;						\
	do_integer_load(srcdest_reg, 4, mem, 0, errh);			\
	store_common(mem, 4, &tmp, errh);				\
	local_irq_restore(flags);					\
	smp_release();							\
})
346
347static inline void advance(struct pt_regs *regs)
348{
349 regs->tpc = regs->tnpc;
350 regs->tnpc += 4;
351 if (test_thread_flag(TIF_32BIT)) {
352 regs->tpc &= 0xffffffff;
353 regs->tnpc &= 0xffffffff;
354 }
355}
356
/* Bit 24 of the instruction word distinguishes the FP load/store
 * format from the integer one.
 */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

/* The kernel may only take the in-kernel emulation path for integer
 * accesses; FP accesses are rejected.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	return floating_point_load_or_store_p(insn) ? 0 : 1;
}
366
/* Forced asm name so the fixup stub in kernel_unaligned_trap() can
 * call it by label.
 */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

/* Handle a fault taken while emulating a kernel unaligned access.
 * If the faulting PC has an exception-table entry, redirect execution
 * to the fixup; otherwise this is a genuine kernel bug, so print the
 * usual oops-style diagnostics and die.
 */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs [UREG_G2];
	unsigned long fixup = search_extables_range(regs->tpc, &g2);

	if (!fixup) {
		unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
	        die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* Resume at the fixup with %g2 as search_extables_range left it. */
	regs->tpc = fixup;
	regs->tnpc = regs->tpc + 4;
	regs->u_regs [UREG_G2] = g2;

	/* Reset %asi to ASI_AIUS, as the emulation macros do on their
	 * normal exit path.
	 */
	regs->tstate &= ~TSTATE_ASI;
	regs->tstate |= (ASI_AIUS << 24UL);
}
397
/* Entry point for unaligned access traps taken in kernel mode: decode
 * the instruction and emulate the load or store byte-by-byte, then
 * advance past it.  FP and atomic accesses are not supported and
 * panic.
 */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
		       regs->tpc);
		unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);

		/* NOTE(review): this asm is reached only if
		 * unaligned_panic() returns; its main purpose is to emit
		 * the "kernel_unaligned_trap_fault" label that the
		 * emulation macros below name as their exception-table
		 * fixup target (it forwards regs/insn to
		 * kernel_mna_trap_fault).
		 */
		__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
		"mov %0, %%o0\n\t"
		"call kernel_mna_trap_fault\n\t"
		" mov %1, %%o1\n\t"
		:
		: "r" (regs), "r" (insn)
		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
		  "g1", "g2", "g3", "g4", "g7", "cc");
	} else {
		unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));

#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
		switch (dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn), decode_asi(insn, regs),
					kernel_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn>>25)&0x1f), size,
					 (unsigned long *) addr, regs,
					 decode_asi(insn, regs),
					 kernel_unaligned_trap_fault);
			break;
#if 0 /* unsupported */
		case both:
			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
				  (unsigned long *) addr,
				  kernel_unaligned_trap_fault);
			break;
#endif
		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		advance(regs);
	}
}
452
/* Number of set bits in each 4-bit nibble value 0-15. */
static char popc_helper[] = {
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
};

/* Software emulation of the popc (population count) instruction:
 * count the one bits in the source operand (register or simm13) and
 * write the count to destination register rd.  Always returns 1.
 */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		/* Immediate operand: sign-extended simm13. */
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	/* Sum the popcount of all 16 nibbles of the 64-bit value. */
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)		/* %g0 is never written */
			regs->u_regs[rd] = ret;
	} else {
		/* Window register: write it into the saved window on the
		 * user stack (32- or 64-bit format as appropriate).
		 */
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
492
493extern void do_fpother(struct pt_regs *regs);
494extern void do_privact(struct pt_regs *regs);
495extern void data_access_exception(struct pt_regs *regs,
496 unsigned long sfsr,
497 unsigned long sfar);
498
/* Emulate FP loads/stores the hardware refused: STQ (store quad) and
 * the LDF/LDDF/LDQF family, honoring the access ASI (primary/secondary,
 * little-endian and no-fault variants).  Returns 0 when the access is
 * redirected to do_fpother() (bad register), else 1.
 */
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	/* FP register number: 5-bit field with the MSB folded in from bit 25. */
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	/* Which half of the FP register file the access dirties. */
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;	/* clear ftt field */
	if (freg & 3) {
		/* Quad registers must be 4-aligned. */
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			/* Restricted ASI from user mode. */
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			data_access_exception(regs, 0, addr);
			return 1;
		}
		/* Store the quad as four 32-bit user writes; the first
		 * put_user validates the range for the rest.
		 */
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			data_access_exception(regs, 0, addr);
			return 1;
		}
		/* size in 32-bit words: LDF=1, LDDF=2, LDQF=4. */
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		/* No-fault ASIs swallow the fault and load zeros. */
		if (err && !(asi & 0x2 /* NF */)) {
			data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			/* default handles size 2 (LDDF); the out-of-order
			 * case labels are intentional.
			 */
			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		/* Lazily initialize the saved FPU state before dirtying it. */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
609
/* Emulate a faulting no-fault (NF) ASI load: instead of delivering the
 * fault, write zero to the destination register (and to the second
 * register of an ldd-format opcode, 0x180000), then skip the
 * instruction.
 */
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;
	                        
	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		/* Kernel pointer (or pt_regs slot): write directly. */
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		/* User window in 32-bit format: 32-bit slots. */
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		/* User window in 64-bit format. */
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
633
/* Emulate a user-mode LDDF (load double FP) that trapped for being
 * misaligned: fetch the two 32-bit halves separately, honor the
 * little-endian and no-fault ASI variants, and deposit the value into
 * the saved FPU state.  sfar/sfsr are the fault address and status.
 */
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() yields 0 or -EFAULT here, so this tests that the
	 * instruction word was fetched successfully.
	 */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		asi = sfsr >> 16;	/* faulting ASI recorded in SFSR */
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 __user *)sfar) ||
		     get_user(second, (u32 __user *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				/* No-fault load reads as zero. */
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* Lazily initialize the saved FPU state before dirtying it. */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
686
/* Emulate a user-mode STDF (store double FP) that trapped for being
 * misaligned: read the value from the saved FPU state (zero if that
 * half was never dirtied), byte-swap for little-endian ASIs, and store
 * it as two 32-bit user writes.
 */
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() yields 0 or -EFAULT here, so this tests that the
	 * instruction word was fetched successfully.
	 */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		asi = sfsr >> 16;	/* faulting ASI recorded in SFSR */
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
new file mode 100644
index 000000000000..7aae0a18aabe
--- /dev/null
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -0,0 +1,400 @@
1/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18
19#include <asm/asi.h>
20#include <asm/timer.h>
21
/* Set at init time when running on an UltraSPARC-IIe; NULL otherwise.
 * Also serves as the "driver is registered" flag on the exit paths.
 */
22static struct cpufreq_driver *cpufreq_us2e_driver;
23
24struct us2e_freq_percpu_info {
	/* Five divisor entries (1,2,4,6,8) + CPUFREQ_TABLE_END terminator. */
25	struct cpufreq_frequency_table table[6];
26};
27
28/* Indexed by cpu number. */
29static struct us2e_freq_percpu_info *us2e_freq_table;
30
/* Physical addresses of the Hummingbird bridge control registers,
 * accessed with ASI_PHYS_BYPASS_EC_E loads/stores (see read/write_hbreg).
 */
31#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
32#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL
33
34/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
35 * in the ESTAR mode control register.
36 */
/* NOTE(review): the field encodings are not monotonic (DIV_6 is 0x2,
 * below DIV_4's 0x3), so never infer the divisor by comparing raw
 * field values -- go through estar_to_divisor().
 */
37#define ESTAR_MODE_DIV_1	0x0000000000000000UL
38#define ESTAR_MODE_DIV_2	0x0000000000000001UL
39#define ESTAR_MODE_DIV_4	0x0000000000000003UL
40#define ESTAR_MODE_DIV_6	0x0000000000000002UL
41#define ESTAR_MODE_DIV_8	0x0000000000000004UL
42#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL
43
/* Memory control register 0: self-refresh enable bit and the DRAM
 * refresh count field.
 */
44#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL
45#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL
46#define MCTRL0_REFR_COUNT_SHIFT	8
/* Target refresh interval; divided by 1e9 in frob_mem_refresh(), so
 * presumably nanoseconds -- TODO confirm against the IIe manual.
 */
47#define MCTRL0_REFR_INTERVAL	7800
48#define MCTRL0_REFR_CLKS_P_CNT	64
/* Read a Hummingbird bridge control register.  These live at fixed
 * physical addresses, hence the cache-bypassing alternate-space load.
 */
50static unsigned long read_hbreg(unsigned long addr)
51{
52	unsigned long ret;
53
54	__asm__ __volatile__("ldxa [%1] %2, %0"
55			     : "=&r" (ret)
56			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
57	return ret;
58}
59
/* Write a Hummingbird bridge control register (store + membar #Sync).
 * A write to the E-Star mode register re-locks the CPU PLL, so give it
 * time to settle before the caller proceeds.
 */
60static void write_hbreg(unsigned long addr, unsigned long val)
61{
62	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
63			     "membar #Sync"
64			     : /* no outputs */
65			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
66			     : "memory");
67	if (addr == HBIRD_ESTAR_MODE_ADDR) {
68		/* Need to wait 16 clock cycles for the PLL to lock. */
69		udelay(1);
70	}
71}
72
73static void self_refresh_ctl(int enable)
74{
75 unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
76
77 if (enable)
78 mctrl |= MCTRL0_SREFRESH_ENAB;
79 else
80 mctrl &= ~MCTRL0_SREFRESH_ENAB;
81 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
82 (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
83}
84
85static void frob_mem_refresh(int cpu_slowing_down,
86 unsigned long clock_tick,
87 unsigned long old_divisor, unsigned long divisor)
88{
89 unsigned long old_refr_count, refr_count, mctrl;
90
91
92 refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
93 refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
94
95 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
96 old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
97 >> MCTRL0_REFR_COUNT_SHIFT;
98
99 mctrl &= ~MCTRL0_REFR_COUNT_MASK;
100 mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
101 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
102 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
103
104 if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
105 unsigned long usecs;
106
107 /* We have to wait for both refresh counts (old
108 * and new) to go to zero.
109 */
110 usecs = (MCTRL0_REFR_CLKS_P_CNT *
111 (refr_count + old_refr_count) *
112 1000000UL *
113 old_divisor) / clock_tick;
114 udelay(usecs + 1UL);
115 }
116}
117
/*
 * us2e_transition() - walk the E-Star clock divisor from old_divisor
 * to divisor along the legal edges of the IIe clock state machine.
 * Memory refresh is retuned on the appropriate side of each speed
 * change, self-refresh is toggled only at the 1x<->2x edge, and moves
 * between 1x and the slow (>2x) modes are decomposed into two hops
 * through 2x.  Runs with local interrupts disabled; recursion is at
 * most one level deep.
 */
118static void us2e_transition(unsigned long estar, unsigned long new_bits,
119			    unsigned long clock_tick,
120			    unsigned long old_divisor, unsigned long divisor)
121{
122	unsigned long flags;
123
124	local_irq_save(flags);
125
126	estar &= ~ESTAR_MODE_DIV_MASK;
127
128	/* This is based upon the state transition diagram in the IIe manual. */
129	if (old_divisor == 2 && divisor == 1) {
130		self_refresh_ctl(0);
131		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
132		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
133	} else if (old_divisor == 1 && divisor == 2) {
134		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
135		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
136		self_refresh_ctl(1);
137	} else if (old_divisor == 1 && divisor > 2) {
		/* 1x -> slow mode: must hop through 2x. */
138		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
139				1, 2);
140		us2e_transition(estar, new_bits, clock_tick,
141				2, divisor);
142	} else if (old_divisor > 2 && divisor == 1) {
		/* slow mode -> 1x: must hop through 2x. */
143		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
144				old_divisor, 2);
145		us2e_transition(estar, new_bits, clock_tick,
146				2, divisor);
147	} else if (old_divisor < divisor) {
		/* Moving to a larger divisor among the slow modes. */
148		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
149		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
150	} else if (old_divisor > divisor) {
		/* Moving to a smaller divisor among the slow modes. */
151		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
152		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
153	} else {
		/* Callers only invoke us when the divisor changes. */
154		BUG();
155	}
156
157	local_irq_restore(flags);
158}
159
160static unsigned long index_to_estar_mode(unsigned int index)
161{
162 switch (index) {
163 case 0:
164 return ESTAR_MODE_DIV_1;
165
166 case 1:
167 return ESTAR_MODE_DIV_2;
168
169 case 2:
170 return ESTAR_MODE_DIV_4;
171
172 case 3:
173 return ESTAR_MODE_DIV_6;
174
175 case 4:
176 return ESTAR_MODE_DIV_8;
177
178 default:
179 BUG();
180 };
181}
182
/* Map a frequency-table index (0..4) to its clock divisor.  The order
 * must match the table built in us2e_freq_cpu_init().
 */
static unsigned long index_to_divisor(unsigned int index)
{
	static const unsigned long divisors[] = { 1, 2, 4, 6, 8 };

	if (index >= sizeof(divisors) / sizeof(divisors[0]))
		BUG();
	return divisors[index];
}
205
206static unsigned long estar_to_divisor(unsigned long estar)
207{
208 unsigned long ret;
209
210 switch (estar & ESTAR_MODE_DIV_MASK) {
211 case ESTAR_MODE_DIV_1:
212 ret = 1;
213 break;
214 case ESTAR_MODE_DIV_2:
215 ret = 2;
216 break;
217 case ESTAR_MODE_DIV_4:
218 ret = 4;
219 break;
220 case ESTAR_MODE_DIV_6:
221 ret = 6;
222 break;
223 case ESTAR_MODE_DIV_8:
224 ret = 8;
225 break;
226 default:
227 BUG();
228 };
229
230 return ret;
231}
232
233static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
234{
235 unsigned long new_bits, new_freq;
236 unsigned long clock_tick, divisor, old_divisor, estar;
237 cpumask_t cpus_allowed;
238 struct cpufreq_freqs freqs;
239
240 if (!cpu_online(cpu))
241 return;
242
243 cpus_allowed = current->cpus_allowed;
244 set_cpus_allowed(current, cpumask_of_cpu(cpu));
245
246 new_freq = clock_tick = sparc64_get_clock_tick(cpu);
247 new_bits = index_to_estar_mode(index);
248 divisor = index_to_divisor(index);
249 new_freq /= divisor;
250
251 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
252
253 old_divisor = estar_to_divisor(estar);
254
255 freqs.old = clock_tick / old_divisor;
256 freqs.new = new_freq;
257 freqs.cpu = cpu;
258 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
259
260 if (old_divisor != divisor)
261 us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);
262
263 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
264
265 set_cpus_allowed(current, cpus_allowed);
266}
267
268static int us2e_freq_target(struct cpufreq_policy *policy,
269 unsigned int target_freq,
270 unsigned int relation)
271{
272 unsigned int new_index = 0;
273
274 if (cpufreq_frequency_table_target(policy,
275 &us2e_freq_table[policy->cpu].table[0],
276 target_freq,
277 relation,
278 &new_index))
279 return -EINVAL;
280
281 us2e_set_cpu_divider_index(policy->cpu, new_index);
282
283 return 0;
284}
285
286static int us2e_freq_verify(struct cpufreq_policy *policy)
287{
288 return cpufreq_frequency_table_verify(policy,
289 &us2e_freq_table[policy->cpu].table[0]);
290}
291
292static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
293{
294 unsigned int cpu = policy->cpu;
295 unsigned long clock_tick = sparc64_get_clock_tick(cpu);
296 struct cpufreq_frequency_table *table =
297 &us2e_freq_table[cpu].table[0];
298
299 table[0].index = 0;
300 table[0].frequency = clock_tick / 1;
301 table[1].index = 1;
302 table[1].frequency = clock_tick / 2;
303 table[2].index = 2;
304 table[2].frequency = clock_tick / 4;
305 table[2].index = 3;
306 table[2].frequency = clock_tick / 6;
307 table[2].index = 4;
308 table[2].frequency = clock_tick / 8;
309 table[2].index = 5;
310 table[3].frequency = CPUFREQ_TABLE_END;
311
312 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
313 policy->cpuinfo.transition_latency = 0;
314 policy->cur = clock_tick;
315
316 return cpufreq_frequency_table_cpuinfo(policy, table);
317}
318
319static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
320{
321 if (cpufreq_us2e_driver)
322 us2e_set_cpu_divider_index(policy->cpu, 0);
323
324 return 0;
325}
326
327static int __init us2e_freq_init(void)
328{
329 unsigned long manuf, impl, ver;
330 int ret;
331
332 __asm__("rdpr %%ver, %0" : "=r" (ver));
333 manuf = ((ver >> 48) & 0xffff);
334 impl = ((ver >> 32) & 0xffff);
335
336 if (manuf == 0x17 && impl == 0x13) {
337 struct cpufreq_driver *driver;
338
339 ret = -ENOMEM;
340 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
341 if (!driver)
342 goto err_out;
343 memset(driver, 0, sizeof(*driver));
344
345 us2e_freq_table = kmalloc(
346 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
347 GFP_KERNEL);
348 if (!us2e_freq_table)
349 goto err_out;
350
351 memset(us2e_freq_table, 0,
352 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
353
354 driver->verify = us2e_freq_verify;
355 driver->target = us2e_freq_target;
356 driver->init = us2e_freq_cpu_init;
357 driver->exit = us2e_freq_cpu_exit;
358 driver->owner = THIS_MODULE,
359 strcpy(driver->name, "UltraSPARC-IIe");
360
361 cpufreq_us2e_driver = driver;
362 ret = cpufreq_register_driver(driver);
363 if (ret)
364 goto err_out;
365
366 return 0;
367
368err_out:
369 if (driver) {
370 kfree(driver);
371 cpufreq_us2e_driver = NULL;
372 }
373 if (us2e_freq_table) {
374 kfree(us2e_freq_table);
375 us2e_freq_table = NULL;
376 }
377 return ret;
378 }
379
380 return -ENODEV;
381}
382
383static void __exit us2e_freq_exit(void)
384{
385 if (cpufreq_us2e_driver) {
386 cpufreq_unregister_driver(cpufreq_us2e_driver);
387
388 kfree(cpufreq_us2e_driver);
389 cpufreq_us2e_driver = NULL;
390 kfree(us2e_freq_table);
391 us2e_freq_table = NULL;
392 }
393}
394
395MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
396MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
397MODULE_LICENSE("GPL");
398
399module_init(us2e_freq_init);
400module_exit(us2e_freq_exit);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
new file mode 100644
index 000000000000..18fe54b8aa55
--- /dev/null
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -0,0 +1,255 @@
1/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17
18#include <asm/head.h>
19#include <asm/timer.h>
20
/* Set at init time when running on an UltraSPARC-III; NULL otherwise.
 * Also serves as the "driver is registered" flag on the exit paths.
 */
21static struct cpufreq_driver *cpufreq_us3_driver;
22
23struct us3_freq_percpu_info {
	/* Three divisor entries (1, 2, 32) + CPUFREQ_TABLE_END terminator. */
24	struct cpufreq_frequency_table table[4];
25};
26
27/* Indexed by cpu number. */
28static struct us3_freq_percpu_info *us3_freq_table;
29
30/* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
31 * in the Safari config register.
32 */
33#define SAFARI_CFG_DIV_1	0x0000000000000000UL
34#define SAFARI_CFG_DIV_2	0x0000000040000000UL
35#define SAFARI_CFG_DIV_32	0x0000000080000000UL
36#define SAFARI_CFG_DIV_MASK	0x00000000C0000000UL
37
/* Read the Safari configuration register via its ASI.  NOTE(review):
 * the register is per-cpu; callers pin themselves to the target cpu
 * first (see us3_set_cpu_divider_index).
 */
38static unsigned long read_safari_cfg(void)
39{
40	unsigned long ret;
41
42	__asm__ __volatile__("ldxa [%%g0] %1, %0"
43			     : "=&r" (ret)
44			     : "i" (ASI_SAFARI_CONFIG));
45	return ret;
46}
47
/* Write the Safari configuration register, ordered with membar #Sync
 * so the new divisor takes effect before we continue.
 */
48static void write_safari_cfg(unsigned long val)
49{
50	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
51			     "membar #Sync"
52			     : /* no outputs */
53			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
54			     : "memory");
55}
56
57static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
58{
59 unsigned long clock_tick = sparc64_get_clock_tick(cpu);
60 unsigned long ret;
61
62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
63 case SAFARI_CFG_DIV_1:
64 ret = clock_tick / 1;
65 break;
66 case SAFARI_CFG_DIV_2:
67 ret = clock_tick / 2;
68 break;
69 case SAFARI_CFG_DIV_32:
70 ret = clock_tick / 32;
71 break;
72 default:
73 BUG();
74 };
75
76 return ret;
77}
78
79static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
80{
81 unsigned long new_bits, new_freq, reg;
82 cpumask_t cpus_allowed;
83 struct cpufreq_freqs freqs;
84
85 if (!cpu_online(cpu))
86 return;
87
88 cpus_allowed = current->cpus_allowed;
89 set_cpus_allowed(current, cpumask_of_cpu(cpu));
90
91 new_freq = sparc64_get_clock_tick(cpu);
92 switch (index) {
93 case 0:
94 new_bits = SAFARI_CFG_DIV_1;
95 new_freq /= 1;
96 break;
97 case 1:
98 new_bits = SAFARI_CFG_DIV_2;
99 new_freq /= 2;
100 break;
101 case 2:
102 new_bits = SAFARI_CFG_DIV_32;
103 new_freq /= 32;
104 break;
105
106 default:
107 BUG();
108 };
109
110 reg = read_safari_cfg();
111
112 freqs.old = get_current_freq(cpu, reg);
113 freqs.new = new_freq;
114 freqs.cpu = cpu;
115 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
116
117 reg &= ~SAFARI_CFG_DIV_MASK;
118 reg |= new_bits;
119 write_safari_cfg(reg);
120
121 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
122
123 set_cpus_allowed(current, cpus_allowed);
124}
125
126static int us3_freq_target(struct cpufreq_policy *policy,
127 unsigned int target_freq,
128 unsigned int relation)
129{
130 unsigned int new_index = 0;
131
132 if (cpufreq_frequency_table_target(policy,
133 &us3_freq_table[policy->cpu].table[0],
134 target_freq,
135 relation,
136 &new_index))
137 return -EINVAL;
138
139 us3_set_cpu_divider_index(policy->cpu, new_index);
140
141 return 0;
142}
143
144static int us3_freq_verify(struct cpufreq_policy *policy)
145{
146 return cpufreq_frequency_table_verify(policy,
147 &us3_freq_table[policy->cpu].table[0]);
148}
149
/* cpufreq ->init hook: populate this cpu's frequency table (divisors
 * 1, 2 and 32) and the initial governor/frequency state, then register
 * the table limits with the cpufreq core.
 */
150static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
151{
152	unsigned int cpu = policy->cpu;
153	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
154	struct cpufreq_frequency_table *table =
155		&us3_freq_table[cpu].table[0];
156
157	table[0].index = 0;
158	table[0].frequency = clock_tick / 1;
159	table[1].index = 1;
160	table[1].frequency = clock_tick / 2;
161	table[2].index = 2;
162	table[2].frequency = clock_tick / 32;
	/* Terminator entry; its index is not used. */
163	table[3].index = 0;
164	table[3].frequency = CPUFREQ_TABLE_END;
165
166	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
167	policy->cpuinfo.transition_latency = 0;
168	policy->cur = clock_tick;
169
170	return cpufreq_frequency_table_cpuinfo(policy, table);
171}
172
173static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
174{
175 if (cpufreq_us3_driver)
176 us3_set_cpu_divider_index(policy->cpu, 0);
177
178 return 0;
179}
180
181static int __init us3_freq_init(void)
182{
183 unsigned long manuf, impl, ver;
184 int ret;
185
186 __asm__("rdpr %%ver, %0" : "=r" (ver));
187 manuf = ((ver >> 48) & 0xffff);
188 impl = ((ver >> 32) & 0xffff);
189
190 if (manuf == CHEETAH_MANUF &&
191 (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
192 struct cpufreq_driver *driver;
193
194 ret = -ENOMEM;
195 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
196 if (!driver)
197 goto err_out;
198 memset(driver, 0, sizeof(*driver));
199
200 us3_freq_table = kmalloc(
201 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
202 GFP_KERNEL);
203 if (!us3_freq_table)
204 goto err_out;
205
206 memset(us3_freq_table, 0,
207 (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
208
209 driver->verify = us3_freq_verify;
210 driver->target = us3_freq_target;
211 driver->init = us3_freq_cpu_init;
212 driver->exit = us3_freq_cpu_exit;
213 driver->owner = THIS_MODULE,
214 strcpy(driver->name, "UltraSPARC-III");
215
216 cpufreq_us3_driver = driver;
217 ret = cpufreq_register_driver(driver);
218 if (ret)
219 goto err_out;
220
221 return 0;
222
223err_out:
224 if (driver) {
225 kfree(driver);
226 cpufreq_us3_driver = NULL;
227 }
228 if (us3_freq_table) {
229 kfree(us3_freq_table);
230 us3_freq_table = NULL;
231 }
232 return ret;
233 }
234
235 return -ENODEV;
236}
237
238static void __exit us3_freq_exit(void)
239{
240 if (cpufreq_us3_driver) {
241 cpufreq_unregister_driver(cpufreq_us3_driver);
242
243 kfree(cpufreq_us3_driver);
244 cpufreq_us3_driver = NULL;
245 kfree(us3_freq_table);
246 us3_freq_table = NULL;
247 }
248}
249
250MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
251MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
252MODULE_LICENSE("GPL");
253
254module_init(us3_freq_init);
255module_exit(us3_freq_exit);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..382fd6798bb9
--- /dev/null
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -0,0 +1,106 @@
1/* ld script to make UltraLinux kernel */
2
3#include <asm-generic/vmlinux.lds.h>
4
5OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
6OUTPUT_ARCH(sparc:v9a)
7ENTRY(_start)
8
9jiffies = jiffies_64;
10SECTIONS
11{
12 swapper_pmd_dir = 0x0000000000402000;
13 empty_pg_dir = 0x0000000000403000;
14 . = 0x4000;
15 .text 0x0000000000404000 :
16 {
17 *(.text)
18 SCHED_TEXT
19 LOCK_TEXT
20 *(.gnu.warning)
21 } =0
22 _etext = .;
23 PROVIDE (etext = .);
24
25 RODATA
26
27 .data :
28 {
29 *(.data)
30 CONSTRUCTORS
31 }
32 .data1 : { *(.data1) }
33 . = ALIGN(64);
34 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
35 _edata = .;
36 PROVIDE (edata = .);
37 .fixup : { *(.fixup) }
38
39 . = ALIGN(16);
40 __start___ex_table = .;
41 __ex_table : { *(__ex_table) }
42 __stop___ex_table = .;
43
44 . = ALIGN(8192);
45 __init_begin = .;
46 .init.text : {
47 _sinittext = .;
48 *(.init.text)
49 _einittext = .;
50 }
51 .init.data : { *(.init.data) }
52 . = ALIGN(16);
53 __setup_start = .;
54 .init.setup : { *(.init.setup) }
55 __setup_end = .;
56 __initcall_start = .;
57 .initcall.init : {
58 *(.initcall1.init)
59 *(.initcall2.init)
60 *(.initcall3.init)
61 *(.initcall4.init)
62 *(.initcall5.init)
63 *(.initcall6.init)
64 *(.initcall7.init)
65 }
66 __initcall_end = .;
67 __con_initcall_start = .;
68 .con_initcall.init : { *(.con_initcall.init) }
69 __con_initcall_end = .;
70 SECURITY_INIT
71 . = ALIGN(8192);
72 __initramfs_start = .;
73 .init.ramfs : { *(.init.ramfs) }
74 __initramfs_end = .;
75 . = ALIGN(8192);
76 __per_cpu_start = .;
77 .data.percpu : { *(.data.percpu) }
78 __per_cpu_end = .;
79 . = ALIGN(8192);
80 __init_end = .;
81 __bss_start = .;
82 .sbss : { *(.sbss) *(.scommon) }
83 .bss :
84 {
85 *(.dynbss)
86 *(.bss)
87 *(COMMON)
88 }
89 _end = . ;
90 PROVIDE (end = .);
91 /* Stabs debugging sections. */
92 .stab 0 : { *(.stab) }
93 .stabstr 0 : { *(.stabstr) }
94 .stab.excl 0 : { *(.stab.excl) }
95 .stab.exclstr 0 : { *(.stab.exclstr) }
96 .stab.index 0 : { *(.stab.index) }
97 .stab.indexstr 0 : { *(.stab.indexstr) }
98 .comment 0 : { *(.comment) }
99 .debug 0 : { *(.debug) }
100 .debug_srcinfo 0 : { *(.debug_srcinfo) }
101 .debug_aranges 0 : { *(.debug_aranges) }
102 .debug_pubnames 0 : { *(.debug_pubnames) }
103 .debug_sfnames 0 : { *(.debug_sfnames) }
104 .line 0 : { *(.line) }
105 /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
106}
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
new file mode 100644
index 000000000000..dfbc7e0dcf70
--- /dev/null
+++ b/arch/sparc64/kernel/winfixup.S
@@ -0,0 +1,417 @@
1/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $
2 *
3 * winfixup.S: Handle cases where user stack pointer is found to be bogus.
4 *
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <asm/asi.h>
9#include <asm/head.h>
10#include <asm/page.h>
11#include <asm/ptrace.h>
12#include <asm/processor.h>
13#include <asm/spitfire.h>
14#include <asm/thread_info.h>
15
16 .text
17
18set_pcontext:
19cplus_winfixup_insn_1:
20 sethi %hi(0), %l1
21 mov PRIMARY_CONTEXT, %g1
22 sllx %l1, 32, %l1
23cplus_winfixup_insn_2:
24 sethi %hi(0), %g2
25 or %l1, %g2, %l1
26 stxa %l1, [%g1] ASI_DMMU
27 flush %g6
28 retl
29 nop
30
31cplus_wfinsn_1:
32 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
33cplus_wfinsn_2:
34 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
35
36 .align 32
37
38 /* Here are the rules, pay attention.
39 *
40 * The kernel is disallowed from touching user space while
41 * the trap level is greater than zero, except for from within
42 * the window spill/fill handlers. This must be followed
43 * so that we can easily detect the case where we tried to
44 * spill/fill with a bogus (or unmapped) user stack pointer.
45 *
46 * These are layed out in a special way for cache reasons,
47 * don't touch...
48 */
49 .globl fill_fixup, spill_fixup
50fill_fixup:
51 rdpr %tstate, %g1
52 andcc %g1, TSTATE_PRIV, %g0
53 or %g4, FAULT_CODE_WINFIXUP, %g4
54 be,pt %xcc, window_scheisse_from_user_common
55 and %g1, TSTATE_CWP, %g1
56
57 /* This is the extremely complex case, but it does happen from
58 * time to time if things are just right. Essentially the restore
59 * done in rtrap right before going back to user mode, with tl=1
60 * and that levels trap stack registers all setup, took a fill trap,
61 * the user stack was not mapped in the tlb, and tlb miss occurred,
62 * the pte found was not valid, and a simple ref bit watch update
63 * could not satisfy the miss, so we got here.
64 *
65 * We must carefully unwind the state so we get back to tl=0, preserve
66 * all the register values we were going to give to the user. Luckily
67 * most things are where they need to be, we also have the address
68 * which triggered the fault handy as well.
69 *
70 * Also note that we must preserve %l5 and %l6. If the user was
71 * returning from a system call, we must make it look this way
72 * after we process the fill fault on the users stack.
73 *
74 * First, get into the window where the original restore was executed.
75 */
76
77 rdpr %wstate, %g2 ! Grab user mode wstate.
78 wrpr %g1, %cwp ! Get into the right window.
79 sll %g2, 3, %g2 ! NORMAL-->OTHER
80
81 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
82 wrpr %g2, 0x0, %wstate ! This must be consistent.
83 wrpr %g0, 0x0, %otherwin ! We know this.
84 call set_pcontext ! Change contexts...
85 nop
86 rdpr %pstate, %l1 ! Prepare to change globals.
87 mov %g6, %o7 ! Get current.
88
89 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
90 stb %g4, [%g6 + TI_FAULT_CODE]
91 stx %g5, [%g6 + TI_FAULT_ADDR]
92 wrpr %g0, 0x0, %tl ! Out of trap levels.
93 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
94 mov %o7, %g6
95 ldx [%g6 + TI_TASK], %g4
96#ifdef CONFIG_SMP
97 mov TSB_REG, %g1
98 ldxa [%g1] ASI_IMMU, %g5
99#endif
100
101 /* This is the same as below, except we handle this a bit special
102 * since we must preserve %l5 and %l6, see comment above.
103 */
104 call do_sparc64_fault
105 add %sp, PTREGS_OFF, %o0
106 ba,pt %xcc, rtrap
107 nop ! yes, nop is correct
108
109 /* Be very careful about usage of the alternate globals here.
110 * You cannot touch %g4/%g5 as that has the fault information
111 * should this be from usermode. Also be careful for the case
112 * where we get here from the save instruction in etrap.S when
113 * coming from either user or kernel (does not matter which, it
114 * is the same problem in both cases). Essentially this means
115 * do not touch %g7 or %g2 so we handle the two cases fine.
116 */
117spill_fixup:
118 ldx [%g6 + TI_FLAGS], %g1
119 andcc %g1, _TIF_32BIT, %g0
120 ldub [%g6 + TI_WSAVED], %g1
121
122 sll %g1, 3, %g3
123 add %g6, %g3, %g3
124 stx %sp, [%g3 + TI_RWIN_SPTRS]
125 sll %g1, 7, %g3
126 bne,pt %xcc, 1f
127 add %g6, %g3, %g3
128 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
129 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
130
131 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
132 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
133 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
134 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
135 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
136 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
137 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
138 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
139
140 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
141 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
142 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
143 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
144 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
145 b,pt %xcc, 2f
146 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
1471: stw %l0, [%g3 + TI_REG_WINDOW + 0x00]
148
149 stw %l1, [%g3 + TI_REG_WINDOW + 0x04]
150 stw %l2, [%g3 + TI_REG_WINDOW + 0x08]
151 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]
152 stw %l4, [%g3 + TI_REG_WINDOW + 0x10]
153 stw %l5, [%g3 + TI_REG_WINDOW + 0x14]
154 stw %l6, [%g3 + TI_REG_WINDOW + 0x18]
155 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]
156 stw %i0, [%g3 + TI_REG_WINDOW + 0x20]
157
158 stw %i1, [%g3 + TI_REG_WINDOW + 0x24]
159 stw %i2, [%g3 + TI_REG_WINDOW + 0x28]
160 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]
161 stw %i4, [%g3 + TI_REG_WINDOW + 0x30]
162 stw %i5, [%g3 + TI_REG_WINDOW + 0x34]
163 stw %i6, [%g3 + TI_REG_WINDOW + 0x38]
164 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]
1652: add %g1, 1, %g1
166
167 stb %g1, [%g6 + TI_WSAVED]
168 rdpr %tstate, %g1
169 andcc %g1, TSTATE_PRIV, %g0
170 saved
171 and %g1, TSTATE_CWP, %g1
172 be,pn %xcc, window_scheisse_from_user_common
173 mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
174 retry
175
176window_scheisse_from_user_common:
177 stb %g4, [%g6 + TI_FAULT_CODE]
178 stx %g5, [%g6 + TI_FAULT_ADDR]
179 wrpr %g1, %cwp
180 ba,pt %xcc, etrap
181 rd %pc, %g7
182 call do_sparc64_fault
183 add %sp, PTREGS_OFF, %o0
184 ba,a,pt %xcc, rtrap_clr_l6
185
186 .globl winfix_mna, fill_fixup_mna, spill_fixup_mna
187winfix_mna:
188 andn %g3, 0x7f, %g3
189 add %g3, 0x78, %g3
190 wrpr %g3, %tnpc
191 done
192fill_fixup_mna:
193 rdpr %tstate, %g1
194 andcc %g1, TSTATE_PRIV, %g0
195 be,pt %xcc, window_mna_from_user_common
196 and %g1, TSTATE_CWP, %g1
197
198 /* Please, see fill_fixup commentary about why we must preserve
199 * %l5 and %l6 to preserve absolute correct semantics.
200 */
201 rdpr %wstate, %g2 ! Grab user mode wstate.
202 wrpr %g1, %cwp ! Get into the right window.
203 sll %g2, 3, %g2 ! NORMAL-->OTHER
204 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
205
206 wrpr %g2, 0x0, %wstate ! This must be consistent.
207 wrpr %g0, 0x0, %otherwin ! We know this.
208 call set_pcontext ! Change contexts...
209 nop
210 rdpr %pstate, %l1 ! Prepare to change globals.
211 mov %g4, %o2 ! Setup args for
212 mov %g5, %o1 ! final call to mem_address_unaligned.
213 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
214
215 mov %g6, %o7 ! Stash away current.
216 wrpr %g0, 0x0, %tl ! Out of trap levels.
217 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
218 mov %o7, %g6 ! Get current back.
219 ldx [%g6 + TI_TASK], %g4 ! Finish it.
220#ifdef CONFIG_SMP
221 mov TSB_REG, %g1
222 ldxa [%g1] ASI_IMMU, %g5
223#endif
224 call mem_address_unaligned
225 add %sp, PTREGS_OFF, %o0
226
227 b,pt %xcc, rtrap
228 nop ! yes, the nop is correct
229spill_fixup_mna:
230 ldx [%g6 + TI_FLAGS], %g1
231 andcc %g1, _TIF_32BIT, %g0
232 ldub [%g6 + TI_WSAVED], %g1
233 sll %g1, 3, %g3
234 add %g6, %g3, %g3
235 stx %sp, [%g3 + TI_RWIN_SPTRS]
236
237 sll %g1, 7, %g3
238 bne,pt %xcc, 1f
239 add %g6, %g3, %g3
240 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
241 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
242 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
243 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
244 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
245
246 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
247 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
248 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
249 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
250 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
251 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
252 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
253 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
254
255 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
256 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
257 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
258 b,pt %xcc, 2f
259 add %g1, 1, %g1
2601: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
261 std %l2, [%g3 + TI_REG_WINDOW + 0x08]
262 std %l4, [%g3 + TI_REG_WINDOW + 0x10]
263
264 std %l6, [%g3 + TI_REG_WINDOW + 0x18]
265 std %i0, [%g3 + TI_REG_WINDOW + 0x20]
266 std %i2, [%g3 + TI_REG_WINDOW + 0x28]
267 std %i4, [%g3 + TI_REG_WINDOW + 0x30]
268 std %i6, [%g3 + TI_REG_WINDOW + 0x38]
269 add %g1, 1, %g1
2702: stb %g1, [%g6 + TI_WSAVED]
271 rdpr %tstate, %g1
272
273 andcc %g1, TSTATE_PRIV, %g0
274 saved
275 be,pn %xcc, window_mna_from_user_common
276 and %g1, TSTATE_CWP, %g1
277 retry
278window_mna_from_user_common:
279 wrpr %g1, %cwp
280 sethi %hi(109f), %g7
281 ba,pt %xcc, etrap
282109: or %g7, %lo(109b), %g7
283 mov %l4, %o2
284 mov %l5, %o1
285 call mem_address_unaligned
286 add %sp, PTREGS_OFF, %o0
287 ba,pt %xcc, rtrap
288 clr %l6
289
290 /* These are only needed for 64-bit mode processes which
291 * put their stack pointer into the VPTE area and there
292 * happens to be a VPTE tlb entry mapped there during
293 * a spill/fill trap to that stack frame.
294 */
295 .globl winfix_dax, fill_fixup_dax, spill_fixup_dax
296winfix_dax:
297 andn %g3, 0x7f, %g3
298 add %g3, 0x74, %g3
299 wrpr %g3, %tnpc
300 done
301fill_fixup_dax:
302 rdpr %tstate, %g1
303 andcc %g1, TSTATE_PRIV, %g0
304 be,pt %xcc, window_dax_from_user_common
305 and %g1, TSTATE_CWP, %g1
306
307 /* Please, see fill_fixup commentary about why we must preserve
308 * %l5 and %l6 to preserve absolute correct semantics.
309 */
310 rdpr %wstate, %g2 ! Grab user mode wstate.
311 wrpr %g1, %cwp ! Get into the right window.
312 sll %g2, 3, %g2 ! NORMAL-->OTHER
313 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
314
315 wrpr %g2, 0x0, %wstate ! This must be consistent.
316 wrpr %g0, 0x0, %otherwin ! We know this.
317 call set_pcontext ! Change contexts...
318 nop
319 rdpr %pstate, %l1 ! Prepare to change globals.
320 mov %g4, %o1 ! Setup args for
321 mov %g5, %o2 ! final call to data_access_exception.
322 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
323
324 mov %g6, %o7 ! Stash away current.
325 wrpr %g0, 0x0, %tl ! Out of trap levels.
326 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
327 mov %o7, %g6 ! Get current back.
328 ldx [%g6 + TI_TASK], %g4 ! Finish it.
329#ifdef CONFIG_SMP
330 mov TSB_REG, %g1
331 ldxa [%g1] ASI_IMMU, %g5
332#endif
333 call data_access_exception
334 add %sp, PTREGS_OFF, %o0
335
336 b,pt %xcc, rtrap
337 nop ! yes, the nop is correct
338spill_fixup_dax:
339 ldx [%g6 + TI_FLAGS], %g1
340 andcc %g1, _TIF_32BIT, %g0
341 ldub [%g6 + TI_WSAVED], %g1
342 sll %g1, 3, %g3
343 add %g6, %g3, %g3
344 stx %sp, [%g3 + TI_RWIN_SPTRS]
345
346 sll %g1, 7, %g3
347 bne,pt %xcc, 1f
348 add %g6, %g3, %g3
349 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
350 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
351 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
352 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
353 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
354
355 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
356 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
357 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
358 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
359 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
360 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
361 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
362 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
363
364 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
365 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
366 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
367 b,pt %xcc, 2f
368 add %g1, 1, %g1
3691: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
370 std %l2, [%g3 + TI_REG_WINDOW + 0x08]
371 std %l4, [%g3 + TI_REG_WINDOW + 0x10]
372
373 std %l6, [%g3 + TI_REG_WINDOW + 0x18]
374 std %i0, [%g3 + TI_REG_WINDOW + 0x20]
375 std %i2, [%g3 + TI_REG_WINDOW + 0x28]
376 std %i4, [%g3 + TI_REG_WINDOW + 0x30]
377 std %i6, [%g3 + TI_REG_WINDOW + 0x38]
378 add %g1, 1, %g1
3792: stb %g1, [%g6 + TI_WSAVED]
380 rdpr %tstate, %g1
381
382 andcc %g1, TSTATE_PRIV, %g0
383 saved
384 be,pn %xcc, window_dax_from_user_common
385 and %g1, TSTATE_CWP, %g1
386 retry
387window_dax_from_user_common:
388 wrpr %g1, %cwp
389 sethi %hi(109f), %g7
390 ba,pt %xcc, etrap
391109: or %g7, %lo(109b), %g7
392 mov %l4, %o1
393 mov %l5, %o2
394 call data_access_exception
395 add %sp, PTREGS_OFF, %o0
396 ba,pt %xcc, rtrap
397 clr %l6
398
399
	/* Cheetah-Plus uses a different PRIMARY_CONTEXT layout, so early
	 * boot calls this routine to overwrite the two placeholder
	 * "sethi %hi(0)" instructions in set_pcontext (labels
	 * cplus_winfixup_insn_1/2) with the real instruction words
	 * stored at cplus_wfinsn_1/2, flushing each patched word from
	 * the instruction cache.
	 */
400	.globl		cheetah_plus_patch_winfixup
401cheetah_plus_patch_winfixup:
402	sethi	%hi(cplus_wfinsn_1), %o0
403	sethi	%hi(cplus_winfixup_insn_1), %o2
404	lduw	[%o0 + %lo(cplus_wfinsn_1)], %o1
405	or	%o2, %lo(cplus_winfixup_insn_1), %o2
406	stw	%o1, [%o2]
407	flush	%o2
408
409	sethi	%hi(cplus_wfinsn_2), %o0
410	sethi	%hi(cplus_winfixup_insn_2), %o2
411	lduw	[%o0 + %lo(cplus_wfinsn_2)], %o1
412	or	%o2, %lo(cplus_winfixup_insn_2), %o2
413	stw	%o1, [%o2]
414	flush	%o2
415
416	retl
417	 nop
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
new file mode 100644
index 000000000000..40dbeec7e5d6
--- /dev/null
+++ b/arch/sparc64/lib/Makefile
@@ -0,0 +1,20 @@
1# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
2# Makefile for Sparc64 library files..
3#
4
5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror
7
8lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
9 memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
10 bzero.o csum_copy.o csum_copy_from_user.o csum_copy_to_user.o \
11 VISsave.o atomic.o bitops.o \
12 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
13 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
14 copy_in_user.o user_fixup.o memmove.o \
15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
16
17lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
18lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
19
20obj-y += iomap.o
diff --git a/arch/sparc64/lib/PeeCeeI.c b/arch/sparc64/lib/PeeCeeI.c
new file mode 100644
index 000000000000..3008d536e8c2
--- /dev/null
+++ b/arch/sparc64/lib/PeeCeeI.c
@@ -0,0 +1,237 @@
1/* $Id: PeeCeeI.c,v 1.4 1999/09/06 01:17:35 davem Exp $
2 * PeeCeeI.c: The emerging standard...
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/io.h>
8#include <asm/byteorder.h>
9
10void outsb(void __iomem *addr, const void *src, unsigned long count)
11{
12 const u8 *p = src;
13
14 while(count--)
15 outb(*p++, addr);
16}
17
/*
 * outsw - write 'count' 16-bit words from 'src' to the I/O port 'addr'.
 *
 * If 'src' is only 2-byte aligned one leading halfword is emitted
 * first, so the bulk can be drained with aligned 32-bit loads (two
 * port writes per load), followed by an optional trailing halfword.
 * NOTE(review): the le16/le32 swabs treat the buffer as holding
 * little-endian (bus-order) data on this big-endian CPU -- confirm
 * against callers.
 */
18void outsw(void __iomem *addr, const void *src, unsigned long count)
19{
20	if(count) {
21		u16 *ps = (u16 *)src;
22		u32 *pi;
23
		/* Emit one halfword to reach 32-bit source alignment. */
24		if(((u64)src) & 0x2) {
25			u16 val = le16_to_cpup(ps);
26			outw(val, addr);
27			ps++;
28			count--;
29		}
30		pi = (u32 *)ps;
		/* Two port writes per aligned 32-bit load. */
31		while(count >= 2) {
32			u32 w = le32_to_cpup(pi);
33
34			pi++;
35			outw(w >> 0, addr);
36			outw(w >> 16, addr);
37			count -= 2;
38		}
39		ps = (u16 *)pi;
		/* At most one halfword remains. */
40		if(count) {
41			u16 val = le16_to_cpup(ps);
42			outw(val, addr);
43		}
44	}
45}
46
/*
 * outsl - write 'count' 32-bit words from 'src' to the I/O port 'addr'.
 *
 * The aligned case is a simple load/outl loop.  For a misaligned
 * 'src', each output word is stitched together from the tail of the
 * previous naturally-aligned load (carried in 'l') and the head of
 * the next one ('l2'), so no unaligned memory access is ever made.
 * NOTE(review): the cpu_to_le*/shift combinations assume a big-endian
 * CPU feeding a little-endian bus -- confirm against callers.
 */
47void outsl(void __iomem *addr, const void *src, unsigned long count)
48{
49	if(count) {
50		if((((u64)src) & 0x3) == 0) {
			/* Fast path: 4-byte aligned source. */
51			u32 *p = (u32 *)src;
52			while(count--) {
53				u32 val = cpu_to_le32p(p);
54				outl(val, addr);
55				p++;
56			}
57		} else {
58			u8 *pb;
59			u16 *ps = (u16 *)src;
60			u32 l = 0, l2;
61			u32 *pi;
62
63			switch(((u64)src) & 0x3) {
			/* src is 2 bytes past alignment: lead with a
			 * halfword, then merge 16/16 bits per word.
			 */
64			case 0x2:
65				count -= 1;
66				l = cpu_to_le16p(ps) << 16;
67				ps++;
68				pi = (u32 *)ps;
69				while(count--) {
70					l2 = cpu_to_le32p(pi);
71					pi++;
72					outl(((l >> 16) | (l2 << 16)), addr);
73					l = l2;
74				}
75				ps = (u16 *)pi;
76				l2 = cpu_to_le16p(ps);
77				outl(((l >> 16) | (l2 << 16)), addr);
78				break;
79
			/* src is 1 byte past alignment: lead with a byte
			 * plus a halfword, then merge 24/8 bits per word.
			 */
80			case 0x1:
81				count -= 1;
82				pb = (u8 *)src;
83				l = (*pb++ << 8);
84				ps = (u16 *)pb;
85				l2 = cpu_to_le16p(ps);
86				ps++;
87				l |= (l2 << 16);
88				pi = (u32 *)ps;
89				while(count--) {
90					l2 = cpu_to_le32p(pi);
91					pi++;
92					outl(((l >> 8) | (l2 << 24)), addr);
93					l = l2;
94				}
95				pb = (u8 *)pi;
96				outl(((l >> 8) | (*pb << 24)), addr);
97				break;
98
			/* src is 3 bytes past alignment: lead with a
			 * single byte, then merge 8/24 bits per word.
			 */
99			case 0x3:
100				count -= 1;
101				pb = (u8 *)src;
102				l = (*pb++ << 24);
103				pi = (u32 *)pb;
104				while(count--) {
105					l2 = cpu_to_le32p(pi);
106					pi++;
107					outl(((l >> 24) | (l2 << 8)), addr);
108					l = l2;
109				}
110				ps = (u16 *)pi;
111				l2 = cpu_to_le16p(ps);
112				ps++;
113				pb = (u8 *)ps;
114				l2 |= (*pb << 16);
115				outl(((l >> 24) | (l2 << 8)), addr);
116				break;
117			}
118		}
119	}
120}
121
122void insb(void __iomem *addr, void *dst, unsigned long count)
123{
124 if(count) {
125 u32 *pi;
126 u8 *pb = dst;
127
128 while((((unsigned long)pb) & 0x3) && count--)
129 *pb++ = inb(addr);
130 pi = (u32 *)pb;
131 while(count >= 4) {
132 u32 w;
133
134 w = (inb(addr) << 24);
135 w |= (inb(addr) << 16);
136 w |= (inb(addr) << 8);
137 w |= (inb(addr) << 0);
138 *pi++ = w;
139 count -= 4;
140 }
141 pb = (u8 *)pi;
142 while(count--)
143 *pb++ = inb(addr);
144 }
145}
146
/*
 * insw - read 'count' 16-bit words from the I/O port 'addr' into 'dst'.
 *
 * One leading halfword store aligns 'dst' to 32 bits, then two port
 * reads are packed per aligned 32-bit store, plus an optional tail
 * halfword.  le16_to_cpu byte-swaps the port data on this big-endian
 * CPU.
 */
147void insw(void __iomem *addr, void *dst, unsigned long count)
148{
149	if(count) {
150		u16 *ps = dst;
151		u32 *pi;
152
		/* Halfword lead-in to reach 32-bit alignment. */
153		if(((unsigned long)ps) & 0x2) {
154			*ps++ = le16_to_cpu(inw(addr));
155			count--;
156		}
157		pi = (u32 *)ps;
158		while(count >= 2) {
159			u32 w;
160
161			w = (le16_to_cpu(inw(addr)) << 16);
162			w |= (le16_to_cpu(inw(addr)) << 0);
163			*pi++ = w;
164			count -= 2;
165		}
166		ps = (u16 *)pi;
		/* At most one halfword remains. */
167		if(count)
168			*ps = le16_to_cpu(inw(addr));
169	}
170}
171
/*
 * insl - read 'count' 32-bit words from the I/O port 'addr' into 'dst'.
 *
 * Aligned destinations take the simple loop.  Otherwise each incoming
 * word is split across two naturally-aligned stores: the leftover
 * bytes of the previous port read are carried in 'l' and merged with
 * the next read ('l2'), so no unaligned store is ever performed.
 * NOTE(review): the shift amounts encode big-endian byte order in the
 * destination buffer -- confirm against callers.
 */
172void insl(void __iomem *addr, void *dst, unsigned long count)
173{
174	if(count) {
175		if((((unsigned long)dst) & 0x3) == 0) {
			/* Fast path: 4-byte aligned destination. */
176			u32 *pi = dst;
177			while(count--)
178				*pi++ = le32_to_cpu(inl(addr));
179		} else {
180			u32 l = 0, l2, *pi;
181			u16 *ps;
182			u8 *pb;
183
184			switch(((unsigned long)dst) & 3) {
			/* dst is 2 bytes past alignment: store a
			 * leading halfword, then 16/16-bit merges.
			 */
185			case 0x2:
186				ps = dst;
187				count -= 1;
188				l = le32_to_cpu(inl(addr));
189				*ps++ = l;
190				pi = (u32 *)ps;
191				while(count--) {
192					l2 = le32_to_cpu(inl(addr));
193					*pi++ = (l << 16) | (l2 >> 16);
194					l = l2;
195				}
196				ps = (u16 *)pi;
197				*ps = l;
198				break;
199
			/* dst is 1 byte past alignment: byte plus
			 * halfword lead-in, then 24/8-bit merges.
			 */
200			case 0x1:
201				pb = dst;
202				count -= 1;
203				l = le32_to_cpu(inl(addr));
204				*pb++ = l >> 24;
205				ps = (u16 *)pb;
206				*ps++ = ((l >> 8) & 0xffff);
207				pi = (u32 *)ps;
208				while(count--) {
209					l2 = le32_to_cpu(inl(addr));
210					*pi++ = (l << 24) | (l2 >> 8);
211					l = l2;
212				}
213				pb = (u8 *)pi;
214				*pb = l;
215				break;
216
			/* dst is 3 bytes past alignment: single byte
			 * lead-in, then 8/24-bit merges.
			 */
217			case 0x3:
218				pb = (u8 *)dst;
219				count -= 1;
220				l = le32_to_cpu(inl(addr));
221				*pb++ = l >> 24;
222				pi = (u32 *)pb;
223				while(count--) {
224					l2 = le32_to_cpu(inl(addr));
225					*pi++ = (l << 8) | (l2 >> 24);
226					l = l2;
227				}
228				ps = (u16 *)pi;
229				*ps++ = ((l >> 8) & 0xffff);
230				pb = (u8 *)ps;
231				*pb = l;
232				break;
233			}
234		}
235	}
236}
237
diff --git a/arch/sparc64/lib/U1copy_from_user.S b/arch/sparc64/lib/U1copy_from_user.S
new file mode 100644
index 000000000000..93146a81e2d3
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_from_user.S
@@ -0,0 +1,33 @@
1/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
/* EX_LD(load): emit the load at local label 98 and an __ex_table
 * entry pairing it with fixup label 99.  A fault while reading
 * userspace thus branches to the fixup, which returns 1 to the
 * caller.
 */
6#define EX_LD(x)		\
798:	x;			\
8	.section .fixup;	\
9	.align 4;		\
1099:	retl;			\
11	 mov 1, %o0;		\
12	.section __ex_table;	\
13	.align 4;		\
14	.word 98b, 99b;		\
15	.text;			\
16	.align 4;
17
/* Parameterize U1memcpy.S as the kernel's ___copy_from_user:
 * loads come from the user address space via %asi / ASI_BLK_AIUS,
 * and a successful copy returns 0 (EX_RETVAL) instead of the
 * destination pointer.
 */
18#define FUNC_NAME		___copy_from_user
19#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
20#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS, dest
21#define EX_RETVAL(x)		0
22
23	/* Writing to %asi is _expensive_ so we hardcode it.
24	 * Reading %asi to check for KERNEL_DS is comparatively
25	 * cheap.
26	 */
27#define PREAMBLE					\
28	rd %asi, %g1;					\
29	cmp %g1, ASI_AIUS;				\
30	bne,pn %icc, memcpy_user_stub;			\
31	 nop;						\
32
33#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1copy_to_user.S b/arch/sparc64/lib/U1copy_to_user.S
new file mode 100644
index 000000000000..1fccc521e2bd
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_to_user.S
@@ -0,0 +1,33 @@
1/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
/* EX_ST(store): emit the store at local label 98 with an __ex_table
 * entry pointing at fixup label 99; a faulting userspace store
 * returns 1 to the caller.
 */
6#define EX_ST(x)		\
798:	x;			\
8	.section .fixup;	\
9	.align 4;		\
1099:	retl;			\
11	 mov 1, %o0;		\
12	.section __ex_table;	\
13	.align 4;		\
14	.word 98b, 99b;		\
15	.text;			\
16	.align 4;
17
/* Parameterize U1memcpy.S as ___copy_to_user: stores go to the user
 * address space (ASI_AIUS / ASI_BLK_AIUS) and success returns 0.
 */
18#define FUNC_NAME		___copy_to_user
19#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
20#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
21#define EX_RETVAL(x)		0
22
23	/* Writing to %asi is _expensive_ so we hardcode it.
24	 * Reading %asi to check for KERNEL_DS is comparatively
25	 * cheap.
26	 */
27#define PREAMBLE					\
28	rd %asi, %g1;					\
29	cmp %g1, ASI_AIUS;				\
30	bne,pn %icc, memcpy_user_stub;			\
31	 nop;						\
32
33#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1memcpy.S b/arch/sparc64/lib/U1memcpy.S
new file mode 100644
index 000000000000..da9b520c7189
--- /dev/null
+++ b/arch/sparc64/lib/U1memcpy.S
@@ -0,0 +1,560 @@
1/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
2 *
3 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
/* In-kernel builds get VISEntry/VISExit and the ASI constants from
 * headers; the #else branch supplies stand-alone definitions so the
 * file can also be assembled as a plain user-land memcpy.
 */
7#ifdef __KERNEL__
8#include <asm/visasm.h>
9#include <asm/asi.h>
10#define GLOBAL_SPARE	g7
11#else
12#define GLOBAL_SPARE	g5
13#define ASI_BLK_P 0xf0
14#define FPRS_FEF  0x04
15#ifdef MEMCPY_DEBUG
16#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
17		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
18#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
19#else
20#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
21#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
22#endif
23#endif
24
/* The EX_LD / EX_ST / LOAD / STORE / FUNC_NAME / PREAMBLE hooks below
 * default to the plain memcpy flavor; the U1copy_{from,to}_user.S
 * wrappers override them before including this file.
 */
25#ifndef EX_LD
26#define EX_LD(x)	x
27#endif
28
29#ifndef EX_ST
30#define EX_ST(x)	x
31#endif
32
33#ifndef EX_RETVAL
34#define EX_RETVAL(x)	x
35#endif
36
37#ifndef LOAD
38#define LOAD(type,addr,dest)	type [addr], dest
39#endif
40
41#ifndef LOAD_BLK
42#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
43#endif
44
45#ifndef STORE
46#define STORE(type,src,addr)	type src, [addr]
47#endif
48
49#ifndef STORE_BLK
50#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
51#endif
52
53#ifndef FUNC_NAME
54#define FUNC_NAME	memcpy
55#endif
56
57#ifndef PREAMBLE
58#define PREAMBLE
59#endif
60
61#ifndef XCC
62#define XCC xcc
63#endif
64
/* FREG_FROB: faligndata eight consecutive doubleword pairs (f1..f8,
 * with f9 as the lookahead register) into the %f48-%f62 output set.
 */
65#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
66	faligndata %f1, %f2, %f48;	\
67	faligndata %f2, %f3, %f50;	\
68	faligndata %f3, %f4, %f52;	\
69	faligndata %f4, %f5, %f54;	\
70	faligndata %f5, %f6, %f56;	\
71	faligndata %f6, %f7, %f58;	\
72	faligndata %f7, %f8, %f60;	\
73	faligndata %f8, %f9, %f62;
74
/* MAIN_LOOP_CHUNK: stream one 64-byte block -- block-load the next
 * source chunk into %fdest, block-store the previously aligned data
 * from %fsrc, and branch to 'jmptgt' when 'len' is exhausted (the
 * delay slot still advances 'dest').
 */
75#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
76	EX_LD(LOAD_BLK(%src, %fdest));	\
77	EX_ST(STORE_BLK(%fsrc, %dest));	\
78	add %src, 0x40, %src;	\
79	subcc %len, 0x40, %len;	\
80	be,pn %xcc, jmptgt;	\
81	 add %dest, 0x40, %dest;	\
82
83#define LOOP_CHUNK1(src, dest, len, branch_dest)	\
84	MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
85#define LOOP_CHUNK2(src, dest, len, branch_dest)	\
86	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
87#define LOOP_CHUNK3(src, dest, len, branch_dest)	\
88	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
89
/* STORE_SYNC / STORE_JUMP: final block stores on the loop-exit
 * paths; each use site appends "membar #Sync" after the macro.
 */
90#define STORE_SYNC(dest, fsrc)				\
91	EX_ST(STORE_BLK(%fsrc, %dest));			\
92	add %dest, 0x40, %dest;
93
94#define STORE_JUMP(dest, fsrc, target)			\
95	EX_ST(STORE_BLK(%fsrc, %dest));			\
96	add %dest, 0x40, %dest;				\
97	ba,pt %xcc, target;
98
/* Tail helpers: FINISH_VISCHUNK drains one aligned doubleword while
 * 'left' >= 8 remains, branching to the byte tail at 95f otherwise.
 * The UNEVEN variants instead move %f0 into place with fsrc1 and
 * (for UNEVEN_VISCHUNK) continue at the doubleword tail loop 93f.
 */
99#define FINISH_VISCHUNK(dest, f0, f1, left)	\
100	subcc %left, 8, %left;\
101	bl,pn %xcc, 95f;	\
102	 faligndata %f0, %f1, %f48;	\
103	EX_ST(STORE(std, %f48, %dest));		\
104	add %dest, 8, %dest;
105
106#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
107	subcc %left, 8, %left;	\
108	bl,pn %xcc, 95f;	\
109	 fsrc1 %f0, %f1;
110
111#define UNEVEN_VISCHUNK(dest, f0, f1, left)	\
112	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
113	ba,a,pt %xcc, 93f;
114
115	.register %g2,#scratch
116	.register %g3,#scratch
117
118 .text
119 .align 64
120
121 .globl FUNC_NAME
122 .type FUNC_NAME,#function
123FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
124 srlx %o2, 31, %g2
125 cmp %g2, 0
126 tne %xcc, 5
127 PREAMBLE
128 mov %o0, %o4
129 cmp %o2, 0
130 be,pn %XCC, 85f
131 or %o0, %o1, %o3
132 cmp %o2, 16
133 blu,a,pn %XCC, 80f
134 or %o3, %o2, %o3
135
136 cmp %o2, (5 * 64)
137 blu,pt %XCC, 70f
138 andcc %o3, 0x7, %g0
139
140 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. */
141 VISEntry
142
143 /* Is 'dst' already aligned on an 64-byte boundary? */
144 andcc %o0, 0x3f, %g2
145 be,pt %XCC, 2f
146
147 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
148 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
149 * subtract this from 'len'.
150 */
151 sub %o0, %o1, %GLOBAL_SPARE
152 sub %g2, 0x40, %g2
153 sub %g0, %g2, %g2
154 sub %o2, %g2, %o2
155 andcc %g2, 0x7, %g1
156 be,pt %icc, 2f
157 and %g2, 0x38, %g2
158
1591: subcc %g1, 0x1, %g1
160 EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
161 EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
162 bgu,pt %XCC, 1b
163 add %o1, 0x1, %o1
164
165 add %o1, %GLOBAL_SPARE, %o0
166
1672: cmp %g2, 0x0
168 and %o1, 0x7, %g1
169 be,pt %icc, 3f
170 alignaddr %o1, %g0, %o1
171
172 EX_LD(LOAD(ldd, %o1, %f4))
1731: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
174 add %o1, 0x8, %o1
175 subcc %g2, 0x8, %g2
176 faligndata %f4, %f6, %f0
177 EX_ST(STORE(std, %f0, %o0))
178 be,pn %icc, 3f
179 add %o0, 0x8, %o0
180
181 EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
182 add %o1, 0x8, %o1
183 subcc %g2, 0x8, %g2
184 faligndata %f6, %f4, %f0
185 EX_ST(STORE(std, %f0, %o0))
186 bne,pt %icc, 1b
187 add %o0, 0x8, %o0
188
189 /* Destination is 64-byte aligned. */
1903:
191 membar #LoadStore | #StoreStore | #StoreLoad
192
193 subcc %o2, 0x40, %GLOBAL_SPARE
194 add %o1, %g1, %g1
195 andncc %GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
196 srl %g1, 3, %g2
197 sub %o2, %GLOBAL_SPARE, %g3
198 andn %o1, (0x40 - 1), %o1
199 and %g2, 7, %g2
200 andncc %g3, 0x7, %g3
201 fmovd %f0, %f2
202 sub %g3, 0x8, %g3
203 sub %o2, %GLOBAL_SPARE, %o2
204
205 add %g1, %GLOBAL_SPARE, %g1
206 subcc %o2, %g3, %o2
207
208 EX_LD(LOAD_BLK(%o1, %f0))
209 add %o1, 0x40, %o1
210 add %g1, %g3, %g1
211 EX_LD(LOAD_BLK(%o1, %f16))
212 add %o1, 0x40, %o1
213 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
214 EX_LD(LOAD_BLK(%o1, %f32))
215 add %o1, 0x40, %o1
216
217 /* There are 8 instances of the unrolled loop,
218 * one for each possible alignment of the
219 * source buffer. Each loop instance is 452
220 * bytes.
221 */
222 sll %g2, 3, %o3
223 sub %o3, %g2, %o3
224 sllx %o3, 4, %o3
225 add %o3, %g2, %o3
226 sllx %o3, 2, %g2
2271: rd %pc, %o3
228 add %o3, %lo(1f - 1b), %o3
229 jmpl %o3 + %g2, %g0
230 nop
231
232 .align 64
2331: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
234 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
235 FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
236 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
237 FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
238 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
239 ba,pt %xcc, 1b+4
240 faligndata %f0, %f2, %f48
2411: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
242 STORE_SYNC(o0, f48) membar #Sync
243 FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
244 STORE_JUMP(o0, f48, 40f) membar #Sync
2452: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
246 STORE_SYNC(o0, f48) membar #Sync
247 FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
248 STORE_JUMP(o0, f48, 48f) membar #Sync
2493: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
250 STORE_SYNC(o0, f48) membar #Sync
251 FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
252 STORE_JUMP(o0, f48, 56f) membar #Sync
253
2541: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
255 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
256 FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
257 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
258 FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
259 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
260 ba,pt %xcc, 1b+4
261 faligndata %f2, %f4, %f48
2621: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
263 STORE_SYNC(o0, f48) membar #Sync
264 FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
265 STORE_JUMP(o0, f48, 41f) membar #Sync
2662: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
267 STORE_SYNC(o0, f48) membar #Sync
268 FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
269 STORE_JUMP(o0, f48, 49f) membar #Sync
2703: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
271 STORE_SYNC(o0, f48) membar #Sync
272 FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
273 STORE_JUMP(o0, f48, 57f) membar #Sync
274
2751: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
276 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
277 FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
278 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
279 FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
280 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
281 ba,pt %xcc, 1b+4
282 faligndata %f4, %f6, %f48
2831: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
284 STORE_SYNC(o0, f48) membar #Sync
285 FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
286 STORE_JUMP(o0, f48, 42f) membar #Sync
2872: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
288 STORE_SYNC(o0, f48) membar #Sync
289 FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
290 STORE_JUMP(o0, f48, 50f) membar #Sync
2913: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
292 STORE_SYNC(o0, f48) membar #Sync
293 FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
294 STORE_JUMP(o0, f48, 58f) membar #Sync
295
2961: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
297 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
298 FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
299 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
300 FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
301 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
302 ba,pt %xcc, 1b+4
303 faligndata %f6, %f8, %f48
3041: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
305 STORE_SYNC(o0, f48) membar #Sync
306 FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
307 STORE_JUMP(o0, f48, 43f) membar #Sync
3082: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
309 STORE_SYNC(o0, f48) membar #Sync
310 FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
311 STORE_JUMP(o0, f48, 51f) membar #Sync
3123: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
313 STORE_SYNC(o0, f48) membar #Sync
314 FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
315 STORE_JUMP(o0, f48, 59f) membar #Sync
316
3171: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
318 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
319 FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
320 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
321 FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
322 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
323 ba,pt %xcc, 1b+4
324 faligndata %f8, %f10, %f48
3251: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
326 STORE_SYNC(o0, f48) membar #Sync
327 FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
328 STORE_JUMP(o0, f48, 44f) membar #Sync
3292: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
330 STORE_SYNC(o0, f48) membar #Sync
331 FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
332 STORE_JUMP(o0, f48, 52f) membar #Sync
3333: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
334 STORE_SYNC(o0, f48) membar #Sync
335 FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
336 STORE_JUMP(o0, f48, 60f) membar #Sync
337
3381: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
339 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
340 FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
341 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
342 FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
343 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
344 ba,pt %xcc, 1b+4
345 faligndata %f10, %f12, %f48
3461: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
347 STORE_SYNC(o0, f48) membar #Sync
348 FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
349 STORE_JUMP(o0, f48, 45f) membar #Sync
3502: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
351 STORE_SYNC(o0, f48) membar #Sync
352 FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
353 STORE_JUMP(o0, f48, 53f) membar #Sync
3543: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
355 STORE_SYNC(o0, f48) membar #Sync
356 FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
357 STORE_JUMP(o0, f48, 61f) membar #Sync
358
3591: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
360 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
361 FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
362 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
363 FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
364 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
365 ba,pt %xcc, 1b+4
366 faligndata %f12, %f14, %f48
3671: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
368 STORE_SYNC(o0, f48) membar #Sync
369 FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
370 STORE_JUMP(o0, f48, 46f) membar #Sync
3712: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
372 STORE_SYNC(o0, f48) membar #Sync
373 FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
374 STORE_JUMP(o0, f48, 54f) membar #Sync
3753: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
376 STORE_SYNC(o0, f48) membar #Sync
377 FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
378 STORE_JUMP(o0, f48, 62f) membar #Sync
379
3801: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
381 LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
382 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
383 LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
384 FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
385 LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
386 ba,pt %xcc, 1b+4
387 faligndata %f14, %f16, %f48
3881: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
389 STORE_SYNC(o0, f48) membar #Sync
390 FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
391 STORE_JUMP(o0, f48, 47f) membar #Sync
3922: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
393 STORE_SYNC(o0, f48) membar #Sync
394 FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
395 STORE_JUMP(o0, f48, 55f) membar #Sync
3963: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
397 STORE_SYNC(o0, f48) membar #Sync
398 FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
399 STORE_JUMP(o0, f48, 63f) membar #Sync
400
40140: FINISH_VISCHUNK(o0, f0, f2, g3)
40241: FINISH_VISCHUNK(o0, f2, f4, g3)
40342: FINISH_VISCHUNK(o0, f4, f6, g3)
40443: FINISH_VISCHUNK(o0, f6, f8, g3)
40544: FINISH_VISCHUNK(o0, f8, f10, g3)
40645: FINISH_VISCHUNK(o0, f10, f12, g3)
40746: FINISH_VISCHUNK(o0, f12, f14, g3)
40847: UNEVEN_VISCHUNK(o0, f14, f0, g3)
40948: FINISH_VISCHUNK(o0, f16, f18, g3)
41049: FINISH_VISCHUNK(o0, f18, f20, g3)
41150: FINISH_VISCHUNK(o0, f20, f22, g3)
41251: FINISH_VISCHUNK(o0, f22, f24, g3)
41352: FINISH_VISCHUNK(o0, f24, f26, g3)
41453: FINISH_VISCHUNK(o0, f26, f28, g3)
41554: FINISH_VISCHUNK(o0, f28, f30, g3)
41655: UNEVEN_VISCHUNK(o0, f30, f0, g3)
41756: FINISH_VISCHUNK(o0, f32, f34, g3)
41857: FINISH_VISCHUNK(o0, f34, f36, g3)
41958: FINISH_VISCHUNK(o0, f36, f38, g3)
42059: FINISH_VISCHUNK(o0, f38, f40, g3)
42160: FINISH_VISCHUNK(o0, f40, f42, g3)
42261: FINISH_VISCHUNK(o0, f42, f44, g3)
42362: FINISH_VISCHUNK(o0, f44, f46, g3)
42463: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
425
42693: EX_LD(LOAD(ldd, %o1, %f2))
427 add %o1, 8, %o1
428 subcc %g3, 8, %g3
429 faligndata %f0, %f2, %f8
430 EX_ST(STORE(std, %f8, %o0))
431 bl,pn %xcc, 95f
432 add %o0, 8, %o0
433 EX_LD(LOAD(ldd, %o1, %f0))
434 add %o1, 8, %o1
435 subcc %g3, 8, %g3
436 faligndata %f2, %f0, %f8
437 EX_ST(STORE(std, %f8, %o0))
438 bge,pt %xcc, 93b
439 add %o0, 8, %o0
440
44195: brz,pt %o2, 2f
442 mov %g1, %o1
443
4441: EX_LD(LOAD(ldub, %o1, %o3))
445 add %o1, 1, %o1
446 subcc %o2, 1, %o2
447 EX_ST(STORE(stb, %o3, %o0))
448 bne,pt %xcc, 1b
449 add %o0, 1, %o0
450
4512: membar #StoreLoad | #StoreStore
452 VISExit
453 retl
454 mov EX_RETVAL(%o4), %o0
455
456 .align 64
45770: /* 16 < len <= (5 * 64) */
458 bne,pn %XCC, 75f
459 sub %o0, %o1, %o3
460
46172: andn %o2, 0xf, %GLOBAL_SPARE
462 and %o2, 0xf, %o2
4631: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
464 EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
465 subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
466 EX_ST(STORE(stx, %o5, %o1 + %o3))
467 add %o1, 0x8, %o1
468 EX_ST(STORE(stx, %g1, %o1 + %o3))
469 bgu,pt %XCC, 1b
470 add %o1, 0x8, %o1
47173: andcc %o2, 0x8, %g0
472 be,pt %XCC, 1f
473 nop
474 EX_LD(LOAD(ldx, %o1, %o5))
475 sub %o2, 0x8, %o2
476 EX_ST(STORE(stx, %o5, %o1 + %o3))
477 add %o1, 0x8, %o1
4781: andcc %o2, 0x4, %g0
479 be,pt %XCC, 1f
480 nop
481 EX_LD(LOAD(lduw, %o1, %o5))
482 sub %o2, 0x4, %o2
483 EX_ST(STORE(stw, %o5, %o1 + %o3))
484 add %o1, 0x4, %o1
4851: cmp %o2, 0
486 be,pt %XCC, 85f
487 nop
488 ba,pt %xcc, 90f
489 nop
490
49175: andcc %o0, 0x7, %g1
492 sub %g1, 0x8, %g1
493 be,pn %icc, 2f
494 sub %g0, %g1, %g1
495 sub %o2, %g1, %o2
496
4971: EX_LD(LOAD(ldub, %o1, %o5))
498 subcc %g1, 1, %g1
499 EX_ST(STORE(stb, %o5, %o1 + %o3))
500 bgu,pt %icc, 1b
501 add %o1, 1, %o1
502
5032: add %o1, %o3, %o0
504 andcc %o1, 0x7, %g1
505 bne,pt %icc, 8f
506 sll %g1, 3, %g1
507
508 cmp %o2, 16
509 bgeu,pt %icc, 72b
510 nop
511 ba,a,pt %xcc, 73b
512
5138: mov 64, %o3
514 andn %o1, 0x7, %o1
515 EX_LD(LOAD(ldx, %o1, %g2))
516 sub %o3, %g1, %o3
517 andn %o2, 0x7, %GLOBAL_SPARE
518 sllx %g2, %g1, %g2
5191: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
520 subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
521 add %o1, 0x8, %o1
522 srlx %g3, %o3, %o5
523 or %o5, %g2, %o5
524 EX_ST(STORE(stx, %o5, %o0))
525 add %o0, 0x8, %o0
526 bgu,pt %icc, 1b
527 sllx %g3, %g1, %g2
528
529 srl %g1, 3, %g1
530 andcc %o2, 0x7, %o2
531 be,pn %icc, 85f
532 add %o1, %g1, %o1
533 ba,pt %xcc, 90f
534 sub %o0, %o1, %o3
535
536 .align 64
53780: /* 0 < len <= 16 */
538 andcc %o3, 0x3, %g0
539 bne,pn %XCC, 90f
540 sub %o0, %o1, %o3
541
5421: EX_LD(LOAD(lduw, %o1, %g1))
543 subcc %o2, 4, %o2
544 EX_ST(STORE(stw, %g1, %o1 + %o3))
545 bgu,pt %XCC, 1b
546 add %o1, 4, %o1
547
54885: retl
549 mov EX_RETVAL(%o4), %o0
550
551 .align 32
55290: EX_LD(LOAD(ldub, %o1, %g1))
553 subcc %o2, 1, %o2
554 EX_ST(STORE(stb, %g1, %o1 + %o3))
555 bgu,pt %XCC, 90b
556 add %o1, 1, %o1
557 retl
558 mov EX_RETVAL(%o4), %o0
559
560 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/U3copy_from_user.S b/arch/sparc64/lib/U3copy_from_user.S
new file mode 100644
index 000000000000..df600b667e48
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_from_user.S
@@ -0,0 +1,22 @@
1/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
/* EX_LD(load): wrap the load with an exception-table fixup that
 * returns 1 if the userspace access faults.
 */
6#define EX_LD(x)		\
798:	x;			\
8	.section .fixup;	\
9	.align 4;		\
1099:	retl;			\
11	 mov 1, %o0;		\
12	.section __ex_table;	\
13	.align 4;		\
14	.word 98b, 99b;		\
15	.text;			\
16	.align 4;
17
/* Parameterize U3memcpy.S for user-space source loads via %asi.
 * NOTE(review): unlike the U1 variant there is no PREAMBLE %asi
 * check here, and FUNC_NAME is U3copy_from_user rather than
 * ___copy_from_user -- presumably this body is patched over the
 * generic entry at boot (see U3patch.o in the lib Makefile); confirm
 * before relying on it being called directly.
 */
18#define FUNC_NAME		U3copy_from_user
19#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
20#define EX_RETVAL(x)		0
21
22#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3copy_to_user.S b/arch/sparc64/lib/U3copy_to_user.S
new file mode 100644
index 000000000000..f337f22ed82e
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_to_user.S
@@ -0,0 +1,33 @@
1/* U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
/* EX_ST(store): wrap the store with an exception-table fixup that
 * returns 1 if the userspace access faults.
 */
6#define EX_ST(x)		\
798:	x;			\
8	.section .fixup;	\
9	.align 4;		\
1099:	retl;			\
11	 mov 1, %o0;		\
12	.section __ex_table;	\
13	.align 4;		\
14	.word 98b, 99b;		\
15	.text;			\
16	.align 4;
17
/* Parameterize U3memcpy.S for user-space destination stores
 * (ASI_AIUS / ASI_BLK_AIUS); success returns 0.
 * NOTE(review): FUNC_NAME is U3copy_to_user, not ___copy_to_user --
 * presumably switched in via the U3patch mechanism; confirm.
 */
18#define FUNC_NAME		U3copy_to_user
19#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
20#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
21#define EX_RETVAL(x)		0
22
23	/* Writing to %asi is _expensive_ so we hardcode it.
24	 * Reading %asi to check for KERNEL_DS is comparatively
25	 * cheap.
26	 */
27#define PREAMBLE					\
28	rd %asi, %g1;					\
29	cmp %g1, ASI_AIUS;				\
30	bne,pn %icc, memcpy_user_stub;			\
31	 nop;						\
32
33#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3memcpy.S b/arch/sparc64/lib/U3memcpy.S
new file mode 100644
index 000000000000..7cae9cc6a204
--- /dev/null
+++ b/arch/sparc64/lib/U3memcpy.S
@@ -0,0 +1,422 @@
1/* U3memcpy.S: UltraSparc-III optimized memcpy.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
6#ifdef __KERNEL__
7#include <asm/visasm.h>
8#include <asm/asi.h>
9#define GLOBAL_SPARE %g7
10#else
11#define ASI_BLK_P 0xf0
12#define FPRS_FEF 0x04
13#ifdef MEMCPY_DEBUG
14#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
15 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
16#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
17#else
18#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
19#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
20#endif
21#define GLOBAL_SPARE %g5
22#endif
23
24#ifndef EX_LD
25#define EX_LD(x) x
26#endif
27
28#ifndef EX_ST
29#define EX_ST(x) x
30#endif
31
32#ifndef EX_RETVAL
33#define EX_RETVAL(x) x
34#endif
35
36#ifndef LOAD
37#define LOAD(type,addr,dest) type [addr], dest
38#endif
39
40#ifndef STORE
41#define STORE(type,src,addr) type src, [addr]
42#endif
43
44#ifndef STORE_BLK
45#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
46#endif
47
48#ifndef FUNC_NAME
49#define FUNC_NAME U3memcpy
50#endif
51
52#ifndef PREAMBLE
53#define PREAMBLE
54#endif
55
56#ifndef XCC
57#define XCC xcc
58#endif
59
60 .register %g2,#scratch
61 .register %g3,#scratch
62
63 /* Special/non-trivial issues of this code:
64 *
65 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
66 * 2) Only low 32 FPU registers are used so that only the
67 * lower half of the FPU register set is dirtied by this
68 * code. This is especially important in the kernel.
69 * 3) This code never prefetches cachelines past the end
70 * of the source buffer.
71 */
72
73 .text
74 .align 64
75
76 /* The cheetah's flexible spine, oversized liver, enlarged heart,
77 * slender muscular body, and claws make it the swiftest hunter
78 * in Africa and the fastest animal on land. Can reach speeds
79 * of up to 2.4GB per second.
80 */
81
82 .globl FUNC_NAME
83 .type FUNC_NAME,#function
84FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
85 srlx %o2, 31, %g2
86 cmp %g2, 0
87 tne %xcc, 5
88 PREAMBLE
89 mov %o0, %o4
90 cmp %o2, 0
91 be,pn %XCC, 85f
92 or %o0, %o1, %o3
93 cmp %o2, 16
94 blu,a,pn %XCC, 80f
95 or %o3, %o2, %o3
96
97 cmp %o2, (3 * 64)
98 blu,pt %XCC, 70f
99 andcc %o3, 0x7, %g0
100
101 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
102 * o5 from here until we hit VISExitHalf.
103 */
104 VISEntryHalf
105
106 /* Is 'dst' already aligned on an 64-byte boundary? */
107 andcc %o0, 0x3f, %g2
108 be,pt %XCC, 2f
109
110 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
111 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
112 * subtract this from 'len'.
113 */
114 sub %o0, %o1, GLOBAL_SPARE
115 sub %g2, 0x40, %g2
116 sub %g0, %g2, %g2
117 sub %o2, %g2, %o2
118 andcc %g2, 0x7, %g1
119 be,pt %icc, 2f
120 and %g2, 0x38, %g2
121
1221: subcc %g1, 0x1, %g1
123 EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
124 EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
125 bgu,pt %XCC, 1b
126 add %o1, 0x1, %o1
127
128 add %o1, GLOBAL_SPARE, %o0
129
1302: cmp %g2, 0x0
131 and %o1, 0x7, %g1
132 be,pt %icc, 3f
133 alignaddr %o1, %g0, %o1
134
135 EX_LD(LOAD(ldd, %o1, %f4))
1361: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
137 add %o1, 0x8, %o1
138 subcc %g2, 0x8, %g2
139 faligndata %f4, %f6, %f0
140 EX_ST(STORE(std, %f0, %o0))
141 be,pn %icc, 3f
142 add %o0, 0x8, %o0
143
144 EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
145 add %o1, 0x8, %o1
146 subcc %g2, 0x8, %g2
147 faligndata %f6, %f4, %f2
148 EX_ST(STORE(std, %f2, %o0))
149 bne,pt %icc, 1b
150 add %o0, 0x8, %o0
151
1523: LOAD(prefetch, %o1 + 0x000, #one_read)
153 LOAD(prefetch, %o1 + 0x040, #one_read)
154 andn %o2, (0x40 - 1), GLOBAL_SPARE
155 LOAD(prefetch, %o1 + 0x080, #one_read)
156 LOAD(prefetch, %o1 + 0x0c0, #one_read)
157 LOAD(prefetch, %o1 + 0x100, #one_read)
158 EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
159 LOAD(prefetch, %o1 + 0x140, #one_read)
160 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
161 LOAD(prefetch, %o1 + 0x180, #one_read)
162 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
163 LOAD(prefetch, %o1 + 0x1c0, #one_read)
164 faligndata %f0, %f2, %f16
165 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
166 faligndata %f2, %f4, %f18
167 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
168 faligndata %f4, %f6, %f20
169 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
170 faligndata %f6, %f8, %f22
171
172 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
173 faligndata %f8, %f10, %f24
174 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
175 faligndata %f10, %f12, %f26
176 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
177
178 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
179 add %o1, 0x40, %o1
180 bgu,pt %XCC, 1f
181 srl GLOBAL_SPARE, 6, %o3
182 ba,pt %xcc, 2f
183 nop
184
185 .align 64
1861:
187 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
188 faligndata %f12, %f14, %f28
189 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
190 faligndata %f14, %f0, %f30
191 EX_ST(STORE_BLK(%f16, %o0))
192 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
193 faligndata %f0, %f2, %f16
194 add %o0, 0x40, %o0
195
196 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
197 faligndata %f2, %f4, %f18
198 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
199 faligndata %f4, %f6, %f20
200 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
201 subcc %o3, 0x01, %o3
202 faligndata %f6, %f8, %f22
203 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
204
205 faligndata %f8, %f10, %f24
206 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
207 LOAD(prefetch, %o1 + 0x1c0, #one_read)
208 faligndata %f10, %f12, %f26
209 bg,pt %XCC, 1b
210 add %o1, 0x40, %o1
211
212 /* Finally we copy the last full 64-byte block. */
2132:
214 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
215 faligndata %f12, %f14, %f28
216 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
217 faligndata %f14, %f0, %f30
218 EX_ST(STORE_BLK(%f16, %o0))
219 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
220 faligndata %f0, %f2, %f16
221 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
222 faligndata %f2, %f4, %f18
223 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
224 faligndata %f4, %f6, %f20
225 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
226 faligndata %f6, %f8, %f22
227 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
228 faligndata %f8, %f10, %f24
229 cmp %g1, 0
230 be,pt %XCC, 1f
231 add %o0, 0x40, %o0
232 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
2331: faligndata %f10, %f12, %f26
234 faligndata %f12, %f14, %f28
235 faligndata %f14, %f0, %f30
236 EX_ST(STORE_BLK(%f16, %o0))
237 add %o0, 0x40, %o0
238 add %o1, 0x40, %o1
239 membar #Sync
240
241 /* Now we copy the (len modulo 64) bytes at the end.
242 * Note how we borrow the %f0 loaded above.
243 *
244 * Also notice how this code is careful not to perform a
245 * load past the end of the src buffer.
246 */
247 and %o2, 0x3f, %o2
248 andcc %o2, 0x38, %g2
249 be,pn %XCC, 2f
250 subcc %g2, 0x8, %g2
251 be,pn %XCC, 2f
252 cmp %g1, 0
253
254 sub %o2, %g2, %o2
255 be,a,pt %XCC, 1f
256 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
257
2581: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
259 add %o1, 0x8, %o1
260 subcc %g2, 0x8, %g2
261 faligndata %f0, %f2, %f8
262 EX_ST(STORE(std, %f8, %o0))
263 be,pn %XCC, 2f
264 add %o0, 0x8, %o0
265 EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
266 add %o1, 0x8, %o1
267 subcc %g2, 0x8, %g2
268 faligndata %f2, %f0, %f8
269 EX_ST(STORE(std, %f8, %o0))
270 bne,pn %XCC, 1b
271 add %o0, 0x8, %o0
272
273 /* If anything is left, we copy it one byte at a time.
274 * Note that %g1 is (src & 0x3) saved above before the
275 * alignaddr was performed.
276 */
2772:
278 cmp %o2, 0
279 add %o1, %g1, %o1
280 VISExitHalf
281 be,pn %XCC, 85f
282 sub %o0, %o1, %o3
283
284 andcc %g1, 0x7, %g0
285 bne,pn %icc, 90f
286 andcc %o2, 0x8, %g0
287 be,pt %icc, 1f
288 nop
289 EX_LD(LOAD(ldx, %o1, %o5))
290 EX_ST(STORE(stx, %o5, %o1 + %o3))
291 add %o1, 0x8, %o1
292
2931: andcc %o2, 0x4, %g0
294 be,pt %icc, 1f
295 nop
296 EX_LD(LOAD(lduw, %o1, %o5))
297 EX_ST(STORE(stw, %o5, %o1 + %o3))
298 add %o1, 0x4, %o1
299
3001: andcc %o2, 0x2, %g0
301 be,pt %icc, 1f
302 nop
303 EX_LD(LOAD(lduh, %o1, %o5))
304 EX_ST(STORE(sth, %o5, %o1 + %o3))
305 add %o1, 0x2, %o1
306
3071: andcc %o2, 0x1, %g0
308 be,pt %icc, 85f
309 nop
310 EX_LD(LOAD(ldub, %o1, %o5))
311 ba,pt %xcc, 85f
312 EX_ST(STORE(stb, %o5, %o1 + %o3))
313
314 .align 64
31570: /* 16 < len <= 64 */
316 bne,pn %XCC, 75f
317 sub %o0, %o1, %o3
318
31972:
320 andn %o2, 0xf, GLOBAL_SPARE
321 and %o2, 0xf, %o2
3221: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
323 EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
324 EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
325 EX_ST(STORE(stx, %o5, %o1 + %o3))
326 add %o1, 0x8, %o1
327 EX_ST(STORE(stx, %g1, %o1 + %o3))
328 bgu,pt %XCC, 1b
329 add %o1, 0x8, %o1
33073: andcc %o2, 0x8, %g0
331 be,pt %XCC, 1f
332 nop
333 sub %o2, 0x8, %o2
334 EX_LD(LOAD(ldx, %o1, %o5))
335 EX_ST(STORE(stx, %o5, %o1 + %o3))
336 add %o1, 0x8, %o1
3371: andcc %o2, 0x4, %g0
338 be,pt %XCC, 1f
339 nop
340 sub %o2, 0x4, %o2
341 EX_LD(LOAD(lduw, %o1, %o5))
342 EX_ST(STORE(stw, %o5, %o1 + %o3))
343 add %o1, 0x4, %o1
3441: cmp %o2, 0
345 be,pt %XCC, 85f
346 nop
347 ba,pt %xcc, 90f
348 nop
349
35075:
351 andcc %o0, 0x7, %g1
352 sub %g1, 0x8, %g1
353 be,pn %icc, 2f
354 sub %g0, %g1, %g1
355 sub %o2, %g1, %o2
356
3571: subcc %g1, 1, %g1
358 EX_LD(LOAD(ldub, %o1, %o5))
359 EX_ST(STORE(stb, %o5, %o1 + %o3))
360 bgu,pt %icc, 1b
361 add %o1, 1, %o1
362
3632: add %o1, %o3, %o0
364 andcc %o1, 0x7, %g1
365 bne,pt %icc, 8f
366 sll %g1, 3, %g1
367
368 cmp %o2, 16
369 bgeu,pt %icc, 72b
370 nop
371 ba,a,pt %xcc, 73b
372
3738: mov 64, %o3
374 andn %o1, 0x7, %o1
375 EX_LD(LOAD(ldx, %o1, %g2))
376 sub %o3, %g1, %o3
377 andn %o2, 0x7, GLOBAL_SPARE
378 sllx %g2, %g1, %g2
3791: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
380 subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
381 add %o1, 0x8, %o1
382 srlx %g3, %o3, %o5
383 or %o5, %g2, %o5
384 EX_ST(STORE(stx, %o5, %o0))
385 add %o0, 0x8, %o0
386 bgu,pt %icc, 1b
387 sllx %g3, %g1, %g2
388
389 srl %g1, 3, %g1
390 andcc %o2, 0x7, %o2
391 be,pn %icc, 85f
392 add %o1, %g1, %o1
393 ba,pt %xcc, 90f
394 sub %o0, %o1, %o3
395
396 .align 64
39780: /* 0 < len <= 16 */
398 andcc %o3, 0x3, %g0
399 bne,pn %XCC, 90f
400 sub %o0, %o1, %o3
401
4021:
403 subcc %o2, 4, %o2
404 EX_LD(LOAD(lduw, %o1, %g1))
405 EX_ST(STORE(stw, %g1, %o1 + %o3))
406 bgu,pt %XCC, 1b
407 add %o1, 4, %o1
408
40985: retl
410 mov EX_RETVAL(%o4), %o0
411
412 .align 32
41390:
414 subcc %o2, 1, %o2
415 EX_LD(LOAD(ldub, %o1, %g1))
416 EX_ST(STORE(stb, %g1, %o1 + %o3))
417 bgu,pt %XCC, 90b
418 add %o1, 1, %o1
419 retl
420 mov EX_RETVAL(%o4), %o0
421
422 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S
new file mode 100644
index 000000000000..e2b6c5e4b95a
--- /dev/null
+++ b/arch/sparc64/lib/U3patch.S
@@ -0,0 +1,32 @@
1/* U3patch.S: Patch Ultra-I routines with Ultra-III variant.
2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4 */
5
6#define BRANCH_ALWAYS 0x10680000
7#define NOP 0x01000000
8#define ULTRA3_DO_PATCH(OLD, NEW) \
9 sethi %hi(NEW), %g1; \
10 or %g1, %lo(NEW), %g1; \
11 sethi %hi(OLD), %g2; \
12 or %g2, %lo(OLD), %g2; \
13 sub %g1, %g2, %g1; \
14 sethi %hi(BRANCH_ALWAYS), %g3; \
15 srl %g1, 2, %g1; \
16 or %g3, %lo(BRANCH_ALWAYS), %g3; \
17 or %g3, %g1, %g3; \
18 stw %g3, [%g2]; \
19 sethi %hi(NOP), %g3; \
20 or %g3, %lo(NOP), %g3; \
21 stw %g3, [%g2 + 0x4]; \
22 flush %g2;
23
 24 .globl cheetah_patch_copyops
 25 .type cheetah_patch_copyops,#function
/* Boot-time patcher: for each generic copy routine, ULTRA3_DO_PATCH
 * (defined above) computes the word offset to the UltraIII variant,
 * overwrites the routine's first instruction with a branch-always to
 * it and the second with a nop, then flushes the I-cache line.
 * Clobbers %g1-%g3.
 */
 26cheetah_patch_copyops:
 27 ULTRA3_DO_PATCH(memcpy, U3memcpy)
 28 ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
 29 ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
 30 retl
 31 nop
 32 .size cheetah_patch_copyops,.-cheetah_patch_copyops
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
new file mode 100644
index 000000000000..65e328d600a8
--- /dev/null
+++ b/arch/sparc64/lib/VISsave.S
@@ -0,0 +1,131 @@
1/* $Id: VISsave.S,v 1.6 2002/02/09 19:49:30 davem Exp $
2 * VISsave.S: Code for saving FPU register state for
3 * VIS routines. One should not call this directly,
4 * but use macros provided in <asm/visasm.h>.
5 *
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/asi.h>
10#include <asm/page.h>
11#include <asm/ptrace.h>
12#include <asm/visasm.h>
13#include <asm/thread_info.h>
14
15 .text
16 .globl VISenter, VISenterhalf
17
18 /* On entry: %o5=current FPRS value, %g7 is callers address */
19 /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
20
21 /* Nothing special need be done here to handle pre-emption, this
22 * FPU save/restore mechanism is already preemption safe.
23 */
24
/* VISenter: save the current task's FPU state into thread_info (%g6,
 * addressed via the TI_* offsets) so kernel VIS code may use the FP
 * registers.  Per the comment at the top of this file: on entry
 * %o5 = current FPRS value, %g7 = caller's return address; may clobber
 * %o5, %g1-%g3, %g7 and the condition codes.  Returns with jmpl %g7.
 */
 25 .align 32
 26VISenter:
/* %g1 = FP save depth from thread_info; nonzero means state was
 * already (partially) saved at an outer nesting level.
 */
 27 ldub [%g6 + TI_FPDEPTH], %g1
 28 brnz,a,pn %g1, 1f
 29 cmp %g1, 1
/* Depth 0: just mark no registers saved and stash the FSR. */
 30 stb %g0, [%g6 + TI_FPSAVED]
 31 stx %fsr, [%g6 + TI_XFSR]
/* Common quick-return: jump back to the caller's saved address. */
 329: jmpl %g7 + %g0, %g0
 33 nop
 341: bne,pn %icc, 2f
 35
 36 srl %g1, 1, %g1
/* Depth 1 path (also entered from VISenterhalf below): merge the
 * caller's FPRS bits into the saved-flags byte and record %gsr/%fsr.
 */
vis1: ldub [%g6 + TI_FPSAVED], %g3
 38 stx %fsr, [%g6 + TI_XFSR]
 39 or %g3, %o5, %g3
 40 stb %g3, [%g6 + TI_FPSAVED]
 41 rd %gsr, %g3
 42 clr %g1
 43 ba,pt %xcc, 3f
 44
 45 stx %g3, [%g6 + TI_GSR]
/* Depth >= 2: index the per-depth save slots by %g1. */
 462: add %g6, %g1, %g3
 47 cmp %o5, FPRS_DU
 48 be,pn %icc, 6f
 49 sll %g1, 3, %g1
 50 stb %o5, [%g3 + TI_FPSAVED]
 51 rd %gsr, %g2
 52 add %g6, %g1, %g3
 53 stx %g2, [%g3 + TI_GSR]
 54
 55 add %g6, %g1, %g2
 56 stx %fsr, [%g2 + TI_XFSR]
 57 sll %g1, 5, %g1
/* Dump the live register halves selected by FPRS_DL/FPRS_DU using
 * 64-byte block stores (ASI_BLK_P); nothing live -> return via 9b.
 */
 583: andcc %o5, FPRS_DL|FPRS_DU, %g0
 59 be,pn %icc, 9b
 60 add %g6, TI_FPREGS, %g2
 61 andcc %o5, FPRS_DL, %g0
 62 membar #StoreStore | #LoadStore
 63
 64 be,pn %icc, 4f
 65 add %g6, TI_FPREGS+0x40, %g3
 66 stda %f0, [%g2 + %g1] ASI_BLK_P
 67 stda %f16, [%g3 + %g1] ASI_BLK_P
 68 andcc %o5, FPRS_DU, %g0
 69 be,pn %icc, 5f
 704: add %g1, 128, %g1
 71 stda %f32, [%g2 + %g1] ASI_BLK_P
 72
 73 stda %f48, [%g3 + %g1] ASI_BLK_P
/* Block stores complete only after the #Sync. */
 745: membar #Sync
 75 jmpl %g7 + %g0, %g0
 76 nop
 77
/* FPRS == FPRS_DU exactly: only the upper half (%f32-%f62) is dirty. */
 786: ldub [%g3 + TI_FPSAVED], %o5
 79 or %o5, FPRS_DU, %o5
 80 add %g6, TI_FPREGS+0x80, %g2
 81 stb %o5, [%g3 + TI_FPSAVED]
 82
 83 sll %g1, 5, %g1
 84 add %g6, TI_FPREGS+0xc0, %g3
 85 wr %g0, FPRS_FEF, %fprs
 86 membar #StoreStore | #LoadStore
 87 stda %f32, [%g2 + %g1] ASI_BLK_P
 88 stda %f48, [%g3 + %g1] ASI_BLK_P
 89 membar #Sync
 90 jmpl %g7 + %g0, %g0
 91
 92 nop
93
/* VISenterhalf: variant of VISenter for callers that will only use the
 * lower half of the FP register file (note: only the FPRS_DL half is
 * block-stored below).  Same entry contract as VISenter: %o5 = FPRS,
 * %g7 = return address.  On the fast path it clears %o5 and returns
 * with FEF enabled in %fprs.
 */
 94 .align 32
 95VISenterhalf:
 96 ldub [%g6 + TI_FPDEPTH], %g1
 97 brnz,a,pn %g1, 1f
 98 cmp %g1, 1
/* Depth 0: mark nothing saved, stash FSR, enable FP and return. */
 99 stb %g0, [%g6 + TI_FPSAVED]
 100 stx %fsr, [%g6 + TI_XFSR]
 101 clr %o5
 102 jmpl %g7 + %g0, %g0
 103 wr %g0, FPRS_FEF, %fprs
 104
 1051: bne,pn %icc, 2f
 106 srl %g1, 1, %g1
/* Depth 1: reuse VISenter's vis1 path; %g7 is rewound by 8 so that
 * path's return lands correctly for this caller.
 */
 107 ba,pt %xcc, vis1
 108 sub %g7, 8, %g7
/* Depth >= 2: record flags (upper half masked off), %gsr and %fsr
 * into this depth's save slots.
 */
 1092: addcc %g6, %g1, %g3
 110 sll %g1, 3, %g1
 111 andn %o5, FPRS_DU, %g2
 112 stb %g2, [%g3 + TI_FPSAVED]
 113
 114 rd %gsr, %g2
 115 add %g6, %g1, %g3
 116 stx %g2, [%g3 + TI_GSR]
 117 add %g6, %g1, %g2
 118 stx %fsr, [%g2 + TI_XFSR]
 119 sll %g1, 5, %g1
/* Block-store %f0-%f31 only if the lower half is live. */
 1203: andcc %o5, FPRS_DL, %g0
 121 be,pn %icc, 4f
 122 add %g6, TI_FPREGS, %g2
 123
 124 membar #StoreStore | #LoadStore
 125 add %g6, TI_FPREGS+0x40, %g3
 126 stda %f0, [%g2 + %g1] ASI_BLK_P
 127 stda %f16, [%g3 + %g1] ASI_BLK_P
 128 membar #Sync
/* Return with FEF plus any still-live upper-half bit in %fprs. */
 1294: and %o5, FPRS_DU, %o5
 130 jmpl %g7 + %g0, %g0
 131 wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
new file mode 100644
index 000000000000..e528b8d1a3e6
--- /dev/null
+++ b/arch/sparc64/lib/atomic.S
@@ -0,0 +1,139 @@
1/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
2 * atomic.S: These things are too big to do inline.
3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <asm/asi.h>
9
10 /* On SMP we need to use memory barriers to ensure
11 * correct memory operation ordering, nop these out
12 * for uniprocessor.
13 */
14#ifdef CONFIG_SMP
15#define ATOMIC_PRE_BARRIER membar #StoreLoad | #LoadLoad
16#define ATOMIC_POST_BARRIER membar #StoreLoad | #StoreStore
17#else
18#define ATOMIC_PRE_BARRIER nop
19#define ATOMIC_POST_BARRIER nop
20#endif
21
22 .text
23
24 /* Two versions of the atomic routines, one that
25 * does not return a value and does not perform
26 * memory barriers, and a second which returns
27 * a value and does the barriers.
28 */
/* atomic_add: 32-bit atomic add; no return value and (per the file
 * comment above) no memory barriers.  Standard cas retry loop.
 */
 29 .globl atomic_add
 30 .type atomic_add,#function
 31atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
 321: lduw [%o1], %g1
 33 add %g1, %o0, %g7
/* cas leaves the memory's old value in %g7; a mismatch with the
 * %g1 snapshot means another CPU raced us -- reload and retry.
 */
 34 cas [%o1], %g1, %g7
 35 cmp %g1, %g7
 36 bne,pn %icc, 1b
 37 nop
 38 retl
 39 nop
 40 .size atomic_add, .-atomic_add
41
/* atomic_sub: 32-bit atomic subtract; no return value, no barriers.
 * Mirror of atomic_add with sub in place of add.
 */
 42 .globl atomic_sub
 43 .type atomic_sub,#function
 44atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 451: lduw [%o1], %g1
 46 sub %g1, %o0, %g7
/* Retry until the cas observes our unchanged snapshot in %g1. */
 47 cas [%o1], %g1, %g7
 48 cmp %g1, %g7
 49 bne,pn %icc, 1b
 50 nop
 51 retl
 52 nop
 53 .size atomic_sub, .-atomic_sub
54
/* atomic_add_ret: 32-bit atomic add that returns the new value, with
 * the SMP memory barriers (ATOMIC_PRE/POST_BARRIER above).
 */
 55 .globl atomic_add_ret
 56 .type atomic_add_ret,#function
 57atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 58 ATOMIC_PRE_BARRIER
 591: lduw [%o1], %g1
 60 add %g1, %o0, %g7
 61 cas [%o1], %g1, %g7
 62 cmp %g1, %g7
/* On success %g7 holds the OLD value (cas semantics); the delay slot
 * recomputes old + increment to form the value to return.
 */
 63 bne,pn %icc, 1b
 64 add %g7, %o0, %g7
 65 ATOMIC_POST_BARRIER
 66 retl
/* sra by 0 sign-extends the 32-bit result into the 64-bit %o0. */
 67 sra %g7, 0, %o0
 68 .size atomic_add_ret, .-atomic_add_ret
69
/* atomic_sub_ret: 32-bit atomic subtract returning the new value,
 * with SMP barriers.  Mirror of atomic_add_ret.
 */
 70 .globl atomic_sub_ret
 71 .type atomic_sub_ret,#function
 72atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 73 ATOMIC_PRE_BARRIER
 741: lduw [%o1], %g1
 75 sub %g1, %o0, %g7
 76 cas [%o1], %g1, %g7
 77 cmp %g1, %g7
/* Delay slot rebuilds old - decrement from the old value in %g7. */
 78 bne,pn %icc, 1b
 79 sub %g7, %o0, %g7
 80 ATOMIC_POST_BARRIER
 81 retl
 82 sra %g7, 0, %o0
 83 .size atomic_sub_ret, .-atomic_sub_ret
84
/* atomic64_add: 64-bit atomic add; no return value, no barriers.
 * Same cas loop as atomic_add but with ldx/casx on %xcc.
 */
 85 .globl atomic64_add
 86 .type atomic64_add,#function
 87atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
 881: ldx [%o1], %g1
 89 add %g1, %o0, %g7
 90 casx [%o1], %g1, %g7
 91 cmp %g1, %g7
 92 bne,pn %xcc, 1b
 93 nop
 94 retl
 95 nop
 96 .size atomic64_add, .-atomic64_add
97
/* atomic64_sub: 64-bit atomic subtract; no return value, no barriers. */
 98 .globl atomic64_sub
 99 .type atomic64_sub,#function
 100atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 1011: ldx [%o1], %g1
 102 sub %g1, %o0, %g7
 103 casx [%o1], %g1, %g7
 104 cmp %g1, %g7
 105 bne,pn %xcc, 1b
 106 nop
 107 retl
 108 nop
 109 .size atomic64_sub, .-atomic64_sub
110
/* atomic64_add_ret: 64-bit atomic add returning the new value, with
 * SMP barriers.  Full 64-bit result, so a plain mov (no sign-extend).
 */
 111 .globl atomic64_add_ret
 112 .type atomic64_add_ret,#function
 113atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 114 ATOMIC_PRE_BARRIER
 1151: ldx [%o1], %g1
 116 add %g1, %o0, %g7
 117 casx [%o1], %g1, %g7
 118 cmp %g1, %g7
/* Delay slot turns the old value left in %g7 into old + increment. */
 119 bne,pn %xcc, 1b
 120 add %g7, %o0, %g7
 121 ATOMIC_POST_BARRIER
 122 retl
 123 mov %g7, %o0
 124 .size atomic64_add_ret, .-atomic64_add_ret
125
/* atomic64_sub_ret: 64-bit atomic subtract returning the new value,
 * with SMP barriers.  Mirror of atomic64_add_ret.
 */
 126 .globl atomic64_sub_ret
 127 .type atomic64_sub_ret,#function
 128atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 129 ATOMIC_PRE_BARRIER
 1301: ldx [%o1], %g1
 131 sub %g1, %o0, %g7
 132 casx [%o1], %g1, %g7
 133 cmp %g1, %g7
 134 bne,pn %xcc, 1b
 135 sub %g7, %o0, %g7
 136 ATOMIC_POST_BARRIER
 137 retl
 138 mov %g7, %o0
 139 .size atomic64_sub_ret, .-atomic64_sub_ret
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
new file mode 100644
index 000000000000..886dcd2b376a
--- /dev/null
+++ b/arch/sparc64/lib/bitops.S
@@ -0,0 +1,145 @@
1/* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $
2 * bitops.S: Sparc64 atomic bit operations.
3 *
4 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <asm/asi.h>
9
10 /* On SMP we need to use memory barriers to ensure
11 * correct memory operation ordering, nop these out
12 * for uniprocessor.
13 */
14#ifdef CONFIG_SMP
15#define BITOP_PRE_BARRIER membar #StoreLoad | #LoadLoad
16#define BITOP_POST_BARRIER membar #StoreLoad | #StoreStore
17#else
18#define BITOP_PRE_BARRIER nop
19#define BITOP_POST_BARRIER nop
20#endif
21
22 .text
23
/* test_and_set_bit: atomically set bit nr in the bitmap at addr and
 * return 1 if it was previously set, else 0.  Address math:
 * %g1 = nr / 64 (word index), %g3 = byte offset of that word,
 * %o2 = 1 << (nr & 63) bit mask.  casx retry loop with the SMP
 * barriers defined above.
 */
 24 .globl test_and_set_bit
 25 .type test_and_set_bit,#function
 26test_and_set_bit: /* %o0=nr, %o1=addr */
 27 BITOP_PRE_BARRIER
 28 srlx %o0, 6, %g1
 29 mov 1, %o2
 30 sllx %g1, 3, %g3
 31 and %o0, 63, %g2
 32 sllx %o2, %g2, %o2
 33 add %o1, %g3, %o1
 341: ldx [%o1], %g7
 35 or %g7, %o2, %g1
 36 casx [%o1], %g7, %g1
 37 cmp %g7, %g1
/* Delay slot captures the OLD bit from the pre-cas word in %g7. */
 38 bne,pn %xcc, 1b
 39 and %g7, %o2, %g2
 40 BITOP_POST_BARRIER
 41 clr %o0
 42 retl
/* movrne: set %o0 = 1 iff the old bit (%g2) was nonzero. */
 43 movrne %g2, 1, %o0
 44 .size test_and_set_bit, .-test_and_set_bit
45
/* test_and_clear_bit: atomically clear bit nr at addr and return its
 * previous value (1/0).  Same structure as test_and_set_bit with
 * andn (clear) in place of or (set).
 */
 46 .globl test_and_clear_bit
 47 .type test_and_clear_bit,#function
 48test_and_clear_bit: /* %o0=nr, %o1=addr */
 49 BITOP_PRE_BARRIER
 50 srlx %o0, 6, %g1
 51 mov 1, %o2
 52 sllx %g1, 3, %g3
 53 and %o0, 63, %g2
 54 sllx %o2, %g2, %o2
 55 add %o1, %g3, %o1
 561: ldx [%o1], %g7
 57 andn %g7, %o2, %g1
 58 casx [%o1], %g7, %g1
 59 cmp %g7, %g1
 60 bne,pn %xcc, 1b
 61 and %g7, %o2, %g2
 62 BITOP_POST_BARRIER
 63 clr %o0
 64 retl
 65 movrne %g2, 1, %o0
 66 .size test_and_clear_bit, .-test_and_clear_bit
67
/* test_and_change_bit: atomically toggle bit nr at addr and return its
 * previous value (1/0).  Same structure with xor for the toggle.
 */
 68 .globl test_and_change_bit
 69 .type test_and_change_bit,#function
 70test_and_change_bit: /* %o0=nr, %o1=addr */
 71 BITOP_PRE_BARRIER
 72 srlx %o0, 6, %g1
 73 mov 1, %o2
 74 sllx %g1, 3, %g3
 75 and %o0, 63, %g2
 76 sllx %o2, %g2, %o2
 77 add %o1, %g3, %o1
 781: ldx [%o1], %g7
 79 xor %g7, %o2, %g1
 80 casx [%o1], %g7, %g1
 81 cmp %g7, %g1
 82 bne,pn %xcc, 1b
 83 and %g7, %o2, %g2
 84 BITOP_POST_BARRIER
 85 clr %o0
 86 retl
 87 movrne %g2, 1, %o0
 88 .size test_and_change_bit, .-test_and_change_bit
89
/* set_bit: atomically set bit nr at addr.  No return value and no
 * barriers (matching the non-_ret atomics in this library).
 */
 90 .globl set_bit
 91 .type set_bit,#function
 92set_bit: /* %o0=nr, %o1=addr */
 93 srlx %o0, 6, %g1
 94 mov 1, %o2
 95 sllx %g1, 3, %g3
 96 and %o0, 63, %g2
 97 sllx %o2, %g2, %o2
 98 add %o1, %g3, %o1
 991: ldx [%o1], %g7
 100 or %g7, %o2, %g1
 101 casx [%o1], %g7, %g1
 102 cmp %g7, %g1
 103 bne,pn %xcc, 1b
 104 nop
 105 retl
 106 nop
 107 .size set_bit, .-set_bit
108
/* clear_bit: atomically clear bit nr at addr.  No return, no barriers. */
 109 .globl clear_bit
 110 .type clear_bit,#function
 111clear_bit: /* %o0=nr, %o1=addr */
 112 srlx %o0, 6, %g1
 113 mov 1, %o2
 114 sllx %g1, 3, %g3
 115 and %o0, 63, %g2
 116 sllx %o2, %g2, %o2
 117 add %o1, %g3, %o1
 1181: ldx [%o1], %g7
 119 andn %g7, %o2, %g1
 120 casx [%o1], %g7, %g1
 121 cmp %g7, %g1
 122 bne,pn %xcc, 1b
 123 nop
 124 retl
 125 nop
 126 .size clear_bit, .-clear_bit
127
/* change_bit: atomically toggle bit nr at addr.  No return, no barriers. */
 128 .globl change_bit
 129 .type change_bit,#function
 130change_bit: /* %o0=nr, %o1=addr */
 131 srlx %o0, 6, %g1
 132 mov 1, %o2
 133 sllx %g1, 3, %g3
 134 and %o0, 63, %g2
 135 sllx %o2, %g2, %o2
 136 add %o1, %g3, %o1
 1371: ldx [%o1], %g7
 138 xor %g7, %o2, %g1
 139 casx [%o1], %g7, %g1
 140 cmp %g7, %g1
 141 bne,pn %xcc, 1b
 142 nop
 143 retl
 144 nop
 145 .size change_bit, .-change_bit
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S
new file mode 100644
index 000000000000..21a933ffb7c2
--- /dev/null
+++ b/arch/sparc64/lib/bzero.S
@@ -0,0 +1,158 @@
1/* bzero.S: Simple prefetching memset, bzero, and clear_user
2 * implementations.
3 *
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
5 */
6
7 .text
8
/* memset/__memset: replicate the low byte of the pattern into all
 * 8 bytes of %o2, move len into %o1, then fall into the shared store
 * code.  __bzero enters the same code with a zero pattern.  All three
 * return the original buffer pointer (saved in %o3).
 */
 9 .globl __memset
 10 .type __memset, #function
 11__memset: /* %o0=buf, %o1=pat, %o2=len */
 12
 13 .globl memset
 14 .type memset, #function
 15memset: /* %o0=buf, %o1=pat, %o2=len */
 16 and %o1, 0xff, %o3
 17 mov %o2, %o1
 18 sllx %o3, 8, %g1
 19 or %g1, %o3, %o2
 20 sllx %o2, 16, %g1
 21 or %g1, %o2, %o2
 22 sllx %o2, 32, %g1
 23 ba,pt %xcc, 1f
 24 or %g1, %o2, %o2
 25
 26 .globl __bzero
 27 .type __bzero, #function
 28__bzero: /* %o0=buf, %o1=len */
 29 clr %o2
/* Common body: tiny path for len < 16, otherwise byte-align to 4,
 * word-align to 8, then a prefetched 64-byte unrolled stx loop.
 */
 301: mov %o0, %o3
 31 brz,pn %o1, __bzero_done
 32 cmp %o1, 16
 33 bl,pn %icc, __bzero_tiny
 34 prefetch [%o0 + 0x000], #n_writes
 35 andcc %o0, 0x3, %g0
 36 be,pt %icc, 2f
 371: stb %o2, [%o0 + 0x00]
 38 add %o0, 1, %o0
 39 andcc %o0, 0x3, %g0
 40 bne,pn %icc, 1b
 41 sub %o1, 1, %o1
/* The stw in the delay slot executes on BOTH paths; when %o0 is
 * already 8-byte aligned the stored word is simply rewritten by the
 * stx loop below (len >= 16 is guaranteed here).
 */
 422: andcc %o0, 0x7, %g0
 43 be,pt %icc, 3f
 44 stw %o2, [%o0 + 0x00]
 45 sub %o1, 4, %o1
 46 add %o0, 4, %o0
/* %o4 = full 64-byte chunks, %g1 = trailing 8-byte words,
 * %o1 = final trailing bytes.
 */
 473: and %o1, 0x38, %g1
 48 cmp %o1, 0x40
 49 andn %o1, 0x3f, %o4
 50 bl,pn %icc, 5f
 51 and %o1, 0x7, %o1
 52 prefetch [%o0 + 0x040], #n_writes
 53 prefetch [%o0 + 0x080], #n_writes
 54 prefetch [%o0 + 0x0c0], #n_writes
 55 prefetch [%o0 + 0x100], #n_writes
 56 prefetch [%o0 + 0x140], #n_writes
 574: prefetch [%o0 + 0x180], #n_writes
 58 stx %o2, [%o0 + 0x00]
 59 stx %o2, [%o0 + 0x08]
 60 stx %o2, [%o0 + 0x10]
 61 stx %o2, [%o0 + 0x18]
 62 stx %o2, [%o0 + 0x20]
 63 stx %o2, [%o0 + 0x28]
 64 stx %o2, [%o0 + 0x30]
 65 stx %o2, [%o0 + 0x38]
 66 subcc %o4, 0x40, %o4
 67 bne,pt %icc, 4b
 68 add %o0, 0x40, %o0
 69 brz,pn %g1, 6f
 70 nop
 715: stx %o2, [%o0 + 0x00]
 72 subcc %g1, 8, %g1
 73 bne,pt %icc, 5b
 74 add %o0, 0x8, %o0
 756: brz,pt %o1, __bzero_done
 76 nop
 77__bzero_tiny:
 781: stb %o2, [%o0 + 0x00]
 79 subcc %o1, 1, %o1
 80 bne,pt %icc, 1b
 81 add %o0, 1, %o0
 82__bzero_done:
/* Return the caller's original buffer pointer. */
 83 retl
 84 mov %o3, %o0
 85 .size __bzero, .-__bzero
 86 .size __memset, .-__memset
 87 .size memset, .-memset
88
89#define EX_ST(x,y) \
9098: x,y; \
91 .section .fixup; \
92 .align 4; \
9399: retl; \
94 mov %o1, %o0; \
95 .section __ex_table; \
96 .align 4; \
97 .word 98b, 99b; \
98 .text; \
99 .align 4;
100
/* __bzero_noasi: same algorithm as __bzero above, but every store and
 * prefetch goes through the %asi register with an exception-table
 * entry (EX_ST macro above).  On a fault the fixup returns the number
 * of bytes NOT cleared (%o1) in %o0; returns 0 on success.
 */
 101 .globl __bzero_noasi
 102 .type __bzero_noasi, #function
 103__bzero_noasi: /* %o0=buf, %o1=len */
 104 brz,pn %o1, __bzero_noasi_done
 105 cmp %o1, 16
 106 bl,pn %icc, __bzero_noasi_tiny
 107 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
/* Byte-align to 4, then word-align to 8 (delay-slot stwa executes on
 * both paths, harmlessly rewritten by the main loop when aligned).
 */
 108 andcc %o0, 0x3, %g0
 109 be,pt %icc, 2f
 1101: EX_ST(stba %g0, [%o0 + 0x00] %asi)
 111 add %o0, 1, %o0
 112 andcc %o0, 0x3, %g0
 113 bne,pn %icc, 1b
 114 sub %o1, 1, %o1
 1152: andcc %o0, 0x7, %g0
 116 be,pt %icc, 3f
 117 EX_ST(stwa %g0, [%o0 + 0x00] %asi)
 118 sub %o1, 4, %o1
 119 add %o0, 4, %o0
 1203: and %o1, 0x38, %g1
 121 cmp %o1, 0x40
 122 andn %o1, 0x3f, %o4
 123 bl,pn %icc, 5f
 124 and %o1, 0x7, %o1
 125 EX_ST(prefetcha [%o0 + 0x040] %asi, #n_writes)
 126 EX_ST(prefetcha [%o0 + 0x080] %asi, #n_writes)
 127 EX_ST(prefetcha [%o0 + 0x0c0] %asi, #n_writes)
 128 EX_ST(prefetcha [%o0 + 0x100] %asi, #n_writes)
 129 EX_ST(prefetcha [%o0 + 0x140] %asi, #n_writes)
/* Main loop: clear 64 bytes per iteration with lookahead prefetch. */
 1304: EX_ST(prefetcha [%o0 + 0x180] %asi, #n_writes)
 131 EX_ST(stxa %g0, [%o0 + 0x00] %asi)
 132 EX_ST(stxa %g0, [%o0 + 0x08] %asi)
 133 EX_ST(stxa %g0, [%o0 + 0x10] %asi)
 134 EX_ST(stxa %g0, [%o0 + 0x18] %asi)
 135 EX_ST(stxa %g0, [%o0 + 0x20] %asi)
 136 EX_ST(stxa %g0, [%o0 + 0x28] %asi)
 137 EX_ST(stxa %g0, [%o0 + 0x30] %asi)
 138 EX_ST(stxa %g0, [%o0 + 0x38] %asi)
 139 subcc %o4, 0x40, %o4
 140 bne,pt %icc, 4b
 141 add %o0, 0x40, %o0
 142 brz,pn %g1, 6f
 143 nop
 1445: EX_ST(stxa %g0, [%o0 + 0x00] %asi)
 145 subcc %g1, 8, %g1
 146 bne,pt %icc, 5b
 147 add %o0, 0x8, %o0
 1486: brz,pt %o1, __bzero_noasi_done
 149 nop
 150__bzero_noasi_tiny:
 1511: EX_ST(stba %g0, [%o0 + 0x00] %asi)
 152 subcc %o1, 1, %o1
 153 bne,pt %icc, 1b
 154 add %o0, 1, %o0
 155__bzero_noasi_done:
 156 retl
 157 clr %o0
 158 .size __bzero_noasi, .-__bzero_noasi
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
new file mode 100644
index 000000000000..ba9cd3ccc2b2
--- /dev/null
+++ b/arch/sparc64/lib/checksum.S
@@ -0,0 +1,172 @@
1/* checksum.S: Sparc V9 optimized checksum code.
2 *
3 * Copyright(C) 1995 Linus Torvalds
4 * Copyright(C) 1995 Miguel de Icaza
5 * Copyright(C) 1996, 2000 David S. Miller
6 * Copyright(C) 1997 Jakub Jelinek
7 *
8 * derived from:
9 * Linux/Alpha checksum c-code
10 * Linux/ix86 inline checksum assembly
11 * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
12 * David Mosberger-Tang for optimized reference c-code
13 * BSD4.4 portable checksum routine
14 */
15
16 .text
17
/* Alignment slow path for csum_partial below: consume a leading odd
 * byte and/or halfword into the accumulator %o4 so the main loop can
 * run on a 4-byte-aligned buffer.  Entered with the condition codes
 * set by "andcc %o0, 0x1" in csum_partial's delay slot, so the be
 * below tests "address was NOT odd".
 */
 18csum_partial_fix_alignment:
 19 /* We checked for zero length already, so there must be
 20 * at least one byte.
 21 */
 22 be,pt %icc, 1f
 23 nop
 24 ldub [%o0 + 0x00], %o4
 25 add %o0, 1, %o0
 26 sub %o1, 1, %o1
 271: andcc %o0, 0x2, %g0
 28 be,pn %icc, csum_partial_post_align
 29 cmp %o1, 2
 30 blu,pn %icc, csum_partial_end_cruft
 31 nop
 32 lduh [%o0 + 0x00], %o5
 33 add %o0, 2, %o0
 34 sub %o1, 2, %o1
 35 ba,pt %xcc, csum_partial_post_align
 36 add %o5, %o4, %o4
 37
/* csum_partial: ones-complement partial checksum of buff[0..len),
 * folded into the running sum %o2; result returned in %o0.
 * Accumulates in 64-bit %o4, then folds 64->32->16 below.
 */
 38 .align 32
 39 .globl csum_partial
 40csum_partial: /* %o0=buff, %o1=len, %o2=sum */
 41 prefetch [%o0 + 0x000], #n_reads
 42 clr %o4
 43 prefetch [%o0 + 0x040], #n_reads
 44 brz,pn %o1, csum_partial_finish
 45 andcc %o0, 0x3, %g0
 46
 47 /* We "remember" whether the lowest bit in the address
 48 * was set in %g7. Because if it is, we have to swap
 49 * upper and lower 8 bit fields of the sum we calculate.
 50 */
 51 bne,pn %icc, csum_partial_fix_alignment
 52 andcc %o0, 0x1, %g7
 53
/* %o3 = number of bytes handled by the 64-byte unrolled loop. */
 54csum_partial_post_align:
 55 prefetch [%o0 + 0x080], #n_reads
 56 andncc %o1, 0x3f, %o3
 57
 58 prefetch [%o0 + 0x0c0], #n_reads
 59 sub %o1, %o3, %o1
 60 brz,pn %o3, 2f
 61 prefetch [%o0 + 0x100], #n_reads
 62
 63 /* So that we don't need to use the non-pairing
 64 * add-with-carry instructions we accumulate 32-bit
 65 * values into a 64-bit register. At the end of the
 66 * loop we fold it down to 32-bits and so on.
 67 */
 68 prefetch [%o0 + 0x140], #n_reads
 691: lduw [%o0 + 0x00], %o5
 70 lduw [%o0 + 0x04], %g1
 71 lduw [%o0 + 0x08], %g2
 72 add %o4, %o5, %o4
 73 lduw [%o0 + 0x0c], %g3
 74 add %o4, %g1, %o4
 75 lduw [%o0 + 0x10], %o5
 76 add %o4, %g2, %o4
 77 lduw [%o0 + 0x14], %g1
 78 add %o4, %g3, %o4
 79 lduw [%o0 + 0x18], %g2
 80 add %o4, %o5, %o4
 81 lduw [%o0 + 0x1c], %g3
 82 add %o4, %g1, %o4
 83 lduw [%o0 + 0x20], %o5
 84 add %o4, %g2, %o4
 85 lduw [%o0 + 0x24], %g1
 86 add %o4, %g3, %o4
 87 lduw [%o0 + 0x28], %g2
 88 add %o4, %o5, %o4
 89 lduw [%o0 + 0x2c], %g3
 90 add %o4, %g1, %o4
 91 lduw [%o0 + 0x30], %o5
 92 add %o4, %g2, %o4
 93 lduw [%o0 + 0x34], %g1
 94 add %o4, %g3, %o4
 95 lduw [%o0 + 0x38], %g2
 96 add %o4, %o5, %o4
 97 lduw [%o0 + 0x3c], %g3
 98 add %o4, %g1, %o4
 99 prefetch [%o0 + 0x180], #n_reads
 100 add %o4, %g2, %o4
 101 subcc %o3, 0x40, %o3
 102 add %o0, 0x40, %o0
 103 bne,pt %icc, 1b
 104 add %o4, %g3, %o4
 105
/* Remaining full 32-bit words (up to 15 of them). */
 1062: and %o1, 0x3c, %o3
 107 brz,pn %o3, 2f
 108 sub %o1, %o3, %o1
 1091: lduw [%o0 + 0x00], %o5
 110 subcc %o3, 0x4, %o3
 111 add %o0, 0x4, %o0
 112 bne,pt %icc, 1b
 113 add %o4, %o5, %o4
 114
 1152:
 116 /* fold 64-->32 */
 117 srlx %o4, 32, %o5
 118 srl %o4, 0, %o4
 119 add %o4, %o5, %o4
 120 srlx %o4, 32, %o5
 121 srl %o4, 0, %o4
 122 add %o4, %o5, %o4
 123
 124 /* fold 32-->16 */
 125 sethi %hi(0xffff0000), %g1
 126 srl %o4, 16, %o5
 127 andn %o4, %g1, %g2
 128 add %o5, %g2, %o4
 129 srl %o4, 16, %o5
 130 andn %o4, %g1, %g2
 131 add %o5, %g2, %o4
 132
/* Final 0-3 bytes: trailing halfword, then a trailing byte which is
 * shifted into the high lane before accumulation.
 */
 133csum_partial_end_cruft:
 134 /* %o4 has the 16-bit sum we have calculated so-far. */
 135 cmp %o1, 2
 136 blu,pt %icc, 1f
 137 nop
 138 lduh [%o0 + 0x00], %o5
 139 sub %o1, 2, %o1
 140 add %o0, 2, %o0
 141 add %o4, %o5, %o4
 1421: brz,pt %o1, 1f
 143 nop
 144 ldub [%o0 + 0x00], %o5
 145 sub %o1, 1, %o1
 146 add %o0, 1, %o0
 147 sllx %o5, 8, %o5
 148 add %o4, %o5, %o4
 1491:
 150 /* fold 32-->16 */
 151 sethi %hi(0xffff0000), %g1
 152 srl %o4, 16, %o5
 153 andn %o4, %g1, %g2
 154 add %o5, %g2, %o4
 155 srl %o4, 16, %o5
 156 andn %o4, %g1, %g2
 157 add %o5, %g2, %o4
 158
 1591: brz,pt %g7, 1f
 160 nop
 161
 162 /* We started with an odd byte, byte-swap the result. */
 163 srl %o4, 8, %o5
 164 and %o4, 0xff, %g1
 165 sll %g1, 8, %g1
 166 or %o5, %g1, %o4
 167
 1681: add %o2, %o4, %o2
 169
 170csum_partial_finish:
 171 retl
 172 mov %o2, %o0
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
new file mode 100644
index 000000000000..b59884ef051d
--- /dev/null
+++ b/arch/sparc64/lib/clear_page.S
@@ -0,0 +1,105 @@
1/* clear_page.S: UltraSparc optimized clear page.
2 *
3 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
5 */
6
7#include <asm/visasm.h>
8#include <asm/thread_info.h>
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/spitfire.h>
12
13 /* What we used to do was lock a TLB entry into a specific
14 * TLB slot, clear the page with interrupts disabled, then
15 * restore the original TLB entry. This was great for
16 * disturbing the TLB as little as possible, but it meant
17 * we had to keep interrupts disabled for a long time.
18 *
19 * Now, we simply use the normal TLB loading mechanism,
20 * and this makes the cpu choose a slot all by itself.
21 * Then we do a normal TLB flush on exit. We need only
22 * disable preemption during the clear.
23 */
24
25#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
26#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
27
28 .text
29
/* _clear_page: clear a kernel page.  %o4 = 0 tells the common tail
 * that no temporary TLB mapping was set up (so no demap/preempt-count
 * restore on exit).
 */
 30 .globl _clear_page
 31_clear_page: /* %o0=dest */
 32 ba,pt %xcc, clear_page_common
 33 clr %o4
 34
 35 /* This thing is pretty important, it shows up
 36 * on the profiles via do_anonymous_page().
 37 */
 38 .align 32
 39 .globl clear_user_page
/* clear_user_page: build a TTE for the page's physical address and
 * load it into the DTLB at TLBTEMP_BASE, choosing a virtual address
 * that matches the user vaddr's D-cache alias bit.  Preemption is
 * disabled by bumping TI_PRE_COUNT, and interrupts are masked while
 * the TLB entry is written.  %o4 = 1 marks that cleanup is needed.
 */
 40clear_user_page: /* %o0=dest, %o1=vaddr */
 41 lduw [%g6 + TI_PRE_COUNT], %o2
 42 sethi %uhi(PAGE_OFFSET), %g2
 43 sethi %hi(PAGE_SIZE), %o4
 44
 45 sllx %g2, 32, %g2
 46 sethi %uhi(TTE_BITS_TOP), %g3
 47
 48 sllx %g3, 32, %g3
 49 sub %o0, %g2, %g1 ! paddr
 50
 51 or %g3, TTE_BITS_BOTTOM, %g3
 52 and %o1, %o4, %o0 ! vaddr D-cache alias bit
 53
 54 or %g1, %g3, %g1 ! TTE data
 55 sethi %hi(TLBTEMP_BASE), %o3
 56
 57 add %o2, 1, %o4
 58 add %o0, %o3, %o0 ! TTE vaddr
 59
 60 /* Disable preemption. */
 61 mov TLB_TAG_ACCESS, %g3
 62 stw %o4, [%g6 + TI_PRE_COUNT]
 63
 64 /* Load TLB entry. */
 65 rdpr %pstate, %o4
 66 wrpr %o4, PSTATE_IE, %pstate
 67 stxa %o0, [%g3] ASI_DMMU
 68 stxa %g1, [%g0] ASI_DTLB_DATA_IN
 69 flush %g6
 70 wrpr %o4, 0x0, %pstate
 71
 72 mov 1, %o4
 73
/* Common tail: zero the page with 64-byte VIS block stores.  The
 * fzero/faddd/fmuld dance just fills %f0-%f14 with zeros (0+0 and
 * 0*0) for the stda below.
 */
 74clear_page_common:
 75 VISEntryHalf
 76 membar #StoreLoad | #StoreStore | #LoadStore
 77 fzero %f0
 78 sethi %hi(PAGE_SIZE/64), %o1
 79 mov %o0, %g1 ! remember vaddr for tlbflush
 80 fzero %f2
 81 or %o1, %lo(PAGE_SIZE/64), %o1
 82 faddd %f0, %f2, %f4
 83 fmuld %f0, %f2, %f6
 84 faddd %f0, %f2, %f8
 85 fmuld %f0, %f2, %f10
 86
 87 faddd %f0, %f2, %f12
 88 fmuld %f0, %f2, %f14
 891: stda %f0, [%o0 + %g0] ASI_BLK_P
 90 subcc %o1, 1, %o1
 91 bne,pt %icc, 1b
 92 add %o0, 0x40, %o0
 93 membar #Sync
 94 VISExitHalf
 95
/* Only the clear_user_page path (%o4 != 0) must demap the temporary
 * TLB entry and restore the preemption count.
 */
 96 brz,pn %o4, out
 97 nop
 98
 99 stxa %g0, [%g1] ASI_DMMU_DEMAP
 100 membar #Sync
 101 stw %o2, [%g6 + TI_PRE_COUNT]
 102
 103out: retl
 104 nop
105
diff --git a/arch/sparc64/lib/copy_in_user.S b/arch/sparc64/lib/copy_in_user.S
new file mode 100644
index 000000000000..816076c0bc06
--- /dev/null
+++ b/arch/sparc64/lib/copy_in_user.S
@@ -0,0 +1,119 @@
1/* copy_in_user.S: Copy from userspace to userspace.
2 *
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */
5
6#include <asm/asi.h>
7
8#define XCC xcc
9
10#define EX(x,y) \
1198: x,y; \
12 .section .fixup; \
13 .align 4; \
1499: retl; \
15 mov 1, %o0; \
16 .section __ex_table; \
17 .align 4; \
18 .word 98b, 99b; \
19 .text; \
20 .align 4;
21
22 .register %g2,#scratch
23 .register %g3,#scratch
24
25 .text
26 .align 32
27
28 /* Don't try to get too fancy here, just nice and
29 * simple. This is predominantly used for well aligned
30 * small copies in the compat layer. It is also used
31 * to copy register windows around during thread cloning.
32 */
33
/* ___copy_in_user: copy len bytes from user src to user dst, all
 * accesses through ASI_AIUS with exception-table entries (EX macro
 * above).  Returns 0 on success; the EX fixup returns 1 on a fault.
 * Falls back to memcpy_user_stub when %asi shows KERNEL_DS.
 * %o3 holds dst - src so stores address as [src + %o3].
 */
 34 .globl ___copy_in_user
 35 .type ___copy_in_user,#function
 36___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
 37 /* Writing to %asi is _expensive_ so we hardcode it.
 38 * Reading %asi to check for KERNEL_DS is comparatively
 39 * cheap.
 40 */
 41 rd %asi, %g1
 42 cmp %g1, ASI_AIUS
 43 bne,pn %icc, memcpy_user_stub
 44 nop
 45
 46 cmp %o2, 0
 47 be,pn %XCC, 85f
 48 or %o0, %o1, %o3
 49 cmp %o2, 16
 50 bleu,a,pn %XCC, 80f
 51 or %o3, %o2, %o3
 52
 53 /* 16 < len <= 64 */
/* Both pointers 8-byte aligned: 8 bytes at a time, then a trailing
 * word, else fall through to the byte loop at 90.
 */
 54 andcc %o3, 0x7, %g0
 55 bne,pn %XCC, 90f
 56 sub %o0, %o1, %o3
 57
 58 andn %o2, 0x7, %o4
 59 and %o2, 0x7, %o2
 601: subcc %o4, 0x8, %o4
 61 EX(ldxa [%o1] %asi, %o5)
 62 EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
 63 bgu,pt %XCC, 1b
 64 add %o1, 0x8, %o1
 65 andcc %o2, 0x4, %g0
 66 be,pt %XCC, 1f
 67 nop
 68 sub %o2, 0x4, %o2
 69 EX(lduwa [%o1] %asi, %o5)
 70 EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
 71 add %o1, 0x4, %o1
 721: cmp %o2, 0
 73 be,pt %XCC, 85f
 74 nop
 75 ba,pt %xcc, 90f
 76 nop
 77
 7880: /* 0 < len <= 16 */
/* Word-aligned short copy; unaligned cases go to the byte loop. */
 79 andcc %o3, 0x3, %g0
 80 bne,pn %XCC, 90f
 81 sub %o0, %o1, %o3
 82
 8382:
 84 subcc %o2, 4, %o2
 85 EX(lduwa [%o1] %asi, %g1)
 86 EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
 87 bgu,pt %XCC, 82b
 88 add %o1, 4, %o1
 89
 9085: retl
 91 clr %o0
 92
/* Byte-at-a-time fallback for unaligned or leftover bytes. */
 93 .align 32
 9490:
 95 subcc %o2, 1, %o2
 96 EX(lduba [%o1] %asi, %g1)
 97 EX(stba %g1, [%o1 + %o3] ASI_AIUS)
 98 bgu,pt %XCC, 90b
 99 add %o1, 1, %o1
 100 retl
 101 clr %o0
 102
 103 .size ___copy_in_user, .-___copy_in_user
104
105 /* Act like copy_{to,in}_user(), ie. return zero instead
106 * of original destination pointer. This is invoked when
107 * copy_{to,in}_user() finds that %asi is kernel space.
108 */
/* memcpy_user_stub: used when %asi indicates kernel space (see check
 * in ___copy_in_user above) -- just call memcpy in a fresh register
 * window and return 0 (restore %g0 into %o0), matching the
 * copy_{to,in}_user return convention.
 */
 109 .globl memcpy_user_stub
 110 .type memcpy_user_stub,#function
 111memcpy_user_stub:
 112 save %sp, -192, %sp
 113 mov %i0, %o0
 114 mov %i1, %o1
 115 call memcpy
 116 mov %i2, %o2
 117 ret
 118 restore %g0, %g0, %o0
 119 .size memcpy_user_stub, .-memcpy_user_stub
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
new file mode 100644
index 000000000000..23ebf2c970b7
--- /dev/null
+++ b/arch/sparc64/lib/copy_page.S
@@ -0,0 +1,242 @@
/* copy_page.S: UltraSparc optimized copy page.
2 *
3 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
5 */
6
7#include <asm/visasm.h>
8#include <asm/thread_info.h>
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/spitfire.h>
12#include <asm/head.h>
13
14 /* What we used to do was lock a TLB entry into a specific
15 * TLB slot, clear the page with interrupts disabled, then
16 * restore the original TLB entry. This was great for
17 * disturbing the TLB as little as possible, but it meant
18 * we had to keep interrupts disabled for a long time.
19 *
20 * Now, we simply use the normal TLB loading mechanism,
21 * and this makes the cpu choose a slot all by itself.
22 * Then we do a normal TLB flush on exit. We need only
23 * disable preemption during the clear.
24 */
25
26#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
27#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
28#define DCACHE_SIZE (PAGE_SIZE * 2)
29
30#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
31#define PAGE_SIZE_REM 0x80
32#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
33#define PAGE_SIZE_REM 0x100
34#else
35#error Wrong PAGE_SHIFT specified
36#endif
37
38#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
39 fmovd %reg0, %f48; fmovd %reg1, %f50; \
40 fmovd %reg2, %f52; fmovd %reg3, %f54; \
41 fmovd %reg4, %f56; fmovd %reg5, %f58; \
42 fmovd %reg6, %f60; fmovd %reg7, %f62;
43
44 .text
45
	.align		32
	.globl		copy_user_page
	.type		copy_user_page,#function
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
	/* Copy one page, going through two temporary TLB mappings so
	 * the destination is written at the correct D-cache alias color
	 * for vaddr.  Preemption is disabled (TI_PRE_COUNT bump) while
	 * the temporary TLB entries are live; they are demapped at 5:.
	 */
	lduw		[%g6 + TI_PRE_COUNT], %o4
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o3

	sllx		%g2, 32, %g2
	sethi		%uhi(TTE_BITS_TOP), %g3

	sllx		%g3, 32, %g3
	sub		%o0, %g2, %g1		! dest paddr

	sub		%o1, %g2, %g2		! src paddr
	or		%g3, TTE_BITS_BOTTOM, %g3

	and		%o2, %o3, %o0		! vaddr D-cache alias bit
	or		%g1, %g3, %g1		! dest TTE data

	or		%g2, %g3, %g2		! src TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	sethi		%hi(DCACHE_SIZE), %o1
	add		%o0, %o3, %o0		! dest TTE vaddr

	add		%o4, 1, %o2
	add		%o0, %o1, %o1		! src TTE vaddr

	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o2, [%g6 + TI_PRE_COUNT]

	/* Load TLB entries (interrupts off while poking the MMU).  */
	rdpr		%pstate, %o2
	wrpr		%o2, PSTATE_IE, %pstate
	stxa		%o0, [%g3] ASI_DMMU
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	membar		#Sync
	stxa		%o1, [%g3] ASI_DMMU
	stxa		%g2, [%g0] ASI_DTLB_DATA_IN
	membar		#Sync
	wrpr		%o2, 0x0, %pstate

	BRANCH_IF_ANY_CHEETAH(g3,o2,1f)
	ba,pt		%xcc, 9f		! non-cheetah: block-load path
	 nop

	/* Cheetah path: software-pipelined ldd/fmovd loop, 64 bytes
	 * per iteration, with prefetching ahead of the loads.
	 */
1:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	sethi		%hi((PAGE_SIZE/64)-2), %o2
	mov		%o0, %g1
	prefetch	[%o1 + 0x000], #one_read
	or		%o2, %lo((PAGE_SIZE/64)-2), %o2
	prefetch	[%o1 + 0x040], #one_read
	prefetch	[%o1 + 0x080], #one_read
	prefetch	[%o1 + 0x0c0], #one_read
	ldd		[%o1 + 0x000], %f0
	prefetch	[%o1 + 0x100], #one_read
	ldd		[%o1 + 0x008], %f2
	prefetch	[%o1 + 0x140], #one_read
	ldd		[%o1 + 0x010], %f4
	prefetch	[%o1 + 0x180], #one_read
	fmovd		%f0, %f16
	ldd		[%o1 + 0x018], %f6
	fmovd		%f2, %f18
	ldd		[%o1 + 0x020], %f8
	fmovd		%f4, %f20
	ldd		[%o1 + 0x028], %f10
	fmovd		%f6, %f22
	ldd		[%o1 + 0x030], %f12
	fmovd		%f8, %f24
	ldd		[%o1 + 0x038], %f14
	fmovd		%f10, %f26
	ldd		[%o1 + 0x040], %f0
1:	ldd		[%o1 + 0x048], %f2
	fmovd		%f12, %f28
	ldd		[%o1 + 0x050], %f4
	fmovd		%f14, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x058], %f6
	fmovd		%f0, %f16
	ldd		[%o1 + 0x060], %f8
	fmovd		%f2, %f18
	ldd		[%o1 + 0x068], %f10
	fmovd		%f4, %f20
	ldd		[%o1 + 0x070], %f12
	fmovd		%f6, %f22
	ldd		[%o1 + 0x078], %f14
	fmovd		%f8, %f24
	ldd		[%o1 + 0x080], %f0
	prefetch	[%o1 + 0x180], #one_read
	fmovd		%f10, %f26
	subcc		%o2, 1, %o2
	add		%o0, 0x40, %o0
	bne,pt		%xcc, 1b
	 add		%o1, 0x40, %o1

	/* Loop epilogue: drain the software pipeline (last two blocks). */
	ldd		[%o1 + 0x048], %f2
	fmovd		%f12, %f28
	ldd		[%o1 + 0x050], %f4
	fmovd		%f14, %f30
	stda		%f16, [%o0] ASI_BLK_P
	ldd		[%o1 + 0x058], %f6
	fmovd		%f0, %f16
	ldd		[%o1 + 0x060], %f8
	fmovd		%f2, %f18
	ldd		[%o1 + 0x068], %f10
	fmovd		%f4, %f20
	ldd		[%o1 + 0x070], %f12
	fmovd		%f6, %f22
	add		%o0, 0x40, %o0
	ldd		[%o1 + 0x078], %f14
	fmovd		%f8, %f24
	fmovd		%f10, %f26
	fmovd		%f12, %f28
	fmovd		%f14, %f30
	stda		%f16, [%o0] ASI_BLK_P
	membar		#Sync
	VISExitHalf
	ba,pt		%xcc, 5f
	 nop

	/* Spitfire path: 64-byte block loads/stores.  If the task has a
	 * pending fault code we use ASI_BLK_COMMIT_P for the stores.
	 */
9:
	VISEntry
	ldub		[%g6 + TI_FAULT_CODE], %g3
	mov		%o0, %g1
	cmp		%g3, 0
	rd		%asi, %g3
	be,a,pt		%icc, 1f
	 wr		%g0, ASI_BLK_P, %asi
	wr		%g0, ASI_BLK_COMMIT_P, %asi
1:	ldda		[%o1] ASI_BLK_P, %f0
	add		%o1, 0x40, %o1
	ldda		[%o1] ASI_BLK_P, %f16
	add		%o1, 0x40, %o1
	sethi		%hi(PAGE_SIZE), %o2
1:	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
	ldda		[%o1] ASI_BLK_P, %f32
	stda		%f48, [%o0] %asi
	add		%o1, 0x40, %o1
	sub		%o2, 0x40, %o2
	add		%o0, 0x40, %o0
	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
	ldda		[%o1] ASI_BLK_P, %f0
	stda		%f48, [%o0] %asi
	add		%o1, 0x40, %o1
	sub		%o2, 0x40, %o2
	add		%o0, 0x40, %o0
	TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
	ldda		[%o1] ASI_BLK_P, %f16
	stda		%f48, [%o0] %asi
	sub		%o2, 0x40, %o2
	add		%o1, 0x40, %o1
	cmp		%o2, PAGE_SIZE_REM
	bne,pt		%xcc, 1b
	 add		%o0, 0x40, %o0
#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
	ldda		[%o1] ASI_BLK_P, %f32
	stda		%f48, [%o0] %asi
	add		%o1, 0x40, %o1
	sub		%o2, 0x40, %o2
	add		%o0, 0x40, %o0
	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
	ldda		[%o1] ASI_BLK_P, %f0
	stda		%f48, [%o0] %asi
	add		%o1, 0x40, %o1
	sub		%o2, 0x40, %o2
	add		%o0, 0x40, %o0
	membar		#Sync
	stda		%f32, [%o0] %asi
	add		%o0, 0x40, %o0
	stda		%f0, [%o0] %asi
#else
	membar		#Sync
	stda		%f0, [%o0] %asi
	add		%o0, 0x40, %o0
	stda		%f16, [%o0] %asi
#endif
	membar		#Sync
	wr		%g3, 0x0, %asi
	VISExit

	/* Demap both temporary TLB entries and re-enable preemption. */
5:
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync

	sethi		%hi(DCACHE_SIZE), %g2
	stxa		%g0, [%g1 + %g2] ASI_DMMU_DEMAP
	membar		#Sync

	retl
	 stw		%o4, [%g6 + TI_PRE_COUNT]

	.size		copy_user_page, .-copy_user_page
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
new file mode 100644
index 000000000000..71af48839064
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy.S
@@ -0,0 +1,308 @@
1/* csum_copy.S: Checksum+copy code for sparc64
2 *
3 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
4 */
5
6#ifdef __KERNEL__
7#define GLOBAL_SPARE %g7
8#else
9#define GLOBAL_SPARE %g5
10#endif
11
12#ifndef EX_LD
13#define EX_LD(x) x
14#endif
15
16#ifndef EX_ST
17#define EX_ST(x) x
18#endif
19
20#ifndef EX_RETVAL
21#define EX_RETVAL(x) x
22#endif
23
24#ifndef LOAD
25#define LOAD(type,addr,dest) type [addr], dest
26#endif
27
28#ifndef STORE
29#define STORE(type,src,addr) type src, [addr]
30#endif
31
32#ifndef FUNC_NAME
33#define FUNC_NAME csum_partial_copy_nocheck
34#endif
35
	.register	%g2, #scratch
	.register	%g3, #scratch

	.text

	/* Alignment fixup, branched to from FUNC_NAME below when src
	 * and dst are congruent mod 4 but src is not 4-byte aligned.
	 * Copies (and sums) 1 or 2 leading bytes until src is aligned.
	 */
90:
	/* We checked for zero length already, so there must be
	 * at least one byte.
	 */
	be,pt	%icc, 1f
	 nop
	EX_LD(LOAD(ldub, %o0 + 0x00, %o4))
	add	%o0, 1, %o0
	sub	%o2, 1, %o2
	EX_ST(STORE(stb, %o4, %o1 + 0x00))
	add	%o1, 1, %o1
1:	andcc	%o0, 0x2, %g0
	be,pn	%icc, 80f
	 cmp	%o2, 2
	blu,pn	%icc, 60f
	 nop
	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
	add	%o0, 2, %o0
	sub	%o2, 2, %o2
	EX_ST(STORE(sth, %o5, %o1 + 0x00))
	add	%o1, 2, %o1
	ba,pt	%xcc, 80f
	 add	%o5, %o4, %o4

	.globl	FUNC_NAME
FUNC_NAME:	/* %o0=src, %o1=dst, %o2=len, %o3=sum */
	/* Copy len bytes from src to dst while accumulating a 16-bit
	 * ones-complement checksum; returns sum folded with %o3.
	 * If src and dst are not congruent mod 4 we take the slow
	 * bytewise path at 95f.
	 */
	LOAD(prefetch, %o0 + 0x000, #n_reads)
	xor	%o0, %o1, %g1
	clr	%o4
	andcc	%g1, 0x3, %g0
	bne,pn	%icc, 95f
	 LOAD(prefetch, %o0 + 0x040, #n_reads)

	brz,pn	%o2, 70f
	 andcc	%o0, 0x3, %g0

	/* We "remember" whether the lowest bit in the address
	 * was set in GLOBAL_SPARE.  Because if it is, we have to swap
	 * upper and lower 8 bit fields of the sum we calculate.
	 */
	bne,pn	%icc, 90b
	 andcc	%o0, 0x1, GLOBAL_SPARE

80:
	LOAD(prefetch, %o0 + 0x080, #n_reads)
	andncc	%o2, 0x3f, %g3		! %g3 = bytes handled by 64B loop

	LOAD(prefetch, %o0 + 0x0c0, #n_reads)
	sub	%o2, %g3, %o2
	brz,pn	%g3, 2f
	 LOAD(prefetch, %o0 + 0x100, #n_reads)

	/* So that we don't need to use the non-pairing
	 * add-with-carry instructions we accumulate 32-bit
	 * values into a 64-bit register.  At the end of the
	 * loop we fold it down to 32-bits and so on.
	 */
	ba,pt	%xcc, 1f
	 LOAD(prefetch, %o0 + 0x140, #n_reads)

	/* Main loop: copy and sum 64 bytes per iteration. */
	.align	32
1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
	EX_LD(LOAD(lduw, %o0 + 0x04, %g1))
	EX_LD(LOAD(lduw, %o0 + 0x08, %g2))
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x00))
	EX_LD(LOAD(lduw, %o0 + 0x0c, %o5))
	add	%o4, %g1, %o4
	EX_ST(STORE(stw, %g1, %o1 + 0x04))
	EX_LD(LOAD(lduw, %o0 + 0x10, %g1))
	add	%o4, %g2, %o4
	EX_ST(STORE(stw, %g2, %o1 + 0x08))
	EX_LD(LOAD(lduw, %o0 + 0x14, %g2))
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x0c))
	EX_LD(LOAD(lduw, %o0 + 0x18, %o5))
	add	%o4, %g1, %o4
	EX_ST(STORE(stw, %g1, %o1 + 0x10))
	EX_LD(LOAD(lduw, %o0 + 0x1c, %g1))
	add	%o4, %g2, %o4
	EX_ST(STORE(stw, %g2, %o1 + 0x14))
	EX_LD(LOAD(lduw, %o0 + 0x20, %g2))
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x18))
	EX_LD(LOAD(lduw, %o0 + 0x24, %o5))
	add	%o4, %g1, %o4
	EX_ST(STORE(stw, %g1, %o1 + 0x1c))
	EX_LD(LOAD(lduw, %o0 + 0x28, %g1))
	add	%o4, %g2, %o4
	EX_ST(STORE(stw, %g2, %o1 + 0x20))
	EX_LD(LOAD(lduw, %o0 + 0x2c, %g2))
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x24))
	EX_LD(LOAD(lduw, %o0 + 0x30, %o5))
	add	%o4, %g1, %o4
	EX_ST(STORE(stw, %g1, %o1 + 0x28))
	EX_LD(LOAD(lduw, %o0 + 0x34, %g1))
	add	%o4, %g2, %o4
	EX_ST(STORE(stw, %g2, %o1 + 0x2c))
	EX_LD(LOAD(lduw, %o0 + 0x38, %g2))
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x30))
	EX_LD(LOAD(lduw, %o0 + 0x3c, %o5))
	add	%o4, %g1, %o4
	EX_ST(STORE(stw, %g1, %o1 + 0x34))
	LOAD(prefetch, %o0 + 0x180, #n_reads)
	add	%o4, %g2, %o4
	EX_ST(STORE(stw, %g2, %o1 + 0x38))
	subcc	%g3, 0x40, %g3
	add	%o0, 0x40, %o0
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x3c))
	bne,pt	%icc, 1b
	 add	%o1, 0x40, %o1

	/* Remaining full 32-bit words (up to 15 of them). */
2:	and	%o2, 0x3c, %g3
	brz,pn	%g3, 2f
	 sub	%o2, %g3, %o2
1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
	subcc	%g3, 0x4, %g3
	add	%o0, 0x4, %o0
	add	%o4, %o5, %o4
	EX_ST(STORE(stw, %o5, %o1 + 0x00))
	bne,pt	%icc, 1b
	 add	%o1, 0x4, %o1

2:
	/* fold 64-->32 */
	srlx	%o4, 32, %o5
	srl	%o4, 0, %o4
	add	%o4, %o5, %o4
	srlx	%o4, 32, %o5
	srl	%o4, 0, %o4
	add	%o4, %o5, %o4

	/* fold 32-->16 */
	sethi	%hi(0xffff0000), %g1
	srl	%o4, 16, %o5
	andn	%o4, %g1, %g2
	add	%o5, %g2, %o4
	srl	%o4, 16, %o5
	andn	%o4, %g1, %g2
	add	%o5, %g2, %o4

60:
	/* %o4 has the 16-bit sum we have calculated so-far.
	 * Handle the final halfword and/or odd byte, if any.
	 */
	cmp	%o2, 2
	blu,pt	%icc, 1f
	 nop
	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
	sub	%o2, 2, %o2
	add	%o0, 2, %o0
	add	%o4, %o5, %o4
	EX_ST(STORE(sth, %o5, %o1 + 0x00))
	add	%o1, 0x2, %o1
1:	brz,pt	%o2, 1f
	 nop
	EX_LD(LOAD(ldub, %o0 + 0x00, %o5))
	sub	%o2, 1, %o2
	add	%o0, 1, %o0
	EX_ST(STORE(stb, %o5, %o1 + 0x00))
	sllx	%o5, 8, %o5		! final byte goes in the high half
	add	%o1, 1, %o1
	add	%o4, %o5, %o4
1:
	/* fold 32-->16 */
	sethi	%hi(0xffff0000), %g1
	srl	%o4, 16, %o5
	andn	%o4, %g1, %g2
	add	%o5, %g2, %o4
	srl	%o4, 16, %o5
	andn	%o4, %g1, %g2
	add	%o5, %g2, %o4

1:	brz,pt	GLOBAL_SPARE, 1f
	 nop

	/* We started with an odd byte, byte-swap the result. */
	srl	%o4, 8, %o5
	and	%o4, 0xff, %g1
	sll	%g1, 8, %g1
	or	%o5, %g1, %o4

1:	add	%o3, %o4, %o3		! combine with the incoming sum

70:
	retl
	 mov	%o3, %o0

	/* Slow path: src and dst are misaligned relative to each
	 * other.  Copy bytewise/halfword/word as alignment permits,
	 * accumulating with add-with-carry into GLOBAL_SPARE.
	 */
95:	mov	0, GLOBAL_SPARE
	brlez,pn %o2, 4f
	 andcc	%o0, 1, %o5
	be,a,pt	%icc, 1f
	 srl	%o2, 1, %g1
	sub	%o2, 1, %o2
	EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE))
	add	%o0, 1, %o0
	EX_ST(STORE(stb, GLOBAL_SPARE, %o1))
	srl	%o2, 1, %g1
	add	%o1, 1, %o1
1:	brz,a,pn %g1, 3f
	 andcc	%o2, 1, %g0
	andcc	%o0, 2, %g0
	be,a,pt	%icc, 1f
	 srl	%g1, 1, %g1
	EX_LD(LOAD(lduh, %o0, %o4))
	sub	%o2, 2, %o2
	srl	%o4, 8, %g2
	sub	%g1, 1, %g1
	EX_ST(STORE(stb, %g2, %o1))
	add	%o4, GLOBAL_SPARE, GLOBAL_SPARE
	EX_ST(STORE(stb, %o4, %o1 + 1))
	add	%o0, 2, %o0
	srl	%g1, 1, %g1
	add	%o1, 2, %o1
1:	brz,a,pn %g1, 2f
	 andcc	%o2, 2, %g0
	EX_LD(LOAD(lduw, %o0, %o4))
5:	srl	%o4, 24, %g2
	srl	%o4, 16, %g3
	EX_ST(STORE(stb, %g2, %o1))
	srl	%o4, 8, %g2
	EX_ST(STORE(stb, %g3, %o1 + 1))
	add	%o0, 4, %o0
	EX_ST(STORE(stb, %g2, %o1 + 2))
	addcc	%o4, GLOBAL_SPARE, GLOBAL_SPARE
	EX_ST(STORE(stb, %o4, %o1 + 3))
	addc	GLOBAL_SPARE, %g0, GLOBAL_SPARE	! fold in the carry
	add	%o1, 4, %o1
	subcc	%g1, 1, %g1
	bne,a,pt %icc, 5b
	 EX_LD(LOAD(lduw, %o0, %o4))
	sll	GLOBAL_SPARE, 16, %g2
	srl	GLOBAL_SPARE, 16, GLOBAL_SPARE
	srl	%g2, 16, %g2
	andcc	%o2, 2, %g0
	add	%g2, GLOBAL_SPARE, GLOBAL_SPARE
2:	be,a,pt	%icc, 3f
	 andcc	%o2, 1, %g0
	EX_LD(LOAD(lduh, %o0, %o4))
	andcc	%o2, 1, %g0
	srl	%o4, 8, %g2
	add	%o0, 2, %o0
	EX_ST(STORE(stb, %g2, %o1))
	add	GLOBAL_SPARE, %o4, GLOBAL_SPARE
	EX_ST(STORE(stb, %o4, %o1 + 1))
	add	%o1, 2, %o1
3:	be,a,pt	%icc, 1f
	 sll	GLOBAL_SPARE, 16, %o4
	EX_LD(LOAD(ldub, %o0, %g2))
	sll	%g2, 8, %o4
	EX_ST(STORE(stb, %g2, %o1))
	add	GLOBAL_SPARE, %o4, GLOBAL_SPARE
	sll	GLOBAL_SPARE, 16, %o4
1:	addcc	%o4, GLOBAL_SPARE, GLOBAL_SPARE
	srl	GLOBAL_SPARE, 16, %o4
	addc	%g0, %o4, GLOBAL_SPARE
	brz,pt	%o5, 4f			! started on an even byte?
	 srl	GLOBAL_SPARE, 8, %o4
	/* Odd start: swap the two bytes of the 16-bit sum. */
	and	GLOBAL_SPARE, 0xff, %g2
	and	%o4, 0xff, %o4
	sll	%g2, 8, %g2
	or	%g2, %o4, GLOBAL_SPARE
4:	addcc	%o3, GLOBAL_SPARE, %o3
	addc	%g0, %o3, %o0
	retl
	 srl	%o0, 0, %o0
	.size	FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/csum_copy_from_user.S b/arch/sparc64/lib/csum_copy_from_user.S
new file mode 100644
index 000000000000..817ebdae39f8
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_from_user.S
@@ -0,0 +1,21 @@
1/* csum_copy_from_user.S: Checksum+copy from userspace.
2 *
3 * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
4 */
5
/* EX_LD: wrap a userspace load with an exception-table entry so a
 * fault makes the whole checksum+copy return -1 instead of oopsing.
 */
#define EX_LD(x)		\
98:	x;			\
	.section .fixup;	\
	.align 4;		\
99:	retl;			\
	 mov	-1, %o0;	\
	.section __ex_table;	\
	.align 4;		\
	.word 98b, 99b;		\
	.text;			\
	.align 4;

/* Instantiate csum_copy.S with user-space (ASI-relative) loads. */
#define FUNC_NAME		__csum_partial_copy_from_user
#define LOAD(type,addr,dest)	type##a [addr] %asi, dest

#include "csum_copy.S"
diff --git a/arch/sparc64/lib/csum_copy_to_user.S b/arch/sparc64/lib/csum_copy_to_user.S
new file mode 100644
index 000000000000..c2f9463ea1e2
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_to_user.S
@@ -0,0 +1,21 @@
1/* csum_copy_to_user.S: Checksum+copy to userspace.
2 *
3 * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
4 */
5
/* EX_ST: wrap a userspace store with an exception-table entry so a
 * fault makes the whole checksum+copy return -1 instead of oopsing.
 */
#define EX_ST(x)		\
98:	x;			\
	.section .fixup;	\
	.align 4;		\
99:	retl;			\
	 mov	-1, %o0;	\
	.section __ex_table;	\
	.align 4;		\
	.word 98b, 99b;		\
	.text;			\
	.align 4;

/* Instantiate csum_copy.S with user-space (ASI-relative) stores. */
#define FUNC_NAME		__csum_partial_copy_to_user
#define STORE(type,src,addr)	type##a src, [addr] %asi

#include "csum_copy.S"
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
new file mode 100644
index 000000000000..c421e0c65325
--- /dev/null
+++ b/arch/sparc64/lib/debuglocks.c
@@ -0,0 +1,376 @@
1/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
2 * debuglocks.c: Debugging versions of SMP locking primitives.
3 *
4 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/spinlock.h>
11#include <asm/system.h>
12
13#ifdef CONFIG_SMP
14
15#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
16
17static inline void show (char *str, spinlock_t *lock, unsigned long caller)
18{
19 int cpu = smp_processor_id();
20
21 printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
22 str, lock, cpu, (unsigned int) caller,
23 lock->owner_pc, lock->owner_cpu);
24}
25
26static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
27{
28 int cpu = smp_processor_id();
29
30 printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
31 str, lock, cpu, (unsigned int) caller,
32 lock->writer_pc, lock->writer_cpu);
33}
34
35static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
36{
37 int cpu = smp_processor_id();
38 int i;
39
40 printk("%s(%p) CPU#%d stuck at %08x\n",
41 str, lock, cpu, (unsigned int) caller);
42 printk("Writer: PC(%08x):CPU(%x)\n",
43 lock->writer_pc, lock->writer_cpu);
44 printk("Readers:");
45 for (i = 0; i < NR_CPUS; i++)
46 if (lock->reader_pc[i])
47 printk(" %d[%08x]", i, lock->reader_pc[i]);
48 printk("\n");
49}
50
51#undef INIT_STUCK
52#define INIT_STUCK 100000000
53
/* Debug spinlock acquire: take the lock byte with ldstub, then record
 * the owner's PC and CPU.  If we spin too long, periodically report
 * via show() (at most 3 times).
 */
void _do_spin_lock(spinlock_t *lock, char *str)
{
	unsigned long caller, val;
	int stuck = INIT_STUCK;
	int cpu = get_cpu();
	int shown = 0;

	GET_CALLER(caller);
again:
	/* Atomic test-and-set of the lock byte. */
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (val)
			     : "r" (&(lock->lock))
			     : "memory");
	membar("#StoreLoad | #StoreStore");
	if (val) {
		/* Busy: spin with plain loads until it looks free,
		 * then retry the ldstub.
		 */
		while (lock->lock) {
			if (!--stuck) {
				if (shown++ <= 2)
					show(str, lock, caller);
				stuck = INIT_STUCK;
			}
			membar("#LoadLoad");
		}
		goto again;
	}
	/* Got it; record ownership for later diagnostics. */
	lock->owner_pc = ((unsigned int)caller);
	lock->owner_cpu = cpu;
	current->thread.smp_lock_count++;
	current->thread.smp_lock_pc = ((unsigned int)caller);

	put_cpu();
}
86
/* Debug spinlock trylock: one ldstub attempt.  On success records the
 * owner's PC/CPU and returns 1; returns 0 if the lock was held.
 */
int _do_spin_trylock(spinlock_t *lock)
{
	unsigned long val, caller;
	int cpu = get_cpu();

	GET_CALLER(caller);
	/* Atomic test-and-set of the lock byte. */
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (val)
			     : "r" (&(lock->lock))
			     : "memory");
	membar("#StoreLoad | #StoreStore");
	if (!val) {
		/* Acquired; record ownership for diagnostics. */
		lock->owner_pc = ((unsigned int)caller);
		lock->owner_cpu = cpu;
		current->thread.smp_lock_count++;
		current->thread.smp_lock_pc = ((unsigned int)caller);
	}

	put_cpu();

	return val == 0;
}
109
/* Debug spinlock release: clear ownership info first, then drop the
 * lock byte behind a barrier so the cleared fields are visible before
 * the lock appears free.
 */
void _do_spin_unlock(spinlock_t *lock)
{
	lock->owner_pc = 0;
	lock->owner_cpu = NO_PROC_ID;
	membar("#StoreStore | #LoadStore");
	lock->lock = 0;
	current->thread.smp_lock_count--;
}
118
119/* Keep INIT_STUCK the same... */
120
/* Debug read-lock: the rwlock word is a reader count, with the top
 * (sign) bit used as the writer flag.  Wait for any writer, then
 * casx-increment the reader count; record this CPU's PC on success.
 */
void _do_read_lock (rwlock_t *rw, char *str)
{
	unsigned long caller, val;
	int stuck = INIT_STUCK;
	int cpu = get_cpu();
	int shown = 0;

	GET_CALLER(caller);
wlock_again:
	/* Wait for any writer to go away.  */
	while (((long)(rw->lock)) < 0) {
		if (!--stuck) {
			if (shown++ <= 2)
				show_read(str, rw, caller);
			stuck = INIT_STUCK;
		}
		membar("#LoadLoad");
	}
	/* Try once to increment the counter.  val != 0 means either a
	 * writer appeared (brlz) or the casx raced with another update.
	 */
	__asm__ __volatile__(
"	ldx	[%0], %%g1\n"
"	brlz,a,pn %%g1, 2f\n"
"	 mov	1, %0\n"
"	add	%%g1, 1, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	sub	%%g1, %%g7, %0\n"
"2:"	: "=r" (val)
	: "0" (&(rw->lock))
	: "g1", "g7", "memory");
	membar("#StoreLoad | #StoreStore");
	if (val)
		goto wlock_again;
	rw->reader_pc[cpu] = ((unsigned int)caller);
	current->thread.smp_lock_count++;
	current->thread.smp_lock_pc = ((unsigned int)caller);

	put_cpu();
}
159
/* Debug read-unlock: clear this CPU's recorded reader PC, then spin
 * on casx until the reader count is successfully decremented.
 */
void _do_read_unlock (rwlock_t *rw, char *str)
{
	unsigned long caller, val;
	int stuck = INIT_STUCK;
	int cpu = get_cpu();
	int shown = 0;

	GET_CALLER(caller);

	/* Drop our identity _first_. */
	rw->reader_pc[cpu] = 0;
	current->thread.smp_lock_count--;
runlock_again:
	/* Spin trying to decrement the counter using casx.  val != 0
	 * means the casx lost a race and must be retried.
	 */
	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"	ldx	[%0], %%g1\n"
"	sub	%%g1, 1, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	membar	#StoreLoad | #StoreStore\n"
"	sub	%%g1, %%g7, %0\n"
	: "=r" (val)
	: "0" (&(rw->lock))
	: "g1", "g7", "memory");
	if (val) {
		if (!--stuck) {
			if (shown++ <= 2)
				show_read(str, rw, caller);
			stuck = INIT_STUCK;
		}
		goto runlock_again;
	}

	put_cpu();
}
195
/* Debug write-lock: set the top bit of the rwlock word, then wait for
 * the reader count (the remaining bits) to drain.  If readers remain
 * after we set the bit, we drop it, spin, and start over to avoid
 * starving them.  Records writer PC/CPU on success.
 */
void _do_write_lock (rwlock_t *rw, char *str)
{
	unsigned long caller, val;
	int stuck = INIT_STUCK;
	int cpu = get_cpu();
	int shown = 0;

	GET_CALLER(caller);
wlock_again:
	/* Spin while there is another writer. */
	while (((long)rw->lock) < 0) {
		if (!--stuck) {
			if (shown++ <= 2)
				show_write(str, rw, caller);
			stuck = INIT_STUCK;
		}
		membar("#LoadLoad");
	}

	/* Try to acquire the write bit.  val != 0 means another writer
	 * beat us (brlz) or the casx raced.
	 */
	__asm__ __volatile__(
"	mov	1, %%g3\n"
"	sllx	%%g3, 63, %%g3\n"
"	ldx	[%0], %%g1\n"
"	brlz,pn	%%g1, 1f\n"
"	 or	%%g1, %%g3, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	membar	#StoreLoad | #StoreStore\n"
"	ba,pt	%%xcc, 2f\n"
"	 sub	%%g1, %%g7, %0\n"
"1:	mov	1, %0\n"
"2:"	: "=r" (val)
	: "0" (&(rw->lock))
	: "g3", "g1", "g7", "memory");
	if (val) {
		/* We couldn't get the write bit. */
		if (!--stuck) {
			if (shown++ <= 2)
				show_write(str, rw, caller);
			stuck = INIT_STUCK;
		}
		goto wlock_again;
	}
	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
		/* Readers still around, drop the write
		 * lock, spin, and try again.
		 */
		if (!--stuck) {
			if (shown++ <= 2)
				show_write(str, rw, caller);
			stuck = INIT_STUCK;
		}
		/* casx-loop clearing the write bit we just set. */
		__asm__ __volatile__(
"	mov	1, %%g3\n"
"	sllx	%%g3, 63, %%g3\n"
"1:	ldx	[%0], %%g1\n"
"	andn	%%g1, %%g3, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%xcc, 1b\n"
"	 membar	#StoreLoad | #StoreStore"
		: /* no outputs */
		: "r" (&(rw->lock))
		: "g3", "g1", "g7", "cc", "memory");
		/* Wait for the lock word to go fully idle. */
		while(rw->lock != 0) {
			if (!--stuck) {
				if (shown++ <= 2)
					show_write(str, rw, caller);
				stuck = INIT_STUCK;
			}
			membar("#LoadLoad");
		}
		goto wlock_again;
	}

	/* We have it, say who we are. */
	rw->writer_pc = ((unsigned int)caller);
	rw->writer_cpu = cpu;
	current->thread.smp_lock_count++;
	current->thread.smp_lock_pc = ((unsigned int)caller);

	put_cpu();
}
279
/* Debug write-unlock: clear the recorded writer identity, then
 * casx-clear the top (writer) bit, retrying until the casx succeeds.
 */
void _do_write_unlock(rwlock_t *rw)
{
	unsigned long caller, val;
	int stuck = INIT_STUCK;
	int shown = 0;

	GET_CALLER(caller);

	/* Drop our identity _first_ */
	rw->writer_pc = 0;
	rw->writer_cpu = NO_PROC_ID;
	current->thread.smp_lock_count--;
wlock_again:
	/* val != 0 means the casx raced and must be retried. */
	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"	mov	1, %%g3\n"
"	sllx	%%g3, 63, %%g3\n"
"	ldx	[%0], %%g1\n"
"	andn	%%g1, %%g3, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	membar	#StoreLoad | #StoreStore\n"
"	sub	%%g1, %%g7, %0\n"
	: "=r" (val)
	: "0" (&(rw->lock))
	: "g3", "g1", "g7", "memory");
	if (val) {
		if (!--stuck) {
			if (shown++ <= 2)
				show_write("write_unlock", rw, caller);
			stuck = INIT_STUCK;
		}
		goto wlock_again;
	}
}
314
/* Debug write-trylock: one attempt to set the write bit.  Fails (and
 * undoes the bit) if readers are present.  Returns 1 with writer
 * identity recorded on success, 0 on failure.
 */
int _do_write_trylock (rwlock_t *rw, char *str)
{
	unsigned long caller, val;
	int cpu = get_cpu();

	GET_CALLER(caller);

	/* Try to acquire the write bit.  val != 0 means another writer
	 * holds it (brlz) or the casx raced.
	 */
	__asm__ __volatile__(
"	mov	1, %%g3\n"
"	sllx	%%g3, 63, %%g3\n"
"	ldx	[%0], %%g1\n"
"	brlz,pn	%%g1, 1f\n"
"	 or	%%g1, %%g3, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	membar	#StoreLoad | #StoreStore\n"
"	ba,pt	%%xcc, 2f\n"
"	 sub	%%g1, %%g7, %0\n"
"1:	mov	1, %0\n"
"2:"	: "=r" (val)
	: "0" (&(rw->lock))
	: "g3", "g1", "g7", "memory");

	if (val) {
		put_cpu();
		return 0;
	}

	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
		/* Readers still around, drop the write
		 * lock, return failure.
		 */
		__asm__ __volatile__(
"	mov	1, %%g3\n"
"	sllx	%%g3, 63, %%g3\n"
"1:	ldx	[%0], %%g1\n"
"	andn	%%g1, %%g3, %%g7\n"
"	casx	[%0], %%g1, %%g7\n"
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%xcc, 1b\n"
"	 membar	#StoreLoad | #StoreStore"
		: /* no outputs */
		: "r" (&(rw->lock))
		: "g3", "g1", "g7", "cc", "memory");

		put_cpu();

		return 0;
	}

	/* We have it, say who we are. */
	rw->writer_pc = ((unsigned int)caller);
	rw->writer_cpu = cpu;
	current->thread.smp_lock_count++;
	current->thread.smp_lock_pc = ((unsigned int)caller);

	put_cpu();

	return 1;
}
375
376#endif /* CONFIG_SMP */
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
new file mode 100644
index 000000000000..7e6fdaebedba
--- /dev/null
+++ b/arch/sparc64/lib/dec_and_lock.S
@@ -0,0 +1,78 @@
1/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
2 * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
3 * using cas and ldstub instructions.
4 *
5 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
6 */
7#include <linux/config.h>
8#include <asm/thread_info.h>
9
10 .text
11 .align 64
12
13 /* CAS basically works like this:
14 *
15 * void CAS(MEM, REG1, REG2)
16 * {
17 * START_ATOMIC();
18 * if (*(MEM) == REG1) {
19 * TMP = *(MEM);
20 * *(MEM) = REG2;
21 * REG2 = TMP;
22 * } else
23 * REG2 = *(MEM);
24 * END_ATOMIC();
25 * }
26 */
27
	.globl	_atomic_dec_and_lock
_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
	/* Atomically decrement *%o0.  If the decrement would reach
	 * zero, first acquire the byte spinlock at %o1 and perform the
	 * final decrement under it.  Returns 1 in %o0 when we hit zero
	 * (spinlock left held), 0 otherwise.
	 */
loop1:	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, start_to_zero	! 1 -> 0 transition: take lock first
	 nop
nzero:	cas	[%o0], %g2, %g7
	cmp	%g2, %g7
	bne,pn	%icc, loop1		! raced with another update, retry
	 mov	0, %g1			! return value 0

out:
	membar	#StoreLoad | #StoreStore
	retl
	 mov	%g1, %o0
start_to_zero:
#ifdef CONFIG_PREEMPT
	/* Bump preempt count: we may return holding the spinlock. */
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	add	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif
to_zero:
	ldstub	[%o1], %g3
	brnz,pn	%g3, spin_on_lock	! lock busy, go spin on it
	 membar	#StoreLoad | #StoreStore
loop2:	cas	[%o0], %g2, %g7		/* ASSERT(g7 == 0) */
	cmp	%g2, %g7

	be,pt	%icc, out
	 mov	1, %g1			! return value 1, lock held
	/* casx raced: re-read the counter.  If it is still headed to
	 * zero retry under the lock, otherwise release the lock and
	 * go back to the lock-free path.
	 */
	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, loop2
	 nop
	membar	#StoreStore | #LoadStore
	stb	%g0, [%o1]		! release the spinlock
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	sub	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif

	b,pt	%xcc, nzero
	 nop
spin_on_lock:
	/* Read-only spin until the lock byte clears, then retry. */
	ldub	[%o1], %g3
	brnz,pt	%g3, spin_on_lock
	 membar	#LoadLoad
	ba,pt	%xcc, to_zero
	 nop
	nop
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
new file mode 100644
index 000000000000..f6b4c784d53e
--- /dev/null
+++ b/arch/sparc64/lib/delay.c
@@ -0,0 +1,49 @@
1/* delay.c: Delay loops for sparc64
2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4 *
5 * Based heavily upon x86 variant which is:
6 * Copyright (C) 1993 Linus Torvalds
7 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
8 */
9
10#include <linux/delay.h>
11
/* Busy-wait for 'loops' iterations of a fixed two-instruction loop.
 * Written in inline asm so the loop body (and hence its timing) is
 * not at the mercy of compiler optimization.
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__(
"	b,pt	%%xcc, 1f\n"
"	 cmp	%0, 0\n"
"	.align	32\n"
"1:\n"
"	bne,pt	%%xcc, 1b\n"
"	 subcc	%0, 1, %0\n"
	: "=&r" (loops)
	: "0" (loops)
	: "cc");
}
25
26/* We used to multiply by HZ after shifting down by 32 bits
27 * but that runs into problems for higher values of HZ and
28 * slow cpus.
29 */
/* Convert a fixed-point delay value (as produced by __udelay/__ndelay)
 * into a loop count for __delay() using this cpu's calibrated
 * udelay_val.  The "* 4" / "(HZ/4)" split presumably avoids overflow
 * for large HZ -- see the comment above; TODO(review) confirm against
 * the calibration code.
 */
void __const_udelay(unsigned long n)
{
	n *= 4;

	n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
	n >>= 32;		/* take the integer part of the fixed-point product */

	__delay(n + 1);		/* +1 so we never round down to a zero delay */
}
39
/* Delay for n microseconds.  0x10c7 == 2**32 / 1000000 rounded up,
 * i.e. microseconds expressed as a 32.32 fixed-point fraction of a
 * second for __const_udelay().
 */
void __udelay(unsigned long n)
{
	__const_udelay(n * 0x10c7UL);
}
44
45
/* Delay for n nanoseconds.  0x5 == 2**32 / 1000000000 rounded up. */
void __ndelay(unsigned long n)
{
	__const_udelay(n * 0x5UL);
}
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
new file mode 100644
index 000000000000..6059557067b4
--- /dev/null
+++ b/arch/sparc64/lib/find_bit.c
@@ -0,0 +1,127 @@
1#include <linux/bitops.h>
2
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit at or after @offset,
 * or @size if there is none.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	const unsigned long *word = addr + (offset >> 6);
	unsigned long base = offset & ~63UL;
	unsigned long chunk;

	if (offset >= size)
		return size;
	size -= base;
	offset &= 63UL;

	/* Partial first word: ignore bits below 'offset'. */
	if (offset != 0) {
		chunk = *word++;
		chunk &= (~0UL << offset);
		if (size < 64)
			goto trailer;
		if (chunk != 0UL)
			goto hit;
		size -= 64;
		base += 64;
	}

	/* Scan whole 64-bit words. */
	for (; size & ~63UL; size -= 64, base += 64) {
		chunk = *word++;
		if (chunk != 0UL)
			goto hit;
	}
	if (!size)
		return base;
	chunk = *word;

trailer:
	/* Partial last word: ignore bits at and above 'size'. */
	chunk &= (~0UL >> (64 - size));
	if (chunk == 0UL)		/* Are any bits set? */
		return base + size;	/* Nope. */
hit:
	return base + __ffs(chunk);
}
47
/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'.  Returns 'size' when
 * every bit in range is set.  Largely based on Linus's ALPHA routines.
 */

unsigned long find_next_zero_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *word = addr + (offset >> 6);
	unsigned long base = offset & ~63UL;
	unsigned long chunk;

	if (offset >= size)
		return size;
	size -= base;
	offset &= 63UL;

	/* Partial first word: force bits below 'offset' to one. */
	if (offset != 0) {
		chunk = *word++;
		chunk |= ~0UL >> (64 - offset);
		if (size < 64)
			goto trailer;
		if (~chunk != 0UL)
			goto hit;
		size -= 64;
		base += 64;
	}

	/* Scan whole 64-bit words. */
	for (; size & ~63UL; size -= 64, base += 64) {
		chunk = *word++;
		if (~chunk != 0UL)
			goto hit;
	}
	if (!size)
		return base;
	chunk = *word;

trailer:
	/* Partial last word: force bits at and above 'size' to one. */
	chunk |= ~0UL << size;
	if (chunk == ~0UL)		/* Are any bits zero? */
		return base + size;	/* Nope. */
hit:
	return base + ffz(chunk);
}
91
/* Little-endian variant of find_next_zero_bit(): each 64-bit word is
 * byte-swapped before searching, so bit numbering follows the
 * little-endian bitmap layout.
 */
unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
	unsigned long *word = addr + (offset >> 6);
	unsigned long base = offset & ~63UL;
	unsigned long chunk;

	if (offset >= size)
		return size;
	size -= base;
	offset &= 63UL;

	/* Partial first word: force bits below 'offset' to one. */
	if (offset != 0) {
		chunk = __swab64p(word++);
		chunk |= (~0UL >> (64 - offset));
		if (size < 64)
			goto trailer;
		if (~chunk != 0UL)
			goto hit;
		size -= 64;
		base += 64;
	}

	/* Scan whole 64-bit words, byte-swapping each. */
	for (; size & ~63UL; size -= 64, base += 64) {
		chunk = __swab64p(word++);
		if (~chunk != 0UL)
			goto hit;
	}
	if (!size)
		return base;
	chunk = __swab64p(word);

trailer:
	/* Partial last word: force bits at and above 'size' to one. */
	chunk |= (~0UL << size);
	if (chunk == ~0UL)		/* Are any bits zero? */
		return base + size;	/* Nope. */
hit:
	return base + ffz(chunk);
}
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
new file mode 100644
index 000000000000..ac556db06973
--- /dev/null
+++ b/arch/sparc64/lib/iomap.c
@@ -0,0 +1,48 @@
1/*
2 * Implement the sparc64 iomap interfaces
3 */
4#include <linux/pci.h>
5#include <linux/module.h>
6#include <asm/io.h>
7
8/* Create a virtual mapping cookie for an IO port range */
9void __iomem *ioport_map(unsigned long port, unsigned int nr)
10{
11 return (void __iomem *) (unsigned long) port;
12}
13
/* ioport_map() created no real mapping, so there is nothing to undo. */
void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
20
21/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
22void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
23{
24 unsigned long start = pci_resource_start(dev, bar);
25 unsigned long len = pci_resource_len(dev, bar);
26 unsigned long flags = pci_resource_flags(dev, bar);
27
28 if (!len || !start)
29 return NULL;
30 if (maxlen && len > maxlen)
31 len = maxlen;
32 if (flags & IORESOURCE_IO)
33 return ioport_map(start, len);
34 if (flags & IORESOURCE_MEM) {
35 if (flags & IORESOURCE_CACHEABLE)
36 return ioremap(start, len);
37 return ioremap_nocache(start, len);
38 }
39 /* What? */
40 return NULL;
41}
42
/* NOTE(review): pci_iomap() may return an ioremap()ed pointer for MEM
 * BARs, yet nothing is unmapped here -- confirm this is intentional
 * on this architecture.
 */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	/* nothing to do */
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/sparc64/lib/ipcsum.S b/arch/sparc64/lib/ipcsum.S
new file mode 100644
index 000000000000..58ca5b9a8778
--- /dev/null
+++ b/arch/sparc64/lib/ipcsum.S
@@ -0,0 +1,34 @@
1 .text
2 .align 32
3 .globl ip_fast_csum
4 .type ip_fast_csum,#function
5ip_fast_csum: /* %o0 = iph, %o1 = ihl */
6 sub %o1, 4, %g7
7 lduw [%o0 + 0x00], %o2
8 lduw [%o0 + 0x04], %g2
9 lduw [%o0 + 0x08], %g3
10 addcc %g2, %o2, %o2
11 lduw [%o0 + 0x0c], %g2
12 addccc %g3, %o2, %o2
13 lduw [%o0 + 0x10], %g3
14
15 addccc %g2, %o2, %o2
16 addc %o2, %g0, %o2
171: addcc %g3, %o2, %o2
18 add %o0, 4, %o0
19 addccc %o2, %g0, %o2
20 subcc %g7, 1, %g7
21 be,a,pt %icc, 2f
22 sll %o2, 16, %g2
23
24 lduw [%o0 + 0x10], %g3
25 ba,pt %xcc, 1b
26 nop
272: addcc %o2, %g2, %g2
28 srl %g2, 16, %o2
29 addc %o2, %g0, %o2
30 xnor %g0, %o2, %o2
31 set 0xffff, %o1
32 retl
33 and %o2, %o1, %o0
34 .size ip_fast_csum, .-ip_fast_csum
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
new file mode 100644
index 000000000000..2ef2e268bdcf
--- /dev/null
+++ b/arch/sparc64/lib/mcount.S
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
3 *
4 * This file implements mcount(), which is used to collect profiling data.
5 * This can also be tweaked for kernel stack overflow detection.
6 */
7
8#include <linux/config.h>
9#include <linux/linkage.h>
10
11#include <asm/ptrace.h>
12#include <asm/thread_info.h>
13
14/*
15 * This is the main variant and is called by C code. GCC's -pg option
16 * automatically instruments every C function with a call to this.
17 */
18
19#ifdef CONFIG_STACK_DEBUG
20
21#define OVSTACKSIZE 4096 /* lets hope this is enough */
22
23 .data
24 .align 8
25panicstring:
26 .asciz "Stack overflow\n"
27 .align 8
28ovstack:
29 .skip OVSTACKSIZE
30#endif
31 .text
32 .align 32
33 .globl mcount, _mcount
34mcount:
35_mcount:
36#ifdef CONFIG_STACK_DEBUG
37 /*
38 * Check whether %sp is dangerously low.
39 */
40 ldub [%g6 + TI_FPDEPTH], %g1
41 srl %g1, 1, %g3
42 add %g3, 1, %g3
43 sllx %g3, 8, %g3 ! each fpregs frame is 256b
44 add %g3, 192, %g3
45 add %g6, %g3, %g3 ! where does task_struct+frame end?
46 sub %g3, STACK_BIAS, %g3
47 cmp %sp, %g3
48 bg,pt %xcc, 1f
49 sethi %hi(panicstring), %g3
50 sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
51 or %g7, %lo(ovstack), %g7
52 add %g7, OVSTACKSIZE, %g7
53 sub %g7, STACK_BIAS, %g7
54 mov %g7, %sp
55 call prom_printf
56 or %g3, %lo(panicstring), %o0
57 call prom_halt
58 nop
59#endif
601: retl
61 nop
diff --git a/arch/sparc64/lib/memcmp.S b/arch/sparc64/lib/memcmp.S
new file mode 100644
index 000000000000..c90ad96c51b9
--- /dev/null
+++ b/arch/sparc64/lib/memcmp.S
@@ -0,0 +1,28 @@
1/* $Id: memcmp.S,v 1.3 2000/03/23 07:51:08 davem Exp $
2 * Sparc64 optimized memcmp code.
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
6 */
7
8 .text
9 .align 32
10 .globl __memcmp, memcmp
11__memcmp:
12memcmp:
13 cmp %o2, 0 ! IEU1 Group
14loop: be,pn %icc, ret_0 ! CTI
15 nop ! IEU0
16 ldub [%o0], %g7 ! LSU Group
17 ldub [%o1], %g3 ! LSU Group
18 sub %o2, 1, %o2 ! IEU0
19 add %o0, 1, %o0 ! IEU1
20 add %o1, 1, %o1 ! IEU0 Group
21 subcc %g7, %g3, %g3 ! IEU1 Group
22 be,pt %icc, loop ! CTI
23 cmp %o2, 0 ! IEU1 Group
24
25ret_n0: retl
26 mov %g3, %o0
27ret_0: retl
28 mov 0, %o0
diff --git a/arch/sparc64/lib/memmove.S b/arch/sparc64/lib/memmove.S
new file mode 100644
index 000000000000..97395802c23c
--- /dev/null
+++ b/arch/sparc64/lib/memmove.S
@@ -0,0 +1,31 @@
1/* memmove.S: Simple memmove implementation.
2 *
3 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
7 .text
8 .align 32
9 .globl memmove
10 .type memmove,#function
11memmove: /* o0=dst o1=src o2=len */
12 mov %o0, %g1
13 cmp %o0, %o1
14 bleu,pt %xcc, memcpy
15 add %o1, %o2, %g7
16 cmp %g7, %o0
17 bleu,pt %xcc, memcpy
18 add %o0, %o2, %o5
19 sub %g7, 1, %o1
20
21 sub %o5, 1, %o0
221: ldub [%o1], %g7
23 subcc %o2, 1, %o2
24 sub %o1, 1, %o1
25 stb %g7, [%o0]
26 bne,pt %icc, 1b
27 sub %o0, 1, %o0
28
29 retl
30 mov %g1, %o0
31 .size memmove, .-memmove
diff --git a/arch/sparc64/lib/memscan.S b/arch/sparc64/lib/memscan.S
new file mode 100644
index 000000000000..5e72d4911417
--- /dev/null
+++ b/arch/sparc64/lib/memscan.S
@@ -0,0 +1,129 @@
1/* $Id: memscan.S,v 1.3 2000/01/31 04:59:10 davem Exp $
2 * memscan.S: Optimized memscan for Sparc64.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
6 */
7
8#define HI_MAGIC 0x8080808080808080
9#define LO_MAGIC 0x0101010101010101
10#define ASI_PL 0x88
11
12 .text
13 .align 32
14 .globl __memscan_zero, __memscan_generic
15 .globl memscan
16
17__memscan_zero:
18 /* %o0 = bufp, %o1 = size */
19 brlez,pn %o1, szzero
20 andcc %o0, 7, %g0
21 be,pt %icc, we_are_aligned
22 sethi %hi(HI_MAGIC), %o4
23 ldub [%o0], %o5
241: subcc %o1, 1, %o1
25 brz,pn %o5, 10f
26 add %o0, 1, %o0
27
28 be,pn %xcc, szzero
29 andcc %o0, 7, %g0
30 bne,a,pn %icc, 1b
31 ldub [%o0], %o5
32we_are_aligned:
33 ldxa [%o0] ASI_PL, %o5
34 or %o4, %lo(HI_MAGIC), %o3
35 sllx %o3, 32, %o4
36 or %o4, %o3, %o3
37
38 srlx %o3, 7, %o2
39msloop:
40 sub %o1, 8, %o1
41 add %o0, 8, %o0
42 sub %o5, %o2, %o4
43 xor %o4, %o5, %o4
44 andcc %o4, %o3, %g3
45 bne,pn %xcc, check_bytes
46 srlx %o4, 32, %g3
47
48 brgz,a,pt %o1, msloop
49 ldxa [%o0] ASI_PL, %o5
50check_bytes:
51 bne,a,pn %icc, 2f
52 andcc %o5, 0xff, %g0
53 add %o0, -5, %g2
54 ba,pt %xcc, 3f
55 srlx %o5, 32, %g7
56
572: srlx %o5, 8, %g7
58 be,pn %icc, 1f
59 add %o0, -8, %g2
60 andcc %g7, 0xff, %g0
61 srlx %g7, 8, %g7
62 be,pn %icc, 1f
63 inc %g2
64 andcc %g7, 0xff, %g0
65
66 srlx %g7, 8, %g7
67 be,pn %icc, 1f
68 inc %g2
69 andcc %g7, 0xff, %g0
70 srlx %g7, 8, %g7
71 be,pn %icc, 1f
72 inc %g2
73 andcc %g3, %o3, %g0
74
75 be,a,pn %icc, 2f
76 mov %o0, %g2
773: andcc %g7, 0xff, %g0
78 srlx %g7, 8, %g7
79 be,pn %icc, 1f
80 inc %g2
81 andcc %g7, 0xff, %g0
82 srlx %g7, 8, %g7
83
84 be,pn %icc, 1f
85 inc %g2
86 andcc %g7, 0xff, %g0
87 srlx %g7, 8, %g7
88 be,pn %icc, 1f
89 inc %g2
90 andcc %g7, 0xff, %g0
91 srlx %g7, 8, %g7
92
93 be,pn %icc, 1f
94 inc %g2
952: brgz,a,pt %o1, msloop
96 ldxa [%o0] ASI_PL, %o5
97 inc %g2
981: add %o0, %o1, %o0
99 cmp %g2, %o0
100 retl
101
102 movle %xcc, %g2, %o0
10310: retl
104 sub %o0, 1, %o0
105szzero: retl
106 nop
107
108memscan:
109__memscan_generic:
110 /* %o0 = addr, %o1 = c, %o2 = size */
111 brz,pn %o2, 3f
112 add %o0, %o2, %o3
113 ldub [%o0], %o5
114 sub %g0, %o2, %o4
1151:
116 cmp %o5, %o1
117 be,pn %icc, 2f
118 addcc %o4, 1, %o4
119 bne,a,pt %xcc, 1b
120 ldub [%o3 + %o4], %o5
121 retl
122 /* The delay slot is the same as the next insn, this is just to make it look more awful */
1232:
124 add %o3, %o4, %o0
125 retl
126 sub %o0, 1, %o0
1273:
128 retl
129 nop
diff --git a/arch/sparc64/lib/rwsem.S b/arch/sparc64/lib/rwsem.S
new file mode 100644
index 000000000000..174ff7b9164c
--- /dev/null
+++ b/arch/sparc64/lib/rwsem.S
@@ -0,0 +1,165 @@
1/* rwsem.S: RW semaphore assembler.
2 *
3 * Written by David S. Miller (davem@redhat.com), 2001.
4 * Derived from asm-i386/rwsem.h
5 */
6
7#include <asm/rwsem-const.h>
8
9 .section .sched.text
10
11 .globl __down_read
12__down_read:
131: lduw [%o0], %g1
14 add %g1, 1, %g7
15 cas [%o0], %g1, %g7
16 cmp %g1, %g7
17 bne,pn %icc, 1b
18 add %g7, 1, %g7
19 cmp %g7, 0
20 bl,pn %icc, 3f
21 membar #StoreLoad | #StoreStore
222:
23 retl
24 nop
253:
26 save %sp, -192, %sp
27 call rwsem_down_read_failed
28 mov %i0, %o0
29 ret
30 restore
31 .size __down_read, .-__down_read
32
33 .globl __down_read_trylock
34__down_read_trylock:
351: lduw [%o0], %g1
36 add %g1, 1, %g7
37 cmp %g7, 0
38 bl,pn %icc, 2f
39 mov 0, %o1
40 cas [%o0], %g1, %g7
41 cmp %g1, %g7
42 bne,pn %icc, 1b
43 mov 1, %o1
44 membar #StoreLoad | #StoreStore
452: retl
46 mov %o1, %o0
47 .size __down_read_trylock, .-__down_read_trylock
48
49 .globl __down_write
50__down_write:
51 sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
52 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
531:
54 lduw [%o0], %g3
55 add %g3, %g1, %g7
56 cas [%o0], %g3, %g7
57 cmp %g3, %g7
58 bne,pn %icc, 1b
59 cmp %g7, 0
60 bne,pn %icc, 3f
61 membar #StoreLoad | #StoreStore
622: retl
63 nop
643:
65 save %sp, -192, %sp
66 call rwsem_down_write_failed
67 mov %i0, %o0
68 ret
69 restore
70 .size __down_write, .-__down_write
71
72 .globl __down_write_trylock
73__down_write_trylock:
74 sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
75 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
761:
77 lduw [%o0], %g3
78 cmp %g3, 0
79 bne,pn %icc, 2f
80 mov 0, %o1
81 add %g3, %g1, %g7
82 cas [%o0], %g3, %g7
83 cmp %g3, %g7
84 bne,pn %icc, 1b
85 mov 1, %o1
86 membar #StoreLoad | #StoreStore
872: retl
88 mov %o1, %o0
89 .size __down_write_trylock, .-__down_write_trylock
90
91 .globl __up_read
92__up_read:
931:
94 lduw [%o0], %g1
95 sub %g1, 1, %g7
96 cas [%o0], %g1, %g7
97 cmp %g1, %g7
98 bne,pn %icc, 1b
99 cmp %g7, 0
100 bl,pn %icc, 3f
101 membar #StoreLoad | #StoreStore
1022: retl
103 nop
1043: sethi %hi(RWSEM_ACTIVE_MASK), %g1
105 sub %g7, 1, %g7
106 or %g1, %lo(RWSEM_ACTIVE_MASK), %g1
107 andcc %g7, %g1, %g0
108 bne,pn %icc, 2b
109 nop
110 save %sp, -192, %sp
111 call rwsem_wake
112 mov %i0, %o0
113 ret
114 restore
115 .size __up_read, .-__up_read
116
117 .globl __up_write
118__up_write:
119 sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
120 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
1211:
122 lduw [%o0], %g3
123 sub %g3, %g1, %g7
124 cas [%o0], %g3, %g7
125 cmp %g3, %g7
126 bne,pn %icc, 1b
127 sub %g7, %g1, %g7
128 cmp %g7, 0
129 bl,pn %icc, 3f
130 membar #StoreLoad | #StoreStore
1312:
132 retl
133 nop
1343:
135 save %sp, -192, %sp
136 call rwsem_wake
137 mov %i0, %o0
138 ret
139 restore
140 .size __up_write, .-__up_write
141
142 .globl __downgrade_write
143__downgrade_write:
144 sethi %hi(RWSEM_WAITING_BIAS), %g1
145 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
1461:
147 lduw [%o0], %g3
148 sub %g3, %g1, %g7
149 cas [%o0], %g3, %g7
150 cmp %g3, %g7
151 bne,pn %icc, 1b
152 sub %g7, %g1, %g7
153 cmp %g7, 0
154 bl,pn %icc, 3f
155 membar #StoreLoad | #StoreStore
1562:
157 retl
158 nop
1593:
160 save %sp, -192, %sp
161 call rwsem_downgrade_wake
162 mov %i0, %o0
163 ret
164 restore
165 .size __downgrade_write, .-__downgrade_write
diff --git a/arch/sparc64/lib/strlen.S b/arch/sparc64/lib/strlen.S
new file mode 100644
index 000000000000..e9ba1920d818
--- /dev/null
+++ b/arch/sparc64/lib/strlen.S
@@ -0,0 +1,80 @@
1/* strlen.S: Sparc64 optimized strlen code
2 * Hand optimized from GNU libc's strlen
3 * Copyright (C) 1991,1996 Free Software Foundation
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#define LO_MAGIC 0x01010101
9#define HI_MAGIC 0x80808080
10
11 .align 32
12 .globl strlen
13 .type strlen,#function
14strlen:
15 mov %o0, %o1
16 andcc %o0, 3, %g0
17 be,pt %icc, 9f
18 sethi %hi(HI_MAGIC), %o4
19 ldub [%o0], %o5
20 brz,pn %o5, 11f
21 add %o0, 1, %o0
22 andcc %o0, 3, %g0
23 be,pn %icc, 4f
24 or %o4, %lo(HI_MAGIC), %o3
25 ldub [%o0], %o5
26 brz,pn %o5, 12f
27 add %o0, 1, %o0
28 andcc %o0, 3, %g0
29 be,pt %icc, 5f
30 sethi %hi(LO_MAGIC), %o4
31 ldub [%o0], %o5
32 brz,pn %o5, 13f
33 add %o0, 1, %o0
34 ba,pt %icc, 8f
35 or %o4, %lo(LO_MAGIC), %o2
369:
37 or %o4, %lo(HI_MAGIC), %o3
384:
39 sethi %hi(LO_MAGIC), %o4
405:
41 or %o4, %lo(LO_MAGIC), %o2
428:
43 ld [%o0], %o5
442:
45 sub %o5, %o2, %o4
46 andcc %o4, %o3, %g0
47 be,pt %icc, 8b
48 add %o0, 4, %o0
49
50 /* Check every byte. */
51 srl %o5, 24, %g7
52 andcc %g7, 0xff, %g0
53 be,pn %icc, 1f
54 add %o0, -4, %o4
55 srl %o5, 16, %g7
56 andcc %g7, 0xff, %g0
57 be,pn %icc, 1f
58 add %o4, 1, %o4
59 srl %o5, 8, %g7
60 andcc %g7, 0xff, %g0
61 be,pn %icc, 1f
62 add %o4, 1, %o4
63 andcc %o5, 0xff, %g0
64 bne,a,pt %icc, 2b
65 ld [%o0], %o5
66 add %o4, 1, %o4
671:
68 retl
69 sub %o4, %o1, %o0
7011:
71 retl
72 mov 0, %o0
7312:
74 retl
75 mov 1, %o0
7613:
77 retl
78 mov 2, %o0
79
80 .size strlen, .-strlen
diff --git a/arch/sparc64/lib/strlen_user.S b/arch/sparc64/lib/strlen_user.S
new file mode 100644
index 000000000000..9ed54ba14fc6
--- /dev/null
+++ b/arch/sparc64/lib/strlen_user.S
@@ -0,0 +1,95 @@
1/* strlen_user.S: Sparc64 optimized strlen_user code
2 *
3 * Return length of string in userspace including terminating 0
4 * or 0 for error
5 *
6 * Copyright (C) 1991,1996 Free Software Foundation
7 * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
8 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <asm/asi.h>
12
13#define LO_MAGIC 0x01010101
14#define HI_MAGIC 0x80808080
15
16 .align 4
17 .global __strlen_user, __strnlen_user
18__strlen_user:
19 sethi %hi(32768), %o1
20__strnlen_user:
21 mov %o1, %g1
22 mov %o0, %o1
23 andcc %o0, 3, %g0
24 be,pt %icc, 9f
25 sethi %hi(HI_MAGIC), %o4
2610: lduba [%o0] %asi, %o5
27 brz,pn %o5, 21f
28 add %o0, 1, %o0
29 andcc %o0, 3, %g0
30 be,pn %icc, 4f
31 or %o4, %lo(HI_MAGIC), %o3
3211: lduba [%o0] %asi, %o5
33 brz,pn %o5, 22f
34 add %o0, 1, %o0
35 andcc %o0, 3, %g0
36 be,pt %icc, 13f
37 srl %o3, 7, %o2
3812: lduba [%o0] %asi, %o5
39 brz,pn %o5, 23f
40 add %o0, 1, %o0
41 ba,pt %icc, 2f
4215: lda [%o0] %asi, %o5
439: or %o4, %lo(HI_MAGIC), %o3
444: srl %o3, 7, %o2
4513: lda [%o0] %asi, %o5
462: sub %o5, %o2, %o4
47 andcc %o4, %o3, %g0
48 bne,pn %icc, 82f
49 add %o0, 4, %o0
50 sub %o0, %o1, %g2
5181: cmp %g2, %g1
52 blu,pt %icc, 13b
53 mov %o0, %o4
54 ba,a,pt %xcc, 1f
55
56 /* Check every byte. */
5782: srl %o5, 24, %g7
58 andcc %g7, 0xff, %g0
59 be,pn %icc, 1f
60 add %o0, -3, %o4
61 srl %o5, 16, %g7
62 andcc %g7, 0xff, %g0
63 be,pn %icc, 1f
64 add %o4, 1, %o4
65 srl %o5, 8, %g7
66 andcc %g7, 0xff, %g0
67 be,pn %icc, 1f
68 add %o4, 1, %o4
69 andcc %o5, 0xff, %g0
70 bne,pt %icc, 81b
71 sub %o0, %o1, %g2
72 add %o4, 1, %o4
731: retl
74 sub %o4, %o1, %o0
7521: retl
76 mov 1, %o0
7722: retl
78 mov 2, %o0
7923: retl
80 mov 3, %o0
81
82 .section .fixup,#alloc,#execinstr
83 .align 4
8430:
85 retl
86 clr %o0
87
88 .section __ex_table,#alloc
89 .align 4
90
91 .word 10b, 30b
92 .word 11b, 30b
93 .word 12b, 30b
94 .word 15b, 30b
95 .word 13b, 30b
diff --git a/arch/sparc64/lib/strncmp.S b/arch/sparc64/lib/strncmp.S
new file mode 100644
index 000000000000..6f14f53dbabe
--- /dev/null
+++ b/arch/sparc64/lib/strncmp.S
@@ -0,0 +1,32 @@
1/* $Id: strncmp.S,v 1.2 1997/03/11 17:51:44 jj Exp $
2 * Sparc64 optimized strncmp code.
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <asm/asi.h>
8
9 .text
10 .align 32
11 .globl strncmp
12 .type strncmp,#function
13strncmp:
14 brlez,pn %o2, 3f
15 lduba [%o0] (ASI_PNF), %o3
161:
17 add %o0, 1, %o0
18 ldub [%o1], %o4
19 brz,pn %o3, 2f
20 add %o1, 1, %o1
21 cmp %o3, %o4
22 bne,pn %icc, 2f
23 subcc %o2, 1, %o2
24 bne,a,pt %xcc, 1b
25 ldub [%o0], %o3
262:
27 retl
28 sub %o3, %o4, %o0
293:
30 retl
31 clr %o0
32 .size strncmp, .-strncmp
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..09cbbaa0ebf4
--- /dev/null
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -0,0 +1,139 @@
1/* $Id: strncpy_from_user.S,v 1.6 1999/05/25 16:53:05 jj Exp $
2 * strncpy_from_user.S: Sparc64 strncpy from userspace.
3 *
4 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
7#include <asm/asi.h>
8#include <asm/errno.h>
9
10 .data
11 .align 8
120: .xword 0x0101010101010101
13
14 .text
15 .align 32
16
17 /* Must return:
18 *
19 * -EFAULT for an exception
20 * count if we hit the buffer limit
21 * bytes copied if we hit a null byte
22 * (without the null byte)
23 *
24 * This implementation assumes:
25 * %o1 is 8 aligned => !(%o2 & 7)
26 * %o0 is 8 aligned (if not, it will be slooooow, but will work)
27 *
28 * This is optimized for the common case:
29 * in my stats, 90% of src are 8 aligned (even on sparc32)
30 * and average length is 18 or so.
31 */
32
33 .globl __strncpy_from_user
34 .type __strncpy_from_user,#function
35__strncpy_from_user:
36 /* %o0=dest, %o1=src, %o2=count */
37 andcc %o1, 7, %g0 ! IEU1 Group
38 bne,pn %icc, 30f ! CTI
39 add %o0, %o2, %g3 ! IEU0
4060: ldxa [%o1] %asi, %g1 ! Load Group
41 brlez,pn %o2, 10f ! CTI
42 mov %o0, %o3 ! IEU0
4350: sethi %hi(0b), %o4 ! IEU0 Group
44 ldx [%o4 + %lo(0b)], %o4 ! Load
45 sllx %o4, 7, %o5 ! IEU1 Group
461: sub %g1, %o4, %g2 ! IEU0 Group
47 stx %g1, [%o0] ! Store
48 add %o0, 8, %o0 ! IEU1
49 andcc %g2, %o5, %g0 ! IEU1 Group
50 bne,pn %xcc, 5f ! CTI
51 add %o1, 8, %o1 ! IEU0
52 cmp %o0, %g3 ! IEU1 Group
53 bl,a,pt %xcc, 1b ! CTI
5461: ldxa [%o1] %asi, %g1 ! Load
5510: retl ! CTI Group
56 mov %o2, %o0 ! IEU0
575: srlx %g2, 32, %g7 ! IEU0 Group
58 sethi %hi(0xff00), %o4 ! IEU1
59 andcc %g7, %o5, %g0 ! IEU1 Group
60 be,pn %icc, 2f ! CTI
61 or %o4, %lo(0xff00), %o4 ! IEU0
62 srlx %g1, 48, %g7 ! IEU0 Group
63 andcc %g7, %o4, %g0 ! IEU1 Group
64 be,pn %icc, 50f ! CTI
65 andcc %g7, 0xff, %g0 ! IEU1 Group
66 be,pn %icc, 51f ! CTI
67 srlx %g1, 32, %g7 ! IEU0
68 andcc %g7, %o4, %g0 ! IEU1 Group
69 be,pn %icc, 52f ! CTI
70 andcc %g7, 0xff, %g0 ! IEU1 Group
71 be,pn %icc, 53f ! CTI
722: andcc %g2, %o5, %g0 ! IEU1 Group
73 be,pn %icc, 2f ! CTI
74 srl %g1, 16, %g7 ! IEU0
75 andcc %g7, %o4, %g0 ! IEU1 Group
76 be,pn %icc, 54f ! CTI
77 andcc %g7, 0xff, %g0 ! IEU1 Group
78 be,pn %icc, 55f ! CTI
79 andcc %g1, %o4, %g0 ! IEU1 Group
80 be,pn %icc, 56f ! CTI
81 andcc %g1, 0xff, %g0 ! IEU1 Group
82 be,a,pn %icc, 57f ! CTI
83 sub %o0, %o3, %o0 ! IEU0
842: cmp %o0, %g3 ! IEU1 Group
85 bl,a,pt %xcc, 50b ! CTI
8662: ldxa [%o1] %asi, %g1 ! Load
87 retl ! CTI Group
88 mov %o2, %o0 ! IEU0
8950: sub %o0, %o3, %o0
90 retl
91 sub %o0, 8, %o0
9251: sub %o0, %o3, %o0
93 retl
94 sub %o0, 7, %o0
9552: sub %o0, %o3, %o0
96 retl
97 sub %o0, 6, %o0
9853: sub %o0, %o3, %o0
99 retl
100 sub %o0, 5, %o0
10154: sub %o0, %o3, %o0
102 retl
103 sub %o0, 4, %o0
10455: sub %o0, %o3, %o0
105 retl
106 sub %o0, 3, %o0
10756: sub %o0, %o3, %o0
108 retl
109 sub %o0, 2, %o0
11057: retl
111 sub %o0, 1, %o0
11230: brlez,pn %o2, 3f
113 sub %g0, %o2, %o3
114 add %o0, %o2, %o0
11563: lduba [%o1] %asi, %o4
1161: add %o1, 1, %o1
117 brz,pn %o4, 2f
118 stb %o4, [%o0 + %o3]
119 addcc %o3, 1, %o3
120 bne,pt %xcc, 1b
12164: lduba [%o1] %asi, %o4
1223: retl
123 mov %o2, %o0
1242: retl
125 add %o2, %o3, %o0
126 .size __strncpy_from_user, .-__strncpy_from_user
127
128 .section .fixup,#alloc,#execinstr
129 .align 4
1304: retl
131 mov -EFAULT, %o0
132
133 .section __ex_table,#alloc
134 .align 4
135 .word 60b, 4b
136 .word 61b, 4b
137 .word 62b, 4b
138 .word 63b, 4b
139 .word 64b, 4b
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
new file mode 100644
index 000000000000..0278e34125db
--- /dev/null
+++ b/arch/sparc64/lib/user_fixup.c
@@ -0,0 +1,71 @@
1/* user_fixup.c: Fix up user copy faults.
2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4 */
5
6#include <linux/compiler.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/errno.h>
10#include <asm/uaccess.h>
11
12/* Calculating the exact fault address when using
13 * block loads and stores can be very complicated.
14 * Instead of trying to be clever and handling all
15 * of the cases, just fix things up simply here.
16 */
17
18unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
19{
20 char *dst = to;
21 const char __user *src = from;
22
23 while (size) {
24 if (__get_user(*dst, src))
25 break;
26 dst++;
27 src++;
28 size--;
29 }
30
31 if (size)
32 memset(dst, 0, size);
33
34 return size;
35}
36
37unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
38{
39 char __user *dst = to;
40 const char *src = from;
41
42 while (size) {
43 if (__put_user(*src, dst))
44 break;
45 dst++;
46 src++;
47 size--;
48 }
49
50 return size;
51}
52
53unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
54{
55 char __user *dst = to;
56 char __user *src = from;
57
58 while (size) {
59 char tmp;
60
61 if (__get_user(tmp, src))
62 break;
63 if (__put_user(tmp, dst))
64 break;
65 dst++;
66 src++;
67 size--;
68 }
69
70 return size;
71}
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
new file mode 100644
index 000000000000..4cd5d2be1ae1
--- /dev/null
+++ b/arch/sparc64/lib/xor.S
@@ -0,0 +1,354 @@
1/*
2 * arch/sparc64/lib/xor.S
3 *
4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set.
6 *
7 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
8 */
9
10#include <asm/visasm.h>
11#include <asm/asi.h>
12#include <asm/dcu.h>
13#include <asm/spitfire.h>
14
15/*
16 * Requirements:
17 * !(((long)dest | (long)sourceN) & (64 - 1)) &&
18 * !(len & 127) && len >= 256
19 */
20 .text
21 .align 32
22 .globl xor_vis_2
23 .type xor_vis_2,#function
24xor_vis_2:
25 rd %fprs, %o5
26 andcc %o5, FPRS_FEF|FPRS_DU, %g0
27 be,pt %icc, 0f
28 sethi %hi(VISenter), %g1
29 jmpl %g1 + %lo(VISenter), %g7
30 add %g7, 8, %g7
310: wr %g0, FPRS_FEF, %fprs
32 rd %asi, %g1
33 wr %g0, ASI_BLK_P, %asi
34 membar #LoadStore|#StoreLoad|#StoreStore
35 sub %o0, 128, %o0
36 ldda [%o1] %asi, %f0
37 ldda [%o2] %asi, %f16
38
392: ldda [%o1 + 64] %asi, %f32
40 fxor %f0, %f16, %f16
41 fxor %f2, %f18, %f18
42 fxor %f4, %f20, %f20
43 fxor %f6, %f22, %f22
44 fxor %f8, %f24, %f24
45 fxor %f10, %f26, %f26
46 fxor %f12, %f28, %f28
47 fxor %f14, %f30, %f30
48 stda %f16, [%o1] %asi
49 ldda [%o2 + 64] %asi, %f48
50 ldda [%o1 + 128] %asi, %f0
51 fxor %f32, %f48, %f48
52 fxor %f34, %f50, %f50
53 add %o1, 128, %o1
54 fxor %f36, %f52, %f52
55 add %o2, 128, %o2
56 fxor %f38, %f54, %f54
57 subcc %o0, 128, %o0
58 fxor %f40, %f56, %f56
59 fxor %f42, %f58, %f58
60 fxor %f44, %f60, %f60
61 fxor %f46, %f62, %f62
62 stda %f48, [%o1 - 64] %asi
63 bne,pt %xcc, 2b
64 ldda [%o2] %asi, %f16
65
66 ldda [%o1 + 64] %asi, %f32
67 fxor %f0, %f16, %f16
68 fxor %f2, %f18, %f18
69 fxor %f4, %f20, %f20
70 fxor %f6, %f22, %f22
71 fxor %f8, %f24, %f24
72 fxor %f10, %f26, %f26
73 fxor %f12, %f28, %f28
74 fxor %f14, %f30, %f30
75 stda %f16, [%o1] %asi
76 ldda [%o2 + 64] %asi, %f48
77 membar #Sync
78 fxor %f32, %f48, %f48
79 fxor %f34, %f50, %f50
80 fxor %f36, %f52, %f52
81 fxor %f38, %f54, %f54
82 fxor %f40, %f56, %f56
83 fxor %f42, %f58, %f58
84 fxor %f44, %f60, %f60
85 fxor %f46, %f62, %f62
86 stda %f48, [%o1 + 64] %asi
87 membar #Sync|#StoreStore|#StoreLoad
88 wr %g1, %g0, %asi
89 retl
90 wr %g0, 0, %fprs
91 .size xor_vis_2, .-xor_vis_2
92
93 .globl xor_vis_3
94 .type xor_vis_3,#function
95xor_vis_3:
96 rd %fprs, %o5
97 andcc %o5, FPRS_FEF|FPRS_DU, %g0
98 be,pt %icc, 0f
99 sethi %hi(VISenter), %g1
100 jmpl %g1 + %lo(VISenter), %g7
101 add %g7, 8, %g7
1020: wr %g0, FPRS_FEF, %fprs
103 rd %asi, %g1
104 wr %g0, ASI_BLK_P, %asi
105 membar #LoadStore|#StoreLoad|#StoreStore
106 sub %o0, 64, %o0
107 ldda [%o1] %asi, %f0
108 ldda [%o2] %asi, %f16
109
1103: ldda [%o3] %asi, %f32
111 fxor %f0, %f16, %f48
112 fxor %f2, %f18, %f50
113 add %o1, 64, %o1
114 fxor %f4, %f20, %f52
115 fxor %f6, %f22, %f54
116 add %o2, 64, %o2
117 fxor %f8, %f24, %f56
118 fxor %f10, %f26, %f58
119 fxor %f12, %f28, %f60
120 fxor %f14, %f30, %f62
121 ldda [%o1] %asi, %f0
122 fxor %f48, %f32, %f48
123 fxor %f50, %f34, %f50
124 fxor %f52, %f36, %f52
125 fxor %f54, %f38, %f54
126 add %o3, 64, %o3
127 fxor %f56, %f40, %f56
128 fxor %f58, %f42, %f58
129 subcc %o0, 64, %o0
130 fxor %f60, %f44, %f60
131 fxor %f62, %f46, %f62
132 stda %f48, [%o1 - 64] %asi
133 bne,pt %xcc, 3b
134 ldda [%o2] %asi, %f16
135
136 ldda [%o3] %asi, %f32
137 fxor %f0, %f16, %f48
138 fxor %f2, %f18, %f50
139 fxor %f4, %f20, %f52
140 fxor %f6, %f22, %f54
141 fxor %f8, %f24, %f56
142 fxor %f10, %f26, %f58
143 fxor %f12, %f28, %f60
144 fxor %f14, %f30, %f62
145 membar #Sync
146 fxor %f48, %f32, %f48
147 fxor %f50, %f34, %f50
148 fxor %f52, %f36, %f52
149 fxor %f54, %f38, %f54
150 fxor %f56, %f40, %f56
151 fxor %f58, %f42, %f58
152 fxor %f60, %f44, %f60
153 fxor %f62, %f46, %f62
154 stda %f48, [%o1] %asi
155 membar #Sync|#StoreStore|#StoreLoad
156 wr %g1, %g0, %asi
157 retl
158 wr %g0, 0, %fprs
159 .size xor_vis_3, .-xor_vis_3
160
161 .globl xor_vis_4
162 .type xor_vis_4,#function
163xor_vis_4:
164 rd %fprs, %o5
165 andcc %o5, FPRS_FEF|FPRS_DU, %g0
166 be,pt %icc, 0f
167 sethi %hi(VISenter), %g1
168 jmpl %g1 + %lo(VISenter), %g7
169 add %g7, 8, %g7
1700: wr %g0, FPRS_FEF, %fprs
171 rd %asi, %g1
172 wr %g0, ASI_BLK_P, %asi
173 membar #LoadStore|#StoreLoad|#StoreStore
174 sub %o0, 64, %o0
175 ldda [%o1] %asi, %f0
176 ldda [%o2] %asi, %f16
177
1784: ldda [%o3] %asi, %f32
179 fxor %f0, %f16, %f16
180 fxor %f2, %f18, %f18
181 add %o1, 64, %o1
182 fxor %f4, %f20, %f20
183 fxor %f6, %f22, %f22
184 add %o2, 64, %o2
185 fxor %f8, %f24, %f24
186 fxor %f10, %f26, %f26
187 fxor %f12, %f28, %f28
188 fxor %f14, %f30, %f30
189 ldda [%o4] %asi, %f48
190 fxor %f16, %f32, %f32
191 fxor %f18, %f34, %f34
192 fxor %f20, %f36, %f36
193 fxor %f22, %f38, %f38
194 add %o3, 64, %o3
195 fxor %f24, %f40, %f40
196 fxor %f26, %f42, %f42
197 fxor %f28, %f44, %f44
198 fxor %f30, %f46, %f46
199 ldda [%o1] %asi, %f0
200 fxor %f32, %f48, %f48
201 fxor %f34, %f50, %f50
202 fxor %f36, %f52, %f52
203 add %o4, 64, %o4
204 fxor %f38, %f54, %f54
205 fxor %f40, %f56, %f56
206 fxor %f42, %f58, %f58
207 subcc %o0, 64, %o0
208 fxor %f44, %f60, %f60
209 fxor %f46, %f62, %f62
210 stda %f48, [%o1 - 64] %asi
211 bne,pt %xcc, 4b
212 ldda [%o2] %asi, %f16
213
214 ldda [%o3] %asi, %f32
215 fxor %f0, %f16, %f16
216 fxor %f2, %f18, %f18
217 fxor %f4, %f20, %f20
218 fxor %f6, %f22, %f22
219 fxor %f8, %f24, %f24
220 fxor %f10, %f26, %f26
221 fxor %f12, %f28, %f28
222 fxor %f14, %f30, %f30
223 ldda [%o4] %asi, %f48
224 fxor %f16, %f32, %f32
225 fxor %f18, %f34, %f34
226 fxor %f20, %f36, %f36
227 fxor %f22, %f38, %f38
228 fxor %f24, %f40, %f40
229 fxor %f26, %f42, %f42
230 fxor %f28, %f44, %f44
231 fxor %f30, %f46, %f46
232 membar #Sync
233 fxor %f32, %f48, %f48
234 fxor %f34, %f50, %f50
235 fxor %f36, %f52, %f52
236 fxor %f38, %f54, %f54
237 fxor %f40, %f56, %f56
238 fxor %f42, %f58, %f58
239 fxor %f44, %f60, %f60
240 fxor %f46, %f62, %f62
241 stda %f48, [%o1] %asi
242 membar #Sync|#StoreStore|#StoreLoad
243 wr %g1, %g0, %asi
244 retl
245 wr %g0, 0, %fprs
246 .size xor_vis_4, .-xor_vis_4
247
248 .globl xor_vis_5
249 .type xor_vis_5,#function
250xor_vis_5:
251 save %sp, -192, %sp
252 rd %fprs, %o5
253 andcc %o5, FPRS_FEF|FPRS_DU, %g0
254 be,pt %icc, 0f
255 sethi %hi(VISenter), %g1
256 jmpl %g1 + %lo(VISenter), %g7
257 add %g7, 8, %g7
2580: wr %g0, FPRS_FEF, %fprs
259 rd %asi, %g1
260 wr %g0, ASI_BLK_P, %asi
261 membar #LoadStore|#StoreLoad|#StoreStore
262 sub %i0, 64, %i0
263 ldda [%i1] %asi, %f0
264 ldda [%i2] %asi, %f16
265
2665: ldda [%i3] %asi, %f32
267 fxor %f0, %f16, %f48
268 fxor %f2, %f18, %f50
269 add %i1, 64, %i1
270 fxor %f4, %f20, %f52
271 fxor %f6, %f22, %f54
272 add %i2, 64, %i2
273 fxor %f8, %f24, %f56
274 fxor %f10, %f26, %f58
275 fxor %f12, %f28, %f60
276 fxor %f14, %f30, %f62
277 ldda [%i4] %asi, %f16
278 fxor %f48, %f32, %f48
279 fxor %f50, %f34, %f50
280 fxor %f52, %f36, %f52
281 fxor %f54, %f38, %f54
282 add %i3, 64, %i3
283 fxor %f56, %f40, %f56
284 fxor %f58, %f42, %f58
285 fxor %f60, %f44, %f60
286 fxor %f62, %f46, %f62
287 ldda [%i5] %asi, %f32
288 fxor %f48, %f16, %f48
289 fxor %f50, %f18, %f50
290 add %i4, 64, %i4
291 fxor %f52, %f20, %f52
292 fxor %f54, %f22, %f54
293 add %i5, 64, %i5
294 fxor %f56, %f24, %f56
295 fxor %f58, %f26, %f58
296 fxor %f60, %f28, %f60
297 fxor %f62, %f30, %f62
298 ldda [%i1] %asi, %f0
299 fxor %f48, %f32, %f48
300 fxor %f50, %f34, %f50
301 fxor %f52, %f36, %f52
302 fxor %f54, %f38, %f54
303 fxor %f56, %f40, %f56
304 fxor %f58, %f42, %f58
305 subcc %i0, 64, %i0
306 fxor %f60, %f44, %f60
307 fxor %f62, %f46, %f62
308 stda %f48, [%i1 - 64] %asi
309 bne,pt %xcc, 5b
310 ldda [%i2] %asi, %f16
311
312 ldda [%i3] %asi, %f32
313 fxor %f0, %f16, %f48
314 fxor %f2, %f18, %f50
315 fxor %f4, %f20, %f52
316 fxor %f6, %f22, %f54
317 fxor %f8, %f24, %f56
318 fxor %f10, %f26, %f58
319 fxor %f12, %f28, %f60
320 fxor %f14, %f30, %f62
321 ldda [%i4] %asi, %f16
322 fxor %f48, %f32, %f48
323 fxor %f50, %f34, %f50
324 fxor %f52, %f36, %f52
325 fxor %f54, %f38, %f54
326 fxor %f56, %f40, %f56
327 fxor %f58, %f42, %f58
328 fxor %f60, %f44, %f60
329 fxor %f62, %f46, %f62
330 ldda [%i5] %asi, %f32
331 fxor %f48, %f16, %f48
332 fxor %f50, %f18, %f50
333 fxor %f52, %f20, %f52
334 fxor %f54, %f22, %f54
335 fxor %f56, %f24, %f56
336 fxor %f58, %f26, %f58
337 fxor %f60, %f28, %f60
338 fxor %f62, %f30, %f62
339 membar #Sync
340 fxor %f48, %f32, %f48
341 fxor %f50, %f34, %f50
342 fxor %f52, %f36, %f52
343 fxor %f54, %f38, %f54
344 fxor %f56, %f40, %f56
345 fxor %f58, %f42, %f58
346 fxor %f60, %f44, %f60
347 fxor %f62, %f46, %f62
348 stda %f48, [%i1] %asi
349 membar #Sync|#StoreStore|#StoreLoad
350 wr %g1, %g0, %asi
351 wr %g0, 0, %fprs
352 ret
353 restore
354 .size xor_vis_5, .-xor_vis_5
diff --git a/arch/sparc64/math-emu/Makefile b/arch/sparc64/math-emu/Makefile
new file mode 100644
index 000000000000..a0b06fd29467
--- /dev/null
+++ b/arch/sparc64/math-emu/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the FPU instruction emulation.
3#
4
5obj-y := math.o
6
7EXTRA_CFLAGS = -I. -Iinclude/math-emu -w
diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c
new file mode 100644
index 000000000000..2ae05cd7b773
--- /dev/null
+++ b/arch/sparc64/math-emu/math.c
@@ -0,0 +1,493 @@
1/* $Id: math.c,v 1.11 1999/12/20 05:02:25 davem Exp $
2 * arch/sparc64/math-emu/math.c
3 *
4 * Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 *
7 * Emulation routines originate from soft-fp package, which is part
8 * of glibc and has appropriate copyrights in it.
9 */
10
11#include <linux/types.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14
15#include <asm/fpumacro.h>
16#include <asm/ptrace.h>
17#include <asm/uaccess.h>
18
19#include "sfp-util.h"
20#include <math-emu/soft-fp.h>
21#include <math-emu/single.h>
22#include <math-emu/double.h>
23#include <math-emu/quad.h>
24
25/* QUAD - ftt == 3 */
26#define FMOVQ 0x003
27#define FNEGQ 0x007
28#define FABSQ 0x00b
29#define FSQRTQ 0x02b
30#define FADDQ 0x043
31#define FSUBQ 0x047
32#define FMULQ 0x04b
33#define FDIVQ 0x04f
34#define FDMULQ 0x06e
35#define FQTOX 0x083
36#define FXTOQ 0x08c
37#define FQTOS 0x0c7
38#define FQTOD 0x0cb
39#define FITOQ 0x0cc
40#define FSTOQ 0x0cd
41#define FDTOQ 0x0ce
42#define FQTOI 0x0d3
43/* SUBNORMAL - ftt == 2 */
44#define FSQRTS 0x029
45#define FSQRTD 0x02a
46#define FADDS 0x041
47#define FADDD 0x042
48#define FSUBS 0x045
49#define FSUBD 0x046
50#define FMULS 0x049
51#define FMULD 0x04a
52#define FDIVS 0x04d
53#define FDIVD 0x04e
54#define FSMULD 0x069
55#define FSTOX 0x081
56#define FDTOX 0x082
57#define FDTOS 0x0c6
58#define FSTOD 0x0c9
59#define FSTOI 0x0d1
60#define FDTOI 0x0d2
61#define FXTOS 0x084 /* Only Ultra-III generates this. */
62#define FXTOD 0x088 /* Only Ultra-III generates this. */
63#if 0 /* Optimized inline in sparc64/kernel/entry.S */
64#define FITOS 0x0c4 /* Only Ultra-III generates this. */
65#endif
66#define FITOD 0x0c8 /* Only Ultra-III generates this. */
67/* FPOP2 */
68#define FCMPQ 0x053
69#define FCMPEQ 0x057
70#define FMOVQ0 0x003
71#define FMOVQ1 0x043
72#define FMOVQ2 0x083
73#define FMOVQ3 0x0c3
74#define FMOVQI 0x103
75#define FMOVQX 0x183
76#define FMOVQZ 0x027
77#define FMOVQLE 0x047
78#define FMOVQLZ 0x067
79#define FMOVQNZ 0x0a7
80#define FMOVQGZ 0x0c7
81#define FMOVQGE 0x0e7
82
83#define FSR_TEM_SHIFT 23UL
84#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
85#define FSR_AEXC_SHIFT 5UL
86#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
87#define FSR_CEXC_SHIFT 0UL
88#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
89
90/* All routines returning an exception to raise should detect
91 * such exceptions _before_ rounding to be consistent with
92 * the behavior of the hardware in the implemented cases
93 * (and thus with the recommendations in the V9 architecture
94 * manual).
95 *
96 * We return 0 if a SIGFPE should be sent, 1 otherwise.
97 */
98static inline int record_exception(struct pt_regs *regs, int eflag)
99{
100	u64 fsr = current_thread_info()->xfsr[0]; /* software copy of the FSR */
101	int would_trap;
102
103	/* Determine if this exception would have generated a trap. */
104	would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL; /* any matching TEM enable bit set? */
105
106	/* If trapping, we only want to signal one bit. */
107	if(would_trap != 0) {
108		eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT); /* keep only trap-enabled bits */
109		if((eflag & (eflag - 1)) != 0) { /* more than one bit set: pick the highest-priority one */
110			if(eflag & FP_EX_INVALID)
111				eflag = FP_EX_INVALID;
112			else if(eflag & FP_EX_OVERFLOW)
113				eflag = FP_EX_OVERFLOW;
114			else if(eflag & FP_EX_UNDERFLOW)
115				eflag = FP_EX_UNDERFLOW;
116			else if(eflag & FP_EX_DIVZERO)
117				eflag = FP_EX_DIVZERO;
118			else if(eflag & FP_EX_INEXACT)
119				eflag = FP_EX_INEXACT;
120		}
121	}
122
123	/* Set CEXC, here is the rule:
124	 *
125	 * In general all FPU ops will set one and only one
126	 * bit in the CEXC field, this is always the case
127	 * when the IEEE exception trap is enabled in TEM.
128	 */
129	fsr &= ~(FSR_CEXC_MASK);
130	fsr |= ((long)eflag << FSR_CEXC_SHIFT);
131
132	/* Set the AEXC field, rule is:
133	 *
134	 * If a trap would not be generated, the
135	 * CEXC just generated is OR'd into the
136	 * existing value of AEXC.
137	 */
138	if(would_trap == 0)
139		fsr |= ((long)eflag << FSR_AEXC_SHIFT);
140
141	/* If trapping, indicate fault trap type IEEE. */
142	if(would_trap != 0)
143		fsr |= (1UL << 14); /* ftt <- 1 (IEEE exception); do_mathemu reads ftt at bit 14 */
144
145	current_thread_info()->xfsr[0] = fsr;
146
147	/* If we will not trap, advance the program counter over
148	 * the instruction being handled.
149	 */
150	if(would_trap == 0) {
151		regs->tpc = regs->tnpc;
152		regs->tnpc += 4;
153	}
154
155	return (would_trap ? 0 : 1);
156}
157
/* Overlay view of one FPU register slot: 32-bit single, 64-bit double,
 * or a 128-bit quad as two 64-bit words.
 */
158typedef union {
159	u32 s;
160	u64 d;
161	u64 q[2];
162} *argp;
163
/* Emulate the trapping FPop at regs->tpc (quad ops and subnormal cases the
 * hardware punts on).  Returns 1 when the instruction was handled (or the
 * exception was recorded in the FSR), 0 when the caller should raise a signal.
 */
164int do_mathemu(struct pt_regs *regs, struct fpustate *f)
165{
166	unsigned long pc = regs->tpc;
167	unsigned long tstate = regs->tstate;
168	u32 insn = 0;
169	int type = 0;
170	/* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u arg tells
171	   whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
172	   non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */
173#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
174	int freg;
175	static u64 zero[2] = { 0L, 0L };
176	int flags;
177	FP_DECL_EX;
178	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
179	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
180	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
181	int IR;
182	long XR, xfsr;
183
184	if (tstate & TSTATE_PRIV)
185		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
186	if (test_thread_flag(TIF_32BIT))
187		pc = (u32)pc;
188	if (get_user(insn, (u32 __user *) pc) != -EFAULT) { /* proceed whenever the insn fetch did not fault */
189		if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
190			switch ((insn >> 5) & 0x1ff) {
191			/* QUAD - ftt == 3 */
192			case FMOVQ:
193			case FNEGQ:
194			case FABSQ: TYPE(3,3,0,3,0,0,0); break;
195			case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
196			case FADDQ:
197			case FSUBQ:
198			case FMULQ:
199			case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
200			case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
201			case FQTOX: TYPE(3,2,0,3,1,0,0); break;
202			case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
203			case FQTOS: TYPE(3,1,1,3,1,0,0); break;
204			case FQTOD: TYPE(3,2,1,3,1,0,0); break;
205			case FITOQ: TYPE(3,3,1,1,0,0,0); break;
206			case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
207			case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
208			case FQTOI: TYPE(3,1,0,3,1,0,0); break;
209			/* SUBNORMAL - ftt == 2 */
210			case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
211			case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
212			case FADDD:
213			case FSUBD:
214			case FMULD:
215			case FDIVD: TYPE(2,2,1,2,1,2,1); break;
216			case FADDS:
217			case FSUBS:
218			case FMULS:
219			case FDIVS: TYPE(2,1,1,1,1,1,1); break;
220			case FSMULD: TYPE(2,2,1,1,1,1,1); break;
221			case FSTOX: TYPE(2,2,0,1,1,0,0); break;
222			case FDTOX: TYPE(2,2,0,2,1,0,0); break;
223			case FDTOS: TYPE(2,1,1,2,1,0,0); break;
224			case FSTOD: TYPE(2,2,1,1,1,0,0); break;
225			case FSTOI: TYPE(2,1,0,1,1,0,0); break;
226			case FDTOI: TYPE(2,1,0,2,1,0,0); break;
227
228			/* Only Ultra-III generates these */
229			case FXTOS: TYPE(2,1,1,2,0,0,0); break;
230			case FXTOD: TYPE(2,2,1,2,0,0,0); break;
231#if 0	/* Optimized inline in sparc64/kernel/entry.S */
232			case FITOS: TYPE(2,1,1,1,0,0,0); break;
233#endif
234			case FITOD: TYPE(2,2,1,1,0,0,0); break;
235			}
236		}
237		else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
238			IR = 2; /* sentinel: "not a conditional move", fall straight into type handling */
239			switch ((insn >> 5) & 0x1ff) {
240			case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
241			case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
242			/* Now the conditional fmovq support */
243			case FMOVQ0:
244			case FMOVQ1:
245			case FMOVQ2:
246			case FMOVQ3:
247				/* fmovq %fccX, %fY, %fZ */
248				if (!((insn >> 11) & 3)) /* fcc0 sits at FSR bits 11:10, fcc1-3 live above bit 31 */
249					XR = current_thread_info()->xfsr[0] >> 10;
250				else
251					XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
252				XR &= 3;
253				IR = 0;
254				switch ((insn >> 14) & 0x7) {
255				/* case 0: IR = 0; break; */	/* Never */
256				case 1: if (XR) IR = 1; break;	/* Not Equal */
257				case 2: if (XR == 1 || XR == 2) IR = 1; break;	/* Less or Greater */
258				case 3: if (XR & 1) IR = 1; break;	/* Unordered or Less */
259				case 4: if (XR == 1) IR = 1; break;	/* Less */
260				case 5: if (XR & 2) IR = 1; break;	/* Unordered or Greater */
261				case 6: if (XR == 2) IR = 1; break;	/* Greater */
262				case 7: if (XR == 3) IR = 1; break;	/* Unordered */
263				}
264				if ((insn >> 14) & 8) /* high condition bit negates the test */
265					IR ^= 1;
266				break;
267			case FMOVQI:
268			case FMOVQX:
269				/* fmovq %[ix]cc, %fY, %fZ */
270				XR = regs->tstate >> 32;
271				if ((insn >> 5) & 0x80) /* %xcc selected: use the upper nibble */
272					XR >>= 4;
273				XR &= 0xf; /* N.Z.V.C in bits 3..0 */
274				IR = 0;
275				freg = ((XR >> 2) ^ XR) & 2; /* N xor V: signed "less than" */
276				switch ((insn >> 14) & 0x7) {
277				/* case 0: IR = 0; break; */	/* Never */
278				case 1: if (XR & 4) IR = 1; break;	/* Equal */
279				case 2: if ((XR & 4) || freg) IR = 1; break;	/* Less or Equal */
280				case 3: if (freg) IR = 1; break;	/* Less */
281				case 4: if (XR & 5) IR = 1; break;	/* Less or Equal Unsigned */
282				case 5: if (XR & 1) IR = 1; break;	/* Carry Set */
283				case 6: if (XR & 8) IR = 1; break;	/* Negative */
284				case 7: if (XR & 2) IR = 1; break;	/* Overflow Set */
285				}
286				if ((insn >> 14) & 8)
287					IR ^= 1;
288				break;
289			case FMOVQZ:
290			case FMOVQLE:
291			case FMOVQLZ:
292			case FMOVQNZ:
293			case FMOVQGZ:
294			case FMOVQGE:
295				freg = (insn >> 14) & 0x1f; /* integer register to test */
296				if (!freg)
297					XR = 0; /* %g0 is always zero */
298				else if (freg < 16)
299					XR = regs->u_regs[freg];
300				else if (test_thread_flag(TIF_32BIT)) {
301					struct reg_window32 __user *win32;
302					flushw_user ();
303					win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
304					get_user(XR, &win32->locals[freg - 16]);
305				} else {
306					struct reg_window __user *win;
307					flushw_user ();
308					win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
309					get_user(XR, &win->locals[freg - 16]);
310				}
311				IR = 0;
312				switch ((insn >> 10) & 3) {
313				case 1: if (!XR) IR = 1; break;	/* Register Zero */
314				case 2: if (XR <= 0) IR = 1; break; /* Register Less Than or Equal to Zero */
315				case 3: if (XR < 0) IR = 1; break; /* Register Less Than Zero */
316				}
317				if ((insn >> 10) & 4)
318					IR ^= 1;
319				break;
320			}
321			if (IR == 0) {
322				/* The fmov test was false. Do a nop instead */
323				current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
324				regs->tpc = regs->tnpc;
325				regs->tnpc += 4;
326				return 1;
327			} else if (IR == 1) {
328				/* Change the instruction into plain fmovq */
329				insn = (insn & 0x3e00001f) | 0x81a00060; /* keep rd/rs2 fields, opcode := FMOVQ */
330				TYPE(3,3,0,3,0,0,0);
331			}
332		}
333	}
334	if (type) {
335		argp rs1 = NULL, rs2 = NULL, rd = NULL;
336
337		freg = (current_thread_info()->xfsr[0] >> 14) & 0xf; /* current ftt field */
338		if (freg != (type >> 9)) /* trap type must match what this decode expects */
339			goto err;
340		current_thread_info()->xfsr[0] &= ~0x1c000; /* clear ftt */
341		freg = ((insn >> 14) & 0x1f); /* rs1 field */
342		switch (type & 0x3) {
343		case 3: if (freg & 2) { /* quad register numbers must be 0 mod 4 */
344				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
345				goto err;
346			} /* fall through */
347		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); /* map 5-bit encoding to even %f0-%f62 number */ /* fall through */
348		case 1: rs1 = (argp)&f->regs[freg];
349			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
350			if (!(current_thread_info()->fpsaved[0] & flags)) /* half not saved: reads as zero */
351				rs1 = (argp)&zero;
352			break;
353		}
354		switch (type & 0x7) {
355		case 7: FP_UNPACK_QP (QA, rs1); break;
356		case 6: FP_UNPACK_DP (DA, rs1); break;
357		case 5: FP_UNPACK_SP (SA, rs1); break;
358		}
359		freg = (insn & 0x1f); /* rs2 field */
360		switch ((type >> 3) & 0x3) {
361		case 3: if (freg & 2) {
362				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
363				goto err;
364			} /* fall through */
365		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); /* fall through */
366		case 1: rs2 = (argp)&f->regs[freg];
367			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
368			if (!(current_thread_info()->fpsaved[0] & flags))
369				rs2 = (argp)&zero;
370			break;
371		}
372		switch ((type >> 3) & 0x7) {
373		case 7: FP_UNPACK_QP (QB, rs2); break;
374		case 6: FP_UNPACK_DP (DB, rs2); break;
375		case 5: FP_UNPACK_SP (SB, rs2); break;
376		}
377		freg = ((insn >> 25) & 0x1f); /* rd field */
378		switch ((type >> 6) & 0x3) {
379		case 3: if (freg & 2) {
380				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
381				goto err;
382			} /* fall through */
383		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); /* fall through */
384		case 1: rd = (argp)&f->regs[freg];
385			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
386			if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
387				current_thread_info()->fpsaved[0] = FPRS_FEF;
388				current_thread_info()->gsr[0] = 0;
389			}
390			if (!(current_thread_info()->fpsaved[0] & flags)) {
391				if (freg < 32) /* first touch of this half: zero it so stale data never leaks */
392					memset(f->regs, 0, 32*sizeof(u32));
393				else
394					memset(f->regs+32, 0, 32*sizeof(u32));
395			}
396			current_thread_info()->fpsaved[0] |= flags;
397			break;
398		}
399		switch ((insn >> 5) & 0x1ff) {
400		/* + */
401		case FADDS: FP_ADD_S (SR, SA, SB); break;
402		case FADDD: FP_ADD_D (DR, DA, DB); break;
403		case FADDQ: FP_ADD_Q (QR, QA, QB); break;
404		/* - */
405		case FSUBS: FP_SUB_S (SR, SA, SB); break;
406		case FSUBD: FP_SUB_D (DR, DA, DB); break;
407		case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
408		/* * */
409		case FMULS: FP_MUL_S (SR, SA, SB); break;
410		case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
411			     FP_CONV (D, S, 1, 1, DB, SB); /* widen both, then fall through to multiply as double */
412		case FMULD: FP_MUL_D (DR, DA, DB); break;
413		case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
414			     FP_CONV (Q, D, 2, 1, QB, DB); /* widen both, then fall through to multiply as quad */
415		case FMULQ: FP_MUL_Q (QR, QA, QB); break;
416		/* / */
417		case FDIVS: FP_DIV_S (SR, SA, SB); break;
418		case FDIVD: FP_DIV_D (DR, DA, DB); break;
419		case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
420		/* sqrt */
421		case FSQRTS: FP_SQRT_S (SR, SB); break;
422		case FSQRTD: FP_SQRT_D (DR, DB); break;
423		case FSQRTQ: FP_SQRT_Q (QR, QB); break;
424		/* mov */
425		case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
426		case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
427		case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
428		/* float to int */
429		case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
430		case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
431		case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
432		case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
433		case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
434		case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
435		/* int to float */
436		case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
437		case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
438		/* Only Ultra-III generates these */
439		case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
440		case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
441#if 0	/* Optimized inline in sparc64/kernel/entry.S */
442		case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
443#endif
444		case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
445		/* float to float */
446		case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
447		case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
448		case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
449		case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
450		case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
451		case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
452		/* comparison */
453		case FCMPQ:
454		case FCMPEQ:
455			FP_CMP_Q(XR, QB, QA, 3); /* operands swapped: XR == -1 means rs1 > rs2 */
456			if (XR == 3 &&
457			    (((insn >> 5) & 0x1ff) == FCMPEQ ||
458			     FP_ISSIGNAN_Q(QA) ||
459			     FP_ISSIGNAN_Q(QB)))
460				FP_SET_EXCEPTION (FP_EX_INVALID);
461		}
462		if (!FP_INHIBIT_RESULTS) { /* soft-fp: suppress writeback when a trapping exception occurred */
463			switch ((type >> 6) & 0x7) {
464			case 0: xfsr = current_thread_info()->xfsr[0];
465				if (XR == -1) XR = 2; /* -1 (rs2 < rs1, see swap above) maps to fcc "greater" */
466				switch (freg & 3) {
467				/* fcc0, 1, 2, 3 */
468				case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
469				case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
470				case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
471				case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
472				}
473				current_thread_info()->xfsr[0] = xfsr;
474				break;
475			case 1: rd->s = IR; break;
476			case 2: rd->d = XR; break;
477			case 5: FP_PACK_SP (rd, SR); break;
478			case 6: FP_PACK_DP (rd, DR); break;
479			case 7: FP_PACK_QP (rd, QR); break;
480			}
481		}
482
483		if(_fex != 0) /* exceptions accumulated by the FP_DECL_EX machinery */
484			return record_exception(regs, _fex);
485
486		/* Success and no exceptions detected. */
487		current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
488		regs->tpc = regs->tnpc;
489		regs->tnpc += 4;
490		return 1;
491	}
492err:	return 0;
493}
diff --git a/arch/sparc64/math-emu/sfp-util.h b/arch/sparc64/math-emu/sfp-util.h
new file mode 100644
index 000000000000..31e474738cf6
--- /dev/null
+++ b/arch/sparc64/math-emu/sfp-util.h
@@ -0,0 +1,120 @@
1/* $Id: sfp-util.h,v 1.5 2001/06/10 06:48:46 davem Exp $
2 * arch/sparc64/math-emu/sfp-util.h
3 *
4 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 *
7 */
8
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/types.h>
12#include <asm/byteorder.h>
13
/* Double-limb add: (sh,sl) = (ah,al) + (bh,bl) over 64-bit limbs,
 * propagating the carry from the low word into the high word.
 * soft-fp primitive; clobbers the condition codes.
 */
14#define add_ssaaaa(sh, sl, ah, al, bh, bl) 				\
15  __asm__ ("addcc %4,%5,%1\n\t"					\
16	   "add %2,%3,%0\n\t"						\
17	   "bcs,a,pn %%xcc, 1f\n\t"					\
18	   "add %0, 1, %0\n"						\
19	   "1:"								\
20	   : "=r" ((UDItype)(sh)),					\
21	     "=&r" ((UDItype)(sl))					\
22	   : "r" ((UDItype)(ah)),					\
23	     "r" ((UDItype)(bh)),					\
24	     "r" ((UDItype)(al)),					\
25	     "r" ((UDItype)(bl))					\
26	   : "cc")
27
/* Double-limb subtract: (sh,sl) = (ah,al) - (bh,bl), propagating the
 * borrow from the low word into the high word.
 */
28#define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
29  __asm__ ("subcc %4,%5,%1\n\t"					\
30	   "sub %2,%3,%0\n\t"						\
31	   "bcs,a,pn %%xcc, 1f\n\t"					\
32	   "sub %0, 1, %0\n"						\
33	   "1:"								\
34	   : "=r" ((UDItype)(sh)),					\
35	     "=&r" ((UDItype)(sl))					\
36	   : "r" ((UDItype)(ah)),					\
37	     "r" ((UDItype)(bh)),					\
38	     "r" ((UDItype)(al)),					\
39	     "r" ((UDItype)(bl))					\
40	   : "cc")
41
/* Full 64x64 -> 128 bit multiply: (wh,wl) = u * v, built from 32-bit
 * partial products because UltraSPARC mulx only returns the low 64 bits.
 */
42#define umul_ppmm(wh, wl, u, v)				\
43  do {							\
44	  UDItype tmp1, tmp2, tmp3, tmp4;		\
45	  __asm__ __volatile__ (			\
46		   "srl %7,0,%3\n\t"			\
47		   "mulx %3,%6,%1\n\t"			\
48		   "srlx %6,32,%2\n\t"			\
49		   "mulx %2,%3,%4\n\t"			\
50		   "sllx %4,32,%5\n\t"			\
51		   "srl %6,0,%3\n\t"			\
52		   "sub %1,%5,%5\n\t"			\
53		   "srlx %5,32,%5\n\t"			\
54		   "addcc %4,%5,%4\n\t"			\
55		   "srlx %7,32,%5\n\t"			\
56		   "mulx %3,%5,%3\n\t"			\
57		   "mulx %2,%5,%5\n\t"			\
58		   "sethi %%hi(0x80000000),%2\n\t"	\
59		   "addcc %4,%3,%4\n\t"			\
60		   "srlx %4,32,%4\n\t"			\
61		   "add %2,%2,%2\n\t"			\
62		   "movcc %%xcc,%%g0,%2\n\t"		\
63		   "addcc %5,%4,%5\n\t"			\
64		   "sllx %3,32,%3\n\t"			\
65		   "add %1,%3,%1\n\t"			\
66		   "add %5,%2,%0"			\
67	   : "=r" ((UDItype)(wh)),			\
68	     "=&r" ((UDItype)(wl)),			\
69	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
70	   : "r" ((UDItype)(u)),			\
71	     "r" ((UDItype)(v))				\
72	   : "cc");					\
73  } while (0)
74
/* 128/64 -> 64 bit division: q = (n1,n0) / d, r = remainder, computed
 * 32 bits of quotient at a time.  The divisor must be normalized
 * (see UDIV_NEEDS_NORMALIZATION below).
 */
75#define udiv_qrnnd(q, r, n1, n0, d)			\
76  do {							\
77      UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;	\
78      __d1 = (d >> 32);					\
79      __d0 = (USItype)d;				\
80							\
81      __r1 = (n1) % __d1;				\
82      __q1 = (n1) / __d1;				\
83      __m = (UWtype) __q1 * __d0;			\
84      __r1 = (__r1 << 32) | (n0 >> 32);			\
85      if (__r1 < __m)					\
86	{						\
87	  __q1--, __r1 += (d);				\
88	  if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
89	    if (__r1 < __m)				\
90	      __q1--, __r1 += (d);			\
91	}						\
92      __r1 -= __m;					\
93							\
94      __r0 = __r1 % __d1;				\
95      __q0 = __r1 / __d1;				\
96      __m = (UWtype) __q0 * __d0;			\
97      __r0 = (__r0 << 32) | ((USItype)n0);		\
98      if (__r0 < __m)					\
99	{						\
100	  __q0--, __r0 += (d);				\
101	  if (__r0 >= (d))				\
102	    if (__r0 < __m)				\
103	      __q0--, __r0 += (d);			\
104	}						\
105      __r0 -= __m;					\
106							\
107      (q) = (UWtype) (__q1 << 32) | __q0;		\
108      (r) = __r0;					\
109  } while (0)
110
/* Tell soft-fp that udiv_qrnnd above requires a normalized divisor. */
111#define UDIV_NEEDS_NORMALIZATION 1
112
/* soft-fp calls abort() on "can't happen" paths; in the kernel we instead
 * make the enclosing emulation function return 0 (failure).
 */
113#define abort()						\
114	return 0
115
116#ifdef __BIG_ENDIAN
117#define __BYTE_ORDER __BIG_ENDIAN
118#else
119#define __BYTE_ORDER __LITTLE_ENDIAN
120#endif
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
new file mode 100644
index 000000000000..cda87333a77b
--- /dev/null
+++ b/arch/sparc64/mm/Makefile
@@ -0,0 +1,10 @@
1# $Id: Makefile,v 1.8 2000/12/14 22:57:25 davem Exp $
2# Makefile for the linux Sparc64-specific parts of the memory manager.
3#
4
5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror
7
8obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
9
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
new file mode 100644
index 000000000000..ec334297ff4f
--- /dev/null
+++ b/arch/sparc64/mm/extable.c
@@ -0,0 +1,80 @@
1/*
2 * linux/arch/sparc64/mm/extable.c
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <asm/uaccess.h>
8
9extern const struct exception_table_entry __start___ex_table[];
10extern const struct exception_table_entry __stop___ex_table[];
11
/* Intentionally a no-op: sparc64 range entries occupy two consecutive
 * table slots (see search_extable below), so entries must stay in link
 * order and cannot be sorted pairwise.
 */
12void sort_extable(struct exception_table_entry *start,
13		  struct exception_table_entry *finish)
14{
15}
16
17/* Caller knows they are in a range if ret->fixup == 0 */
18const struct exception_table_entry *
19search_extable(const struct exception_table_entry *start,
20	       const struct exception_table_entry *last,
21	       unsigned long value)
22{
23	const struct exception_table_entry *walk;
24
25	/* Single insn entries are encoded as:
26	 *	word 1:	insn address
27	 *	word 2:	fixup code address
28	 *
29	 * Range entries are encoded as:
30	 *	word 1: first insn address
31	 *	word 2: 0
32	 *	word 3: last insn address + 4 bytes
33	 *	word 4: fixup code address
34	 *
35	 * See asm/uaccess.h for more details.
36	 */
37
38	/* 1. Try to find an exact match. */
39	for (walk = start; walk <= last; walk++) {
40		if (walk->fixup == 0) {
41			/* A range entry, skip both parts. */
42			walk++;
43			continue;
44		}
45
46		if (walk->insn == value)
47			return walk;
48	}
49
50	/* 2. Try to find a range match. */
51	for (walk = start; walk <= (last - 1); walk++) { /* stop one early: ranges need walk[1] */
52		if (walk->fixup)
53			continue;
54
55		/* fixup == 0 marks a range entry: walk[0] is first insn, walk[1] is one-past-last */
56		if (walk[0].insn <= value && walk[1].insn > value)
57			return walk;
58
59		walk++;
60	}
61
62        return NULL;
63}
63
64/* Special extable search, which handles ranges.  Returns fixup */
65unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
66{
67	const struct exception_table_entry *entry;
68
69	entry = search_exception_tables(addr);
70	if (!entry)
71		return 0;
72
73	/* Inside range? Fix g2 and return correct fixup */
74	if (!entry->fixup) {
75		*g2 = (addr - entry->insn) / 4; /* word offset of faulting insn within the range */
76		return (entry + 1)->fixup; /* range entries keep the fixup in the second slot */
77	}
78
79	return entry->fixup;
80}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
new file mode 100644
index 000000000000..3ffee7b51aed
--- /dev/null
+++ b/arch/sparc64/mm/fault.c
@@ -0,0 +1,527 @@
1/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 */
7
8#include <asm/head.h>
9
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/sched.h>
13#include <linux/ptrace.h>
14#include <linux/mman.h>
15#include <linux/signal.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/smp_lock.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21
22#include <asm/page.h>
23#include <asm/pgtable.h>
24#include <asm/openprom.h>
25#include <asm/oplib.h>
26#include <asm/uaccess.h>
27#include <asm/asi.h>
28#include <asm/lsu.h>
29#include <asm/sections.h>
30#include <asm/kdebug.h>
31
32#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
33
34extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
35
36/*
37 * To debug kernel during syscall entry.
38 */
39void syscall_trace_entry(struct pt_regs *regs)
40{
41	printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]); /* %g1 presumably carries the syscall number here -- matches sparc64 syscall convention, verify against entry.S */
42}
43
44/*
45 * To debug kernel during syscall exit.
46 */
47void syscall_trace_exit(struct pt_regs *regs)
48{
49	printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]); /* same debug print as syscall_trace_entry */
50}
51
52/*
53 * To debug kernel to catch accesses to certain virtual/physical addresses.
54 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
55 * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses.
56 * Caller passes in a 64bit aligned addr, with mask set to the bytes that need to be
57 * watched. This is only useful on a single cpu machine for now. After the watchpoint
58 * is detected, the process causing it will be killed, thus preventing an infinite loop.
59 */
60void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
61{
62	unsigned long lsubits;
63
	/* Read the LSU control register and drop all watchpoint enable bits. */
64	__asm__ __volatile__("ldxa [%%g0] %1, %0"
65			     : "=r" (lsubits)
66			     : "i" (ASI_LSU_CONTROL));
67	lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
68		     LSU_CONTROL_PR | LSU_CONTROL_VR |
69		     LSU_CONTROL_PW | LSU_CONTROL_VW);
70
	/* Program the watchpoint address register in the D-MMU. */
71	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
72			     "membar	#Sync"
73			     : /* no outputs */
74			     : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
75			       "i" (ASI_DMMU));
76
	/* Byte mask: virtual watchpoints at bit 25, physical at bit 33. */
77	lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
78	if (flags & VM_READ)
79		lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
80	if (flags & VM_WRITE)
81		lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
	/* Write back the LSU control register to arm the watchpoint. */
82	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
83			     "membar #Sync"
84			     : /* no outputs */
85			     : "r" (lsubits), "i" (ASI_LSU_CONTROL)
86			     : "memory");
87}
88
89/* Nice, simple, prom library does all the sweating for us. ;) */
90unsigned long __init prom_probe_memory (void)
91{
92	register struct linux_mlist_p1275 *mlist;
93	register unsigned long bytes, base_paddr, tally;
94	register int i;
95
96	i = 0;
97	mlist = *prom_meminfo()->p1275_available; /* PROM's list of available physical memory */
98	bytes = tally = mlist->num_bytes;
99	base_paddr = mlist->start_adr;
100
101	sp_banks[0].base_addr = base_paddr;
102	sp_banks[0].num_bytes = bytes;
103
	/* Walk the rest of the PROM list, filling sp_banks and summing sizes. */
104	while (mlist->theres_more != (void *) 0) {
105		i++;
106		mlist = mlist->theres_more;
107		bytes = mlist->num_bytes;
108		tally += bytes;
109		if (i >= SPARC_PHYS_BANKS-1) {
110			printk ("The machine has more banks than "
111				"this kernel can support\n"
112				"Increase the SPARC_PHYS_BANKS "
113				"setting (currently %d)\n",
114				SPARC_PHYS_BANKS);
115			i = SPARC_PHYS_BANKS-1;
116			break;
117		}
118
119		sp_banks[i].base_addr = mlist->start_adr;
120		sp_banks[i].num_bytes = mlist->num_bytes;
121	}
122
123	i++;
124	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; /* sentinel marking end of the bank table */
125	sp_banks[i].num_bytes = 0;
126
127	/* Now mask all bank sizes on a page boundary, it is all we can
128	 * use anyways.
129	 */
130	for (i = 0; sp_banks[i].num_bytes != 0; i++)
131		sp_banks[i].num_bytes &= PAGE_MASK;
132
	/* Return total bytes of physical memory found. */
133	return tally;
134}
135
/* Report an unrecoverable kernel fault at `address` and oops if in kernel mode. */
136static void unhandled_fault(unsigned long address, struct task_struct *tsk,
137			    struct pt_regs *regs)
138{
139	if ((unsigned long) address < PAGE_SIZE) {
140		printk(KERN_ALERT "Unable to handle kernel NULL "
141		       "pointer dereference\n");
142	} else {
143		printk(KERN_ALERT "Unable to handle kernel paging request "
144		       "at virtual address %016lx\n", (unsigned long)address);
145	}
146	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
147	       (tsk->mm ?
148		CTX_HWBITS(tsk->mm->context) :
149		CTX_HWBITS(tsk->active_mm->context)));
150	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
151	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
152		(unsigned long) tsk->active_mm->pgd));
	/* Give registered die-notifier hooks (e.g. debuggers) a chance to claim it. */
153	if (notify_die(DIE_GPF, "general protection fault", regs,
154		       0, 0, SIGSEGV) == NOTIFY_STOP)
155		return;
156	die_if_kernel("Oops", regs);
157}
158
/* A fault occurred with a kernel PC outside the kernel/module text ranges:
 * dump a backtrace from the current stack pointer, then oops.
 */
159static void bad_kernel_pc(struct pt_regs *regs)
160{
161	unsigned long *ksp;
162
163	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
164	       regs->tpc);
165	__asm__("mov %%sp, %0" : "=r" (ksp));
166	show_stack(current, ksp);
167	unhandled_fault(regs->tpc, current, regs);
168}
169
170/*
171 * We now make sure that mmap_sem is held in all paths that call
172 * this. Additionally, to prevent kswapd from ripping ptes from
173 * under us, raise interrupts around the time that we look at the
174 * pte, kswapd will have to wait to get his smp ipi response from
175 * us. This saves us having to get page_table_lock.
176 */
/* Walk the current page tables by hand and read the user instruction at
 * tpc via a physical-address load.  Returns 0 if the mapping is absent.
 */
177static unsigned int get_user_insn(unsigned long tpc)
178{
179	pgd_t *pgdp = pgd_offset(current->mm, tpc);
180	pud_t *pudp;
181	pmd_t *pmdp;
182	pte_t *ptep, pte;
183	unsigned long pa;
184	u32 insn = 0;
185	unsigned long pstate;
186
187	if (pgd_none(*pgdp))
188		goto outret;
189	pudp = pud_offset(pgdp, tpc);
190	if (pud_none(*pudp))
191		goto outret;
192	pmdp = pmd_offset(pudp, tpc);
193	if (pmd_none(*pmdp))
194		goto outret;
195
196	/* This disables preemption for us as well. */
197	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
198	__asm__ __volatile__("wrpr %0, %1, %%pstate"
199				: : "r" (pstate), "i" (PSTATE_IE)); /* clear IE: interrupts off */
200	ptep = pte_offset_map(pmdp, tpc);
201	pte = *ptep;
202	if (!pte_present(pte))
203		goto out;
204
205	pa  = (pte_val(pte) & _PAGE_PADDR);
206	pa += (tpc & ~PAGE_MASK);
207
208	/* Use phys bypass so we don't pollute dtlb/dcache. */
209	__asm__ __volatile__("lduwa [%1] %2, %0"
210			     : "=r" (insn)
211			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
212
213out:
214	pte_unmap(ptep);
215	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); /* restore saved pstate (re-enables IE) */
216outret:
217	return insn;
218}
219
220extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
221
/* Build and deliver a fault signal to the current task.  The fault address
 * is the PC itself for ITLB (instruction) faults, otherwise the effective
 * address computed by decoding the faulting load/store.
 */
222static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
223			     unsigned int insn, int fault_code)
224{
225	siginfo_t info;
226
227	info.si_code = code;
228	info.si_signo = sig;
229	info.si_errno = 0;
230	if (fault_code & FAULT_CODE_ITLB)
231		info.si_addr = (void __user *) regs->tpc;
232	else
233		info.si_addr = (void __user *)
234			compute_effective_address(regs, insn, 0);
235	info.si_trapno = 0;
236	force_sig_info(sig, &info, current);
237}
238
239extern int handle_ldf_stq(u32, struct pt_regs *);
240extern int handle_ld_nf(u32, struct pt_regs *);
241
/* Return the faulting instruction word, fetching it lazily.  Kernel-mode
 * faults can read the text directly; user-mode faults go through the
 * careful page-table walk in get_user_insn().  Returns 0 when the PC is
 * bogus or the insn cannot be read.
 */
242static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
243{
244	if (!insn) {
245		if (!regs->tpc || (regs->tpc & 0x3)) /* NULL or unaligned PC */
246			return 0;
247		if (regs->tstate & TSTATE_PRIV) {
248			insn = *(unsigned int *) regs->tpc;
249		} else {
250			insn = get_user_insn(regs->tpc);
251		}
252	}
253	return insn;
254}
255
/* Last-resort fault handling: try alternate-space load/store emulation,
 * then the exception tables (kernel mode) or a SIGSEGV (user mode);
 * anything left over is an unhandled fault.
 */
256static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
257			    unsigned int insn, unsigned long address)
258{
259	unsigned long g2;
260	unsigned char asi = ASI_P;
261 
262	if ((!insn) && (regs->tstate & TSTATE_PRIV))
263		goto cannot_handle;
264
265	/* If user insn could not be read (thus insn is zero), that
266	 * is fine.  We will just gun down the process with a signal
267	 * in that case.
268	 */
269
270	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
271	    (insn & 0xc0800000) == 0xc0800000) { /* alternate-space load/store -- NOTE(review): verify against V9 opcode map */
272		if (insn & 0x2000) /* i=1: ASI comes from the %asi register (in tstate) */
273			asi = (regs->tstate >> 24);
274		else /* i=0: ASI is encoded in the instruction */
275			asi = (insn >> 5);
276		if ((asi & 0xf2) == 0x82) { /* one of the no-fault ASIs */
277			if (insn & 0x1000000) {
278				handle_ldf_stq(insn, regs);
279			} else {
280				/* This was a non-faulting load. Just clear the
281				 * destination register(s) and continue with the next
282				 * instruction. -jj
283				 */
284				handle_ld_nf(insn, regs);
285			}
286			return;
287		}
288	}
289		
290	g2 = regs->u_regs[UREG_G2]; /* fixup handlers may want %g2; preserved/updated below */
291
292	/* Is this in ex_table? */
293	if (regs->tstate & TSTATE_PRIV) {
294		unsigned long fixup;
295
296		if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
297			if (insn & 0x2000)
298				asi = (regs->tstate >> 24);
299			else
300				asi = (insn >> 5);
301		}
302	
303		/* Look in asi.h: All _S asis have LS bit set */
304		if ((asi & 0x1) &&
305		    (fixup = search_extables_range(regs->tpc, &g2))) {
			/* Redirect execution to the fixup stub. */
306			regs->tpc = fixup;
307			regs->tnpc = regs->tpc + 4;
308			regs->u_regs[UREG_G2] = g2;
309			return;
310		}
311	} else {
312		/* The si_code was set to make clear whether
313		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
314		 */
315		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
316		return;
317	}
318
319cannot_handle:
320	unhandled_fault (address, current, regs);
321}
322
/* Top-level sparc64 page fault handler, entered from the low-level
 * TLB-miss trap code.  The faulting virtual address and a fault code
 * (ITLB/DTLB/WRITE/WINFIXUP bits) were stashed in thread_info by the
 * miss handlers before we get here.
 *
 * On a bad access this either fixes up via the exception tables,
 * delivers SIGSEGV/SIGBUS, or (for unrecoverable kernel faults)
 * falls through to do_kernel_fault()/unhandled_fault().
 */
asmlinkage void do_sparc64_fault(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code;
	unsigned long address;

	fault_code = get_thread_fault_code();

	/* Give kprobes/debuggers first crack at the fault. */
	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs,
		       fault_code, 0, SIGSEGV) == NOTIFY_STOP)
		return;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	/* A fault can never be both an instruction and a data miss. */
	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC: a kernel-mode fault must have come
		 * from kernel text or module text, otherwise the trap
		 * state itself is corrupt.
		 */
		if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs);
			return;
		}
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto intr_or_no_mm;

	/* 32-bit tasks only have a 4GB address space; truncate the
	 * fault address (and user PC) accordingly.
	 */
	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV))
			regs->tpc &= 0xffffffff;
		address &= 0xffffffff;
	}

	/* Avoid deadlock: if the kernel faulted while holding mmap_sem
	 * and has no fixup entry, go straight to the kernel fault path
	 * instead of blocking on down_read().
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* NOTE(review): the mask appears to select format-3
		 * memory ops that store -- confirm against the SPARC V9
		 * opcode map before relying on this in new code.
		 */
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x1780000) != 0x1680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			/* The ASI comes either from %tstate or from the
			 * instruction's immediate field.
			 */
			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			/* Non-faulting (no-fault) ASIs must not grow the
			 * stack; just signal the bad area.
			 */
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took a ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		BUG_ON(address != regs->tpc);
		BUG_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* Dispatch to the generic VM; account minor/major faults and
	 * translate failures into the proper signal paths.
	 */
	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	goto fault_done;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);

	goto fault_done;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (!(regs->tstate & TSTATE_PRIV))
		do_exit(SIGKILL);
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);

	/* Kernel mode?  Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;

fault_done:
	/* These values are no longer needed, clear them. */
	set_thread_fault_code(0);
	current_thread_info()->fault_address = 0;
}
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
new file mode 100644
index 000000000000..6b31f6117a95
--- /dev/null
+++ b/arch/sparc64/mm/generic.c
@@ -0,0 +1,182 @@
1/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/pagemap.h>
12
13#include <asm/pgalloc.h>
14#include <asm/pgtable.h>
15#include <asm/page.h>
16#include <asm/tlbflush.h>
17
18/* Remap IO memory, the same way as remap_pfn_range(), but use
19 * the obio memory space.
20 *
21 * They use a pgprot that sets PAGE_IO and does not check the
22 * mem_map table as this is independent of normal memory.
23 */
/* Fill one PMD's worth of PTEs with IO-space translations.
 *
 * @address/@size select the virtual span (clamped to one PMD below),
 * @offset is the physical IO offset, @space the sparc64 IO address
 * space number.  Where both the virtual address and the IO offset are
 * suitably aligned, a single large TTE (4MB / 512K / 64K) is replicated
 * across the covered base-page PTE slots so the TLB can use the bigger
 * page size.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	/* Clamp the walk to the current PMD entry. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space);
		if (!(address & 0xffff)) {
			/* NOTE(review): the offset alignment masks end in
			 * ...e rather than ...f, excluding bit 0 which
			 * carried the write_combine flag cleared above --
			 * confirm this is intentional before "fixing".
			 */
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		/* Replicate the chosen TTE across every base-page slot
		 * it covers; the slots must be empty.
		 */
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
75
76static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
77 unsigned long offset, pgprot_t prot, int space)
78{
79 unsigned long end;
80
81 address &= ~PGDIR_MASK;
82 end = address + size;
83 if (end > PGDIR_SIZE)
84 end = PGDIR_SIZE;
85 offset -= address;
86 do {
87 pte_t * pte = pte_alloc_map(mm, pmd, address);
88 if (!pte)
89 return -ENOMEM;
90 io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
91 pte_unmap(pte);
92 address = (address + PMD_SIZE) & PMD_MASK;
93 pmd++;
94 } while (address < end);
95 return 0;
96}
97
98static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
99 unsigned long offset, pgprot_t prot, int space)
100{
101 unsigned long end;
102
103 address &= ~PUD_MASK;
104 end = address + size;
105 if (end > PUD_SIZE)
106 end = PUD_SIZE;
107 offset -= address;
108 do {
109 pmd_t *pmd = pmd_alloc(mm, pud, address);
110 if (!pud)
111 return -ENOMEM;
112 io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
113 address = (address + PUD_SIZE) & PUD_MASK;
114 pud++;
115 } while (address < end);
116 return 0;
117}
118
119int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
120{
121 int error = 0;
122 pgd_t * dir;
123 unsigned long beg = from;
124 unsigned long end = from + size;
125 struct mm_struct *mm = vma->vm_mm;
126
127 prot = __pgprot(pg_iobits);
128 offset -= from;
129 dir = pgd_offset(mm, from);
130 flush_cache_range(vma, beg, end);
131
132 spin_lock(&mm->page_table_lock);
133 while (from < end) {
134 pud_t *pud = pud_alloc(mm, dir, from);
135 error = -ENOMEM;
136 if (!pud)
137 break;
138 error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
139 if (error)
140 break;
141 from = (from + PGDIR_SIZE) & PGDIR_MASK;
142 dir++;
143 }
144 flush_tlb_range(vma, beg, end);
145 spin_unlock(&mm->page_table_lock);
146
147 return error;
148}
149
150int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
151 unsigned long pfn, unsigned long size, pgprot_t prot)
152{
153 int error = 0;
154 pgd_t * dir;
155 unsigned long beg = from;
156 unsigned long end = from + size;
157 struct mm_struct *mm = vma->vm_mm;
158 int space = GET_IOSPACE(pfn);
159 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
160
161 prot = __pgprot(pg_iobits);
162 offset -= from;
163 dir = pgd_offset(mm, from);
164 flush_cache_range(vma, beg, end);
165
166 spin_lock(&mm->page_table_lock);
167 while (from < end) {
168 pud_t *pud = pud_alloc(current->mm, dir, from);
169 error = -ENOMEM;
170 if (!pud)
171 break;
172 error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
173 if (error)
174 break;
175 from = (from + PGDIR_SIZE) & PGDIR_MASK;
176 dir++;
177 }
178 flush_tlb_range(vma, beg, end);
179 spin_unlock(&mm->page_table_lock);
180
181 return error;
182}
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
new file mode 100644
index 000000000000..5a1f831b2de1
--- /dev/null
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -0,0 +1,310 @@
1/*
2 * SPARC64 Huge TLB page support.
3 *
4 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/fs.h>
11#include <linux/mm.h>
12#include <linux/hugetlb.h>
13#include <linux/pagemap.h>
14#include <linux/smp_lock.h>
15#include <linux/slab.h>
16#include <linux/sysctl.h>
17
18#include <asm/mman.h>
19#include <asm/pgalloc.h>
20#include <asm/tlb.h>
21#include <asm/tlbflush.h>
22#include <asm/cacheflush.h>
23#include <asm/mmu_context.h>
24
25static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
26{
27 pgd_t *pgd;
28 pud_t *pud;
29 pmd_t *pmd;
30 pte_t *pte = NULL;
31
32 pgd = pgd_offset(mm, addr);
33 if (pgd) {
34 pud = pud_offset(pgd, addr);
35 if (pud) {
36 pmd = pmd_alloc(mm, pud, addr);
37 if (pmd)
38 pte = pte_alloc_map(mm, pmd, addr);
39 }
40 }
41 return pte;
42}
43
44static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
45{
46 pgd_t *pgd;
47 pud_t *pud;
48 pmd_t *pmd;
49 pte_t *pte = NULL;
50
51 pgd = pgd_offset(mm, addr);
52 if (pgd) {
53 pud = pud_offset(pgd, addr);
54 if (pud) {
55 pmd = pmd_offset(pud, addr);
56 if (pmd)
57 pte = pte_offset_map(pmd, addr);
58 }
59 }
60 return pte;
61}
62
63#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
64
65static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
66 unsigned long addr,
67 struct page *page, pte_t * page_table, int write_access)
68{
69 unsigned long i;
70 pte_t entry;
71
72 add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
73
74 if (write_access)
75 entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
76 vma->vm_page_prot)));
77 else
78 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
79 entry = pte_mkyoung(entry);
80 mk_pte_huge(entry);
81
82 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
83 set_pte_at(mm, addr, page_table, entry);
84 page_table++;
85 addr += PAGE_SIZE;
86
87 pte_val(entry) += PAGE_SIZE;
88 }
89}
90
91/*
92 * This function checks for proper alignment of input addr and len parameters.
93 */
94int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
95{
96 if (len & ~HPAGE_MASK)
97 return -EINVAL;
98 if (addr & ~HPAGE_MASK)
99 return -EINVAL;
100 return 0;
101}
102
/* Duplicate the huge-page mappings of @vma from @src into @dst at
 * fork() time.  Each huge page occupies (1 << HUGETLB_PAGE_ORDER)
 * consecutive base-size PTEs, so the page reference is taken once per
 * huge page while the PTE value is advanced PAGE_SIZE per slot.
 * Returns 0 on success, -ENOMEM if a destination PTE page cannot be
 * allocated.
 */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		/* The source must already be fully populated: hugetlb
		 * pages are prefaulted at mmap time.
		 */
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte_at(dst, addr, dst_pte, entry);
			/* Step to the next base page of the huge page. */
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
			addr += PAGE_SIZE;
		}
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
	}
	return 0;

nomem:
	return -ENOMEM;
}
134
/* get_user_pages() back-end for hugetlb VMAs: collect the pages (and
 * optionally the vmas) backing the user range starting at *@position.
 * Walks in PAGE_SIZE steps, relying on the huge page having been
 * prefaulted (so every PTE is present).  Updates *@position and
 * *@length to reflect what was consumed and returns the next free
 * index @i.
 */
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			/* Caller owns a reference on each returned page. */
			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}
175
/* sparc64 does not implement follow_huge_addr(); generic follow_page()
 * handles hugetlb PTEs here, so always report "not special".
 */
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
181
/* Huge pages on sparc64 are built from runs of base-size PTEs, never
 * from a single huge PMD entry, so no PMD is ever "huge".
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}
186
/* Companion stub to pmd_huge(): there are no huge PMDs on sparc64, so
 * there is never a page to return at this level.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
192
/* Tear down the huge-page mappings of @vma in [@start, @end).  Both
 * bounds must be HPAGE aligned.  Drops one page reference per huge
 * page, clears all of its base-size PTE slots, adjusts rss, and
 * flushes the TLB for the whole range.
 */
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		/* One reference per huge page, matching set_huge_pte(). */
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			/* NOTE(review): the address passed here advances by
			 * i*PAGE_SIZE while 'address' itself is unchanged in
			 * this inner loop -- presumably only the pte pointer
			 * matters to pte_clear on sparc64; confirm.
			 */
			pte_clear(mm, address+(i*PAGE_SIZE), pte);
			pte++;
		}
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}
220
221static void context_reload(void *__data)
222{
223 struct mm_struct *mm = __data;
224
225 if (mm == current->mm)
226 load_secondary_context(mm);
227}
228
/* Prefault every huge page of @vma at mmap() time: find or allocate
 * each page in @mapping's page cache (charging the hugetlbfs quota)
 * and install it with set_huge_pte().  On UltraSPARC-III+ this first
 * reprograms the mm's context register so the second half of the
 * D-TLB is configured for huge pages.  Returns 0 or -ENOMEM.
 *
 * Called with current->mm; takes mm->page_table_lock for the PTE
 * installation loop.
 */
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		/* Already populated (e.g. shared mapping raced in). */
		if (!pte_none(*pte))
			continue;

		/* Page-cache index of this huge page within the file. */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (! ret) {
				unlock_page(page);
			} else {
				/* NOTE(review): an -EEXIST race here bails
				 * out rather than retrying the lookup --
				 * confirm that is acceptable for callers.
				 */
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
new file mode 100644
index 000000000000..89022ccaa75b
--- /dev/null
+++ b/arch/sparc64/mm/init.c
@@ -0,0 +1,1769 @@
1/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/mm/init.c
3 *
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <linux/init.h>
13#include <linux/bootmem.h>
14#include <linux/mm.h>
15#include <linux/hugetlb.h>
16#include <linux/slab.h>
17#include <linux/initrd.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
22
23#include <asm/head.h>
24#include <asm/system.h>
25#include <asm/page.h>
26#include <asm/pgalloc.h>
27#include <asm/pgtable.h>
28#include <asm/oplib.h>
29#include <asm/iommu.h>
30#include <asm/io.h>
31#include <asm/uaccess.h>
32#include <asm/mmu_context.h>
33#include <asm/tlbflush.h>
34#include <asm/dma.h>
35#include <asm/starfire.h>
36#include <asm/tlb.h>
37#include <asm/spitfire.h>
38#include <asm/sections.h>
39
40extern void device_scan(void);
41
42struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
43
44unsigned long *sparc64_valid_addr_bitmap;
45
46/* Ugly, but necessary... -DaveM */
47unsigned long phys_base;
48unsigned long kern_base;
49unsigned long kern_size;
50unsigned long pfn_base;
51
52/* This is even uglier. We have a problem where the kernel may not be
53 * located at phys_base. However, initial __alloc_bootmem() calls need to
54 * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
55 * those page mappings wont work. Things are ok after inherit_prom_mappings
56 * is called though. Dave says he'll clean this up some other time.
57 * -- BenC
58 */
59static unsigned long bootmap_base;
60
61/* get_new_mmu_context() uses "cache + 1". */
62DEFINE_SPINLOCK(ctx_alloc_lock);
63unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
64#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
65unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
66
67/* References to special section boundaries */
68extern char _start[], _end[];
69
70/* Initial ramdisk setup */
71extern unsigned long sparc_ramdisk_image64;
72extern unsigned int sparc_ramdisk_image;
73extern unsigned int sparc_ramdisk_size;
74
75struct page *mem_map_zero;
76
77int bigkernel = 0;
78
79/* XXX Tune this... */
80#define PGT_CACHE_LOW 25
81#define PGT_CACHE_HIGH 50
82
/* Trim the per-CPU page-table quicklists back below PGT_CACHE_LOW once
 * they grow past PGT_CACHE_HIGH.  Preemption is disabled so the
 * quicklists (and pgtable_cache_size) stay this CPU's throughout.
 */
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
			/* Two PTE quicklists; the second is selected by a
			 * high address bit, hence the shifted argument.
			 */
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
	preempt_enable();
}
98
99#ifdef CONFIG_DEBUG_DCFLUSH
100atomic_t dcpage_flushes = ATOMIC_INIT(0);
101#ifdef CONFIG_SMP
102atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
103#endif
104#endif
105
/* Actually flush one page out of the D-cache (and, on Spitfire with a
 * mapped page, the I-cache too).  Where D-cache aliasing is impossible
 * only the Spitfire I-cache non-snooping problem needs handling.
 */
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
122
123#define PG_dcache_dirty PG_arch_1
124
125#define dcache_dirty_cpu(page) \
126 (((page)->flags >> 24) & (NR_CPUS - 1UL))
127
/* Atomically mark @page D-cache-dirty and record @this_cpu as the owner
 * in page->flags (cpu number lives in bits 24+, dirty bit is
 * PG_dcache_dirty).  Uses a casx compare-and-swap loop so concurrent
 * flag updates from other CPUs are not lost.
 */
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
	mask = (mask << 24) | (1UL << PG_dcache_dirty);
	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
145
/* Atomically clear the PG_dcache_dirty bit in page->flags, but only if
 * the recorded owning cpu still equals @cpu; otherwise another CPU has
 * since dirtied the page and the bit is left alone.  casx loop as in
 * set_dcache_dirty().
 */
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, 24, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " membar	#StoreLoad | #StoreStore\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (NR_CPUS - 1UL)
			     : "g1", "g7");
}
168
169extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
170
/* Called after a PTE is installed for @address.  If the backing page
 * was marked D-cache dirty, flush it now (locally, or via cross-call
 * to the owning CPU) and clear the dirty mark.  Then let the low-level
 * TSB/TLB preload code see the new translation, keyed by which TLB the
 * original miss came from (the saved thread fault code).
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		/* Owning cpu number is stored in flags bits 24+. */
		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}

	if (get_thread_fault_code())
		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
				   address, pte, get_thread_fault_code());
}
201
/* Generic-VM entry point for D-cache flushing of @page.  For pages in
 * an unmapped mapping the flush is deferred: the page is just marked
 * dirty for this CPU (flushing any stale mark owned by another CPU
 * first).  Otherwise the flush happens immediately.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int dirty = test_bit(PG_dcache_dirty, &page->flags);
	int dirty_cpu = dcache_dirty_cpu(page);
	int this_cpu = get_cpu();

	if (mapping && !mapping_mapped(mapping)) {
		if (dirty) {
			/* Already marked dirty by this CPU: nothing to do. */
			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
228
229void flush_icache_range(unsigned long start, unsigned long end)
230{
231 /* Cheetah has coherent I-cache. */
232 if (tlb_type == spitfire) {
233 unsigned long kaddr;
234
235 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
236 __flush_icache_page(__get_phys(kaddr));
237 }
238}
239
240unsigned long page_to_pfn(struct page *page)
241{
242 return (unsigned long) ((page - mem_map) + pfn_base);
243}
244
245struct page *pfn_to_page(unsigned long pfn)
246{
247 return (mem_map + (pfn - pfn_base));
248}
249
/* Dump a summary of memory usage to the console (SysRq-m and OOM
 * paths): free areas, swap, total/free pages, page-table cache size.
 */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n",pgtable_cache_size);
}
260
/* Emit MMU-related lines for /proc/cpuinfo: the TLB type, plus D-cache
 * flush counters when CONFIG_DEBUG_DCFLUSH is enabled.
 */
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
281
282struct linux_prom_translation {
283 unsigned long virt;
284 unsigned long size;
285 unsigned long data;
286};
287
288extern unsigned long prom_boot_page;
289extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
290extern int prom_get_mmu_ihandle(void);
291extern void register_prom_callbacks(void);
292
293/* Exported for SMP bootup purposes. */
294unsigned long kern_locked_tte_data;
295
/* Fatal-error helper for early page-table setup: report which kind of
 * table (@type, e.g. "pmd"/"pte") could not be allocated, then stop
 * the machine in the PROM.  Never returns.
 */
void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}
301
302#define BASE_PAGE_SIZE 8192
303static pmd_t *prompmd;
304
305/*
306 * Translate PROM's mapping we capture at boot time into physical address.
307 * The second parameter is only set from prom_callback() invocations.
308 */
309unsigned long prom_virt_to_phys(unsigned long promva, int *error)
310{
311 pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
312 pte_t *ptep;
313 unsigned long base;
314
315 if (pmd_none(*pmdp)) {
316 if (error)
317 *error = 1;
318 return(0);
319 }
320 ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
321 if (!pte_present(*ptep)) {
322 if (error)
323 *error = 1;
324 return(0);
325 }
326 if (error) {
327 *error = 0;
328 return(pte_val(*ptep));
329 }
330 base = pte_val(*ptep) & _PAGE_PADDR;
331 return(base + (promva & (BASE_PAGE_SIZE - 1)));
332}
333
334static void inherit_prom_mappings(void)
335{
336 struct linux_prom_translation *trans;
337 unsigned long phys_page, tte_vaddr, tte_data;
338 void (*remap_func)(unsigned long, unsigned long, int);
339 pmd_t *pmdp;
340 pte_t *ptep;
341 int node, n, i, tsz;
342 extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
343
344 node = prom_finddevice("/virtual-memory");
345 n = prom_getproplen(node, "translations");
346 if (n == 0 || n == -1) {
347 prom_printf("Couldn't get translation property\n");
348 prom_halt();
349 }
350 n += 5 * sizeof(struct linux_prom_translation);
351 for (tsz = 1; tsz < n; tsz <<= 1)
352 /* empty */;
353 trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
354 if (trans == NULL) {
355 prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
356 prom_halt();
357 }
358 memset(trans, 0, tsz);
359
360 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
361 prom_printf("Couldn't get translation property\n");
362 prom_halt();
363 }
364 n = n / sizeof(*trans);
365
366 /*
367 * The obp translations are saved based on 8k pagesize, since obp can
368 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
369 * ie obp range, are handled in entry.S and do not use the vpte scheme
370 * (see rant in inherit_locked_prom_mappings()).
371 */
372#define OBP_PMD_SIZE 2048
373 prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
374 if (prompmd == NULL)
375 early_pgtable_allocfail("pmd");
376 memset(prompmd, 0, OBP_PMD_SIZE);
377 for (i = 0; i < n; i++) {
378 unsigned long vaddr;
379
380 if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
381 for (vaddr = trans[i].virt;
382 ((vaddr < trans[i].virt + trans[i].size) &&
383 (vaddr < HI_OBP_ADDRESS));
384 vaddr += BASE_PAGE_SIZE) {
385 unsigned long val;
386
387 pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
388 if (pmd_none(*pmdp)) {
389 ptep = __alloc_bootmem(BASE_PAGE_SIZE,
390 BASE_PAGE_SIZE,
391 bootmap_base);
392 if (ptep == NULL)
393 early_pgtable_allocfail("pte");
394 memset(ptep, 0, BASE_PAGE_SIZE);
395 pmd_set(pmdp, ptep);
396 }
397 ptep = (pte_t *)__pmd_page(*pmdp) +
398 ((vaddr >> 13) & 0x3ff);
399
400 val = trans[i].data;
401
402 /* Clear diag TTE bits. */
403 if (tlb_type == spitfire)
404 val &= ~0x0003fe0000000000UL;
405
406 set_pte_at(&init_mm, vaddr,
407 ptep, __pte(val | _PAGE_MODIFIED));
408 trans[i].data += BASE_PAGE_SIZE;
409 }
410 }
411 }
412 phys_page = __pa(prompmd);
413 obp_iaddr_patch[0] |= (phys_page >> 10);
414 obp_iaddr_patch[1] |= (phys_page & 0x3ff);
415 flushi((long)&obp_iaddr_patch[0]);
416 obp_daddr_patch[0] |= (phys_page >> 10);
417 obp_daddr_patch[1] |= (phys_page & 0x3ff);
418 flushi((long)&obp_daddr_patch[0]);
419
420 /* Now fixup OBP's idea about where we really are mapped. */
421 prom_printf("Remapping the kernel... ");
422
423 /* Spitfire Errata #32 workaround */
424 /* NOTE: Using plain zero for the context value is
425 * correct here, we are not using the Linux trap
426 * tables yet so we should not use the special
427 * UltraSPARC-III+ page size encodings yet.
428 */
429 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
430 "flush %%g6"
431 : /* No outputs */
432 : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
433
434 switch (tlb_type) {
435 default:
436 case spitfire:
437 phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
438 break;
439
440 case cheetah:
441 case cheetah_plus:
442 phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
443 break;
444 };
445
446 phys_page &= _PAGE_PADDR;
447 phys_page += ((unsigned long)&prom_boot_page -
448 (unsigned long)KERNBASE);
449
450 if (tlb_type == spitfire) {
451 /* Lock this into i/d tlb entry 59 */
452 __asm__ __volatile__(
453 "stxa %%g0, [%2] %3\n\t"
454 "stxa %0, [%1] %4\n\t"
455 "membar #Sync\n\t"
456 "flush %%g6\n\t"
457 "stxa %%g0, [%2] %5\n\t"
458 "stxa %0, [%1] %6\n\t"
459 "membar #Sync\n\t"
460 "flush %%g6"
461 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
462 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
463 "r" (59 << 3), "r" (TLB_TAG_ACCESS),
464 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
465 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
466 : "memory");
467 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
468 /* Lock this into i/d tlb-0 entry 11 */
469 __asm__ __volatile__(
470 "stxa %%g0, [%2] %3\n\t"
471 "stxa %0, [%1] %4\n\t"
472 "membar #Sync\n\t"
473 "flush %%g6\n\t"
474 "stxa %%g0, [%2] %5\n\t"
475 "stxa %0, [%1] %6\n\t"
476 "membar #Sync\n\t"
477 "flush %%g6"
478 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
479 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
480 "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
481 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
482 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
483 : "memory");
484 } else {
485 /* Implement me :-) */
486 BUG();
487 }
488
489 tte_vaddr = (unsigned long) KERNBASE;
490
491 /* Spitfire Errata #32 workaround */
492 /* NOTE: Using plain zero for the context value is
493 * correct here, we are not using the Linux trap
494 * tables yet so we should not use the special
495 * UltraSPARC-III+ page size encodings yet.
496 */
497 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
498 "flush %%g6"
499 : /* No outputs */
500 : "r" (0),
501 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
502
503 if (tlb_type == spitfire)
504 tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
505 else
506 tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
507
508 kern_locked_tte_data = tte_data;
509
510 remap_func = (void *) ((unsigned long) &prom_remap -
511 (unsigned long) &prom_boot_page);
512
513
514 /* Spitfire Errata #32 workaround */
515 /* NOTE: Using plain zero for the context value is
516 * correct here, we are not using the Linux trap
517 * tables yet so we should not use the special
518 * UltraSPARC-III+ page size encodings yet.
519 */
520 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
521 "flush %%g6"
522 : /* No outputs */
523 : "r" (0),
524 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
525
526 remap_func((tlb_type == spitfire ?
527 (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
528 (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
529 (unsigned long) KERNBASE,
530 prom_get_mmu_ihandle());
531
532 if (bigkernel)
533 remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
534 (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
535
536 /* Flush out that temporary mapping. */
537 spitfire_flush_dtlb_nucleus_page(0x0);
538 spitfire_flush_itlb_nucleus_page(0x0);
539
540 /* Now lock us back into the TLBs via OBP. */
541 prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
542 prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
543 if (bigkernel) {
544 prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
545 tte_vaddr + 0x400000);
546 prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
547 tte_vaddr + 0x400000);
548 }
549
550 /* Re-read translations property. */
551 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
552 prom_printf("Couldn't get translation property\n");
553 prom_halt();
554 }
555 n = n / sizeof(*trans);
556
557 for (i = 0; i < n; i++) {
558 unsigned long vaddr = trans[i].virt;
559 unsigned long size = trans[i].size;
560
561 if (vaddr < 0xf0000000UL) {
562 unsigned long avoid_start = (unsigned long) KERNBASE;
563 unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
564
565 if (bigkernel)
566 avoid_end += (4 * 1024 * 1024);
567 if (vaddr < avoid_start) {
568 unsigned long top = vaddr + size;
569
570 if (top > avoid_start)
571 top = avoid_start;
572 prom_unmap(top - vaddr, vaddr);
573 }
574 if ((vaddr + size) > avoid_end) {
575 unsigned long bottom = vaddr;
576
577 if (bottom < avoid_end)
578 bottom = avoid_end;
579 prom_unmap((vaddr + size) - bottom, bottom);
580 }
581 }
582 }
583
584 prom_printf("done.\n");
585
586 register_prom_callbacks();
587}
588
589/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
590 * upwards as reserved for use by the firmware (I wonder if this
591 * will be the same on Cheetah...). We use this virtual address
592 * range for the VPTE table mappings of the nucleus so we need
593 * to zap them when we enter the PROM. -DaveM
594 */
/* Remove any DTLB entries covering the PROM-reserved VPTE region
 * (0xfffffffc00000000 and up).  Called on entry to the PROM so that
 * our nucleus VPTE mappings cannot alias firmware mappings.
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	if (tlb_type == spitfire) {
		/* NOTE(review): scan stops at entry 62 — presumably entry 63
		 * is the locked kernel mapping; confirm against
		 * SPITFIRE_HIGHEST_LOCKED_TLBENT.
		 */
		for (i = 0; i < 63; i++) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 * page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_dtlb_tag(i);
			/* A zero context with a VA in the reserved range marks
			 * a nucleus VPTE entry: clear the tag, then the data.
			 */
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah: walk all 512 entries of DTLB bank 2, and bank 3
		 * as well on cheetah_plus.
		 */
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}
657
/* Non-zero once inherit_locked_prom_mappings(1) has captured the PROM's
 * locked TLB entries into prom_itlb[]/prom_dtlb[] below.
 */
static int prom_ditlb_set;

/* One saved locked PROM TLB entry.  tlb_ent == -1 marks an unused slot. */
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
/* Up to 16 saved locked entries each for the ITLB and DTLB. */
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
665
/* Switch the MMU between the Linux and PROM views.
 *
 * enter != 0: flush nucleus VPTE mappings and install the locked PROM
 * I/D-TLB entries previously saved by inherit_locked_prom_mappings().
 * enter == 0: restore the current task's address limit and tear the
 * PROM entries back out of the TLBs.
 *
 * Does nothing to the TLBs until prom_ditlb_set says we actually have
 * saved PROM entries.
 */
void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically: toggle PSTATE_IE to
	 * block interrupts; the saved pstate is restored at the end.
	 */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world: for each saved entry, write the tag
		 * via TLB_TAG_ACCESS, then the data into the chip-specific
		 * TLB slot.
		 */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					"i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						     "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		/* Leave PROM world: zero the tag and data of every slot we
		 * had installed for the PROM.
		 */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	/* Restore the original pstate (re-enabling interrupts if they
	 * were enabled on entry).
	 */
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
742
/* Scan the I/D-TLBs for locked (_PAGE_L) entries the PROM left behind
 * and remove them.  If save_p is non-zero, each entry is first recorded
 * in prom_itlb[]/prom_dtlb[] (up to 16 each) so that prom_world() and
 * prom_reload_locked() can re-install them later, and prom_ditlb_set is
 * raised at the end.  The highest locked entries (used for the kernel
 * image, minus one more when bigkernel) are excluded from the scan.
 */
void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecovable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		/* Mark every save slot unused before scanning. */
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 * page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_dtlb_data(i);
			/* Only locked, valid entries belong to the PROM. */
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 * cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				/* Zap the entry: clear tag, then data. */
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 * cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

		/* Cheetah has no errata #32 dance; scan the locked (small)
		 * D-TLB then I-TLB banks directly.
		 */
		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}
915
/* Give PROM back his world, done during reboots...
 * Re-install every locked PROM I/D-TLB entry that
 * inherit_locked_prom_mappings(1) recorded: write the saved tag via
 * TLB_TAG_ACCESS, then stuff the saved data into the original slot.
 */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				"i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}
950
951#ifdef DCACHE_ALIASING_POSSIBLE
/* Flush the D-cache for the virtual address range [start, end).
 * Only compiled when D-cache aliasing is possible on this cpu family.
 */
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		/* Invalidate by cache-tag write, one 32-byte line at a
		 * time, indexed by (va & 0x3fe0).  After 512 lines we have
		 * covered every index (512 * 32 == 16K), so stop early.
		 */
		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		/* Cheetah-style: invalidate by physical address through
		 * the ASI_DCACHE_INVALIDATE store ASI.
		 */
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
975#endif /* DCACHE_ALIASING_POSSIBLE */
976
/* If not locked, zap it.
 * Walk the whole I/D-TLB and clear every entry that does not have
 * _PAGE_L set.  Interrupts are blocked (PSTATE_IE toggle) while the
 * TLBs are manipulated; the original pstate is restored at the end.
 */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				/* Clear the tag first, then the data. */
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah has dedicated flush-all primitives. */
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
1033
1034/* Caller does TLB context flushing on local CPU if necessary.
1035 * The caller also ensures that CTX_VALID(mm->context) is false.
1036 *
1037 * We must be careful about boundary cases so that we never
1038 * let the user have CTX 0 (nucleus) or we ever use a CTX
1039 * version of zero (and thus NO_CONTEXT would not be caught
1040 * by version mis-match tests in mmu_context.h).
1041 */
1042void get_new_mmu_context(struct mm_struct *mm)
1043{
1044 unsigned long ctx, new_ctx;
1045 unsigned long orig_pgsz_bits;
1046
1047
1048 spin_lock(&ctx_alloc_lock);
1049 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
1050 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
1051 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
1052 if (new_ctx >= (1 << CTX_NR_BITS)) {
1053 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
1054 if (new_ctx >= ctx) {
1055 int i;
1056 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
1057 CTX_FIRST_VERSION;
1058 if (new_ctx == 1)
1059 new_ctx = CTX_FIRST_VERSION;
1060
1061 /* Don't call memset, for 16 entries that's just
1062 * plain silly...
1063 */
1064 mmu_context_bmap[0] = 3;
1065 mmu_context_bmap[1] = 0;
1066 mmu_context_bmap[2] = 0;
1067 mmu_context_bmap[3] = 0;
1068 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
1069 mmu_context_bmap[i + 0] = 0;
1070 mmu_context_bmap[i + 1] = 0;
1071 mmu_context_bmap[i + 2] = 0;
1072 mmu_context_bmap[i + 3] = 0;
1073 }
1074 goto out;
1075 }
1076 }
1077 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
1078 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
1079out:
1080 tlb_context_cache = new_ctx;
1081 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
1082 spin_unlock(&ctx_alloc_lock);
1083}
1084
#ifndef CONFIG_SMP
/* Single system-wide page-table quicklist cache on UP builds.
 * (NOTE(review): presumably per-cpu on SMP, defined elsewhere — confirm.)
 */
struct pgtable_cache_struct pgt_quicklists;
#endif
1088
1089/* OK, we have to color these pages. The page tables are accessed
1090 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
1091 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
1092 * other parts of the kernel. By coloring, we make sure that the tlbmiss
1093 * fast handlers do not get data from old/garbage dcache lines that
1094 * correspond to an old/stale virtual address (user/kernel) that
1095 * previously mapped the pagetable page while accessing vpte range
1096 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
1097 * color is the same, then when the kernel initializes the pagetable
1098 * using the later address range, accesses with the first address
1099 * range will see the newly initialized data rather than the garbage.
1100 */
1101#ifdef DCACHE_ALIASING_POSSIBLE
1102#define DC_ALIAS_SHIFT 1
1103#else
1104#define DC_ALIAS_SHIFT 0
1105#endif
/* Allocate one kernel pte page, D-cache colored to match the VPTE
 * address it will be mapped at (see the coloring comment above).
 *
 * Fast path: grab a page of the right color from the quicklist.
 * Slow path: allocate 2^DC_ALIAS_SHIFT pages, zero them, keep the half
 * whose color matches VPTE_COLOR(address) and push the other half onto
 * the opposite-color quicklist.  Returns NULL if the page allocator
 * fails.
 */
pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		/* The order-1 allocation came back as a compound page;
		 * make each half an independent, individually refcounted
		 * page so they can be used and freed separately.
		 */
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		/* Pick the half whose color matches; the other becomes
		 * quicklist fodder.
		 */
		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size.
		 * The quicklist is a singly-linked list threaded through
		 * the free pages themselves; preemption is disabled while
		 * we touch the per-cpu-style globals.
		 */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}
1156
1157void sparc_ultra_dump_itlb(void)
1158{
1159 int slot;
1160
1161 if (tlb_type == spitfire) {
1162 printk ("Contents of itlb: ");
1163 for (slot = 0; slot < 14; slot++) printk (" ");
1164 printk ("%2x:%016lx,%016lx\n",
1165 0,
1166 spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
1167 for (slot = 1; slot < 64; slot+=3) {
1168 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1169 slot,
1170 spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
1171 slot+1,
1172 spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
1173 slot+2,
1174 spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
1175 }
1176 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1177 printk ("Contents of itlb0:\n");
1178 for (slot = 0; slot < 16; slot+=2) {
1179 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1180 slot,
1181 cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
1182 slot+1,
1183 cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
1184 }
1185 printk ("Contents of itlb2:\n");
1186 for (slot = 0; slot < 128; slot+=2) {
1187 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1188 slot,
1189 cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
1190 slot+1,
1191 cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
1192 }
1193 }
1194}
1195
1196void sparc_ultra_dump_dtlb(void)
1197{
1198 int slot;
1199
1200 if (tlb_type == spitfire) {
1201 printk ("Contents of dtlb: ");
1202 for (slot = 0; slot < 14; slot++) printk (" ");
1203 printk ("%2x:%016lx,%016lx\n", 0,
1204 spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
1205 for (slot = 1; slot < 64; slot+=3) {
1206 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1207 slot,
1208 spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
1209 slot+1,
1210 spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
1211 slot+2,
1212 spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
1213 }
1214 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1215 printk ("Contents of dtlb0:\n");
1216 for (slot = 0; slot < 16; slot+=2) {
1217 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1218 slot,
1219 cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
1220 slot+1,
1221 cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
1222 }
1223 printk ("Contents of dtlb2:\n");
1224 for (slot = 0; slot < 512; slot+=2) {
1225 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1226 slot,
1227 cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
1228 slot+1,
1229 cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
1230 }
1231 if (tlb_type == cheetah_plus) {
1232 printk ("Contents of dtlb3:\n");
1233 for (slot = 0; slot < 512; slot+=2) {
1234 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1235 slot,
1236 cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
1237 slot+1,
1238 cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
1239 }
1240 }
1241 }
1242}
1243
1244extern unsigned long cmdline_memory_size;
1245
/* Set up the boot-time memory allocator.
 *
 * Scans sp_banks[] for physical memory (clipping it to the "mem="
 * command line value held in cmdline_memory_size, if any), initializes
 * the bootmem bitmap, registers the usable banks, then reserves the
 * initrd image, the kernel image and the bootmem map itself.
 *
 * Returns the last page frame number of physical memory; the count of
 * pages available to the allocator is stored through *pages_avail.
 */
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				/* "mem=" limit exceeded: trim this bank by
				 * the overshoot and terminate the bank list
				 * right here.
				 */
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			/* If the initrd sits where the bootmap would go,
			 * push the bootmap past the end of the initrd.
			 */
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	bootmap_base = bootmap_pfn << PAGE_SHIFT;

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}
1375
1376/* paging_init() sets up the page tables */
1377
1378extern void cheetah_ecache_flush_init(void);
1379
1380static unsigned long last_valid_pfn;
1381
/* paging_init() sets up the page tables.
 *
 * Order matters throughout: temporary locked 4MB DTLB mappings of the
 * kernel's PAGE_OFFSET alias are installed first (so physical address
 * computations work), then the kernel pgd is shifted to the alias,
 * bootmem is set up, OBP mappings are inherited, and finally the
 * temporary mappings are flushed and the memory zones initialized.
 */
void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	extern unsigned int sparc64_vpte_patchme1[1];
	extern unsigned int sparc64_vpte_patchme2[1];
	unsigned long alias_base = kern_base + PAGE_OFFSET;
	unsigned long second_alias_page = 0;
	unsigned long pt, flags, end_pfn, pages_avail;
	unsigned long shift = alias_base - ((unsigned long)KERNBASE);
	unsigned long real_end;

	/* Context 0 is the nucleus; never hand it to user mms. */
	set_bit(0, mmu_context_bmap);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64)
		real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
#endif

	/* We assume physical memory starts at some 4mb multiple,
	 * if this were not true we wouldn't boot up to this point
	 * anyways.
	 */
	pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
	local_irq_save(flags);
	if (tlb_type == spitfire) {
		/* Install a locked 4MB mapping of the kernel at its
		 * PAGE_OFFSET alias into spitfire DTLB entry 61 (and 60
		 * for the second 4MB when the image is big enough).
		 */
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	: /* No outputs */
	: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
	  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
	: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
		: "memory");
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Same trick on cheetah, using tlb-0 entries 13 and 12. */
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	: /* No outputs */
	: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
	  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
	: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
		: "memory");
		}
	}
	local_irq_restore(flags);
	
	/* Now set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
	
	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_pmd_dir + (shift / sizeof(pgd_t)));
	
	/* Patch the VPTE miss handlers with the pgd's physical address
	 * (split into high and low immediate fields), then flush the
	 * I-cache so the patched instructions are seen.
	 */
	sparc64_vpte_patchme1[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
	sparc64_vpte_patchme2[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
	flushi((long)&sparc64_vpte_patchme1[0]);
	
	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();
	
	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);
	
	/* We only created DTLB mapping of this stuff. */
	spitfire_flush_dtlb_nucleus_page(alias_base);
	if (second_alias_page)
		spitfire_flush_dtlb_nucleus_page(second_alias_page);

	__flush_tlb_all();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		/* All memory goes into ZONE_DMA; holes are pages bootmem
		 * could not hand out.
		 */
		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}
1531
1532/* Ok, it seems that the prom can allocate some more memory chunks
1533 * as a side effect of some prom calls we perform during the
1534 * boot sequence. My most likely theory is that it is from the
1535 * prom_set_traptable() call, and OBP is allocating a scratchpad
1536 * for saving client program register state etc.
1537 */
1538static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1539{
1540 int swapi = 0;
1541 int i, mitr;
1542 unsigned long tmpaddr, tmpsize;
1543 unsigned long lowest;
1544
1545 for (i = 0; thislist[i].theres_more != 0; i++) {
1546 lowest = thislist[i].start_adr;
1547 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1548 if (thislist[mitr].start_adr < lowest) {
1549 lowest = thislist[mitr].start_adr;
1550 swapi = mitr;
1551 }
1552 if (lowest == thislist[i].start_adr)
1553 continue;
1554 tmpaddr = thislist[swapi].start_adr;
1555 tmpsize = thislist[swapi].num_bytes;
1556 for (mitr = swapi; mitr > i; mitr--) {
1557 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1558 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1559 }
1560 thislist[i].start_adr = tmpaddr;
1561 thislist[i].num_bytes = tmpsize;
1562 }
1563}
1564
/* Re-read the "available" property of the PROM /memory node and
 * rebuild sp_banks[] from it, sorted by ascending physical address
 * and terminated by a sentinel entry with num_bytes == 0.  The PROM
 * can consume memory behind our back during boot, so the available
 * list seen here may be smaller than at early boot.
 */
void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	/* NOTE(review): if the property read fails, num_regs <= 0, the
	 * loop body never runs and the avail[i - 1] store below writes
	 * before the array -- presumably /memory always carries an
	 * "available" property; confirm before relying on this path.
	 */
	for (i = 0; i < num_regs; i++) {
		/* Link each entry to the next so the list helpers work. */
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;	/* terminate the chain */
	sort_memlist(avail);

	/* Copy the sorted list into sp_banks[], bounded by
	 * SPARC_PHYS_BANKS (the array carries one extra slot for the
	 * sentinel written after the loop).
	 */
	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Sentinel entry marking the end of the bank table. */
	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Trim every bank to a whole number of pages. */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
1617
/* After the PROM may have eaten memory behind our back, rescan the
 * available list and, for every page that was available before but is
 * no longer, reserve it in the bootmem map.  Pages still available
 * get their 4MB chunk flagged in sparc64_valid_addr_bitmap.
 */
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	/* Snapshot the current bank table before the rescan clobbers it. */
	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		/* Walk the old bank page by page, checking whether each
		 * page is still fully covered by some new bank.
		 */
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					/* Still available: mark its 4MB
					 * chunk valid (bitmap is at 4MB
					 * granularity, hence >> 22).
					 */
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			/* Page vanished from the available list: keep
			 * the allocator away from it.
			 */
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
1664
/* Late memory initialization: allocate and populate the valid-address
 * bitmap, hand all bootmem pages to the page allocator, set up the
 * shared zero page, and print the memory banner.
 */
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	/* Bitmap holds one bit per 4MB chunk: shift pfn by (22 -
	 * PAGE_SHIFT) for chunks, then by 6 for 64-bit words; "<< 3"
	 * below converts words to bytes.
	 */
	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	/* The kernel image itself is always valid. */
	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	/* Section sizes in pages for the banner below. */
	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
1726
/* Release the __init section back to the page allocator, poisoning it
 * with 0xcc first so any stale reference faults noisily.
 */
void free_initmem (void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		/* Translate from the KERNBASE alias the kernel runs at
		 * to the linear-map address of the physical load base.
		 */
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
1753
1754#ifdef CONFIG_BLK_DEV_INITRD
1755void free_initrd_mem(unsigned long start, unsigned long end)
1756{
1757 if (start < end)
1758 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1759 for (; start < end; start += PAGE_SIZE) {
1760 struct page *p = virt_to_page(start);
1761
1762 ClearPageReserved(p);
1763 set_page_count(p, 1);
1764 __free_page(p);
1765 num_physpages++;
1766 totalram_pages++;
1767 }
1768}
1769#endif
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
new file mode 100644
index 000000000000..90ca99d0b89c
--- /dev/null
+++ b/arch/sparc64/mm/tlb.c
@@ -0,0 +1,151 @@
1/* arch/sparc64/mm/tlb.c
2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/init.h>
8#include <linux/percpu.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11
12#include <asm/pgtable.h>
13#include <asm/pgalloc.h>
14#include <asm/tlbflush.h>
15#include <asm/cacheflush.h>
16#include <asm/mmu_context.h>
17#include <asm/tlb.h>
18
19/* Heavily inspired by the ppc64 code. */
20
/* Per-cpu TLB demap batch; initializer zero-fills every field of
 * struct mmu_gather explicitly.
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
23
/* Flush all TLB demaps batched on this CPU.  If the batched mm has
 * never been assigned a valid hardware context there is nothing in
 * the TLB for it, so only the batch counter is reset in that case.
 */
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			/* Cross-call all CPUs running this context. */
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}
41
/* Queue one virtual address for a deferred TLB flush.  Bit 0 of the
 * recorded address encodes "PTE was executable", which the flush code
 * uses to decide whether the I-TLB needs a demap too.  Dirty file
 * pages whose kernel and user addresses differ in bit 13 get their
 * D-cache aliases flushed immediately (presumably bit 13 is the
 * page-color bit of the aliasing D-cache -- confirm against
 * DCACHE_ALIASING_POSSIBLE configuration).
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;	/* exec marker consumed by the flusher */

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* NOTE(review): tlb_frozen is set elsewhere -- presumably while
	 * a full-mm teardown is in progress and a context flush will
	 * cover these pages; confirm against the mmu_gather users.
	 */
	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	/* All batched entries must belong to one mm; drain on switch. */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
93
/* Queue TLB flushes for the linear ("VPTE") page-table mappings
 * covering virtual range [start, end), after the page tables there
 * have been freed.
 */
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem. */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	/* Same single-mm batching discipline as tlb_batch_add(). */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	/* PTEs are 8 bytes each, hence the ">> (PAGE_SHIFT - 3)"
	 * scaling from mapped addresses to VPTE-area addresses.
	 */
	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
new file mode 100644
index 000000000000..7a0934321010
--- /dev/null
+++ b/arch/sparc64/mm/ultra.S
@@ -0,0 +1,583 @@
1/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
2 * ultra.S: Don't expand these all over the place...
3 *
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <asm/asi.h>
9#include <asm/pgtable.h>
10#include <asm/page.h>
11#include <asm/spitfire.h>
12#include <asm/mmu_context.h>
13#include <asm/pil.h>
14#include <asm/head.h>
15#include <asm/thread_info.h>
16#include <asm/cacheflush.h>
17
	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Take great care not to break any of it for anybody...
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Flush every TLB entry of one context.  Fast path: the target
	 * context already sits in the secondary context register, so a
	 * single demap-context store (address 0x50 = demap context,
	 * secondary select) clears D-TLB and I-TLB.  Otherwise fall
	 * into the slow path, which swaps the context in and out.
	 */
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3	! delay slot: runs on both paths
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	retl
	 flush		%g6
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
48
	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Demap a batch of nr page translations for one context with
	 * interrupts disabled while the secondary context register is
	 * temporarily switched.  Bit 0 of each vaddr means the page was
	 * executable: demap the I-TLB as well as the D-TLB for it.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3	! delay slot: select secondary ctx demap
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore previous secondary ctx
	flush		%g6
	retl
	 wrpr		%g7, 0x0, %pstate
75
	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range: /* %o0=start, %o1=end */
	/* Demap every page in [start, end) from the nucleus context
	 * (base | 0x20), walking the range backwards via offset %o3.
	 */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	retl
	 flush		%g6
92
__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: with interrupts disabled, install
	 * the target context into the secondary context register, issue
	 * the demap-context stores (%g3 still holds 0x50 from the fast
	 * path), then restore the previous secondary context from %g2.
	 */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU
	flush		%g6
	retl
	 wrpr		%g1, 0, %pstate
104
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Rebuild the kernel linear-map address of the physical page and
	 * execute 'flush' on each 32-byte line to invalidate the I-cache.
	 */
	membar		#StoreStore
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1
	add		%o0, %g1, %o0
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop
130
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Walk the D-cache tags four at a time (the '!' annotations are
	 * the author's Spitfire pipeline-slot schedule) and invalidate
	 * any line whose tag matches this page.  If %o1 is non-zero,
	 * tail-branch to __flush_icache_page afterwards; its delay slot
	 * rebuilds the address from the tag comparitor in %o0.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0
	clr		%o4
	srlx		%o0, 11, %o0		! tag comparitor
	sethi		%hi(1 << 14), %o2	! D$ size, loop bound
1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%o3, DTAG_MASK, %o3		! IEU1
	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add		%o4, (1 << 5), %o4		! IEU0
	andn		%g1, DTAG_MASK, %g1		! IEU1
	cmp		%o0, %o3			! IEU1	Group
	be,a,pn		%xcc, dflush1			! CTI
	 sub		%o4, (4 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g1			! IEU1	Group
	andn		%g2, DTAG_MASK, %g2		! IEU0
	be,a,pn		%xcc, dflush2			! CTI
	 sub		%o4, (3 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g2			! IEU1	Group
	andn		%g3, DTAG_MASK, %g3		! IEU0
	be,a,pn		%xcc, dflush3			! CTI
	 sub		%o4, (2 << 5), %o4		! IEU0	(Group)
	cmp		%o0, %g3			! IEU1	Group
	be,a,pn		%xcc, dflush4			! CTI
	 sub		%o4, (1 << 5), %o4		! IEU0
2:	cmp		%o4, %o2			! IEU1	Group
	bne,pt		%xcc, 1b			! CTI
	 nop						! IEU0

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0
	retl
	 nop

dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
	add		%o4, (1 << 5), %o4
	membar		#Sync
	ba,pt		%xcc, 2b
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
196
	.align		32
__prefill_dtlb:
	/* Pre-load one D-TLB entry: write the tag (vaddr|ctx, in %o5)
	 * to TLB_TAG_ACCESS, then push the PTE in %o2 into the TLB.
	 * Interrupts are disabled around the two stores.
	 */
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o5, [%g1] ASI_DMMU
	stxa		%o2, [%g0] ASI_DTLB_DATA_IN
	flush		%g6
	retl
	 wrpr		%g7, %pstate
__prefill_itlb:
	/* Same as __prefill_dtlb, but targets the instruction TLB. */
	rdpr		%pstate, %g7
	wrpr		%g7, PSTATE_IE, %pstate
	mov		TLB_TAG_ACCESS, %g1
	stxa		%o5, [%g1] ASI_IMMU
	stxa		%o2, [%g0] ASI_ITLB_DATA_IN
	flush		%g6
	retl
	 wrpr		%g7, %pstate
216
	.globl		__update_mmu_cache
__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
	/* Build the TLB tag (page-aligned address | context) in %o5 and
	 * dispatch to the D-TLB or I-TLB prefill routine depending on
	 * the fault code; the 'or' completes the tag in the delay slot.
	 */
	srlx		%o1, PAGE_SHIFT, %o1
	andcc		%o3, FAULT_CODE_DTLB, %g0
	sllx		%o1, PAGE_SHIFT, %o5
	bne,pt		%xcc, __prefill_dtlb
	 or		%o5, %o0, %o5
	ba,a,pt		%xcc, __prefill_itlb
225
	/* Cheetah specific versions, patched at boot time.
	 *
	 * The writes to the PRIMARY_CONTEXT register in this file are
	 * safe even on Cheetah+ and later wrt. the page size fields.
	 * The nucleus page size fields do not matter because we make
	 * no data references, and these instructions execute out of a
	 * locked I-TLB entry sitting in the fully associative I-TLB.
	 * This sequence should also never trap.
	 */
__cheetah_flush_tlb_mm: /* 15 insns */
	/* Cheetah context flush: must use the PRIMARY context register
	 * and a demap-context store (address 0x40 = demap context,
	 * primary select), running at TL=1 with interrupts off.  The
	 * "15 insns" size is relied on by cheetah_patch_cachetlbops.
	 */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3
	ldxa		[%o2] ASI_DMMU, %g2
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU
	flush		%g6
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
251
__cheetah_flush_tlb_pending:	/* 22 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Cheetah version of __flush_tlb_pending: works through the
	 * PRIMARY context at TL=1, so the demap addresses need no
	 * secondary-select bit.  Must stay exactly 22 instructions for
	 * the boot-time patching below.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	brnz,pt		%o1, 1b
	 membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	flush		%g6
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
276
#ifdef DCACHE_ALIASING_POSSIBLE
flush_dcpage_cheetah: /* 11 insns */
	/* Invalidate one page of D-cache lines by physical address via
	 * ASI_DCACHE_INVALIDATE; patched in over __flush_dcache_page on
	 * Cheetah at boot.  Must stay exactly 11 instructions.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
291
cheetah_patch_one:
	/* Copy %o2 instruction words from %o1 over the code at %o0,
	 * flushing the I-cache for each patched word.
	 */
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop
302
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* At boot on Cheetah, overwrite the Spitfire cache/TLB routines
	 * with the Cheetah versions above.  The immediates passed to
	 * cheetah_patch_one are the instruction counts of the
	 * replacement routines and must match their definitions.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		cheetah_patch_one
	 mov		15, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		cheetah_patch_one
	 mov		22, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(flush_dcpage_cheetah), %o1
	or		%o1, %lo(flush_dcpage_cheetah), %o1
	call		cheetah_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
332
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	ivector table, don't touch
	 *   %g2	scratch 1
	 *   %g3	scratch 2
	 *   %g4	scratch 3
	 *
	 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:
	/* Cross-call slave: demap the whole context in %g5 from both
	 * TLBs via a primary-context demap-context store (0x40),
	 * preserving this CPU's primary context around it.
	 */
	mov		PRIMARY_CONTEXT, %g2
	mov		0x40, %g4
	ldxa		[%g2] ASI_DMMU, %g3
	stxa		%g5, [%g2] ASI_DMMU
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU
	retry
360
	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	/* Cross-call slave for a batched page flush; bit 0 of each
	 * vaddr again requests an extra I-TLB demap.  The loop reuses
	 * %g5, which is fine: the context was already installed in
	 * PRIMARY_CONTEXT above.
	 */
	sllx		%g1, 3, %g1
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2
	stxa		%g5, [%g4] ASI_DMMU
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %g5
	andcc		%g5, 0x1, %g0
	be,pn		%icc, 2f
	 andn		%g5, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	 nop
	stxa		%g2, [%g4] ASI_DMMU
	retry
381
	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
	/* Cross-call slave: demap kernel pages [%g1, %g7) from the
	 * nucleus context, walking the range backwards via offset %g3.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
	nop
	nop
400
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:
	/* Cross-call slave: build a full trap frame via etrap_irq at
	 * PIL 15, call smp_synchronize_tick_client(), and return
	 * through rtrap_xcall.
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
	call		smp_synchronize_tick_client
	 nop
	clr		%l6
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
418
	/* NOTE: This is SPECIAL!! We do etrap/rtrap however
	 * we choose to deal with the "BH's run with
	 * %pil==15" problem (described in asm/pil.h)
	 * by just invoking rtrap directly past where
	 * BH's are checked for.
	 *
	 * We do it like this because we do not want %pil==15
	 * lockups to prevent regs being reported.
	 */
	.globl		xcall_report_regs
xcall_report_regs:
	/* Cross-call slave: build a trap frame and dump this CPU's
	 * registers via __show_regs().
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
	call		__show_regs
	 add		%sp, PTREGS_OFF, %o0
	clr		%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
443
#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate one page of D-cache by physical
	 * address (same loop as flush_dcpage_cheetah, ending in retry).
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
457
	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Cross-call slave: scan the D-cache tags and invalidate lines
	 * matching this physical page, then (for mapped pages, %g5 set)
	 * flush the I-cache over the kernel virtual mapping in %g7.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! line valid?
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2
	cmp		%g2, %g1

	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop
492
	.globl		xcall_promstop
xcall_promstop:
	/* Cross-call slave: trap in at PIL 15 and ask the PROM to stop
	 * this CPU; spin forever if that ever returns.
	 */
	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
	flushw
	call		prom_stopself
	 nop
	/* We should not return, just spin if we do... */
1:	b,a,pt		%xcc, 1b
	 nop
508
	.data

errata32_hwbug:
	.xword	0

	.text

	/* These two are not performance critical... */
	.globl		xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
	/* Walk every D-TLB and I-TLB entry by index (%g3 = entry << 3),
	 * clearing each translation whose _PAGE_L (locked) bit is not
	 * set.  The dummy stores to errata32_hwbug work around
	 * Spitfire Errata #32.
	 */
	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

	clr		%g2
	clr		%g3
1:	ldxa		[%g3] ASI_DTLB_DATA_ACCESS, %g4
	and		%g4, _PAGE_L, %g5
	brnz,pn		%g5, 2f		! skip locked D-TLB entries
	 mov		TLB_TAG_ACCESS, %g7

	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync
	stxa		%g0, [%g3] ASI_DTLB_DATA_ACCESS
	membar		#Sync

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

2:	ldxa		[%g3] ASI_ITLB_DATA_ACCESS, %g4
	and		%g4, _PAGE_L, %g5
	brnz,pn		%g5, 2f		! skip locked I-TLB entries
	 mov		TLB_TAG_ACCESS, %g7

	stxa		%g0, [%g7] ASI_IMMU
	membar		#Sync
	stxa		%g0, [%g3] ASI_ITLB_DATA_ACCESS
	membar		#Sync

	/* Spitfire Errata #32 workaround. */
	sethi		%hi(errata32_hwbug), %g4
	stx		%g0, [%g4 + %lo(errata32_hwbug)]

2:	add		%g2, 1, %g2
	cmp		%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
	ble,pt		%icc, 1b
	 sll		%g2, 3, %g3
	flush		%g6
	retry
559
	.globl		xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
	/* Single demap-all operation (address 0x80) clears both TLBs
	 * on Cheetah.
	 */
	mov		0x80, %g2
	stxa		%g0, [%g2] ASI_DMMU_DEMAP
	stxa		%g0, [%g2] ASI_IMMU_DEMAP
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#endif /* CONFIG_SMP */
diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig
new file mode 100644
index 000000000000..5ade19801b97
--- /dev/null
+++ b/arch/sparc64/oprofile/Kconfig
@@ -0,0 +1,23 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, including the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/sparc64/oprofile/Makefile b/arch/sparc64/oprofile/Makefile
new file mode 100644
index 000000000000..e9feca1ca28b
--- /dev/null
+++ b/arch/sparc64/oprofile/Makefile
@@ -0,0 +1,9 @@
obj-$(CONFIG_OPROFILE) += oprofile.o

# Reuse the generic oprofile driver core; only init.o is arch-specific.
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
	oprof.o cpu_buffer.o buffer_sync.o \
	event_buffer.o oprofile_files.o \
	oprofilefs.o oprofile_stats.o \
	timer_int.o )

oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/sparc64/oprofile/init.c b/arch/sparc64/oprofile/init.c
new file mode 100644
index 000000000000..9ab815b95b5a
--- /dev/null
+++ b/arch/sparc64/oprofile/init.c
@@ -0,0 +1,23 @@
1/**
2 * @file init.c
3 *
4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
6 *
7 * @author John Levon <levon@movementarian.org>
8 */
9
10#include <linux/kernel.h>
11#include <linux/oprofile.h>
12#include <linux/errno.h>
13#include <linux/init.h>
14
/* No sparc64 hardware performance-counter support is wired up here;
 * returning -ENODEV makes the oprofile core fall back to its generic
 * timer-interrupt sampling mode (timer_int.o in the Makefile).
 */
int __init oprofile_arch_init(struct oprofile_operations * ops)
{
	return -ENODEV;
}
19
20
/* Nothing to tear down: oprofile_arch_init() claims no resources. */
void oprofile_arch_exit(void)
{
}
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
new file mode 100644
index 000000000000..8f2420d9e9e6
--- /dev/null
+++ b/arch/sparc64/prom/Makefile
@@ -0,0 +1,10 @@
# $Id: Makefile,v 1.7 2000/12/14 22:57:25 davem Exp $
# Makefile for the Sun Boot PROM interface library under
# Linux.
#

# Build the PROM library strictly: ANSI assembly, warnings as errors.
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror

lib-y := bootstr.o devops.o init.o memory.o misc.o \
	 tree.o console.o printf.o p1275.o map.o cif.o
diff --git a/arch/sparc64/prom/bootstr.c b/arch/sparc64/prom/bootstr.c
new file mode 100644
index 000000000000..a7278614e99d
--- /dev/null
+++ b/arch/sparc64/prom/bootstr.c
@@ -0,0 +1,40 @@
1/* $Id: bootstr.c,v 1.6 1999/08/31 06:55:01 davem Exp $
2 * bootstr.c: Boot string/argument acquisition from the PROM.
3 *
4 * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright(C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/string.h>
9#include <linux/init.h>
10#include <asm/oplib.h>
11
12/* WARNING: The boot loader knows that these next three variables come one right
13 * after another in the .data section. Do not move this stuff into
14 * the .bss section or it will break things.
15 */
16
#define BARG_LEN 256
struct {
	int bootstr_len;		/* capacity of bootstr_buf (BARG_LEN) */
	int bootstr_valid;		/* non-zero once bootstr_buf is filled */
	char bootstr_buf[BARG_LEN];	/* the boot argument string */
} bootstr_info = {
	.bootstr_len = BARG_LEN,
#ifdef CONFIG_CMDLINE
	/* A compiled-in command line pre-fills the buffer and marks it
	 * valid, so the PROM "bootargs" property is never consulted.
	 */
	.bootstr_valid = 1,
	.bootstr_buf = CONFIG_CMDLINE,
#endif
};
29
30char * __init
31prom_getbootargs(void)
32{
33 /* This check saves us from a panic when bootfd patches args. */
34 if (bootstr_info.bootstr_valid)
35 return bootstr_info.bootstr_buf;
36 prom_getstring(prom_chosen_node, "bootargs",
37 bootstr_info.bootstr_buf, BARG_LEN);
38 bootstr_info.bootstr_valid = 1;
39 return bootstr_info.bootstr_buf;
40}
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S
new file mode 100644
index 000000000000..29d0ae74aed8
--- /dev/null
+++ b/arch/sparc64/prom/cif.S
@@ -0,0 +1,225 @@
1/* cif.S: PROM entry/exit assembler trampolines.
2 *
3 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
5 */
6
7#include <asm/pstate.h>
8
9 .text
10 .globl prom_cif_interface
11prom_cif_interface:
12 sethi %hi(p1275buf), %o0
13 or %o0, %lo(p1275buf), %o0
14 ldx [%o0 + 0x010], %o1 ! prom_cif_stack
15 save %o1, -0x190, %sp
16 ldx [%i0 + 0x008], %l2 ! prom_cif_handler
17 rdpr %pstate, %l4
18 wrpr %g0, 0x15, %pstate ! save alternate globals
19 stx %g1, [%sp + 2047 + 0x0b0]
20 stx %g2, [%sp + 2047 + 0x0b8]
21 stx %g3, [%sp + 2047 + 0x0c0]
22 stx %g4, [%sp + 2047 + 0x0c8]
23 stx %g5, [%sp + 2047 + 0x0d0]
24 stx %g6, [%sp + 2047 + 0x0d8]
25 stx %g7, [%sp + 2047 + 0x0e0]
26 wrpr %g0, 0x814, %pstate ! save interrupt globals
27 stx %g1, [%sp + 2047 + 0x0e8]
28 stx %g2, [%sp + 2047 + 0x0f0]
29 stx %g3, [%sp + 2047 + 0x0f8]
30 stx %g4, [%sp + 2047 + 0x100]
31 stx %g5, [%sp + 2047 + 0x108]
32 stx %g6, [%sp + 2047 + 0x110]
33 stx %g7, [%sp + 2047 + 0x118]
34 wrpr %g0, 0x14, %pstate ! save normal globals
35 stx %g1, [%sp + 2047 + 0x120]
36 stx %g2, [%sp + 2047 + 0x128]
37 stx %g3, [%sp + 2047 + 0x130]
38 stx %g4, [%sp + 2047 + 0x138]
39 stx %g5, [%sp + 2047 + 0x140]
40 stx %g6, [%sp + 2047 + 0x148]
41 stx %g7, [%sp + 2047 + 0x150]
42 wrpr %g0, 0x414, %pstate ! save mmu globals
43 stx %g1, [%sp + 2047 + 0x158]
44 stx %g2, [%sp + 2047 + 0x160]
45 stx %g3, [%sp + 2047 + 0x168]
46 stx %g4, [%sp + 2047 + 0x170]
47 stx %g5, [%sp + 2047 + 0x178]
48 stx %g6, [%sp + 2047 + 0x180]
49 stx %g7, [%sp + 2047 + 0x188]
50 mov %g1, %l0 ! also save to locals, so we can handle
51 mov %g2, %l1 ! tlb faults later on, when accessing
52 mov %g3, %l3 ! the stack.
53 mov %g7, %l5
54 wrpr %l4, PSTATE_IE, %pstate ! turn off interrupts
55 call %l2
56 add %i0, 0x018, %o0 ! prom_args
57 wrpr %g0, 0x414, %pstate ! restore mmu globals
58 mov %l0, %g1
59 mov %l1, %g2
60 mov %l3, %g3
61 mov %l5, %g7
62 wrpr %g0, 0x14, %pstate ! restore normal globals
63 ldx [%sp + 2047 + 0x120], %g1
64 ldx [%sp + 2047 + 0x128], %g2
65 ldx [%sp + 2047 + 0x130], %g3
66 ldx [%sp + 2047 + 0x138], %g4
67 ldx [%sp + 2047 + 0x140], %g5
68 ldx [%sp + 2047 + 0x148], %g6
69 ldx [%sp + 2047 + 0x150], %g7
70 wrpr %g0, 0x814, %pstate ! restore interrupt globals
71 ldx [%sp + 2047 + 0x0e8], %g1
72 ldx [%sp + 2047 + 0x0f0], %g2
73 ldx [%sp + 2047 + 0x0f8], %g3
74 ldx [%sp + 2047 + 0x100], %g4
75 ldx [%sp + 2047 + 0x108], %g5
76 ldx [%sp + 2047 + 0x110], %g6
77 ldx [%sp + 2047 + 0x118], %g7
78 wrpr %g0, 0x15, %pstate ! restore alternate globals
79 ldx [%sp + 2047 + 0x0b0], %g1
80 ldx [%sp + 2047 + 0x0b8], %g2
81 ldx [%sp + 2047 + 0x0c0], %g3
82 ldx [%sp + 2047 + 0x0c8], %g4
83 ldx [%sp + 2047 + 0x0d0], %g5
84 ldx [%sp + 2047 + 0x0d8], %g6
85 ldx [%sp + 2047 + 0x0e0], %g7
86 wrpr %l4, 0, %pstate ! restore original pstate
87 ret
88 restore
89
90 .globl prom_cif_callback
91prom_cif_callback:
92 sethi %hi(p1275buf), %o1
93 or %o1, %lo(p1275buf), %o1
94 save %sp, -0x270, %sp
95 rdpr %pstate, %l4
96 wrpr %g0, 0x15, %pstate ! save PROM alternate globals
97 stx %g1, [%sp + 2047 + 0x0b0]
98 stx %g2, [%sp + 2047 + 0x0b8]
99 stx %g3, [%sp + 2047 + 0x0c0]
100 stx %g4, [%sp + 2047 + 0x0c8]
101 stx %g5, [%sp + 2047 + 0x0d0]
102 stx %g6, [%sp + 2047 + 0x0d8]
103 stx %g7, [%sp + 2047 + 0x0e0]
104 ! restore Linux alternate globals
105 ldx [%sp + 2047 + 0x190], %g1
106 ldx [%sp + 2047 + 0x198], %g2
107 ldx [%sp + 2047 + 0x1a0], %g3
108 ldx [%sp + 2047 + 0x1a8], %g4
109 ldx [%sp + 2047 + 0x1b0], %g5
110 ldx [%sp + 2047 + 0x1b8], %g6
111 ldx [%sp + 2047 + 0x1c0], %g7
112 wrpr %g0, 0x814, %pstate ! save PROM interrupt globals
113 stx %g1, [%sp + 2047 + 0x0e8]
114 stx %g2, [%sp + 2047 + 0x0f0]
115 stx %g3, [%sp + 2047 + 0x0f8]
116 stx %g4, [%sp + 2047 + 0x100]
117 stx %g5, [%sp + 2047 + 0x108]
118 stx %g6, [%sp + 2047 + 0x110]
119 stx %g7, [%sp + 2047 + 0x118]
120 ! restore Linux interrupt globals
121 ldx [%sp + 2047 + 0x1c8], %g1
122 ldx [%sp + 2047 + 0x1d0], %g2
123 ldx [%sp + 2047 + 0x1d8], %g3
124 ldx [%sp + 2047 + 0x1e0], %g4
125 ldx [%sp + 2047 + 0x1e8], %g5
126 ldx [%sp + 2047 + 0x1f0], %g6
127 ldx [%sp + 2047 + 0x1f8], %g7
128 wrpr %g0, 0x14, %pstate ! save PROM normal globals
129 stx %g1, [%sp + 2047 + 0x120]
130 stx %g2, [%sp + 2047 + 0x128]
131 stx %g3, [%sp + 2047 + 0x130]
132 stx %g4, [%sp + 2047 + 0x138]
133 stx %g5, [%sp + 2047 + 0x140]
134 stx %g6, [%sp + 2047 + 0x148]
135 stx %g7, [%sp + 2047 + 0x150]
136 ! restore Linux normal globals
137 ldx [%sp + 2047 + 0x200], %g1
138 ldx [%sp + 2047 + 0x208], %g2
139 ldx [%sp + 2047 + 0x210], %g3
140 ldx [%sp + 2047 + 0x218], %g4
141 ldx [%sp + 2047 + 0x220], %g5
142 ldx [%sp + 2047 + 0x228], %g6
143 ldx [%sp + 2047 + 0x230], %g7
144 wrpr %g0, 0x414, %pstate ! save PROM mmu globals
145 stx %g1, [%sp + 2047 + 0x158]
146 stx %g2, [%sp + 2047 + 0x160]
147 stx %g3, [%sp + 2047 + 0x168]
148 stx %g4, [%sp + 2047 + 0x170]
149 stx %g5, [%sp + 2047 + 0x178]
150 stx %g6, [%sp + 2047 + 0x180]
151 stx %g7, [%sp + 2047 + 0x188]
152 ! restore Linux mmu globals
153 ldx [%sp + 2047 + 0x238], %o0
154 ldx [%sp + 2047 + 0x240], %o1
155 ldx [%sp + 2047 + 0x248], %l2
156 ldx [%sp + 2047 + 0x250], %l3
157 ldx [%sp + 2047 + 0x258], %l5
158 ldx [%sp + 2047 + 0x260], %l6
159 ldx [%sp + 2047 + 0x268], %l7
160 ! switch to Linux tba
161 sethi %hi(sparc64_ttable_tl0), %l1
162 rdpr %tba, %l0 ! save PROM tba
163 mov %o0, %g1
164 mov %o1, %g2
165 mov %l2, %g3
166 mov %l3, %g4
167 mov %l5, %g5
168 mov %l6, %g6
169 mov %l7, %g7
170 wrpr %l1, %tba ! install Linux tba
171 wrpr %l4, 0, %pstate ! restore PSTATE
172 call prom_world
173 mov %g0, %o0
174 ldx [%i1 + 0x000], %l2
175 call %l2
176 mov %i0, %o0
177 mov %o0, %l1
178 call prom_world
179 or %g0, 1, %o0
180 wrpr %g0, 0x14, %pstate ! interrupts off
181 ! restore PROM mmu globals
182 ldx [%sp + 2047 + 0x158], %o0
183 ldx [%sp + 2047 + 0x160], %o1
184 ldx [%sp + 2047 + 0x168], %l2
185 ldx [%sp + 2047 + 0x170], %l3
186 ldx [%sp + 2047 + 0x178], %l5
187 ldx [%sp + 2047 + 0x180], %l6
188 ldx [%sp + 2047 + 0x188], %l7
189 wrpr %g0, 0x414, %pstate ! restore PROM mmu globals
190 mov %o0, %g1
191 mov %o1, %g2
192 mov %l2, %g3
193 mov %l3, %g4
194 mov %l5, %g5
195 mov %l6, %g6
196 mov %l7, %g7
197 wrpr %l0, %tba ! restore PROM tba
198 wrpr %g0, 0x14, %pstate ! restore PROM normal globals
199 ldx [%sp + 2047 + 0x120], %g1
200 ldx [%sp + 2047 + 0x128], %g2
201 ldx [%sp + 2047 + 0x130], %g3
202 ldx [%sp + 2047 + 0x138], %g4
203 ldx [%sp + 2047 + 0x140], %g5
204 ldx [%sp + 2047 + 0x148], %g6
205 ldx [%sp + 2047 + 0x150], %g7
206 wrpr %g0, 0x814, %pstate ! restore PROM interrupt globals
207 ldx [%sp + 2047 + 0x0e8], %g1
208 ldx [%sp + 2047 + 0x0f0], %g2
209 ldx [%sp + 2047 + 0x0f8], %g3
210 ldx [%sp + 2047 + 0x100], %g4
211 ldx [%sp + 2047 + 0x108], %g5
212 ldx [%sp + 2047 + 0x110], %g6
213 ldx [%sp + 2047 + 0x118], %g7
214 wrpr %g0, 0x15, %pstate ! restore PROM alternate globals
215 ldx [%sp + 2047 + 0x0b0], %g1
216 ldx [%sp + 2047 + 0x0b8], %g2
217 ldx [%sp + 2047 + 0x0c0], %g3
218 ldx [%sp + 2047 + 0x0c8], %g4
219 ldx [%sp + 2047 + 0x0d0], %g5
220 ldx [%sp + 2047 + 0x0d8], %g6
221 ldx [%sp + 2047 + 0x0e0], %g7
222 wrpr %l4, 0, %pstate
223 ret
224 restore %l1, 0, %o0
225
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
new file mode 100644
index 000000000000..028a53fcb1ec
--- /dev/null
+++ b/arch/sparc64/prom/console.c
@@ -0,0 +1,146 @@
1/* $Id: console.c,v 1.9 1997/10/29 07:41:43 ecd Exp $
2 * console.c: Routines that deal with sending and receiving IO
3 * to/from the current console device using the PROM.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14#include <asm/system.h>
15#include <linux/string.h>
16
17extern int prom_stdin, prom_stdout;
18
19/* Non blocking get character from console input device, returns -1
20 * if no input was taken. This can be used for polling.
21 */
22__inline__ int
23prom_nbgetchar(void)
24{
25 char inc;
26
27 if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
28 P1275_INOUT(3,1),
29 prom_stdin, &inc, P1275_SIZE(1)) == 1)
30 return inc;
31 else
32 return -1;
33}
34
35/* Non blocking put character to console device, returns -1 if
36 * unsuccessful.
37 */
38__inline__ int
39prom_nbputchar(char c)
40{
41 char outc;
42
43 outc = c;
44 if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
45 P1275_INOUT(3,1),
46 prom_stdout, &outc, P1275_SIZE(1)) == 1)
47 return 0;
48 else
49 return -1;
50}
51
52/* Blocking version of get character routine above. */
53char
54prom_getchar(void)
55{
56 int character;
57 while((character = prom_nbgetchar()) == -1) ;
58 return (char) character;
59}
60
61/* Blocking version of put character routine above. */
62void
63prom_putchar(char c)
64{
65 prom_nbputchar(c);
66 return;
67}
68
69void
70prom_puts(char *s, int len)
71{
72 p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
73 P1275_INOUT(3,1),
74 prom_stdout, s, P1275_SIZE(len));
75}
76
77/* Query for input device type */
78enum prom_input_device
79prom_query_input_device(void)
80{
81 int st_p;
82 char propb[64];
83
84 st_p = prom_inst2pkg(prom_stdin);
85 if(prom_node_has_property(st_p, "keyboard"))
86 return PROMDEV_IKBD;
87 prom_getproperty(st_p, "device_type", propb, sizeof(propb));
88 if(strncmp(propb, "serial", 6))
89 return PROMDEV_I_UNK;
90 /* FIXME: Is there any better way how to find out? */
91 memset(propb, 0, sizeof(propb));
92 st_p = prom_finddevice ("/options");
93 prom_getproperty(st_p, "input-device", propb, sizeof(propb));
94
95 /*
96 * If we get here with propb == 'keyboard', we are on ttya, as
97 * the PROM defaulted to this due to 'no input device'.
98 */
99 if (!strncmp(propb, "keyboard", 8))
100 return PROMDEV_ITTYA;
101
102 if (strncmp (propb, "tty", 3) || !propb[3])
103 return PROMDEV_I_UNK;
104 switch (propb[3]) {
105 case 'a': return PROMDEV_ITTYA;
106 case 'b': return PROMDEV_ITTYB;
107 default: return PROMDEV_I_UNK;
108 }
109}
110
111/* Query for output device type */
112
113enum prom_output_device
114prom_query_output_device(void)
115{
116 int st_p;
117 char propb[64];
118 int propl;
119
120 st_p = prom_inst2pkg(prom_stdout);
121 propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
122 if (propl >= 0 && propl == sizeof("display") &&
123 strncmp("display", propb, sizeof("display")) == 0)
124 return PROMDEV_OSCREEN;
125 if(strncmp("serial", propb, 6))
126 return PROMDEV_O_UNK;
127 /* FIXME: Is there any better way how to find out? */
128 memset(propb, 0, sizeof(propb));
129 st_p = prom_finddevice ("/options");
130 prom_getproperty(st_p, "output-device", propb, sizeof(propb));
131
132 /*
133 * If we get here with propb == 'screen', we are on ttya, as
134 * the PROM defaulted to this due to 'no input device'.
135 */
136 if (!strncmp(propb, "screen", 6))
137 return PROMDEV_OTTYA;
138
139 if (strncmp (propb, "tty", 3) || !propb[3])
140 return PROMDEV_O_UNK;
141 switch (propb[3]) {
142 case 'a': return PROMDEV_OTTYA;
143 case 'b': return PROMDEV_OTTYB;
144 default: return PROMDEV_O_UNK;
145 }
146}
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
new file mode 100644
index 000000000000..2c99b21b6981
--- /dev/null
+++ b/arch/sparc64/prom/devops.c
@@ -0,0 +1,41 @@
1/* $Id: devops.c,v 1.3 1997/10/29 07:43:28 ecd Exp $
2 * devops.c: Device operations using the PROM.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7#include <linux/types.h>
8#include <linux/kernel.h>
9#include <linux/sched.h>
10
11#include <asm/openprom.h>
12#include <asm/oplib.h>
13
14/* Open the device described by the string 'dstr'. Returns the handle
15 * to that device used for subsequent operations on that device.
16 * Returns 0 on failure.
17 */
18int
19prom_devopen(char *dstr)
20{
21 return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
22 P1275_INOUT(1,1),
23 dstr);
24}
25
26/* Close the device described by device handle 'dhandle'. */
27int
28prom_devclose(int dhandle)
29{
30 p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
31 return 0;
32}
33
34/* Seek to specified location described by 'seekhi' and 'seeklo'
35 * for device 'dhandle'.
36 */
37void
38prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
39{
40 p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
41}
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
new file mode 100644
index 000000000000..817faae058cd
--- /dev/null
+++ b/arch/sparc64/prom/init.c
@@ -0,0 +1,101 @@
1/* $Id: init.c,v 1.10 1999/09/21 14:35:59 davem Exp $
2 * init.c: Initialize internal variables used by the PROM
3 * library functions.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13
14#include <asm/openprom.h>
15#include <asm/oplib.h>
16
17enum prom_major_version prom_vers;
18unsigned int prom_rev, prom_prev;
19
20/* The root node of the prom device tree. */
21int prom_root_node;
22int prom_stdin, prom_stdout;
23int prom_chosen_node;
24
25/* You must call prom_init() before you attempt to use any of the
26 * routines in the prom library. It returns 0 on success, 1 on
27 * failure. It gets passed the pointer to the PROM vector.
28 */
29
30extern void prom_meminit(void);
31extern void prom_cif_init(void *, void *);
32
33void __init prom_init(void *cif_handler, void *cif_stack)
34{
35 char buffer[80], *p;
36 int ints[3];
37 int node;
38 int i = 0;
39 int bufadjust;
40
41 prom_vers = PROM_P1275;
42
43 prom_cif_init(cif_handler, cif_stack);
44
45 prom_root_node = prom_getsibling(0);
46 if((prom_root_node == 0) || (prom_root_node == -1))
47 prom_halt();
48
49 prom_chosen_node = prom_finddevice("/chosen");
50 if (!prom_chosen_node || prom_chosen_node == -1)
51 prom_halt();
52
53 prom_stdin = prom_getint (prom_chosen_node, "stdin");
54 prom_stdout = prom_getint (prom_chosen_node, "stdout");
55
56 node = prom_finddevice("/openprom");
57 if (!node || node == -1)
58 prom_halt();
59
60 prom_getstring (node, "version", buffer, sizeof (buffer));
61
62 prom_printf ("\n");
63
64 if (strncmp (buffer, "OBP ", 4))
65 goto strange_version;
66
67	/*
68	 * Version field is expected to be 'OBP xx.yy.zz date...'
69	 * However, Sun can't stick to this format very well, so
70	 * we need to check for 'OBP  xx.yy.zz date...' (note the
71	 * extra space) and adjust accordingly. -spot
72	 */
73
74 if (strncmp (buffer, "OBP ", 5))
75 bufadjust = 4;
76 else
77 bufadjust = 5;
78
79 p = buffer + bufadjust;
80 while (p && isdigit(*p) && i < 3) {
81 ints[i++] = simple_strtoul(p, NULL, 0);
82 if ((p = strchr(p, '.')) != NULL)
83 p++;
84 }
85 if (i != 3)
86 goto strange_version;
87
88 prom_rev = ints[1];
89 prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2];
90
91 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
92
93 prom_meminit();
94
95 /* Initialization successful. */
96 return;
97
98strange_version:
99 prom_printf ("Strange OBP version `%s'.\n", buffer);
100 prom_halt ();
101}
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S
new file mode 100644
index 000000000000..21b3f9c99ea7
--- /dev/null
+++ b/arch/sparc64/prom/map.S
@@ -0,0 +1,72 @@
1/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $
2 * map.S: Tricky coding required to fixup the kernel OBP maps
3 * properly.
4 *
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8 .text
9 .align 8192
10 .globl prom_boot_page
11prom_boot_page:
12call_method:
13 .asciz "call-method"
14 .align 8
15map:
16 .asciz "map"
17 .align 8
18
19 /* When we are invoked, our caller has remapped us to
20 * page zero, therefore we must use PC relative addressing
21 * for everything after we begin performing the unmap/map
22 * calls.
23 */
24 .globl prom_remap
25prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */
26 rd %pc, %g1
27 srl %o2, 0, %o2 ! kill sign extension
28 sethi %hi(p1275buf), %g2
29 or %g2, %lo(p1275buf), %g2
30 ldx [%g2 + 0x10], %g3 ! prom_cif_stack
31 save %g3, -(192 + 128), %sp
32 ldx [%g2 + 0x08], %l0 ! prom_cif_handler
33 mov %g6, %i3
34 mov %g4, %i4
35 mov %g5, %i5
36 flushw
37
38 sethi %hi(prom_remap - call_method), %g7
39 or %g7, %lo(prom_remap - call_method), %g7
40 sub %g1, %g7, %l2 ! call-method string
41 sethi %hi(prom_remap - map), %g7
42 or %g7, %lo(prom_remap - map), %g7
43 sub %g1, %g7, %l4 ! map string
44
45 /* OK, map the 4MB region we really live at. */
46 stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method
47 mov 7, %l5
48 stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args
49 mov 1, %l5
50 stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets
51 stx %l4, [%sp + 2047 + 128 + 0x18] ! map
52 stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle
53 mov -1, %l5
54 stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default
55 sethi %hi(4 * 1024 * 1024), %l5
56 stx %l5, [%sp + 2047 + 128 + 0x30] ! size
57 stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr
58 stx %g0, [%sp + 2047 + 128 + 0x40] ! filler
59 stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr
60 call %l0
61 add %sp, (2047 + 128), %o0 ! argument array
62
63 /* Restore hard-coded globals. */
64 mov %i3, %g6
65 mov %i4, %g4
66 mov %i5, %g5
67
68 /* Wheee.... we are done. */
69 ret
70 restore
71
72 .align 8192
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
new file mode 100644
index 000000000000..f4a8143e052c
--- /dev/null
+++ b/arch/sparc64/prom/memory.c
@@ -0,0 +1,152 @@
1/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
2 * memory.c: Prom routine for acquiring various bits of information
3 * about RAM on the machine, both virtual and physical.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14
15/* This routine, for consistency, returns the ram parameters in the
16 * V0 prom memory descriptor format. I choose this format because I
17 * think it was the easiest to work with. I feel the religious
18 * arguments now... ;) Also, I return the linked lists sorted to
19 * prevent paging_init() upset stomach as I have not yet written
20 * the pepto-bismol kernel module yet.
21 */
22
23struct linux_prom64_registers prom_reg_memlist[64];
24struct linux_prom64_registers prom_reg_tmp[64];
25
26struct linux_mlist_p1275 prom_phys_total[64];
27struct linux_mlist_p1275 prom_prom_taken[64];
28struct linux_mlist_p1275 prom_phys_avail[64];
29
30struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
31struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
32struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
33
34struct linux_mem_p1275 prom_memlist;
35
36
37/* Internal Prom library routine to sort a linux_mlist_p1275 memory
38 * list. Used below in initialization.
39 */
40static void __init
41prom_sortmemlist(struct linux_mlist_p1275 *thislist)
42{
43 int swapi = 0;
44 int i, mitr;
45 unsigned long tmpaddr, tmpsize;
46 unsigned long lowest;
47
48 for(i=0; thislist[i].theres_more; i++) {
49 lowest = thislist[i].start_adr;
50 for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
51 if(thislist[mitr].start_adr < lowest) {
52 lowest = thislist[mitr].start_adr;
53 swapi = mitr;
54 }
55 if(lowest == thislist[i].start_adr) continue;
56 tmpaddr = thislist[swapi].start_adr;
57 tmpsize = thislist[swapi].num_bytes;
58 for(mitr = swapi; mitr > i; mitr--) {
59 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
60 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
61 }
62 thislist[i].start_adr = tmpaddr;
63 thislist[i].num_bytes = tmpsize;
64 }
65}
66
67/* Initialize the memory lists based upon the prom version. */
68void __init prom_meminit(void)
69{
70 int node = 0;
71 unsigned int iter, num_regs;
72
73 node = prom_finddevice("/memory");
74 num_regs = prom_getproperty(node, "available",
75 (char *) prom_reg_memlist,
76 sizeof(prom_reg_memlist));
77 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
78 for(iter=0; iter<num_regs; iter++) {
79 prom_phys_avail[iter].start_adr =
80 prom_reg_memlist[iter].phys_addr;
81 prom_phys_avail[iter].num_bytes =
82 prom_reg_memlist[iter].reg_size;
83 prom_phys_avail[iter].theres_more =
84 &prom_phys_avail[iter+1];
85 }
86 prom_phys_avail[iter-1].theres_more = NULL;
87
88 num_regs = prom_getproperty(node, "reg",
89 (char *) prom_reg_memlist,
90 sizeof(prom_reg_memlist));
91 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
92 for(iter=0; iter<num_regs; iter++) {
93 prom_phys_total[iter].start_adr =
94 prom_reg_memlist[iter].phys_addr;
95 prom_phys_total[iter].num_bytes =
96 prom_reg_memlist[iter].reg_size;
97 prom_phys_total[iter].theres_more =
98 &prom_phys_total[iter+1];
99 }
100 prom_phys_total[iter-1].theres_more = NULL;
101
102 node = prom_finddevice("/virtual-memory");
103 num_regs = prom_getproperty(node, "available",
104 (char *) prom_reg_memlist,
105 sizeof(prom_reg_memlist));
106 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
107
108 /* Convert available virtual areas to taken virtual
109 * areas. First sort, then convert.
110 */
111 for(iter=0; iter<num_regs; iter++) {
112 prom_prom_taken[iter].start_adr =
113 prom_reg_memlist[iter].phys_addr;
114 prom_prom_taken[iter].num_bytes =
115 prom_reg_memlist[iter].reg_size;
116 prom_prom_taken[iter].theres_more =
117 &prom_prom_taken[iter+1];
118 }
119 prom_prom_taken[iter-1].theres_more = NULL;
120
121 prom_sortmemlist(prom_prom_taken);
122
123 /* Finally, convert. */
124 for(iter=0; iter<num_regs; iter++) {
125 prom_prom_taken[iter].start_adr =
126 prom_prom_taken[iter].start_adr +
127 prom_prom_taken[iter].num_bytes;
128 prom_prom_taken[iter].num_bytes =
129 prom_prom_taken[iter+1].start_adr -
130 prom_prom_taken[iter].start_adr;
131 }
132 prom_prom_taken[iter-1].num_bytes =
133 -1UL - prom_prom_taken[iter-1].start_adr;
134
135 /* Sort the other two lists. */
136 prom_sortmemlist(prom_phys_total);
137 prom_sortmemlist(prom_phys_avail);
138
139 /* Link all the lists into the top-level descriptor. */
140 prom_memlist.p1275_totphys=&prom_ptot_ptr;
141 prom_memlist.p1275_prommap=&prom_ptak_ptr;
142 prom_memlist.p1275_available=&prom_pavl_ptr;
143}
144
145/* This returns a pointer to our libraries internal p1275 format
146 * memory descriptor.
147 */
148struct linux_mem_p1275 *
149prom_meminfo(void)
150{
151 return &prom_memlist;
152}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
new file mode 100644
index 000000000000..19c44e97e9ee
--- /dev/null
+++ b/arch/sparc64/prom/misc.c
@@ -0,0 +1,339 @@
1/* $Id: misc.c,v 1.20 2001/09/21 03:17:07 kanoj Exp $
2 * misc.c: Miscellaneous prom functions that don't belong
3 * anywhere else.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/interrupt.h>
14#include <linux/delay.h>
15#include <asm/openprom.h>
16#include <asm/oplib.h>
17#include <asm/system.h>
18
19/* Reset and reboot the machine with the command 'bcommand'. */
20void prom_reboot(char *bcommand)
21{
22 p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
23 P1275_INOUT(1, 0), bcommand);
24}
25
26/* Forth evaluate the expression contained in 'fstring'. */
27void prom_feval(char *fstring)
28{
29 if (!fstring || fstring[0] == 0)
30 return;
31 p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
32 P1275_INOUT(1, 1), fstring);
33}
34
35/* We want to do this more nicely some day. */
36extern void (*prom_palette)(int);
37
38#ifdef CONFIG_SMP
39extern void smp_capture(void);
40extern void smp_release(void);
41#endif
42
43/* Drop into the prom, with the chance to continue with the 'go'
44 * prom command.
45 */
46void prom_cmdline(void)
47{
48 unsigned long flags;
49
50 local_irq_save(flags);
51
52 if (!serial_console && prom_palette)
53 prom_palette(1);
54
55#ifdef CONFIG_SMP
56 smp_capture();
57#endif
58
59 p1275_cmd("enter", P1275_INOUT(0, 0));
60
61#ifdef CONFIG_SMP
62 smp_release();
63#endif
64
65 if (!serial_console && prom_palette)
66 prom_palette(0);
67
68 local_irq_restore(flags);
69}
70
71#ifdef CONFIG_SMP
72extern void smp_promstop_others(void);
73#endif
74
75/* Drop into the prom, but completely terminate the program.
76 * No chance of continuing.
77 */
78void prom_halt(void)
79{
80#ifdef CONFIG_SMP
81 smp_promstop_others();
82 udelay(8000);
83#endif
84again:
85 p1275_cmd("exit", P1275_INOUT(0, 0));
86 goto again; /* PROM is out to get me -DaveM */
87}
88
89void prom_halt_power_off(void)
90{
91#ifdef CONFIG_SMP
92 smp_promstop_others();
93 udelay(8000);
94#endif
95 p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
96
97 /* if nothing else helps, we just halt */
98 prom_halt();
99}
100
101/* Set prom sync handler to call function 'funcp'. */
102void prom_setcallback(callback_func_t funcp)
103{
104 if (!funcp)
105 return;
106 p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
107 P1275_INOUT(1, 1), funcp);
108}
109
110/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
111 * format type. 'num_bytes' is the number of bytes that your idbuf
112 * has space for. Returns 0xff on error.
113 */
114unsigned char prom_get_idprom(char *idbuf, int num_bytes)
115{
116 int len;
117
118 len = prom_getproplen(prom_root_node, "idprom");
119 if ((len >num_bytes) || (len == -1))
120 return 0xff;
121 if (!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
122 return idbuf[0];
123
124 return 0xff;
125}
126
127/* Get the major prom version number. */
128int prom_version(void)
129{
130 return PROM_P1275;
131}
132
133/* Get the prom plugin-revision. */
134int prom_getrev(void)
135{
136 return prom_rev;
137}
138
139/* Get the prom firmware print revision. */
140int prom_getprev(void)
141{
142 return prom_prev;
143}
144
145/* Install Linux trap table so PROM uses that instead of its own. */
146void prom_set_trap_table(unsigned long tba)
147{
148 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
149}
150
151int mmu_ihandle_cache = 0;
152
153int prom_get_mmu_ihandle(void)
154{
155 int node, ret;
156
157 if (mmu_ihandle_cache != 0)
158 return mmu_ihandle_cache;
159
160 node = prom_finddevice("/chosen");
161 ret = prom_getint(node, "mmu");
162 if (ret == -1 || ret == 0)
163 mmu_ihandle_cache = -1;
164 else
165 mmu_ihandle_cache = ret;
166
167 return ret;
168}
169
170static int prom_get_memory_ihandle(void)
171{
172 static int memory_ihandle_cache;
173 int node, ret;
174
175 if (memory_ihandle_cache != 0)
176 return memory_ihandle_cache;
177
178 node = prom_finddevice("/chosen");
179 ret = prom_getint(node, "memory");
180 if (ret == -1 || ret == 0)
181 memory_ihandle_cache = -1;
182 else
183 memory_ihandle_cache = ret;
184
185 return ret;
186}
187
188/* Load explicit I/D TLB entries. */
189long prom_itlb_load(unsigned long index,
190 unsigned long tte_data,
191 unsigned long vaddr)
192{
193 return p1275_cmd("call-method",
194 (P1275_ARG(0, P1275_ARG_IN_STRING) |
195 P1275_ARG(2, P1275_ARG_IN_64B) |
196 P1275_ARG(3, P1275_ARG_IN_64B) |
197 P1275_INOUT(5, 1)),
198 "SUNW,itlb-load",
199 prom_get_mmu_ihandle(),
200 /* And then our actual args are pushed backwards. */
201 vaddr,
202 tte_data,
203 index);
204}
205
206long prom_dtlb_load(unsigned long index,
207 unsigned long tte_data,
208 unsigned long vaddr)
209{
210 return p1275_cmd("call-method",
211 (P1275_ARG(0, P1275_ARG_IN_STRING) |
212 P1275_ARG(2, P1275_ARG_IN_64B) |
213 P1275_ARG(3, P1275_ARG_IN_64B) |
214 P1275_INOUT(5, 1)),
215 "SUNW,dtlb-load",
216 prom_get_mmu_ihandle(),
217 /* And then our actual args are pushed backwards. */
218 vaddr,
219 tte_data,
220 index);
221}
222
223int prom_map(int mode, unsigned long size,
224 unsigned long vaddr, unsigned long paddr)
225{
226 int ret = p1275_cmd("call-method",
227 (P1275_ARG(0, P1275_ARG_IN_STRING) |
228 P1275_ARG(3, P1275_ARG_IN_64B) |
229 P1275_ARG(4, P1275_ARG_IN_64B) |
230 P1275_ARG(6, P1275_ARG_IN_64B) |
231 P1275_INOUT(7, 1)),
232 "map",
233 prom_get_mmu_ihandle(),
234 mode,
235 size,
236 vaddr,
237 0,
238 paddr);
239
240 if (ret == 0)
241 ret = -1;
242 return ret;
243}
244
245void prom_unmap(unsigned long size, unsigned long vaddr)
246{
247 p1275_cmd("call-method",
248 (P1275_ARG(0, P1275_ARG_IN_STRING) |
249 P1275_ARG(2, P1275_ARG_IN_64B) |
250 P1275_ARG(3, P1275_ARG_IN_64B) |
251 P1275_INOUT(4, 0)),
252 "unmap",
253 prom_get_mmu_ihandle(),
254 size,
255 vaddr);
256}
257
258/* Set aside physical memory which is not touched or modified
259 * across soft resets.
260 */
261unsigned long prom_retain(char *name,
262 unsigned long pa_low, unsigned long pa_high,
263 long size, long align)
264{
265 /* XXX I don't think we return multiple values correctly.
266 * XXX OBP supposedly returns pa_low/pa_high here, how does
267 * XXX it work?
268 */
269
270 /* If align is zero, the pa_low/pa_high args are passed,
271 * else they are not.
272 */
273 if (align == 0)
274 return p1275_cmd("SUNW,retain",
275 (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
276 name, pa_low, pa_high, size, align);
277 else
278 return p1275_cmd("SUNW,retain",
279 (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
280 name, size, align);
281}
282
283/* Get "Unumber" string for the SIMM at the given
284 * memory address. Usually this will be of the form
285 * "Uxxxx" where xxxx is a decimal number which is
286 * etched into the motherboard next to the SIMM slot
287 * in question.
288 */
289int prom_getunumber(int syndrome_code,
290 unsigned long phys_addr,
291 char *buf, int buflen)
292{
293 return p1275_cmd("call-method",
294 (P1275_ARG(0, P1275_ARG_IN_STRING) |
295 P1275_ARG(3, P1275_ARG_OUT_BUF) |
296 P1275_ARG(6, P1275_ARG_IN_64B) |
297 P1275_INOUT(8, 2)),
298 "SUNW,get-unumber", prom_get_memory_ihandle(),
299 buflen, buf, P1275_SIZE(buflen),
300 0, phys_addr, syndrome_code);
301}
302
303/* Power management extensions. */
304void prom_sleepself(void)
305{
306 p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
307}
308
309int prom_sleepsystem(void)
310{
311 return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
312}
313
314int prom_wakeupsystem(void)
315{
316 return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
317}
318
319#ifdef CONFIG_SMP
320void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0)
321{
322 p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0);
323}
324
325void prom_stopself(void)
326{
327 p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
328}
329
330void prom_idleself(void)
331{
332 p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
333}
334
335void prom_resumecpu(int cpunode)
336{
337 p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
338}
339#endif
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
new file mode 100644
index 000000000000..59fe38bba39e
--- /dev/null
+++ b/arch/sparc64/prom/p1275.c
@@ -0,0 +1,161 @@
1/* $Id: p1275.c,v 1.22 2001/10/18 09:40:00 davem Exp $
2 * p1275.c: Sun IEEE 1275 PROM low level interface routines
3 *
4 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/sched.h>
10#include <linux/smp.h>
11#include <linux/string.h>
12#include <linux/spinlock.h>
13
14#include <asm/openprom.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/spitfire.h>
18#include <asm/pstate.h>
19
/* Shared scratch area used to marshal arguments for the PROM client
 * interface.  The per-field offset comments suggest the layout is
 * accessed by the low-level CIF trampoline code (prom_cif_interface /
 * prom_cif_callback) -- NOTE(review): confirm against that assembly
 * before reordering any field.
 */
struct {
	long prom_callback;			/* 0x00 */
	void (*prom_cif_handler)(long *);	/* 0x08 */
	unsigned long prom_cif_stack;		/* 0x10 */
	unsigned long prom_args [23];		/* 0x18 */
	char prom_buffer [3000];		/* string/buffer staging area */
} p1275buf;
27
28extern void prom_world(int);
29
30extern void prom_cif_interface(void);
31extern void prom_cif_callback(void);
32
/* Read the D-MMU primary context register (ldxa from ASI_DMMU at
 * offset PRIMARY_CONTEXT).  Used below to assert that PROM calls are
 * only made from context 0.
 */
static inline unsigned long spitfire_get_primary_context(void)
{
	unsigned long ctx;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (ctx)
			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
	return ctx;
}
42
43/*
44 * This provides SMP safety on the p1275buf. prom_callback() drops this lock
45 * to allow recursuve acquisition.
46 */
47DEFINE_SPINLOCK(prom_entry_lock);
48
/* Marshal a call through the IEEE 1275 PROM client interface.
 *
 * 'fmt' encodes the call signature: bits 0-3 hold the number of input
 * arguments, bits 4-7 the number of return values, and from bit 8
 * upward one 3-bit P1275_ARG_* attribute per argument describing how
 * the corresponding vararg is marshalled.  Strings and buffers are
 * staged in the shared p1275buf scratch area (8-byte aligned) so the
 * PROM is handed pointers it can access; OUT buffers are copied back
 * to the caller after the call.  Returns the first PROM result value.
 */
long p1275_cmd (char *service, long fmt, ...)
{
	char *p, *q;
	unsigned long flags;
	int nargs, nrets, i;
	va_list list;
	long attrs, x;
	
	p = p1275buf.prom_buffer;
	/* PROM calls are only legal while running in context 0. */
	BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0);

	spin_lock_irqsave(&prom_entry_lock, flags);

	p1275buf.prom_args[0] = (unsigned long)p;		/* service */
	strcpy (p, service);
	/* Advance past the NUL and round up to an 8-byte boundary. */
	p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
	p1275buf.prom_args[1] = nargs = (fmt & 0x0f);		/* nargs */
	p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); 	/* nrets */
	attrs = fmt >> 8;
	va_start(list, fmt);
	/* First pass: copy inputs into the staging buffer. */
	for (i = 0; i < nargs; i++, attrs >>= 3) {
		switch (attrs & 0x7) {
		case P1275_ARG_NUMBER:
			p1275buf.prom_args[i + 3] =
						(unsigned)va_arg(list, long);
			break;
		case P1275_ARG_IN_64B:
			p1275buf.prom_args[i + 3] =
				va_arg(list, unsigned long);
			break;
		case P1275_ARG_IN_STRING:
			strcpy (p, va_arg(list, char *));
			p1275buf.prom_args[i + 3] = (unsigned long)p;
			p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
			break;
		case P1275_ARG_OUT_BUF:
			/* Consumes two varargs (buffer, length) and two
			 * prom_args slots; the PROM writes into the
			 * staging area, copied back in the second pass.
			 */
			(void) va_arg(list, char *);
			p1275buf.prom_args[i + 3] = (unsigned long)p;
			x = va_arg(list, long);
			i++; attrs >>= 3;
			p = (char *)(((long)(p + (int)x + 7)) & ~7);
			p1275buf.prom_args[i + 3] = x;
			break;
		case P1275_ARG_IN_BUF:
			/* Consumes two varargs (buffer, length). */
			q = va_arg(list, char *);
			p1275buf.prom_args[i + 3] = (unsigned long)p;
			x = va_arg(list, long);
			i++; attrs >>= 3;
			memcpy (p, q, (int)x);
			p = (char *)(((long)(p + (int)x + 7)) & ~7);
			p1275buf.prom_args[i + 3] = x;
			break;
		case P1275_ARG_OUT_32B:
			(void) va_arg(list, char *);
			p1275buf.prom_args[i + 3] = (unsigned long)p;
			p += 32;
			break;
		case P1275_ARG_IN_FUNCTION:
			/* PROM calls back via prom_cif_callback, which
			 * dispatches to the stored prom_callback.
			 */
			p1275buf.prom_args[i + 3] =
					(unsigned long)prom_cif_callback;
			p1275buf.prom_callback = va_arg(list, long);
			break;
		}
	}
	va_end(list);

	prom_world(1);
	prom_cif_interface();
	prom_world(0);

	/* Second pass: walk the varargs again, copying OUT buffers
	 * from the staging area back to the caller.
	 * NOTE(review): P1275_ARG_IN_64B has no case here, so its
	 * vararg is not consumed; an OUT buffer appearing *after* a
	 * 64-bit input would read the wrong varargs.  No caller in
	 * this file does that, but verify before adding one.
	 */
	attrs = fmt >> 8;
	va_start(list, fmt);
	for (i = 0; i < nargs; i++, attrs >>= 3) {
		switch (attrs & 0x7) {
		case P1275_ARG_NUMBER:
			(void) va_arg(list, long);
			break;
		case P1275_ARG_IN_STRING:
			(void) va_arg(list, char *);
			break;
		case P1275_ARG_IN_FUNCTION:
			(void) va_arg(list, long);
			break;
		case P1275_ARG_IN_BUF:
			(void) va_arg(list, char *);
			(void) va_arg(list, long);
			i++; attrs >>= 3;
			break;
		case P1275_ARG_OUT_BUF:
			p = va_arg(list, char *);
			x = va_arg(list, long);
			memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
			i++; attrs >>= 3;
			break;
		case P1275_ARG_OUT_32B:
			p = va_arg(list, char *);
			memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
			break;
		}
	}
	va_end(list);
	/* The first return value follows the inputs in prom_args[]. */
	x = p1275buf.prom_args [nargs + 3];

	spin_unlock_irqrestore(&prom_entry_lock, flags);

	return x;
}
156
/* Record the PROM client-interface entry point and the stack that the
 * CIF trampoline should use when calling into the firmware.
 */
void prom_cif_init(void *cif_handler, void *cif_stack)
{
	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
}
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
new file mode 100644
index 000000000000..a6df82cafa0d
--- /dev/null
+++ b/arch/sparc64/prom/printf.c
@@ -0,0 +1,47 @@
1/*
2 * printf.c: Internal prom library printf facility.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
7 *
8 * We used to warn all over the code: DO NOT USE prom_printf(),
9 * and yet people do. Anton's banking code was outputting banks
10 * with prom_printf for most of the 2.4 lifetime. Since an effective
11 * stick is not available, we deployed a carrot: an early printk
12 * through PROM by means of -p boot option. This ought to fix it.
13 * USE printk; if you need, deploy -p.
14 */
15
16#include <linux/kernel.h>
17
18#include <asm/openprom.h>
19#include <asm/oplib.h>
20
21static char ppbuf[1024];
22
/* Emit 'n' bytes of 'buf' on the PROM console, expanding each
 * '\n' to a CR/LF pair.
 */
void
prom_write(const char *buf, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		char c = buf[i];

		if (c == '\n')
			prom_putchar('\r');
		prom_putchar(c);
	}
}
35
36void
37prom_printf(char *fmt, ...)
38{
39 va_list args;
40 int i;
41
42 va_start(args, fmt);
43 i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args);
44 va_end(args);
45
46 prom_write(ppbuf, i);
47}
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
new file mode 100644
index 000000000000..ccf73258ebf7
--- /dev/null
+++ b/arch/sparc64/prom/tree.c
@@ -0,0 +1,377 @@
1/* $Id: tree.c,v 1.10 1998/01/10 22:39:00 ecd Exp $
2 * tree.c: Basic device tree traversal/scanning for the Linux
3 * prom library.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/string.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13
14#include <asm/openprom.h>
15#include <asm/oplib.h>
16
/* Return the child of node 'node' or zero if this node has no
 * direct descendent.
 */
/* Raw PROM "child" service: may return -1 for an invalid node. */
__inline__ int
__prom_getchild(int node)
{
	return p1275_cmd ("child", P1275_INOUT(1, 1), node);
}

/* First child of 'node', with invalid/-1 results normalized to 0. */
__inline__ int
prom_getchild(int node)
{
	int child;

	if (node == -1)
		return 0;
	child = __prom_getchild(node);
	return (child == -1) ? 0 : child;
}
36
/* Parent of 'node', with invalid/-1 results normalized to 0. */
__inline__ int
prom_getparent(int node)
{
	int parent;

	if (node == -1)
		return 0;
	parent = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
	return (parent == -1) ? 0 : parent;
}
47
48/* Return the next sibling of node 'node' or zero if no more siblings
49 * at this level of depth in the tree.
50 */
/* Raw PROM "peer" service: may return -1 for an invalid node. */
__inline__ int
__prom_getsibling(int node)
{
	return p1275_cmd ("peer", P1275_INOUT(1, 1), node);
}

/* Next sibling of 'node', with invalid/-1 results normalized to 0. */
__inline__ int
prom_getsibling(int node)
{
	int sib;

	if (node == -1)
		return 0;
	sib = __prom_getsibling(node);
	return (sib == -1) ? 0 : sib;
}
67
68/* Return the length in bytes of property 'prop' at node 'node'.
69 * Return -1 on error.
70 */
71__inline__ int
72prom_getproplen(int node, char *prop)
73{
74 if((!node) || (!prop)) return -1;
75 return p1275_cmd ("getproplen",
76 P1275_ARG(1,P1275_ARG_IN_STRING)|
77 P1275_INOUT(2, 1),
78 node, prop);
79}
80
81/* Acquire a property 'prop' at node 'node' and place it in
82 * 'buffer' which has a size of 'bufsize'. If the acquisition
83 * was successful the length will be returned, else -1 is returned.
84 */
85__inline__ int
86prom_getproperty(int node, char *prop, char *buffer, int bufsize)
87{
88 int plen;
89
90 plen = prom_getproplen(node, prop);
91 if((plen > bufsize) || (plen == 0) || (plen == -1))
92 return -1;
93 else {
94 /* Ok, things seem all right. */
95 return p1275_cmd ("getprop",
96 P1275_ARG(1,P1275_ARG_IN_STRING)|
97 P1275_ARG(2,P1275_ARG_OUT_BUF)|
98 P1275_INOUT(4, 1),
99 node, prop, buffer, P1275_SIZE(plen));
100 }
101}
102
103/* Acquire an integer property and return its value. Returns -1
104 * on failure.
105 */
/* Fetch an integer property value; -1 on failure. */
__inline__ int
prom_getint(int node, char *prop)
{
	int val;

	if (prom_getproperty(node, prop, (char *) &val, sizeof(int)) == -1)
		return -1;
	return val;
}
116
117/* Acquire an integer property, upon error return the passed default
118 * integer.
119 */
120
/* Fetch an integer property, falling back to 'deflt' on failure. */
int
prom_getintdefault(int node, char *property, int deflt)
{
	int val = prom_getint(node, property);

	return (val == -1) ? deflt : val;
}
131
132/* Acquire a boolean property, 1=TRUE 0=FALSE. */
/* Boolean property: present (any length) = 1, absent/error = 0. */
int
prom_getbool(int node, char *prop)
{
	return (prom_getproplen(node, prop) == -1) ? 0 : 1;
}
142
143/* Acquire a property whose value is a string, returns a null
144 * string on error. The char pointer is the user supplied string
145 * buffer.
146 */
/* Fetch a string property into the caller's buffer; on error the
 * buffer is set to the empty string.
 *
 * Fixes vs. the original: the buffer is always NUL-terminated (a
 * property exactly filling the buffer previously left it
 * unterminated), and nothing is written when ubuf_size < 1 (the
 * original stored user_buf[0] unconditionally on error).
 */
void
prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
{
	int len;

	if (ubuf_size < 1)
		return;
	len = prom_getproperty(node, prop, user_buf, ubuf_size);
	if (len == -1) {
		user_buf[0] = 0;
		return;
	}
	/* Guarantee C-string termination for the caller. */
	user_buf[ubuf_size - 1] = 0;
}
157
158
159/* Does the device at node 'node' have name 'name'?
160 * YES = 1 NO = 0
161 */
/* Does the device at 'node' have name 'name'?  YES = 1, NO = 0.
 *
 * Bug fix: the original ignored the return value of
 * prom_getproperty(), so a node with no "name" property was compared
 * against uninitialized stack garbage.  A failed lookup is now an
 * explicit mismatch.
 */
int
prom_nodematch(int node, char *name)
{
	char namebuf[128];

	if (prom_getproperty(node, "name", namebuf, sizeof(namebuf)) <= 0)
		return 0;
	return (strcmp(namebuf, name) == 0) ? 1 : 0;
}
170
171/* Search siblings at 'node_start' for a node with name
172 * 'nodename'. Return node if successful, zero if not.
173 */
/* Walk the sibling list starting at 'node_start' looking for a node
 * whose "name" property equals 'nodename'.  Returns the node, or 0
 * if no sibling matches.
 */
int
prom_searchsiblings(int node_start, char *nodename)
{
	char namebuf[128];
	int node;

	for (node = node_start; node; node = prom_getsibling(node)) {
		if (prom_getproperty(node, "name", namebuf,
				     sizeof(namebuf)) == -1)
			continue;	/* unnamed node; keep scanning */
		if (strcmp(nodename, namebuf) == 0)
			return node;
	}

	return 0;
}
192
193/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
/* Build the node's full name, "name@unit,offset" (or just "name" if it
 * has no "reg" property), into 'buffer' of capacity 'len'.  The unit
 * address format depends on the bus the node sits on (sbus/pci/ebus/
 * ide/other).  Returns 0 on success, -1 on error.
 */
int
prom_getname (int node, char *buffer, int len)
{
	int i, sbus = 0;
	int pci = 0, ebus = 0, ide = 0;
	struct linux_prom_registers *reg;
	struct linux_prom64_registers reg64[PROMREG_MAX];

	/* Walk up the ancestor chain looking for an "sbus" bridge;
	 * if none is found the loop terminates with sbus == 0.
	 */
	for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
		i = prom_getproperty (sbus, "name", buffer, len);
		if (i > 0) {
			buffer [i] = 0;
			if (!strcmp (buffer, "sbus"))
				goto getit;
		}
	}
	/* The pci/ebus/ide checks only look at the immediate parent;
	 * each flag is reset to 0 when its name does not match.
	 */
	if ((pci = prom_getparent (node))) {
		i = prom_getproperty (pci, "name", buffer, len);
		if (i > 0) {
			buffer [i] = 0;
			if (!strcmp (buffer, "pci"))
				goto getit;
		}
		pci = 0;
	}
	if ((ebus = prom_getparent (node))) {
		i = prom_getproperty (ebus, "name", buffer, len);
		if (i > 0) {
			buffer[i] = 0;
			if (!strcmp (buffer, "ebus"))
				goto getit;
		}
		ebus = 0;
	}
	if ((ide = prom_getparent (node))) {
		i = prom_getproperty (ide, "name", buffer, len);
		if (i > 0) {
			buffer [i] = 0;
			if (!strcmp (buffer, "ide"))
				goto getit;
		}
		ide = 0;
	}
getit:
	/* Fetch the node's own name (overwrites the scratch use above). */
	i = prom_getproperty (node, "name", buffer, len);
	if (i <= 0) {
		buffer [0] = 0;
		return -1;
	}
	buffer [i] = 0;
	len -= i;
	i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
	if (i <= 0) return 0;	/* no "reg": bare name is the full name */
	if (len < 16) return -1;	/* not enough room for "@x,y" suffix */
	buffer = strchr (buffer, 0);	/* append at the trailing NUL */
	if (sbus) {
		reg = (struct linux_prom_registers *)reg64;
		sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
	} else if (pci) {
		int dev, fn;
		reg = (struct linux_prom_registers *)reg64;
		fn = (reg[0].which_io >> 8) & 0x07;
		dev = (reg[0].which_io >> 11) & 0x1f;
		if (fn)
			sprintf (buffer, "@%x,%x", dev, fn);
		else
			sprintf (buffer, "@%x", dev);
	} else if (ebus) {
		reg = (struct linux_prom_registers *)reg64;
		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
	} else if (ide) {
		reg = (struct linux_prom_registers *)reg64;
		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
	} else if (i == 4) {	/* Happens on 8042's children on Ultra/PCI. */
		reg = (struct linux_prom_registers *)reg64;
		sprintf (buffer, "@%x", reg[0].which_io);
	} else {
		sprintf (buffer, "@%x,%x",
			 (unsigned int)(reg64[0].phys_addr >> 36),
			 (unsigned int)(reg64[0].phys_addr));
	}
	return 0;
}
277
278/* Return the first property type for node 'node'.
279 * buffer should be at least 32B in length
280 */
281__inline__ char *
282prom_firstprop(int node, char *buffer)
283{
284 *buffer = 0;
285 if(node == -1) return buffer;
286 p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
287 P1275_INOUT(3, 0),
288 node, (char *) 0x0, buffer);
289 return buffer;
290}
291
292/* Return the property type string after property type 'oprop'
293 * at node 'node' . Returns NULL string if no more
294 * property types for this node.
295 */
/* Store into 'buffer' the property name following 'oprop' on 'node';
 * empty string when there are no more properties or the node is
 * invalid.  Returns 'buffer'.
 */
__inline__ char *
prom_nextprop(int node, char *oprop, char *buffer)
{
	char buf[32];

	if(node == -1) {
		*buffer = 0;
		return buffer;
	}
	/* Callers may pass the same storage for oprop and buffer (see
	 * prom_node_has_property); copy the input aside so the PROM's
	 * output does not clobber it mid-call.
	 */
	if (oprop == buffer) {
		strcpy (buf, oprop);
		oprop = buf;
	}
	p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
			       P1275_ARG(2,P1275_ARG_OUT_32B)|
			       P1275_INOUT(3, 0),
			       node, oprop, buffer);
	return buffer;
}
315
316int
317prom_finddevice(char *name)
318{
319 if(!name) return 0;
320 return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)|
321 P1275_INOUT(1, 1),
322 name);
323}
324
/* Return 1 if 'node' carries property 'prop', else 0.  Iterates the
 * property list via prom_nextprop(), which tolerates the in-place
 * buffer reuse below.
 */
int prom_node_has_property(int node, char *prop)
{
	char name[32];

	name[0] = '\0';
	do {
		prom_nextprop(node, name, name);
		if (strcmp(name, prop) == 0)
			return 1;
	} while (name[0] != '\0');

	return 0;
}
337
338/* Set property 'pname' at node 'node' to value 'value' which has a length
339 * of 'size' bytes. Return the number of bytes the prom accepted.
340 */
341int
342prom_setprop(int node, char *pname, char *value, int size)
343{
344 if(size == 0) return 0;
345 if((pname == 0) || (value == 0)) return 0;
346
347 return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
348 P1275_ARG(2,P1275_ARG_IN_BUF)|
349 P1275_INOUT(4, 1),
350 node, pname, value, P1275_SIZE(size));
351}
352
/* Map an instance handle to its package node; 0 on failure. */
__inline__ int
prom_inst2pkg(int inst)
{
	int node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);

	return (node == -1) ? 0 : node;
}
362
363/* Return 'node' assigned to a particular prom 'path'
364 * FIXME: Should work for v0 as well
365 */
/* Resolve a PROM 'path' to its node by opening the device, mapping
 * the instance to a package, and closing it again; 0 on failure.
 */
int
prom_pathtoinode(char *path)
{
	int node, inst;

	inst = prom_devopen (path);
	if (inst == 0)
		return 0;

	node = prom_inst2pkg (inst);
	prom_devclose (inst);

	return (node == -1) ? 0 : node;
}
diff --git a/arch/sparc64/solaris/Makefile b/arch/sparc64/solaris/Makefile
new file mode 100644
index 000000000000..8c8663033bfb
--- /dev/null
+++ b/arch/sparc64/solaris/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Solaris binary emulation.
3#
4
5EXTRA_AFLAGS := -ansi
6
7solaris-objs := entry64.o fs.o misc.o signal.o systbl.o socket.o \
8 ioctl.o ipc.o socksys.o timod.o
9
10obj-$(CONFIG_SOLARIS_EMUL) += solaris.o
diff --git a/arch/sparc64/solaris/conv.h b/arch/sparc64/solaris/conv.h
new file mode 100644
index 000000000000..5faf59a9de39
--- /dev/null
+++ b/arch/sparc64/solaris/conv.h
@@ -0,0 +1,38 @@
1/* $Id: conv.h,v 1.4 1998/08/15 20:42:51 davem Exp $
2 * conv.h: Utility macros for Solaris emulation
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
/* #define DEBUG_SOLARIS */
#define DEBUG_SOLARIS_KMALLOC

#ifndef __ASSEMBLY__

#include <asm/unistd.h>

/* Use this to get at 32-bit user passed pointers. */
/* The srl-by-0 clears the upper 32 bits of the register, yielding a
 * zero-extended user pointer suitable for 64-bit kernel use.
 */
#define A(__x) \
({	unsigned long __ret; \
	__asm__ ("srl %0, 0, %0" \
		 : "=r" (__ret) \
		 : "0" (__x)); \
	(void __user *)__ret; \
})

extern unsigned sys_call_table[];
extern unsigned sys_call_table32[];
extern unsigned sunos_sys_table[];

/* Fetch a native 64-bit or SunOS syscall entry point by number. */
#define SYS(name) ((long)sys_call_table[__NR_##name])
#define SUNOS(x) ((long)sunos_sys_table[x])

#ifdef DEBUG_SOLARIS
#define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__FUNCTION__,(s))
/* NOTE(review): SOLDD expands to two statements; an unbraced
 * "if (x) SOLDD(...)" would only guard the first printk.
 */
#define SOLDD(s) printk("solaris: "); printk s
#else
#define SOLD(s)
#define SOLDD(s)
#endif

#endif /* __ASSEMBLY__ */
diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S
new file mode 100644
index 000000000000..0cc9dad75c5e
--- /dev/null
+++ b/arch/sparc64/solaris/entry64.S
@@ -0,0 +1,218 @@
1/* $Id: entry64.S,v 1.7 2002/02/09 19:49:31 davem Exp $
2 * entry64.S: Solaris syscall emulation entry point.
3 *
4 * Copyright (C) 1996,1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 */
8
9#include <linux/errno.h>
10
11#include <asm/head.h>
12#include <asm/asi.h>
13#include <asm/smp.h>
14#include <asm/ptrace.h>
15#include <asm/page.h>
16#include <asm/signal.h>
17#include <asm/pgtable.h>
18#include <asm/processor.h>
19#include <asm/thread_info.h>
20
21#include "conv.h"
22
23#define NR_SYSCALLS 256
24
	.text
	/* Notify the tracer, then rebuild the zero-extended outgoing
	 * argument registers (%o0-%o5) that syscall_trace may have
	 * disturbed, and resume at local label 2 in the main path.
	 */
solaris_syscall_trace:
	call		syscall_trace
	 nop
	srl		%i0, 0, %o0
	mov		%i4, %o4
	srl		%i1, 0, %o1
	mov		%i5, %o5
	andcc		%l3, 1, %g0
	be,pt		%icc, 2f
	 srl		%i2, 0, %o2
	b,pt		%xcc, 2f
	 add		%sp, PTREGS_OFF, %o0

solaris_sucks:
/* Solaris is a big system which needs to be able to do all the things
 * in Inf+1 different ways */
	/* Indirect syscall (number 0): load the real syscall number
	 * from the user stack at %fp + 0x5c, shift all arguments down
	 * by one, and re-enter solaris_sparc_syscall.
	 */
	add		%i6, 0x5c, %o0
	mov		%i0, %g1
	mov		%i1, %i0
	mov		%i2, %i1
	srl		%o0, 0, %o0
	mov		%i3, %i2
	movrz		%g1, 256, %g1		/* Ensure we don't loop forever */
	mov		%i4, %i3
	mov		%i5, %i4
	ba,pt		%xcc, solaris_sparc_syscall
exen:	lduwa		[%o0] ASI_S, %i5

	/* Fault path for the user load above (see __ex_table entry). */
exenf:	ba,pt		%xcc, solaris_sparc_syscall
	 clr		%i5

/* For shared binaries, binfmt_elf32 already sets up personality
   and exec_domain. This is to handle static binaries as well */
solaris_reg:
	call		solaris_register
	 nop
	ba,pt		%xcc, 1f
	 mov		%i4, %o4

	/* Dispatch through the native 32-bit Linux syscall table when
	 * the Solaris table entry maps onto a Linux syscall number.
	 */
linux_syscall_for_solaris:
	sethi		%hi(sys_call_table32), %l6
	or		%l6, %lo(sys_call_table32), %l6
	sll		%l3, 2, %l4
	ba,pt		%xcc, 10f
	 lduw		[%l6 + %l4], %l3
71
	/* Solaris system calls enter here... */
	.align	32
	.globl	solaris_sparc_syscall, entry64_personality_patch
	/* %g1 = syscall number; range-check it, handle number 0 via
	 * solaris_sucks (indirect syscall), register the exec domain
	 * for static binaries, then dispatch through
	 * solaris_sys_table with zero-extended 32-bit arguments.
	 */
solaris_sparc_syscall:
entry64_personality_patch:
	ldub		[%g4 + 0x0], %l0
	cmp		%g1, 255
	bg,pn		%icc, solaris_unimplemented
	 srl		%g1, 0, %g1
	sethi		%hi(solaris_sys_table), %l7
	or		%l7, %lo(solaris_sys_table), %l7
	brz,pn		%g1, solaris_sucks
	 mov		%i4, %o4
	sll		%g1, 2, %l4
	cmp		%l0, 1
	bne,pn		%icc, solaris_reg
1:	srl		%i0, 0, %o0
	lduw		[%l7 + %l4], %l3
	srl		%i1, 0, %o1
	ldx		[%g6 + TI_FLAGS], %l5
	cmp		%l3, NR_SYSCALLS
	bleu,a,pn	%xcc, linux_syscall_for_solaris
	 nop
	/* Low bit of the table entry flags a handler that wants a
	 * pt_regs pointer in %o0 instead of the first argument.
	 */
	andcc		%l3, 1, %g0
	bne,a,pn	%icc, 10f
	 add		%sp, PTREGS_OFF, %o0
10:	srl		%i2, 0, %o2
	mov		%i5, %o5
	andn		%l3, 3, %l7
	andcc		%l5, _TIF_SYSCALL_TRACE, %g0
	bne,pn		%icc, solaris_syscall_trace
	 mov		%i0, %l5
2:	call		%l7
	 srl		%i3, 0, %o3
	/* Common syscall return: store the result in the saved %i0,
	 * set/clear the 32- and 64-bit carry bits in TSTATE to signal
	 * failure/success, translate the Linux errno through
	 * solaris_err_table, then fall into rtrap with pc/npc advanced
	 * (or, when tnpc & 1, left in place for setcontext).
	 */
ret_from_solaris:
	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
	ldx		[%g6 + TI_FLAGS], %l6
	sra		%o0, 0, %o0
	mov		%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
	cmp		%o0, -ERESTART_RESTARTBLOCK
	sllx		%g2, 32, %g2
	bgeu,pn		%xcc, 1f
	 andcc		%l6, _TIF_SYSCALL_TRACE, %l6

	/* System call success, clear Carry condition code. */
	andn		%g3, %g2, %g3
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, solaris_syscall_trace2
	 ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1
	andcc		%l1, 1, %g0
	bne,pn		%icc, 2f
	 clr		%l6
	add		%l1, 0x4, %l2
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]	! pc = npc
	call		rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]	!npc = npc+4

	/* When tnpc & 1, this comes from setcontext and we don't want to advance pc */
2:	andn		%l1, 3, %l1
	call		rtrap
	 stx		%l1, [%sp + PTREGS_OFF + PT_V9_TNPC]	!npc = npc&~3

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	sub		%g0, %o0, %o0
	or		%g3, %g2, %g3
	cmp		%o0, ERANGE	/* 0-ERANGE are identity mapped */
	bleu,pt		%icc, 1f
	 cmp		%o0, EMEDIUMTYPE
	bgu,pn		%icc, 1f
	 sethi		%hi(solaris_err_table), %l6
	sll		%o0, 2, %o0
	or		%l6, %lo(solaris_err_table), %l6
	ldsw		[%l6 + %o0], %o0
1:	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
	mov		1, %l6
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, solaris_syscall_trace2
	 ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1
	andcc		%l1, 1, %g0
	bne,pn		%icc, 2b
	 add		%l1, 0x4, %l2
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]	! pc = npc
	call		rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]	!npc = npc+4

	/* Syscall-exit tracing variant of the return path above. */
solaris_syscall_trace2:
	call		syscall_trace
	 add		%l1, 0x4, %l2	/* npc = npc+4 */
	andcc		%l1, 1, %g0
	bne,pn		%icc, 2b
	 nop
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	call		rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
170
	/* This one is tricky, so that's why we do it in assembly */
	.globl	solaris_sigsuspend
solaris_sigsuspend:
	call		do_sol_sigsuspend
	 nop
	brlz,pn		%o0, ret_from_solaris
	 nop
	call		sys_sigsuspend
	 stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]

	/* Solaris getpid() returns the pid in %o0 and, via the delay
	 * slot below, the result of sys_getppid in the saved %o1.
	 */
	.globl	solaris_getpid
solaris_getpid:
	call	sys_getppid
	 nop
	call	sys_getpid
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_from_solaris
	 nop

	/* getuid: uid in %o0, with %o1 stored as the secondary result.
	 * NOTE(review): this relies on sys_geteuid leaving its value
	 * in %o1 at the point of the stx -- verify against the sparc64
	 * syscall return convention.
	 */
	.globl	solaris_getuid
solaris_getuid:
	call	sys_geteuid
	 nop
	call	sys_getuid
	 stx	%o1, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_from_solaris
	 nop

	/* getgid: same dual-result scheme as getuid above. */
	.globl	solaris_getgid
solaris_getgid:
	call	sys_getegid
	 nop
	call	sys_getgid
	 stx	%o1, [%sp + PTREGS_OFF + PT_V9_I1]
	b,pt	%xcc, ret_from_solaris
	 nop

	.globl solaris_unimplemented
solaris_unimplemented:
	call		do_sol_unimplemented
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, ret_from_solaris
	 nop

	/* Fixup table: a fault in 'exen' (user-stack load) resumes
	 * at 'exenf'.
	 */
	.section	__ex_table,#alloc
	.align		4
	.word		exen, exenf

diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
new file mode 100644
index 000000000000..d7c99fa89661
--- /dev/null
+++ b/arch/sparc64/solaris/fs.c
@@ -0,0 +1,739 @@
1/* $Id: fs.c,v 1.27 2002/02/08 03:57:14 davem Exp $
2 * fs.c: fs related syscall emulation for Solaris
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * 1999-08-19 Implemented solaris F_FREESP (truncate)
7 * fcntl, by Jason Rappleye (rappleye@ccr.buffalo.edu)
8 */
9
10#include <linux/types.h>
11#include <linux/sched.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/namei.h>
15#include <linux/mm.h>
16#include <linux/file.h>
17#include <linux/stat.h>
18#include <linux/smp_lock.h>
19#include <linux/limits.h>
20#include <linux/resource.h>
21#include <linux/quotaops.h>
22#include <linux/mount.h>
23#include <linux/vfs.h>
24
25#include <asm/uaccess.h>
26#include <asm/string.h>
27#include <asm/ptrace.h>
28
29#include "conv.h"
30
31#define R3_VERSION 1
32#define R4_VERSION 2
33
/* Solaris timestamp: 32-bit seconds + nanoseconds. */
typedef struct {
	s32	tv_sec;
	s32	tv_nsec;
} timestruct_t;

/* Solaris struct stat (32-bit sizes), filled by putstat(). */
struct sol_stat {
	u32		st_dev;
	s32		st_pad1[3];	/* network id */
	u32		st_ino;
	u32		st_mode;
	u32		st_nlink;
	u32		st_uid;
	u32		st_gid;
	u32		st_rdev;
	s32		st_pad2[2];
	s32		st_size;
	s32		st_pad3;	/* st_size, off_t expansion */
	timestruct_t	st_atime;
	timestruct_t	st_mtime;
	timestruct_t	st_ctime;
	s32		st_blksize;
	s32		st_blocks;
	char		st_fstype[16];
	s32		st_pad4[8];	/* expansion area */
};

/* Solaris struct stat64 (64-bit ino/size), filled by putstat64(). */
struct sol_stat64 {
	u32		st_dev;
	s32		st_pad1[3];	/* network id */
	u64		st_ino;
	u32		st_mode;
	u32		st_nlink;
	u32		st_uid;
	u32		st_gid;
	u32		st_rdev;
	s32		st_pad2[2];
	s64		st_size;
	timestruct_t	st_atime;
	timestruct_t	st_mtime;
	timestruct_t	st_ctime;
	s64		st_blksize;
	s32		st_blocks;
	char		st_fstype[16];
	s32		st_pad4[4];	/* expansion area */
};
79
/* "ufs\0" filesystem-type tag reported in st_fstype.
 * Bug fix: the original used logical OR (||), which collapses the
 * whole expression to the integer 1 instead of packing the 'u', 'f',
 * 's' character bytes.
 */
#define UFSMAGIC (((unsigned)'u'<<24)|((unsigned)'f'<<16)|((unsigned)'s'<<8))
81
/* Convert a kernel kstat into the Solaris struct stat layout in user
 * space.  Returns 0, -EOVERFLOW when the size or device numbers do
 * not fit the 32-bit SysV encoding, or -EFAULT on a bad user pointer.
 */
static inline int putstat(struct sol_stat __user *ubuf, struct kstat *kbuf)
{
	if (kbuf->size > MAX_NON_LFS ||
	    !sysv_valid_dev(kbuf->dev) ||
	    !sysv_valid_dev(kbuf->rdev))
		return -EOVERFLOW;
	/* The first put_user validates the area; the rest may use the
	 * unchecked __put_user variants.
	 */
	if (put_user (sysv_encode_dev(kbuf->dev), &ubuf->st_dev)	||
	    __put_user (kbuf->ino, &ubuf->st_ino)		||
	    __put_user (kbuf->mode, &ubuf->st_mode)		||
	    __put_user (kbuf->nlink, &ubuf->st_nlink)	||
	    __put_user (kbuf->uid, &ubuf->st_uid)		||
	    __put_user (kbuf->gid, &ubuf->st_gid)		||
	    __put_user (sysv_encode_dev(kbuf->rdev), &ubuf->st_rdev)	||
	    __put_user (kbuf->size, &ubuf->st_size)		||
	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime.tv_sec)	||
	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime.tv_nsec)	||
	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime.tv_sec)	||
	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime.tv_nsec)	||
	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime.tv_sec)	||
	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec)	||
	    __put_user (kbuf->blksize, &ubuf->st_blksize)	||
	    __put_user (kbuf->blocks, &ubuf->st_blocks)	||
	    __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
		return -EFAULT;
	return 0;
}

/* Same as putstat() but for the Solaris stat64 layout: 64-bit inode
 * and size fields, so no MAX_NON_LFS check is needed.
 */
static inline int putstat64(struct sol_stat64 __user *ubuf, struct kstat *kbuf)
{
	if (!sysv_valid_dev(kbuf->dev) || !sysv_valid_dev(kbuf->rdev))
		return -EOVERFLOW;
	if (put_user (sysv_encode_dev(kbuf->dev), &ubuf->st_dev)	||
	    __put_user (kbuf->ino, &ubuf->st_ino)		||
	    __put_user (kbuf->mode, &ubuf->st_mode)		||
	    __put_user (kbuf->nlink, &ubuf->st_nlink)	||
	    __put_user (kbuf->uid, &ubuf->st_uid)		||
	    __put_user (kbuf->gid, &ubuf->st_gid)		||
	    __put_user (sysv_encode_dev(kbuf->rdev), &ubuf->st_rdev)	||
	    __put_user (kbuf->size, &ubuf->st_size)		||
	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime.tv_sec)	||
	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime.tv_nsec)	||
	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime.tv_sec)	||
	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime.tv_nsec)	||
	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime.tv_sec)	||
	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec)	||
	    __put_user (kbuf->blksize, &ubuf->st_blksize)	||
	    __put_user (kbuf->blocks, &ubuf->st_blocks)	||
	    __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
		return -EFAULT;
	return 0;
}
133
/* stat(2) family.  User pointers arrive as 32-bit values and are
 * zero-extended via A(); kernel kstat results are converted to the
 * Solaris layouts by putstat()/putstat64().
 */
asmlinkage int solaris_stat(u32 filename, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_stat(A(filename), &s);
	if (!ret)
		return putstat(A(statbuf), &s);
	return ret;
}

/* Solaris xstat: the version argument is ignored, as on Solaris. */
asmlinkage int solaris_xstat(int vers, u32 filename, u32 statbuf)
{
	/* Solaris doesn't bother with looking at vers, so we do neither */
	return solaris_stat(filename, statbuf);
}

asmlinkage int solaris_stat64(u32 filename, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_stat(A(filename), &s);
	if (!ret)
		return putstat64(A(statbuf), &s);
	return ret;
}

/* lstat variants: identical, but do not follow a final symlink. */
asmlinkage int solaris_lstat(u32 filename, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_lstat(A(filename), &s);
	if (!ret)
		return putstat(A(statbuf), &s);
	return ret;
}

asmlinkage int solaris_lxstat(int vers, u32 filename, u32 statbuf)
{
	return solaris_lstat(filename, statbuf);
}

asmlinkage int solaris_lstat64(u32 filename, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_lstat(A(filename), &s);
	if (!ret)
		return putstat64(A(statbuf), &s);
	return ret;
}

/* fstat variants: operate on an already-open file descriptor. */
asmlinkage int solaris_fstat(unsigned int fd, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_fstat(fd, &s);
	if (!ret)
		return putstat(A(statbuf), &s);
	return ret;
}

asmlinkage int solaris_fxstat(int vers, u32 fd, u32 statbuf)
{
	return solaris_fstat(fd, statbuf);
}

asmlinkage int solaris_fstat64(unsigned int fd, u32 statbuf)
{
	struct kstat s;
	int ret = vfs_fstat(fd, &s);
	if (!ret)
		return putstat64(A(statbuf), &s);
	return ret;
}

/* mknod: translate the SysV-encoded Solaris dev_t into the native
 * encoding before calling the Linux syscall.
 */
asmlinkage int solaris_mknod(u32 path, u32 mode, s32 dev)
{
	int (*sys_mknod)(const char __user *,int,unsigned) =
		(int (*)(const char __user *,int,unsigned))SYS(mknod);
	int major = sysv_major(dev);
	int minor = sysv_minor(dev);

	/* minor is guaranteed to be OK for MKDEV, major might be not */
	if (major > 0xfff)
		return -EINVAL;
	return sys_mknod(A(path), mode, new_encode_dev(MKDEV(major,minor)));
}

asmlinkage int solaris_xmknod(int vers, u32 path, u32 mode, s32 dev)
{
	return solaris_mknod(path, mode, dev);
}

/* Solaris getdents64 maps directly onto the Linux getdents syscall. */
asmlinkage int solaris_getdents64(unsigned int fd, void __user *dirent, unsigned int count)
{
	int (*sys_getdents)(unsigned int, void __user *, unsigned int) =
		(int (*)(unsigned int, void __user *, unsigned int))SYS(getdents);

	return sys_getdents(fd, dirent, count);
}
229
230/* This statfs thingie probably will go in the near future, but... */
231
/* Solaris struct statfs layout written back to user space. */
struct sol_statfs {
	short	f_type;
	s32	f_bsize;
	s32	f_frsize;
	s32	f_blocks;
	s32	f_bfree;
	u32	f_files;
	u32	f_ffree;
	char	f_fname[6];
	char	f_fpack[6];
};

/* Solaris statfs(2): only the fstype==0 case does a real statfs; a
 * non-zero fstype (an unmounted filesystem query) is answered with
 * fabricated numbers, see the comment below.  Returns 0, -EINVAL for
 * a bad length, -EFAULT, or the syscall's error.
 */
asmlinkage int solaris_statfs(u32 path, u32 buf, int len, int fstype)
{
	int ret;
	struct statfs s;
	mm_segment_t old_fs = get_fs();
	int (*sys_statfs)(const char __user *,struct statfs __user *) =
		(int (*)(const char __user *,struct statfs __user *))SYS(statfs);
	struct sol_statfs __user *ss = A(buf);

	if (len != sizeof(struct sol_statfs)) return -EINVAL;
	if (!fstype) {
		/* FIXME: mixing userland and kernel pointers */
		/* set_fs(KERNEL_DS) lets sys_statfs write into the
		 * on-stack 's' while A(path) is still a user pointer.
		 */
		set_fs (KERNEL_DS);
		ret = sys_statfs(A(path), &s);
		set_fs (old_fs);
		if (!ret) {
			if (put_user (s.f_type, &ss->f_type) ||
			    __put_user (s.f_bsize, &ss->f_bsize) ||
			    __put_user (0, &ss->f_frsize) ||
			    __put_user (s.f_blocks, &ss->f_blocks) ||
			    __put_user (s.f_bfree, &ss->f_bfree) ||
			    __put_user (s.f_files, &ss->f_files) ||
			    __put_user (s.f_ffree, &ss->f_ffree) ||
			    __clear_user (&ss->f_fname, 12))
				return -EFAULT;
		}
		return ret;
	}
/* Linux can't stat unmounted filesystems so we
 * simply lie and claim 100MB of 1GB is free. Sorry.
 */
	if (put_user (fstype, &ss->f_type) ||
	    __put_user (1024, &ss->f_bsize) ||
	    __put_user (0, &ss->f_frsize) ||
	    __put_user (1024*1024, &ss->f_blocks) ||
	    __put_user (100*1024, &ss->f_bfree) ||
	    __put_user (60000, &ss->f_files) ||
	    __put_user (50000, &ss->f_ffree) ||
	    __clear_user (&ss->f_fname, 12))
		return -EFAULT;
	return 0;
}

/* Solaris fstatfs(2): same conversion as solaris_statfs() but on an
 * open descriptor; a non-zero fstype defers to solaris_statfs().
 */
asmlinkage int solaris_fstatfs(u32 fd, u32 buf, int len, int fstype)
{
	int ret;
	struct statfs s;
	mm_segment_t old_fs = get_fs();
	int (*sys_fstatfs)(unsigned,struct statfs __user *) =
		(int (*)(unsigned,struct statfs __user *))SYS(fstatfs);
	struct sol_statfs __user *ss = A(buf);

	if (len != sizeof(struct sol_statfs)) return -EINVAL;
	if (!fstype) {
		set_fs (KERNEL_DS);
		ret = sys_fstatfs(fd, &s);
		set_fs (old_fs);
		if (!ret) {
			if (put_user (s.f_type, &ss->f_type) ||
			    __put_user (s.f_bsize, &ss->f_bsize) ||
			    __put_user (0, &ss->f_frsize) ||
			    __put_user (s.f_blocks, &ss->f_blocks) ||
			    __put_user (s.f_bfree, &ss->f_bfree) ||
			    __put_user (s.f_files, &ss->f_files) ||
			    __put_user (s.f_ffree, &ss->f_ffree) ||
			    __clear_user (&ss->f_fname, 12))
				return -EFAULT;
		}
		return ret;
	}
	/* Otherwise fstatfs is the same as statfs */
	return solaris_statfs(0, buf, len, fstype);
}
317
/* Solaris statvfs structure, 32-bit field variant.  f_basetype holds
 * the NUL-terminated filesystem type name, f_flag the ST_RDONLY /
 * ST_NOSUID style bits. */
struct sol_statvfs {
	u32	f_bsize;
	u32	f_frsize;
	u32	f_blocks;
	u32	f_bfree;
	u32	f_bavail;
	u32	f_files;
	u32	f_ffree;
	u32	f_favail;
	u32	f_fsid;
	char	f_basetype[16];
	u32	f_flag;
	u32	f_namemax;
	char	f_fstr[32];
	u32	f_filler[16];
};

/* Solaris statvfs64 structure: identical to sol_statvfs except the
 * block and file counts are 64-bit. */
struct sol_statvfs64 {
	u32	f_bsize;
	u32	f_frsize;
	u64	f_blocks;
	u64	f_bfree;
	u64	f_bavail;
	u64	f_files;
	u64	f_ffree;
	u64	f_favail;
	u32	f_fsid;
	char	f_basetype[16];
	u32	f_flag;
	u32	f_namemax;
	char	f_fstr[32];
	u32	f_filler[16];
};
351
/* Fill a Solaris sol_statvfs at user address 'buf' from vfs_statfs()
 * results plus mount/inode flags.  Shared by solaris_statvfs and
 * solaris_fstatvfs. */
static int report_statvfs(struct vfsmount *mnt, struct inode *inode, u32 buf)
{
	struct kstatfs s;
	int error;
	struct sol_statvfs __user *ss = A(buf);

	error = vfs_statfs(mnt->mnt_sb, &s);
	if (!error) {
		const char *p = mnt->mnt_sb->s_type->name;
		int i = 0;
		int j = strlen (p);

		/* f_basetype is 16 bytes: truncate the name to 15 + NUL. */
		if (j > 15) j = 15;
		/* f_flag: bit 0 = read-only, bit 1 = nosuid. */
		if (IS_RDONLY(inode)) i = 1;
		if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
		/* The device number must fit the SysV 32-bit encoding. */
		if (!sysv_valid_dev(inode->i_sb->s_dev))
			return -EOVERFLOW;
		if (put_user (s.f_bsize, &ss->f_bsize) ||
		    __put_user (0, &ss->f_frsize) ||
		    __put_user (s.f_blocks, &ss->f_blocks) ||
		    __put_user (s.f_bfree, &ss->f_bfree) ||
		    __put_user (s.f_bavail, &ss->f_bavail) ||
		    __put_user (s.f_files, &ss->f_files) ||
		    __put_user (s.f_ffree, &ss->f_ffree) ||
		    /* Linux has no separate "avail to unprivileged users"
		     * inode count, so f_favail repeats f_ffree. */
		    __put_user (s.f_ffree, &ss->f_favail) ||
		    __put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
		    __copy_to_user (ss->f_basetype,p,j) ||
		    __put_user (0, (char __user *)&ss->f_basetype[j]) ||
		    __put_user (s.f_namelen, &ss->f_namemax) ||
		    __put_user (i, &ss->f_flag) ||
		    __clear_user (&ss->f_fstr, 32))
			return -EFAULT;
	}
	return error;
}
387
/* 64-bit sibling of report_statvfs: identical logic, but fills the
 * sol_statvfs64 layout (64-bit block/file counts). */
static int report_statvfs64(struct vfsmount *mnt, struct inode *inode, u32 buf)
{
	struct kstatfs s;
	int error;
	struct sol_statvfs64 __user *ss = A(buf);

	error = vfs_statfs(mnt->mnt_sb, &s);
	if (!error) {
		const char *p = mnt->mnt_sb->s_type->name;
		int i = 0;
		int j = strlen (p);

		/* f_basetype is 16 bytes: truncate the name to 15 + NUL. */
		if (j > 15) j = 15;
		/* f_flag: bit 0 = read-only, bit 1 = nosuid. */
		if (IS_RDONLY(inode)) i = 1;
		if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
		/* The device number must fit the SysV 32-bit encoding. */
		if (!sysv_valid_dev(inode->i_sb->s_dev))
			return -EOVERFLOW;
		if (put_user (s.f_bsize, &ss->f_bsize) ||
		    __put_user (0, &ss->f_frsize) ||
		    __put_user (s.f_blocks, &ss->f_blocks) ||
		    __put_user (s.f_bfree, &ss->f_bfree) ||
		    __put_user (s.f_bavail, &ss->f_bavail) ||
		    __put_user (s.f_files, &ss->f_files) ||
		    __put_user (s.f_ffree, &ss->f_ffree) ||
		    /* f_favail repeats f_ffree; see report_statvfs. */
		    __put_user (s.f_ffree, &ss->f_favail) ||
		    __put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
		    __copy_to_user (ss->f_basetype,p,j) ||
		    __put_user (0, (char __user *)&ss->f_basetype[j]) ||
		    __put_user (s.f_namelen, &ss->f_namemax) ||
		    __put_user (i, &ss->f_flag) ||
		    __clear_user (&ss->f_fstr, 32))
			return -EFAULT;
	}
	return error;
}
423
424asmlinkage int solaris_statvfs(u32 path, u32 buf)
425{
426 struct nameidata nd;
427 int error;
428
429 error = user_path_walk(A(path),&nd);
430 if (!error) {
431 struct inode * inode = nd.dentry->d_inode;
432 error = report_statvfs(nd.mnt, inode, buf);
433 path_release(&nd);
434 }
435 return error;
436}
437
438asmlinkage int solaris_fstatvfs(unsigned int fd, u32 buf)
439{
440 struct file * file;
441 int error;
442
443 error = -EBADF;
444 file = fget(fd);
445 if (file) {
446 error = report_statvfs(file->f_vfsmnt, file->f_dentry->d_inode, buf);
447 fput(file);
448 }
449
450 return error;
451}
452
/* Solaris statvfs64(2): like solaris_statvfs but fills the 64-bit
 * sol_statvfs64 layout.  Runs under the BKL (unlike the 32-bit
 * variant); kept as-is to preserve the existing locking behaviour. */
asmlinkage int solaris_statvfs64(u32 path, u32 buf)
{
	struct nameidata nd;
	int error;

	lock_kernel();
	error = user_path_walk(A(path), &nd);
	if (!error) {
		struct inode * inode = nd.dentry->d_inode;
		error = report_statvfs64(nd.mnt, inode, buf);
		path_release(&nd);
	}
	unlock_kernel();
	return error;
}
468
/* Solaris fstatvfs64(2): descriptor-based variant of statvfs64.
 * The BKL is taken only around the report, mirroring statvfs64. */
asmlinkage int solaris_fstatvfs64(unsigned int fd, u32 buf)
{
	struct file * file;
	int error;

	error = -EBADF;
	file = fget(fd);
	if (file) {
		lock_kernel();
		error = report_statvfs64(file->f_vfsmnt, file->f_dentry->d_inode, buf);
		unlock_kernel();
		fput(file);
	}
	return error;
}
484
485extern asmlinkage long sparc32_open(const char * filename, int flags, int mode);
486
/* Solaris open(2): translate the Solaris O_* flag encoding into the
 * Linux one, then hand off to sparc32_open for the compat pathname
 * handling. */
asmlinkage int solaris_open(u32 fname, int flags, u32 mode)
{
	const char *filename = (const char *)(long)fname;
	/* Low 4 bits (access mode) are identical in both encodings. */
	int fl = flags & 0xf;

	/* Translate flags first. */
	/* Solaris bit values on the left; 0x8050 presumably covers the
	 * Solaris sync flags (O_DSYNC/O_SYNC/O_RSYNC), all collapsed
	 * into Linux O_SYNC — TODO confirm against Solaris headers. */
	if (flags & 0x2000) fl |= O_LARGEFILE;
	if (flags & 0x8050) fl |= O_SYNC;
	if (flags & 0x80) fl |= O_NONBLOCK;
	if (flags & 0x100) fl |= O_CREAT;
	if (flags & 0x200) fl |= O_TRUNC;
	if (flags & 0x400) fl |= O_EXCL;
	if (flags & 0x800) fl |= O_NOCTTY;
	flags = fl;

	return sparc32_open(filename, flags, mode);
}
504
/* Solaris fcntl(2) command numbers (they differ from the Linux F_*
 * values and are remapped in solaris_fcntl). */
#define SOL_F_SETLK	6
#define SOL_F_SETLKW	7
#define SOL_F_FREESP	11
#define SOL_F_ISSTREAM	13
#define SOL_F_GETLK	14
#define SOL_F_PRIV	15
#define SOL_F_NPRIV	16
#define SOL_F_QUOTACTL	17
#define SOL_F_BLOCKS	18
#define SOL_F_BLKSIZE	19
#define SOL_F_GETOWN	23
#define SOL_F_SETOWN	24

/* Solaris struct flock as seen by 32-bit userland: 32-bit start/len,
 * plus the SysV l_sysid field that Linux does not have. */
struct sol_flock {
	short	l_type;
	short	l_whence;
	u32	l_start;
	u32	l_len;
	s32	l_sysid;
	s32	l_pid;
	s32	l_pad[4];
};
527
528asmlinkage int solaris_fcntl(unsigned fd, unsigned cmd, u32 arg)
529{
530 int (*sys_fcntl)(unsigned,unsigned,unsigned long) =
531 (int (*)(unsigned,unsigned,unsigned long))SYS(fcntl);
532 int ret, flags;
533
534 switch (cmd) {
535 case F_DUPFD:
536 case F_GETFD:
537 case F_SETFD: return sys_fcntl(fd, cmd, (unsigned long)arg);
538 case F_GETFL:
539 flags = sys_fcntl(fd, cmd, 0);
540 ret = flags & 0xf;
541 if (flags & O_SYNC) ret |= 0x8050;
542 if (flags & O_NONBLOCK) ret |= 0x80;
543 return ret;
544 case F_SETFL:
545 flags = arg & 0xf;
546 if (arg & 0x8050) flags |= O_SYNC;
547 if (arg & 0x80) flags |= O_NONBLOCK;
548 return sys_fcntl(fd, cmd, (long)flags);
549 case SOL_F_GETLK:
550 case SOL_F_SETLK:
551 case SOL_F_SETLKW:
552 {
553 struct flock f;
554 struct sol_flock __user *p = A(arg);
555 mm_segment_t old_fs = get_fs();
556
557 switch (cmd) {
558 case SOL_F_GETLK: cmd = F_GETLK; break;
559 case SOL_F_SETLK: cmd = F_SETLK; break;
560 case SOL_F_SETLKW: cmd = F_SETLKW; break;
561 }
562
563 if (get_user (f.l_type, &p->l_type) ||
564 __get_user (f.l_whence, &p->l_whence) ||
565 __get_user (f.l_start, &p->l_start) ||
566 __get_user (f.l_len, &p->l_len) ||
567 __get_user (f.l_pid, &p->l_sysid))
568 return -EFAULT;
569
570 set_fs(KERNEL_DS);
571 ret = sys_fcntl(fd, cmd, (unsigned long)&f);
572 set_fs(old_fs);
573
574 if (__put_user (f.l_type, &p->l_type) ||
575 __put_user (f.l_whence, &p->l_whence) ||
576 __put_user (f.l_start, &p->l_start) ||
577 __put_user (f.l_len, &p->l_len) ||
578 __put_user (f.l_pid, &p->l_pid) ||
579 __put_user (0, &p->l_sysid))
580 return -EFAULT;
581
582 return ret;
583 }
584 case SOL_F_FREESP:
585 {
586 int length;
587 int (*sys_newftruncate)(unsigned int, unsigned long)=
588 (int (*)(unsigned int, unsigned long))SYS(ftruncate);
589
590 if (get_user(length, &((struct sol_flock __user *)A(arg))->l_start))
591 return -EFAULT;
592
593 return sys_newftruncate(fd, length);
594 }
595 };
596 return -EINVAL;
597}
598
/* Solaris ulimit(2) emulation on top of RLIMIT_FSIZE / RLIMIT_DATA. */
asmlinkage int solaris_ulimit(int cmd, int val)
{
	switch (cmd) {
	case 1: /* UL_GETFSIZE - in 512B chunks */
		return current->signal->rlim[RLIMIT_FSIZE].rlim_cur >> 9;
	case 2: /* UL_SETFSIZE */
		/* Reject values whose 512-byte scaling would overflow. */
		if ((unsigned long)val > (LONG_MAX>>9)) return -ERANGE;
		val <<= 9;
		/* task_lock on the group leader serializes rlim updates. */
		task_lock(current->group_leader);
		if (val > current->signal->rlim[RLIMIT_FSIZE].rlim_max) {
			/* Raising the hard limit needs CAP_SYS_RESOURCE. */
			if (!capable(CAP_SYS_RESOURCE)) {
				task_unlock(current->group_leader);
				return -EPERM;
			}
			current->signal->rlim[RLIMIT_FSIZE].rlim_max = val;
		}
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = val;
		task_unlock(current->group_leader);
		return 0;
	case 3: /* UL_GMEMLIM */
		return current->signal->rlim[RLIMIT_DATA].rlim_cur;
	case 4: /* UL_GDESLIM */
		return NR_OPEN;
	}
	return -EINVAL;
}
625
626/* At least at the time I'm writing this, Linux doesn't have ACLs, so we
627 just fake this */
628asmlinkage int solaris_acl(u32 filename, int cmd, int nentries, u32 aclbufp)
629{
630 return -ENOSYS;
631}
632
633asmlinkage int solaris_facl(unsigned int fd, int cmd, int nentries, u32 aclbufp)
634{
635 return -ENOSYS;
636}
637
638asmlinkage int solaris_pread(unsigned int fd, char __user *buf, u32 count, u32 pos)
639{
640 ssize_t (*sys_pread64)(unsigned int, char __user *, size_t, loff_t) =
641 (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pread64);
642
643 return sys_pread64(fd, buf, count, (loff_t)pos);
644}
645
646asmlinkage int solaris_pwrite(unsigned int fd, char __user *buf, u32 count, u32 pos)
647{
648 ssize_t (*sys_pwrite64)(unsigned int, char __user *, size_t, loff_t) =
649 (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pwrite64);
650
651 return sys_pwrite64(fd, buf, count, (loff_t)pos);
652}
653
/* pathconf(2)/fpathconf(2) 'name' values — presumably matching the
 * Solaris <unistd.h> _PC_* numbering; confirm against Solaris headers. */
/* POSIX.1 names */
#define _PC_LINK_MAX	1
#define _PC_MAX_CANON	2
#define _PC_MAX_INPUT	3
#define _PC_NAME_MAX	4
#define _PC_PATH_MAX	5
#define _PC_PIPE_BUF	6
#define _PC_NO_TRUNC	7
#define _PC_VDISABLE	8
#define _PC_CHOWN_RESTRICTED	9
/* POSIX.4 names */
#define _PC_ASYNC_IO	10
#define _PC_PRIO_IO	11
#define _PC_SYNC_IO	12
#define _PC_LAST	12
669
670/* This is not a real and complete implementation yet, just to keep
671 * the easy Solaris binaries happy.
672 */
673asmlinkage int solaris_fpathconf(int fd, int name)
674{
675 int ret;
676
677 switch(name) {
678 case _PC_LINK_MAX:
679 ret = LINK_MAX;
680 break;
681 case _PC_MAX_CANON:
682 ret = MAX_CANON;
683 break;
684 case _PC_MAX_INPUT:
685 ret = MAX_INPUT;
686 break;
687 case _PC_NAME_MAX:
688 ret = NAME_MAX;
689 break;
690 case _PC_PATH_MAX:
691 ret = PATH_MAX;
692 break;
693 case _PC_PIPE_BUF:
694 ret = PIPE_BUF;
695 break;
696 case _PC_CHOWN_RESTRICTED:
697 ret = 1;
698 break;
699 case _PC_NO_TRUNC:
700 case _PC_VDISABLE:
701 ret = 0;
702 break;
703 default:
704 ret = -EINVAL;
705 break;
706 }
707 return ret;
708}
709
/* Solaris pathconf(2): the path is deliberately ignored — every value
 * solaris_fpathconf can report is path-independent. */
asmlinkage int solaris_pathconf(u32 path, int name)
{
	return solaris_fpathconf(0, name);
}
714
/* solaris_llseek returns long long - quite difficult */
/* The fd is read out of %i0 via pt_regs; the 64-bit offset arrives as
 * two 32-bit halves.  The 64-bit result is returned split across the
 * register pair: low word stored into %i1, high word via the normal
 * return value (%i0). */
asmlinkage long solaris_llseek(struct pt_regs *regs, u32 off_hi, u32 off_lo, int whence)
{
	int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int) =
		(int (*)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int))SYS(_llseek);
	int ret;
	mm_segment_t old_fs = get_fs();
	loff_t retval;

	/* 'retval' is a kernel-stack buffer, hence the KERNEL_DS window. */
	set_fs(KERNEL_DS);
	ret = sys_llseek((unsigned int)regs->u_regs[UREG_I0], off_hi, off_lo, &retval, whence);
	set_fs(old_fs);
	if (ret < 0) return ret;
	regs->u_regs[UREG_I1] = (u32)retval;
	return (retval >> 32);
}
731
732/* Have to mask out all but lower 3 bits */
733asmlinkage int solaris_access(u32 filename, long mode)
734{
735 int (*sys_access)(const char __user *, int) =
736 (int (*)(const char __user *, int))SYS(access);
737
738 return sys_access(A(filename), mode & 7);
739}
diff --git a/arch/sparc64/solaris/ioctl.c b/arch/sparc64/solaris/ioctl.c
new file mode 100644
index 000000000000..cac0a1cf0050
--- /dev/null
+++ b/arch/sparc64/solaris/ioctl.c
@@ -0,0 +1,820 @@
1/* $Id: ioctl.c,v 1.17 2002/02/08 03:57:14 davem Exp $
2 * ioctl.c: Solaris ioctl emulation.
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997,1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
6 *
7 * Streams & timod emulation based on code
8 * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
9 *
10 * 1999-08-19 Implemented solaris 'm' (mag tape) and
11 * 'O' (openprom) ioctls, by Jason Rappleye
12 * (rappleye@ccr.buffalo.edu)
13 */
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/syscalls.h>
21#include <linux/ioctl.h>
22#include <linux/fs.h>
23#include <linux/file.h>
24#include <linux/netdevice.h>
25#include <linux/mtio.h>
26#include <linux/time.h>
27#include <linux/compat.h>
28
29#include <net/sock.h>
30
31#include <asm/uaccess.h>
32#include <asm/termios.h>
33#include <asm/openpromio.h>
34
35#include "conv.h"
36#include "socksys.h"
37
38extern asmlinkage int compat_sys_ioctl(unsigned int fd, unsigned int cmd,
39 u32 arg);
40asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
41
42extern int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
43 char __user *data_buf, int data_len, int flags);
44extern int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, int __user *ctl_len,
45 char __user *data_buf, int data_maxlen, int __user *data_len, int *flags);
46
47/* termio* stuff {{{ */
48
/* Solaris struct termios: 32-bit flag words and 19 control chars
 * (no c_line field, unlike the Linux layout). */
struct solaris_termios {
	u32	c_iflag;
	u32	c_oflag;
	u32	c_cflag;
	u32	c_lflag;
	u8	c_cc[19];
};

/* Solaris struct termio: 16-bit flag words, 8 control chars. */
struct solaris_termio {
	u16	c_iflag;
	u16	c_oflag;
	u16	c_cflag;
	u16	c_lflag;
	s8	c_line;
	u8	c_cc[8];
};

/* Solaris struct termiox (extended hardware flow control). */
struct solaris_termiox {
	u16	x_hflag;
	u16	x_cflag;
	u16	x_rflag[5];
	u16	x_sflag;
};
72
73static u32 solaris_to_linux_cflag(u32 cflag)
74{
75 cflag &= 0x7fdff000;
76 if (cflag & 0x200000) {
77 int baud = cflag & 0xf;
78 cflag &= ~0x20000f;
79 switch (baud) {
80 case 0: baud = B57600; break;
81 case 1: baud = B76800; break;
82 case 2: baud = B115200; break;
83 case 3: baud = B153600; break;
84 case 4: baud = B230400; break;
85 case 5: baud = B307200; break;
86 case 6: baud = B460800; break;
87 }
88 cflag |= CBAUDEX | baud;
89 }
90 return cflag;
91}
92
/* Convert a Linux c_cflag word to the Solaris encoding: strip bits
 * Solaris lacks (CMSPAR, CIBAUD) and remap the CBAUDEX extended baud
 * constants onto the Solaris 0x200000 + 0..8 code scheme. */
static u32 linux_to_solaris_cflag(u32 cflag)
{
	cflag &= ~(CMSPAR | CIBAUD);
	if (cflag & CBAUDEX) {
		int baud = cflag & CBAUD;
		cflag &= ~CBAUD;
		switch (baud) {
		case B57600: baud = 0; break;
		case B76800: baud = 1; break;
		case B115200: baud = 2; break;
		case B153600: baud = 3; break;
		case B230400: baud = 4; break;
		case B307200: baud = 5; break;
		case B460800: baud = 6; break;
		case B614400: baud = 7; break;
		case B921600: baud = 8; break;
#if 0
		case B1843200: baud = 9; break;
#endif
		}
		cflag |= 0x200000 | baud;
	}
	return cflag;
}
117
/* Run a TCGETA-style ioctl directly into the user buffer, then fix up
 * c_cflag in place from the Linux to the Solaris encoding (the two
 * termio layouts otherwise presumably match — TODO confirm). */
static inline int linux_to_solaris_termio(unsigned int fd, unsigned int cmd, u32 arg)
{
	struct solaris_termio __user *p = A(arg);
	int ret;

	ret = sys_ioctl(fd, cmd, (unsigned long)p);
	if (!ret) {
		u32 cflag;

		/* sys_ioctl already validated the user pointer, hence
		 * the unchecked __get_user/__put_user. */
		if (__get_user (cflag, &p->c_cflag))
			return -EFAULT;
		cflag = linux_to_solaris_cflag(cflag);
		if (__put_user (cflag, &p->c_cflag))
			return -EFAULT;
	}
	return ret;
}
135
/* Copy a Solaris termio in from userspace, translate c_cflag to the
 * Linux encoding, and apply it via a TCSETA-style ioctl on a
 * kernel-stack copy (hence the KERNEL_DS window). */
static int solaris_to_linux_termio(unsigned int fd, unsigned int cmd, u32 arg)
{
	int ret;
	struct solaris_termio s;
	mm_segment_t old_fs = get_fs();

	if (copy_from_user (&s, (struct solaris_termio __user *)A(arg), sizeof(struct solaris_termio)))
		return -EFAULT;
	s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, cmd, (unsigned long)&s);
	set_fs(old_fs);
	return ret;
}
150
/* TCGETS-style ioctl into a kernel buffer, then write the result out
 * in the Solaris termios layout with c_cflag re-encoded. */
static inline int linux_to_solaris_termios(unsigned int fd, unsigned int cmd, u32 arg)
{
	int ret;
	struct solaris_termios s;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, cmd, (unsigned long)&s);
	set_fs(old_fs);
	if (!ret) {
		struct solaris_termios __user *p = A(arg);
		/* NOTE(review): only 16 c_cc bytes are copied and 2 are
		 * cleared, so c_cc[18] of the 19-byte Solaris array is
		 * left untouched in userspace — confirm intent. */
		if (put_user (s.c_iflag, &p->c_iflag) ||
		    __put_user (s.c_oflag, &p->c_oflag) ||
		    __put_user (linux_to_solaris_cflag(s.c_cflag), &p->c_cflag) ||
		    __put_user (s.c_lflag, &p->c_lflag) ||
		    __copy_to_user (p->c_cc, s.c_cc, 16) ||
		    __clear_user (p->c_cc + 16, 2))
			return -EFAULT;
	}
	return ret;
}
172
173static int solaris_to_linux_termios(unsigned int fd, unsigned int cmd, u32 arg)
174{
175 int ret;
176 struct solaris_termios s;
177 struct solaris_termios __user *p = A(arg);
178 mm_segment_t old_fs = get_fs();
179
180 set_fs(KERNEL_DS);
181 ret = sys_ioctl(fd, TCGETS, (unsigned long)&s);
182 set_fs(old_fs);
183 if (ret) return ret;
184 if (put_user (s.c_iflag, &p->c_iflag) ||
185 __put_user (s.c_oflag, &p->c_oflag) ||
186 __put_user (s.c_cflag, &p->c_cflag) ||
187 __put_user (s.c_lflag, &p->c_lflag) ||
188 __copy_from_user (s.c_cc, p->c_cc, 16))
189 return -EFAULT;
190 s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
191 set_fs(KERNEL_DS);
192 ret = sys_ioctl(fd, cmd, (unsigned long)&s);
193 set_fs(old_fs);
194 return ret;
195}
196
/* Solaris 'T' (terminal) ioctl group: map the Solaris command number
 * (low byte) onto the corresponding Linux tty ioctl, converting the
 * termio/termios payload where the layouts differ. */
static inline int solaris_T(unsigned int fd, unsigned int cmd, u32 arg)
{
	switch (cmd & 0xff) {
	case 1: /* TCGETA */
		return linux_to_solaris_termio(fd, TCGETA, arg);
	case 2: /* TCSETA */
		return solaris_to_linux_termio(fd, TCSETA, arg);
	case 3: /* TCSETAW */
		return solaris_to_linux_termio(fd, TCSETAW, arg);
	case 4: /* TCSETAF */
		return solaris_to_linux_termio(fd, TCSETAF, arg);
	case 5: /* TCSBRK */
		return sys_ioctl(fd, TCSBRK, arg);
	case 6: /* TCXONC */
		return sys_ioctl(fd, TCXONC, arg);
	case 7: /* TCFLSH */
		return sys_ioctl(fd, TCFLSH, arg);
	case 13: /* TCGETS */
		return linux_to_solaris_termios(fd, TCGETS, arg);
	case 14: /* TCSETS */
		return solaris_to_linux_termios(fd, TCSETS, arg);
	case 15: /* TCSETSW */
		return solaris_to_linux_termios(fd, TCSETSW, arg);
	case 16: /* TCSETSF */
		return solaris_to_linux_termios(fd, TCSETSF, arg);
	case 103: /* TIOCSWINSZ */
		return sys_ioctl(fd, TIOCSWINSZ, arg);
	case 104: /* TIOCGWINSZ */
		return sys_ioctl(fd, TIOCGWINSZ, arg);
	}
	return -ENOSYS;
}
229
230static inline int solaris_t(unsigned int fd, unsigned int cmd, u32 arg)
231{
232 switch (cmd & 0xff) {
233 case 20: /* TIOCGPGRP */
234 return sys_ioctl(fd, TIOCGPGRP, arg);
235 case 21: /* TIOCSPGRP */
236 return sys_ioctl(fd, TIOCSPGRP, arg);
237 }
238 return -ENOSYS;
239}
240
241/* }}} */
242
243/* A pseudo STREAMS support {{{ */
244
/* STREAMS I_STR request block: embedded command, timeout, payload
 * length and a 32-bit user pointer to the payload. */
struct strioctl {
	int cmd, timeout, len;
	u32 data;
};
249
/* Socket parameters reported/accepted by the sockmod SI_* ioctls. */
struct solaris_si_sockparams {
	int sp_family;
	int sp_type;
	int sp_protocol;
};

/* Old-style SI_GETUDATA reply (no sockparams trailer). */
struct solaris_o_si_udata {
	int tidusize;
	int addrsize;
	int optsize;
	int etsdusize;
	int servtype;
	int so_state;
	int so_options;
	int tsdusize;
};

/* Current SI_GETUDATA reply: o_si_udata plus the socket parameters. */
struct solaris_si_udata {
	int tidusize;
	int addrsize;
	int optsize;
	int etsdusize;
	int servtype;
	int so_state;
	int so_options;
	int tsdusize;
	struct solaris_si_sockparams sockparams;
};
278
/* Indices into module_table for the emulated STREAMS modules. */
#define SOLARIS_MODULE_TIMOD    0
#define SOLARIS_MODULE_SOCKMOD  1
#define SOLARIS_MODULE_MAX      2

static struct module_info {
	const char *name;
	/* can be expanded further if needed */
} module_table[ SOLARIS_MODULE_MAX + 1 ] = {
	/* the ordering here must match the module numbers above! */
	{ "timod" },
	{ "sockmod" },
	{ NULL }	/* sentinel for name-scan loops (I_PUSH) */
};
292
/* Emulate the sockmod SI_* STREAMS ioctls on a socket fd.  Returns
 * TLI error codes (TBADF/TSYSERR/...), optionally with an errno in
 * the high byte, rather than negative errnos. */
static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg)
{
	struct inode *ino;
	/* I wonder which of these tests are superfluous... --patrik */
	/* NOTE(review): fd is not range-checked against max_fds, and
	 * 'ino' is still used after file_lock is dropped — both look
	 * fragile; confirm against the callers' guarantees. */
	spin_lock(&current->files->file_lock);
	if (! current->files->fd[fd] ||
	    ! current->files->fd[fd]->f_dentry ||
	    ! (ino = current->files->fd[fd]->f_dentry->d_inode) ||
	    ! S_ISSOCK(ino->i_mode)) {
		spin_unlock(&current->files->file_lock);
		return TBADF;
	}
	spin_unlock(&current->files->file_lock);

	switch (cmd & 0xff) {
	case 109: /* SI_SOCKPARAMS */
	{
		struct solaris_si_sockparams si;
		if (copy_from_user (&si, A(arg), sizeof(si)))
			return (EFAULT << 8) | TSYSERR;

		/* Should we modify socket ino->socket_i.ops and type? */
		return 0;
	}
	case 110: /* SI_GETUDATA */
	{
		int etsdusize, servtype;
		struct solaris_si_udata __user *p = A(arg);
		/* Stream sockets report byte-stream service (T_COTS_ORD
		 * style); everything else is treated as connectionless. */
		switch (SOCKET_I(ino)->type) {
		case SOCK_STREAM:
			etsdusize = 1;
			servtype = 2;
			break;
		default:
			etsdusize = -2;
			servtype = 3;
			break;
		}
		/* NOTE(review): sp_protocol is filled with ops->family,
		 * same as sp_family — looks like a slip; confirm. */
		if (put_user(16384, &p->tidusize) ||
		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
		    __put_user(-1, &p->optsize) ||
		    __put_user(etsdusize, &p->etsdusize) ||
		    __put_user(servtype, &p->servtype) ||
		    __put_user(0, &p->so_state) ||
		    __put_user(0, &p->so_options) ||
		    __put_user(16384, &p->tsdusize) ||
		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
		    __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
			return (EFAULT << 8) | TSYSERR;
		return 0;
	}
	case 101: /* O_SI_GETUDATA */
	{
		int etsdusize, servtype;
		struct solaris_o_si_udata __user *p = A(arg);
		switch (SOCKET_I(ino)->type) {
		case SOCK_STREAM:
			etsdusize = 1;
			servtype = 2;
			break;
		default:
			etsdusize = -2;
			servtype = 3;
			break;
		}
		if (put_user(16384, &p->tidusize) ||
		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
		    __put_user(-1, &p->optsize) ||
		    __put_user(etsdusize, &p->etsdusize) ||
		    __put_user(servtype, &p->servtype) ||
		    __put_user(0, &p->so_state) ||
		    __put_user(0, &p->so_options) ||
		    __put_user(16384, &p->tsdusize))
			return (EFAULT << 8) | TSYSERR;
		return 0;
	}
	/* The remaining SI_* commands are recognized but unsupported. */
	case 102: /* SI_SHUTDOWN */
	case 103: /* SI_LISTEN */
	case 104: /* SI_SETMYNAME */
	case 105: /* SI_SETPEERNAME */
	case 106: /* SI_GETINTRANSIT */
	case 107: /* SI_TCL_LINK */
	case 108: /* SI_TCL_UNLINK */
		;
	}
	return TNOTSUPPORT;
}
381
/* Emulate the timod TI_* STREAMS ioctls: each request is pushed as a
 * TLI control message (timod_putmsg) and the acknowledgement is read
 * back (timod_getmsg).  Like solaris_sockmod, returns TLI error codes
 * with an optional errno in the high byte. */
static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
                                    int len, int __user *len_p)
{
	int ret;
		
	switch (cmd & 0xff) {
	case 141: /* TI_OPTMGMT */
	{
		int i;
		u32 prim;
		SOLD("TI_OPMGMT entry");
		ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
		SOLD("timod_putmsg() returned");
		if (ret)
			return (-ret << 8) | TSYSERR;
		i = MSG_HIPRI;
		SOLD("calling timod_getmsg()");
		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
		SOLD("timod_getmsg() returned");
		if (ret)
			return (-ret << 8) | TSYSERR;
		SOLD("ret ok");
		/* First word of the reply is the TLI primitive type. */
		if (get_user(prim, (u32 __user *)A(arg)))
			return (EFAULT << 8) | TSYSERR;
		SOLD("got prim");
		if (prim == T_ERROR_ACK) {
			u32 tmp, tmp2;
			SOLD("prim is T_ERROR_ACK");
			/* Words 2/3 hold the UNIX and TLI error codes. */
			if (get_user(tmp, (u32 __user *)A(arg)+3) ||
			    get_user(tmp2, (u32 __user *)A(arg)+2))
				return (EFAULT << 8) | TSYSERR;
			return (tmp2 << 8) | tmp;
		}
		SOLD("TI_OPMGMT return 0");
		return 0;
	}
	case 142: /* TI_BIND */
	{
		int i;
		u32 prim;
		SOLD("TI_BIND entry");
		ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
		SOLD("timod_putmsg() returned");
		if (ret)
			return (-ret << 8) | TSYSERR;
		len = 1024; /* Solaris allows arbitrary return size */
		i = MSG_HIPRI;
		SOLD("calling timod_getmsg()");
		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
		SOLD("timod_getmsg() returned");
		if (ret)
			return (-ret << 8) | TSYSERR;
		SOLD("ret ok");
		if (get_user(prim, (u32 __user *)A(arg)))
			return (EFAULT << 8) | TSYSERR;
		SOLD("got prim");
		if (prim == T_ERROR_ACK) {
			u32 tmp, tmp2;
			SOLD("prim is T_ERROR_ACK");
			if (get_user(tmp, (u32 __user *)A(arg)+3) ||
			    get_user(tmp2, (u32 __user *)A(arg)+2))
				return (EFAULT << 8) | TSYSERR;
			return (tmp2 << 8) | tmp;
		}
		SOLD("no ERROR_ACK requested");
		if (prim != T_OK_ACK)
			return TBADSEQ;
		SOLD("OK_ACK requested");
		/* After the OK_ACK, a second message carries the bind
		 * acknowledgement proper. */
		i = MSG_HIPRI;
		SOLD("calling timod_getmsg()");
		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
		SOLD("timod_getmsg() returned");
		if (ret)
			return (-ret << 8) | TSYSERR;
		SOLD("TI_BIND return ok");
		return 0;
	}
	/* Recognized but unsupported TI_* commands. */
	case 140: /* TI_GETINFO */
	case 143: /* TI_UNBIND */
	case 144: /* TI_GETMYNAME */
	case 145: /* TI_GETPEERNAME */
	case 146: /* TI_SETMYNAME */
	case 147: /* TI_SETPEERNAME */
		;
	}
	return TNOTSUPPORT;
}
469
470static inline int solaris_S(struct file *filp, unsigned int fd, unsigned int cmd, u32 arg)
471{
472 char *p;
473 int ret;
474 mm_segment_t old_fs;
475 struct strioctl si;
476 struct inode *ino;
477 struct sol_socket_struct *sock;
478 struct module_info *mi;
479
480 ino = filp->f_dentry->d_inode;
481 if (!S_ISSOCK(ino->i_mode))
482 return -EBADF;
483 sock = filp->private_data;
484 if (! sock) {
485 printk("solaris_S: NULL private_data\n");
486 return -EBADF;
487 }
488 if (sock->magic != SOLARIS_SOCKET_MAGIC) {
489 printk("solaris_S: invalid magic\n");
490 return -EBADF;
491 }
492
493
494 switch (cmd & 0xff) {
495 case 1: /* I_NREAD */
496 return -ENOSYS;
497 case 2: /* I_PUSH */
498 {
499 p = getname (A(arg));
500 if (IS_ERR (p))
501 return PTR_ERR(p);
502 ret = -EINVAL;
503 for (mi = module_table; mi->name; mi++) {
504 if (strcmp(mi->name, p) == 0) {
505 sol_module m;
506 if (sock->modcount >= MAX_NR_STREAM_MODULES) {
507 ret = -ENXIO;
508 break;
509 }
510 m = (sol_module) (mi - module_table);
511 sock->module[sock->modcount++] = m;
512 ret = 0;
513 break;
514 }
515 }
516 putname (p);
517 return ret;
518 }
519 case 3: /* I_POP */
520 if (sock->modcount <= 0) return -EINVAL;
521 sock->modcount--;
522 return 0;
523 case 4: /* I_LOOK */
524 {
525 const char *p;
526 if (sock->modcount <= 0) return -EINVAL;
527 p = module_table[(unsigned)sock->module[sock->modcount]].name;
528 if (copy_to_user (A(arg), p, strlen(p)))
529 return -EFAULT;
530 return 0;
531 }
532 case 5: /* I_FLUSH */
533 return 0;
534 case 8: /* I_STR */
535 if (copy_from_user(&si, A(arg), sizeof(struct strioctl)))
536 return -EFAULT;
537 /* We ignore what module is actually at the top of stack. */
538 switch ((si.cmd >> 8) & 0xff) {
539 case 'I':
540 return solaris_sockmod(fd, si.cmd, si.data);
541 case 'T':
542 return solaris_timod(fd, si.cmd, si.data, si.len,
543 &((struct strioctl __user *)A(arg))->len);
544 default:
545 return solaris_ioctl(fd, si.cmd, si.data);
546 }
547 case 9: /* I_SETSIG */
548 return sys_ioctl(fd, FIOSETOWN, current->pid);
549 case 10: /* I_GETSIG */
550 old_fs = get_fs();
551 set_fs(KERNEL_DS);
552 sys_ioctl(fd, FIOGETOWN, (unsigned long)&ret);
553 set_fs(old_fs);
554 if (ret == current->pid) return 0x3ff;
555 else return -EINVAL;
556 case 11: /* I_FIND */
557 {
558 int i;
559 p = getname (A(arg));
560 if (IS_ERR (p))
561 return PTR_ERR(p);
562 ret = 0;
563 for (i = 0; i < sock->modcount; i++) {
564 unsigned m = sock->module[i];
565 if (strcmp(module_table[m].name, p) == 0) {
566 ret = 1;
567 break;
568 }
569 }
570 putname (p);
571 return ret;
572 }
573 case 19: /* I_SWROPT */
574 case 32: /* I_SETCLTIME */
575 return 0; /* Lie */
576 }
577 return -ENOSYS;
578}
579
/* Solaris 's' (socket) ioctl group: watermark calls are faked, the
 * rest forward to the Linux equivalents. */
static inline int solaris_s(unsigned int fd, unsigned int cmd, u32 arg)
{
	switch (cmd & 0xff) {
	case 0: /* SIOCSHIWAT */
	case 2: /* SIOCSLOWAT */
		return 0; /* We don't support them */
	case 1: /* SIOCGHIWAT */
	case 3: /* SIOCGLOWAT */
		if (put_user (0, (u32 __user *)A(arg)))
			return -EFAULT;
		return 0; /* Lie */
	case 7: /* SIOCATMARK */
		return sys_ioctl(fd, SIOCATMARK, arg);
	case 8: /* SIOCSPGRP */
		return sys_ioctl(fd, SIOCSPGRP, arg);
	case 9: /* SIOCGPGRP */
		return sys_ioctl(fd, SIOCGPGRP, arg);
	}
	return -ENOSYS;
}
600
601static inline int solaris_r(unsigned int fd, unsigned int cmd, u32 arg)
602{
603 switch (cmd & 0xff) {
604 case 10: /* SIOCADDRT */
605 return compat_sys_ioctl(fd, SIOCADDRT, arg);
606 case 11: /* SIOCDELRT */
607 return compat_sys_ioctl(fd, SIOCDELRT, arg);
608 }
609 return -ENOSYS;
610}
611
/* Solaris 'i' (network interface) ioctl group: most commands map 1:1
 * onto Linux SIOC* ioctls via the 32-bit compat path; the name/peer
 * and interface-count queries are emulated by hand. */
static inline int solaris_i(unsigned int fd, unsigned int cmd, u32 arg)
{
	switch (cmd & 0xff) {
	case 12: /* SIOCSIFADDR */
		return compat_sys_ioctl(fd, SIOCSIFADDR, arg);
	case 13: /* SIOCGIFADDR */
		return compat_sys_ioctl(fd, SIOCGIFADDR, arg);
	case 14: /* SIOCSIFDSTADDR */
		return compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
	case 15: /* SIOCGIFDSTADDR */
		return compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
	case 16: /* SIOCSIFFLAGS */
		return compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
	case 17: /* SIOCGIFFLAGS */
		return compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
	case 18: /* SIOCSIFMEM */
		return compat_sys_ioctl(fd, SIOCSIFMEM, arg);
	case 19: /* SIOCGIFMEM */
		return compat_sys_ioctl(fd, SIOCGIFMEM, arg);
	case 20: /* SIOCGIFCONF */
		return compat_sys_ioctl(fd, SIOCGIFCONF, arg);
	case 21: /* SIOCSIFMTU */
		return compat_sys_ioctl(fd, SIOCSIFMTU, arg);
	case 22: /* SIOCGIFMTU */
		return compat_sys_ioctl(fd, SIOCGIFMTU, arg);
	case 23: /* SIOCGIFBRDADDR */
		return compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
	case 24: /* SIOCSIFBRDADDR */
		return compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
	case 25: /* SIOCGIFNETMASK */
		return compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
	case 26: /* SIOCSIFNETMASK */
		return compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
	case 27: /* SIOCGIFMETRIC */
		return compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
	case 28: /* SIOCSIFMETRIC */
		return compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
	case 30: /* SIOCSARP */
		return compat_sys_ioctl(fd, SIOCSARP, arg);
	case 31: /* SIOCGARP */
		return compat_sys_ioctl(fd, SIOCGARP, arg);
	case 32: /* SIOCDARP */
		return compat_sys_ioctl(fd, SIOCDARP, arg);
	case 52: /* SIOCGETNAME */
	case 53: /* SIOCGETPEER */
	{
		/* Emulated via getsockname/getpeername into a kernel
		 * buffer (hence KERNEL_DS), then copied to the user. */
		struct sockaddr uaddr;
		int uaddr_len = sizeof(struct sockaddr), ret;
		long args[3];
		mm_segment_t old_fs = get_fs();
		int (*sys_socketcall)(int, unsigned long *) =
			(int (*)(int, unsigned long *))SYS(socketcall);

		args[0] = fd; args[1] = (long)&uaddr; args[2] = (long)&uaddr_len;
		set_fs(KERNEL_DS);
		ret = sys_socketcall(((cmd & 0xff) == 52) ? SYS_GETSOCKNAME : SYS_GETPEERNAME,
					args);
		set_fs(old_fs);
		if (ret >= 0) {
			if (copy_to_user(A(arg), &uaddr, uaddr_len))
				return -EFAULT;
		}
		return ret;
	}
#if 0
	case 86: /* SIOCSOCKSYS */
		return socksys_syscall(fd, arg);
#endif
	case 87: /* SIOCGIFNUM */
	{
		/* Count the registered network devices. */
		struct net_device *d;
		int i = 0;

		read_lock_bh(&dev_base_lock);
		for (d = dev_base; d; d = d->next) i++;
		read_unlock_bh(&dev_base_lock);

		if (put_user (i, (int __user *)A(arg)))
			return -EFAULT;
		return 0;
	}
	}
	return -ENOSYS;
}
696
/* Solaris 'm' (magnetic tape) ioctl group: only MTIOCTOP/MTIOCGET are
 * forwarded; the rest have no Linux counterpart. */
static int solaris_m(unsigned int fd, unsigned int cmd, u32 arg)
{
	int ret;

	switch (cmd & 0xff) {
	case 1: /* MTIOCTOP */
		/* NOTE(review): this passes the address of the local
		 * 'arg' (which holds the user pointer value) rather
		 * than the user pointer itself — looks like it should
		 * be A(arg); confirm against the mt driver. */
		ret = sys_ioctl(fd, MTIOCTOP, (unsigned long)&arg);
		break;
	case 2: /* MTIOCGET */
		/* NOTE(review): same &arg concern as MTIOCTOP above. */
		ret = sys_ioctl(fd, MTIOCGET, (unsigned long)&arg);
		break;
	case 3: /* MTIOCGETDRIVETYPE */
	case 4: /* MTIOCPERSISTENT */
	case 5: /* MTIOCPERSISTENTSTATUS */
	case 6: /* MTIOCLRERR */
	case 7: /* MTIOCGUARANTEEDORDER */
	case 8: /* MTIOCRESERVE */
	case 9: /* MTIOCRELEASE */
	case 10: /* MTIOCFORCERESERVE */
	case 13: /* MTIOCSTATE */
	case 14: /* MTIOCREADIGNOREILI */
	case 15: /* MTIOCREADIGNOREEOFS */
	case 16: /* MTIOCSHORTFMK */
	default:
		ret = -ENOSYS; /* linux doesn't support these */
		break;
	};

	return ret;
}
727
728static int solaris_O(unsigned int fd, unsigned int cmd, u32 arg)
729{
730 int ret = -EINVAL;
731
732 switch (cmd & 0xff) {
733 case 1: /* OPROMGETOPT */
734 ret = sys_ioctl(fd, OPROMGETOPT, arg);
735 break;
736 case 2: /* OPROMSETOPT */
737 ret = sys_ioctl(fd, OPROMSETOPT, arg);
738 break;
739 case 3: /* OPROMNXTOPT */
740 ret = sys_ioctl(fd, OPROMNXTOPT, arg);
741 break;
742 case 4: /* OPROMSETOPT2 */
743 ret = sys_ioctl(fd, OPROMSETOPT2, arg);
744 break;
745 case 5: /* OPROMNEXT */
746 ret = sys_ioctl(fd, OPROMNEXT, arg);
747 break;
748 case 6: /* OPROMCHILD */
749 ret = sys_ioctl(fd, OPROMCHILD, arg);
750 break;
751 case 7: /* OPROMGETPROP */
752 ret = sys_ioctl(fd, OPROMGETPROP, arg);
753 break;
754 case 8: /* OPROMNXTPROP */
755 ret = sys_ioctl(fd, OPROMNXTPROP, arg);
756 break;
757 case 9: /* OPROMU2P */
758 ret = sys_ioctl(fd, OPROMU2P, arg);
759 break;
760 case 10: /* OPROMGETCONS */
761 ret = sys_ioctl(fd, OPROMGETCONS, arg);
762 break;
763 case 11: /* OPROMGETFBNAME */
764 ret = sys_ioctl(fd, OPROMGETFBNAME, arg);
765 break;
766 case 12: /* OPROMGETBOOTARGS */
767 ret = sys_ioctl(fd, OPROMGETBOOTARGS, arg);
768 break;
769 case 13: /* OPROMGETVERSION */
770 case 14: /* OPROMPATH2DRV */
771 case 15: /* OPROMDEV2PROMNAME */
772 case 16: /* OPROMPROM2DEVNAME */
773 case 17: /* OPROMGETPROPLEN */
774 default:
775 ret = -EINVAL;
776 break;
777 };
778 return ret;
779}
780
781/* }}} */
782
/* Top-level Solaris ioctl(2) dispatcher: route by the ASCII "group"
 * letter in bits 8..15 of the command to the per-group translators
 * above.  Runs under the BKL. */
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
{
	struct file *filp;
	int error = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	lock_kernel();
	error = -EFAULT;
	switch ((cmd >> 8) & 0xff) {
	case 'S': error = solaris_S(filp, fd, cmd, arg); break;
	case 'T': error = solaris_T(fd, cmd, arg); break;
	case 'i': error = solaris_i(fd, cmd, arg); break;
	case 'r': error = solaris_r(fd, cmd, arg); break;
	case 's': error = solaris_s(fd, cmd, arg); break;
	case 't': error = solaris_t(fd, cmd, arg); break;
	case 'f': error = sys_ioctl(fd, cmd, arg); break;
	case 'm': error = solaris_m(fd, cmd, arg); break;
	case 'O': error = solaris_O(fd, cmd, arg); break;
	default:
		error = -ENOSYS;
		break;
	}
	unlock_kernel();
	fput(filp);
out:
	/* Unknown commands are logged with a printable group letter and
	 * reported to the caller as EINVAL rather than ENOSYS. */
	if (error == -ENOSYS) {
		unsigned char c = cmd>>8;

		if (c < ' ' || c > 126) c = '.';
		printk("solaris_ioctl: Unknown cmd fd(%d) cmd(%08x '%c') arg(%08x)\n",
		       (int)fd, (unsigned int)cmd, c, (unsigned int)arg);
		error = -EINVAL;
	}
	return error;
}
diff --git a/arch/sparc64/solaris/ipc.c b/arch/sparc64/solaris/ipc.c
new file mode 100644
index 000000000000..8cef5fd57b2e
--- /dev/null
+++ b/arch/sparc64/solaris/ipc.c
@@ -0,0 +1,127 @@
1/* $Id: ipc.c,v 1.5 1999/12/09 00:41:00 davem Exp $
2 * ipc.c: Solaris IPC emulation
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/smp_lock.h>
10#include <linux/wait.h>
11#include <linux/mm.h>
12#include <linux/shm.h>
13#include <linux/sem.h>
14#include <linux/msg.h>
15
16#include <asm/uaccess.h>
17#include <asm/string.h>
18#include <asm/ipc.h>
19
20#include "conv.h"
21
/* Solaris struct ipc_perm, 32-bit ABI layout. */
struct solaris_ipc_perm {
	s32 uid;	/* owner's uid */
	s32 gid;	/* owner's gid */
	s32 cuid;	/* creator's uid */
	s32 cgid;	/* creator's gid */
	u32 mode;	/* access modes */
	u32 seq;	/* slot usage sequence number */
	int key;	/* IPC key */
	s32 pad[4];	/* reserved by Solaris */
};
32
/* Solaris struct shmid_ds, 32-bit ABI layout.  Only the fields copied
 * in/out by solaris_shmsys() below are meaningful to the emulation;
 * the rest exist purely to keep offsets right. */
struct solaris_shmid_ds {
	struct solaris_ipc_perm shm_perm;	/* ownership/permissions */
	int shm_segsz;				/* segment size in bytes */
	u32 shm_amp;				/* (kernel-internal on Solaris) */
	unsigned short shm_lkcnt;		/* lock count */
	char __padxx[2];
	s32 shm_lpid;				/* pid of last shmop */
	s32 shm_cpid;				/* pid of creator */
	u32 shm_nattch;				/* number of attaches */
	u32 shm_cnattch;			/* in-memory attaches */
	s32 shm_atime;				/* last shmat time */
	s32 shm_pad1;
	s32 shm_dtime;				/* last shmdt time */
	s32 shm_pad2;
	s32 shm_ctime;				/* last change time */
	s32 shm_pad3;
	unsigned short shm_cv;			/* (kernel-internal on Solaris) */
	char shm_pad4[2];
	u32 shm_sptas;				/* (kernel-internal on Solaris) */
	s32 shm_pad5[2];
};
54
55asmlinkage long solaris_shmsys(int cmd, u32 arg1, u32 arg2, u32 arg3)
56{
57 int (*sys_ipc)(unsigned,int,int,unsigned long,void __user *,long) =
58 (int (*)(unsigned,int,int,unsigned long,void __user *,long))SYS(ipc);
59 mm_segment_t old_fs;
60 unsigned long raddr;
61 int ret;
62
63 switch (cmd) {
64 case 0: /* shmat */
65 old_fs = get_fs();
66 set_fs(KERNEL_DS);
67 ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, A(arg2), 0);
68 set_fs(old_fs);
69 if (ret >= 0) return (u32)raddr;
70 else return ret;
71 case 1: /* shmctl */
72 switch (arg2) {
73 case 3: /* SHM_LOCK */
74 case 4: /* SHM_UNLOCK */
75 return sys_ipc(SHMCTL, arg1, (arg2 == 3) ? SHM_LOCK : SHM_UNLOCK, 0, NULL, 0);
76 case 10: /* IPC_RMID */
77 return sys_ipc(SHMCTL, arg1, IPC_RMID, 0, NULL, 0);
78 case 11: /* IPC_SET */
79 {
80 struct shmid_ds s;
81 struct solaris_shmid_ds __user *p = A(arg3);
82
83 if (get_user (s.shm_perm.uid, &p->shm_perm.uid) ||
84 __get_user (s.shm_perm.gid, &p->shm_perm.gid) ||
85 __get_user (s.shm_perm.mode, &p->shm_perm.mode))
86 return -EFAULT;
87 old_fs = get_fs();
88 set_fs(KERNEL_DS);
89 ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
90 set_fs(old_fs);
91 return ret;
92 }
93 case 12: /* IPC_STAT */
94 {
95 struct shmid_ds s;
96 struct solaris_shmid_ds __user *p = A(arg3);
97
98 old_fs = get_fs();
99 set_fs(KERNEL_DS);
100 ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
101 set_fs(old_fs);
102 if (put_user (s.shm_perm.uid, &(p->shm_perm.uid)) ||
103 __put_user (s.shm_perm.gid, &(p->shm_perm.gid)) ||
104 __put_user (s.shm_perm.cuid, &(p->shm_perm.cuid)) ||
105 __put_user (s.shm_perm.cgid, &(p->shm_perm.cgid)) ||
106 __put_user (s.shm_perm.mode, &(p->shm_perm.mode)) ||
107 __put_user (s.shm_perm.seq, &(p->shm_perm.seq)) ||
108 __put_user (s.shm_perm.key, &(p->shm_perm.key)) ||
109 __put_user (s.shm_segsz, &(p->shm_segsz)) ||
110 __put_user (s.shm_lpid, &(p->shm_lpid)) ||
111 __put_user (s.shm_cpid, &(p->shm_cpid)) ||
112 __put_user (s.shm_nattch, &(p->shm_nattch)) ||
113 __put_user (s.shm_atime, &(p->shm_atime)) ||
114 __put_user (s.shm_dtime, &(p->shm_dtime)) ||
115 __put_user (s.shm_ctime, &(p->shm_ctime)))
116 return -EFAULT;
117 return ret;
118 }
119 default: return -EINVAL;
120 }
121 case 2: /* shmdt */
122 return sys_ipc(SHMDT, 0, 0, 0, A(arg1), 0);
123 case 3: /* shmget */
124 return sys_ipc(SHMGET, arg1, arg2, arg3, NULL, 0);
125 }
126 return -EINVAL;
127}
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
new file mode 100644
index 000000000000..15b4cfe07557
--- /dev/null
+++ b/arch/sparc64/solaris/misc.c
@@ -0,0 +1,784 @@
1/* $Id: misc.c,v 1.36 2002/02/09 19:49:31 davem Exp $
2 * misc.c: Miscellaneous syscall emulation for Solaris
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/config.h>
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/smp_lock.h>
11#include <linux/utsname.h>
12#include <linux/limits.h>
13#include <linux/mm.h>
14#include <linux/smp.h>
15#include <linux/mman.h>
16#include <linux/file.h>
17#include <linux/timex.h>
18#include <linux/major.h>
19#include <linux/compat.h>
20
21#include <asm/uaccess.h>
22#include <asm/string.h>
23#include <asm/oplib.h>
24#include <asm/idprom.h>
25#include <asm/smp.h>
26
27#include "conv.h"
28
29/* Conversion from Linux to Solaris errnos. 0-34 are identity mapped.
30 Some Linux errnos (EPROCLIM, EDOTDOT, ERREMOTE, EUCLEAN, ENOTNAM,
31 ENAVAIL, EISNAM, EREMOTEIO, ENOMEDIUM, EMEDIUMTYPE) have no Solaris
32 equivalents. I return EINVAL in that case, which is very wrong. If
33 someone suggest a better value for them, you're welcomed.
34 On the other side, Solaris ECANCELED and ENOTSUP have no Linux equivalents,
35 but that doesn't matter here. --jj */
/* Indexed by Linux errno, yields the Solaris errno to return. */
int solaris_err_table[] = {
/* 0 */  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
/* 10 */ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
/* 20 */ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
/* 30 */ 30, 31, 32, 33, 34, 22, 150, 149, 95, 96,
/* 40 */ 97, 98, 99, 120, 121, 122, 123, 124, 125, 126,
/* 50 */ 127, 128, 129, 130, 131, 132, 133, 134, 143, 144,
/* 60 */ 145, 146, 90, 78, 147, 148, 93, 22, 94, 49,
/* 70 */ 151, 66, 60, 62, 63, 35, 77, 36, 45, 46,
/* 80 */ 64, 22, 67, 68, 69, 70, 71, 74, 22, 82,
/* 90 */ 89, 92, 79, 81, 37, 38, 39, 40, 41, 42,
/* 100 */ 43, 44, 50, 51, 52, 53, 54, 55, 56, 57,
/* 110 */ 87, 61, 84, 65, 83, 80, 91, 22, 22, 22,
/* 120 */ 22, 22, 88, 86, 85, 22, 22,
};
51
/* Highest file descriptor + 1 accepted from Solaris binaries. */
#define SOLARIS_NR_OPEN	256

/*
 * Common worker for the Solaris mmap() flavours.
 *
 * Translates Solaris mmap semantics onto Linux do_mmap():
 *  - MAP_NORESERVE is warned about (first 5 times) and stripped;
 *  - mapping /dev/zero (MEM_MAJOR, minor 5) becomes MAP_ANONYMOUS;
 *  - addresses are kept below 0xf0000000 (32-bit task VA limit);
 *  - without _MAP_NEW (old-style mmap) success returns 0, not the
 *    mapped address.
 * Returns the 32-bit mapped address or a negative errno.
 */
static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 off)
{
	struct file *file = NULL;
	unsigned long retval, ret_type;

	/* Do we need it here? */
	set_personality(PER_SVR4);
	if (flags & MAP_NORESERVE) {
		static int cnt;

		if (cnt < 5) {
			printk("%s: unimplemented Solaris MAP_NORESERVE mmap() flag\n",
			       current->comm);
			cnt++;
		}
		flags &= ~MAP_NORESERVE;
	}
	retval = -EBADF;
	if(!(flags & MAP_ANONYMOUS)) {
		if(fd >= SOLARIS_NR_OPEN)
			goto out;
		file = fget(fd);
		if (!file)
			goto out;
		else {
			struct inode * inode = file->f_dentry->d_inode;
			/* Mapping /dev/zero is really an anonymous mapping. */
			if(imajor(inode) == MEM_MAJOR &&
			   iminor(inode) == 5) {
				flags |= MAP_ANONYMOUS;
				fput(file);
				file = NULL;
			}
		}
	}

	retval = -EINVAL;
	len = PAGE_ALIGN(len);
	if(!(flags & MAP_FIXED))
		addr = 0;
	else if (len > 0xf0000000UL || addr > 0xf0000000UL - len)
		goto out_putf;
	/* _MAP_NEW distinguishes new-style mmap (returns the address)
	 * from the old SunOS-style call (returns 0 on success). */
	ret_type = flags & _MAP_NEW;
	flags &= ~_MAP_NEW;

	down_write(&current->mm->mmap_sem);
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	retval = do_mmap(file,
			 (unsigned long) addr, (unsigned long) len,
			 (unsigned long) prot, (unsigned long) flags, off);
	up_write(&current->mm->mmap_sem);
	if(!ret_type)
		/* values >= 0xf0000000 are error codes, pass them through */
		retval = ((retval < 0xf0000000) ? 0 : retval);

out_putf:
	if (file)
		fput(file);
out:
	return (u32) retval;
}
113
114asmlinkage u32 solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
115{
116 return do_solaris_mmap(addr, len, prot, flags, fd, (u64) off);
117}
118
/* 64-bit-offset flavour of Solaris mmap().  Only the high half of the
 * offset fits in a register; the low half is fetched from the caller's
 * stack frame.  The frame offset differs depending on UREG_G1
 * (presumably it distinguishes the two Solaris syscall entry
 * conventions — TODO confirm against the syscall trap code). */
asmlinkage u32 solaris_mmap64(struct pt_regs *regs, u32 len, u32 prot, u32 flags, u32 fd, u32 offhi)
{
	u32 offlo;

	if (regs->u_regs[UREG_G1]) {
		if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
			return -EFAULT;
	} else {
		if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
			return -EFAULT;
	}
	/* addr comes in %i0 since regs occupies the first argument slot */
	return do_solaris_mmap((u32)regs->u_regs[UREG_I0], len, prot, flags, fd, (((u64)offhi)<<32)|offlo);
}
132
133asmlinkage int solaris_brk(u32 brk)
134{
135 int (*sunos_brk)(u32) = (int (*)(u32))SUNOS(17);
136
137 return sunos_brk(brk);
138}
139
/*
 * Copy a string into a fixed-size Solaris utsname field.
 *
 * @to/@to_size:     destination user buffer and its size
 * @from/@from_size: kernel source string and its size
 * @dotchop:         if set, terminate at the first '.' (strip domain)
 * @countfrom:       if set, copy at most from_size bytes, else to_size
 *
 * Always NUL-terminates inside the destination.  Returns 0 or -EFAULT.
 */
static int __set_utsfield(char __user *to, int to_size,
			  const char *from, int from_size,
			  int dotchop, int countfrom)
{
	int len = countfrom ? (to_size > from_size ?
			       from_size : to_size) : to_size;
	int off;

	if (copy_to_user(to, from, len))
		return -EFAULT;

	/* terminator goes after the copy, or on the last byte if full */
	off = len < to_size? len: len - 1;
	if (dotchop) {
		const char *p = strnchr(from, len, '.');
		if (p) off = p - from;
	}

	if (__put_user('\0', to + off))
		return -EFAULT;

	return 0;
}

/* Convenience wrapper deriving both sizes from the array arguments. */
#define set_utsfield(to, from, dotchop, countfrom) \
	__set_utsfield((to), sizeof(to), \
		       (from), sizeof(from), \
		       (dotchop), (countfrom))
167
/* Solaris old-style utsname: 8-char fields plus terminator. */
struct sol_uname {
	char sysname[9];
	char nodename[9];
	char release[9];
	char version[9];
	char machine[9];
};

/* Solaris new-style utsname: 256-char fields plus terminator. */
struct sol_utsname {
	char sysname[257];
	char nodename[257];
	char release[257];
	char version[257];
	char machine[257];
};
183
184static char *machine(void)
185{
186 switch (sparc_cpu_model) {
187 case sun4: return "sun4";
188 case sun4c: return "sun4c";
189 case sun4e: return "sun4e";
190 case sun4m: return "sun4m";
191 case sun4d: return "sun4d";
192 case sun4u: return "sun4u";
193 default: return "sparc";
194 }
195}
196
/* Return a Solaris-style platform identifier built from the PROM root
 * node's "name" property, with '/' and ' ' mapped to '_'.  @buffer must
 * hold at least 257 bytes.  Falls back to "sun4u" if the property is
 * missing or empty. */
static char *platform(char *buffer)
{
	int len;

	*buffer = 0;
	len = prom_getproperty(prom_root_node, "name", buffer, 256);
	if(len > 0)
		buffer[len] = 0;
	if (*buffer) {
		char *p;

		for (p = buffer; *p; p++)
			if (*p == '/' || *p == ' ') *p = '_';
		return buffer;
	}

	return "sun4u";
}
215
/* Return the hardware serial number from the PROM "options" node's
 * "system-board-serial#" property.  @buffer must hold at least 257
 * bytes.  Returns a fixed fake serial if the property is absent. */
static char *serial(char *buffer)
{
	int node = prom_getchild(prom_root_node);
	int len;

	node = prom_searchsiblings(node, "options");
	*buffer = 0;
	len = prom_getproperty(node, "system-board-serial#", buffer, 256);
	if(len > 0)
		buffer[len] = 0;
	if (!*buffer)
		return "4512348717234";
	else
		return buffer;
}
231
/* Solaris utssys() multiplexor: which==0 is old-style uname(); ustat
 * and fusers are unimplemented.  Fills a struct sol_uname at user
 * address @buf with partly invented values.  Returns 0 or -EFAULT. */
asmlinkage int solaris_utssys(u32 buf, u32 flags, int which, u32 buf2)
{
	struct sol_uname __user *v = A(buf);
	int err;

	switch (which) {
	case 0:	/* old uname */
		/* Let's cheat */
		err = set_utsfield(v->sysname, "SunOS", 1, 0);
		down_read(&uts_sem);
		err |= set_utsfield(v->nodename, system_utsname.nodename,
				    1, 1);
		up_read(&uts_sem);
		err |= set_utsfield(v->release, "2.6", 0, 0);
		err |= set_utsfield(v->version, "Generic", 0, 0);
		err |= set_utsfield(v->machine, machine(), 0, 0);
		return (err ? -EFAULT : 0);
	case 2: /* ustat */
		return -ENOSYS;
	case 3: /* fusers */
		return -ENOSYS;
	default:
		return -ENOSYS;
	}
}
257
/* Solaris new-style uname(): fill a struct sol_utsname at user address
 * @buf, pretending to be SunOS 5.6.  Returns 0 or -EFAULT. */
asmlinkage int solaris_utsname(u32 buf)
{
	struct sol_utsname __user *v = A(buf);
	int err;

	/* Why should we not lie a bit? */
	down_read(&uts_sem);
	err = set_utsfield(v->sysname, "SunOS", 0, 0);
	err |= set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
	err |= set_utsfield(v->release, "5.6", 0, 0);
	err |= set_utsfield(v->version, "Generic", 0, 0);
	err |= set_utsfield(v->machine, machine(), 0, 0);
	up_read(&uts_sem);

	return (err ? -EFAULT : 0);
}
274
#define SI_SYSNAME		1	/* return name of operating system */
#define SI_HOSTNAME		2	/* return name of node */
#define SI_RELEASE		3	/* return release of operating system */
#define SI_VERSION		4	/* return version field of utsname */
#define SI_MACHINE		5	/* return kind of machine */
#define SI_ARCHITECTURE		6	/* return instruction set arch */
#define SI_HW_SERIAL		7	/* return hardware serial number */
#define SI_HW_PROVIDER		8	/* return hardware manufacturer */
#define SI_SRPC_DOMAIN		9	/* return secure RPC domain */
#define SI_PLATFORM		513	/* return platform identifier */

/* Solaris sysinfo(): copy the requested system string into the user
 * buffer at @buf (at most @count bytes, always NUL-terminated) and
 * return the full string length including the terminator.
 * NOTE(review): count <= 0 makes "count - 1" underflow in the
 * truncation branch — confirm callers can't pass it. */
asmlinkage int solaris_sysinfo(int cmd, u32 buf, s32 count)
{
	char *p, *q, *r;
	char buffer[256];
	int len;

	/* Again, we cheat :)) */
	switch (cmd) {
	case SI_SYSNAME: r = "SunOS"; break;
	case SI_HOSTNAME:
		r = buffer + 256;
		/* copy nodename up to the first '.' (strip domain part) */
		down_read(&uts_sem);
		for (p = system_utsname.nodename, q = buffer;
		     q < r && *p && *p != '.'; *q++ = *p++);
		up_read(&uts_sem);
		*q = 0;
		r = buffer;
		break;
	case SI_RELEASE: r = "5.6"; break;
	case SI_MACHINE: r = machine(); break;
	case SI_ARCHITECTURE: r = "sparc"; break;
	case SI_HW_PROVIDER: r = "Sun_Microsystems"; break;
	case SI_HW_SERIAL: r = serial(buffer); break;
	case SI_PLATFORM: r = platform(buffer); break;
	case SI_SRPC_DOMAIN: r = ""; break;
	case SI_VERSION: r = "Generic"; break;
	default: return -EINVAL;
	}
	len = strlen(r) + 1;
	if (count < len) {
		/* truncate, but still NUL-terminate the user buffer */
		if (copy_to_user(A(buf), r, count - 1) ||
		    __put_user(0, (char __user *)A(buf) + count - 1))
			return -EFAULT;
	} else {
		if (copy_to_user(A(buf), r, len))
			return -EFAULT;
	}
	return len;
}
325
/* Solaris sysconf() request codes. */
#define	SOLARIS_CONFIG_NGROUPS			2
#define	SOLARIS_CONFIG_CHILD_MAX		3
#define	SOLARIS_CONFIG_OPEN_FILES		4
#define	SOLARIS_CONFIG_POSIX_VER		5
#define	SOLARIS_CONFIG_PAGESIZE			6
#define	SOLARIS_CONFIG_CLK_TCK			7
#define	SOLARIS_CONFIG_XOPEN_VER		8
#define	SOLARIS_CONFIG_PROF_TCK			10
#define	SOLARIS_CONFIG_NPROC_CONF		11
#define	SOLARIS_CONFIG_NPROC_ONLN		12
#define	SOLARIS_CONFIG_AIO_LISTIO_MAX		13
#define	SOLARIS_CONFIG_AIO_MAX			14
#define	SOLARIS_CONFIG_AIO_PRIO_DELTA_MAX	15
#define	SOLARIS_CONFIG_DELAYTIMER_MAX		16
#define	SOLARIS_CONFIG_MQ_OPEN_MAX		17
#define	SOLARIS_CONFIG_MQ_PRIO_MAX		18
#define	SOLARIS_CONFIG_RTSIG_MAX		19
#define	SOLARIS_CONFIG_SEM_NSEMS_MAX		20
#define	SOLARIS_CONFIG_SEM_VALUE_MAX		21
#define	SOLARIS_CONFIG_SIGQUEUE_MAX		22
#define	SOLARIS_CONFIG_SIGRT_MIN		23
#define	SOLARIS_CONFIG_SIGRT_MAX		24
#define	SOLARIS_CONFIG_TIMER_MAX		25
#define	SOLARIS_CONFIG_PHYS_PAGES		26
#define	SOLARIS_CONFIG_AVPHYS_PAGES		27

/* Solaris sysconf(): return the requested configuration value, or
 * -EINVAL for unknown/unsupported ids. */
asmlinkage int solaris_sysconf(int id)
{
	switch (id) {
	case SOLARIS_CONFIG_NGROUPS:	return NGROUPS_MAX;
	case SOLARIS_CONFIG_CHILD_MAX:	return CHILD_MAX;
	case SOLARIS_CONFIG_OPEN_FILES:	return OPEN_MAX;
	case SOLARIS_CONFIG_POSIX_VER:	return 199309;
	case SOLARIS_CONFIG_PAGESIZE:	return PAGE_SIZE;
	case SOLARIS_CONFIG_XOPEN_VER:	return 3;
	case SOLARIS_CONFIG_CLK_TCK:
	case SOLARIS_CONFIG_PROF_TCK:
		return sparc64_get_clock_tick(smp_processor_id());
#ifdef CONFIG_SMP	
	case SOLARIS_CONFIG_NPROC_CONF:	return NR_CPUS;
	case SOLARIS_CONFIG_NPROC_ONLN:	return num_online_cpus();
#else
	case SOLARIS_CONFIG_NPROC_CONF:	return 1;
	case SOLARIS_CONFIG_NPROC_ONLN:	return 1;
#endif
	case SOLARIS_CONFIG_SIGRT_MIN:		return 37;
	case SOLARIS_CONFIG_SIGRT_MAX:		return 44;
	case SOLARIS_CONFIG_PHYS_PAGES:
	case SOLARIS_CONFIG_AVPHYS_PAGES:
		{
			struct sysinfo s;
			
			si_meminfo(&s);
			/* NOTE(review): si_meminfo reports page counts here,
			 * so the extra >>= PAGE_SHIFT looks suspicious —
			 * confirm against si_meminfo's mem_unit semantics. */
			if (id == SOLARIS_CONFIG_PHYS_PAGES)
				return s.totalram >>= PAGE_SHIFT;
			else
				return s.freeram >>= PAGE_SHIFT;
		}
	/* XXX support these as well -jj */
	case SOLARIS_CONFIG_AIO_LISTIO_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_AIO_MAX:		return -EINVAL;
	case SOLARIS_CONFIG_AIO_PRIO_DELTA_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_DELAYTIMER_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_MQ_OPEN_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_MQ_PRIO_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_RTSIG_MAX:		return -EINVAL;
	case SOLARIS_CONFIG_SEM_NSEMS_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_SEM_VALUE_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_SIGQUEUE_MAX:	return -EINVAL;
	case SOLARIS_CONFIG_TIMER_MAX:		return -EINVAL;
	default: return -EINVAL;
	}
}
399
/* Solaris pgrpsys()/procids multiplexor: cmd selects getpgrp, setpgrp,
 * getsid, setsid, getpgid or setpgid, forwarded to the native
 * syscalls. */
asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid)
{
	int ret;

	switch (cmd) {
	case 0: /* getpgrp */
		return process_group(current);
	case 1: /* setpgrp */
		{
			int (*sys_setpgid)(pid_t,pid_t) =
				(int (*)(pid_t,pid_t))SYS(setpgid);

			/* can anyone explain me the difference between
			   Solaris setpgrp and setsid? */
			ret = sys_setpgid(0, 0);
			if (ret) return ret;
			/* Solaris setpgrp also detaches the controlling tty */
			current->signal->tty = NULL;
			return process_group(current);
		}
	case 2: /* getsid */
		{
			int (*sys_getsid)(pid_t) = (int (*)(pid_t))SYS(getsid);
			return sys_getsid(pid);
		}
	case 3: /* setsid */
		{
			int (*sys_setsid)(void) = (int (*)(void))SYS(setsid);
			return sys_setsid();
		}
	case 4: /* getpgid */
		{
			int (*sys_getpgid)(pid_t) = (int (*)(pid_t))SYS(getpgid);
			return sys_getpgid(pid);
		}
	case 5: /* setpgid */
		{
			int (*sys_setpgid)(pid_t,pid_t) =
				(int (*)(pid_t,pid_t))SYS(setpgid);
			return sys_setpgid(pid,pgid);
		}
	}
	return -EINVAL;
}
443
/* Solaris gettimeofday(): forward to the native syscall with a NULL
 * timezone.  @tim is a 32-bit user address; the cast through u64 just
 * widens it (the native call treats it as a user pointer). */
asmlinkage int solaris_gettimeofday(u32 tim)
{
	int (*sys_gettimeofday)(struct timeval *, struct timezone *) =
		(int (*)(struct timeval *, struct timezone *))SYS(gettimeofday);

	return sys_gettimeofday((struct timeval *)(u64)tim, NULL);
}
451
/* Solaris 32-bit rlimit encodes "infinity" and "saved" limits as
 * special values near INT_MAX; the 64-bit API uses values near ~0. */
#define RLIM_SOL_INFINITY32	0x7fffffff
#define RLIM_SOL_SAVED_MAX32	0x7ffffffe
#define RLIM_SOL_SAVED_CUR32	0x7ffffffd
#define RLIM_SOL_INFINITY	((u64)-3)
#define RLIM_SOL_SAVED_MAX	((u64)-2)
#define RLIM_SOL_SAVED_CUR	((u64)-1)
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
/* Solaris resource numbers that differ from Linux. */
#define RLIMIT_SOL_NOFILE	5
#define RLIMIT_SOL_VMEM		6

/* 32-bit rlimit pair as seen by Solaris binaries. */
struct rlimit32 {
	u32	rlim_cur;
	u32	rlim_max;
};

/* Solaris getrlimit(): translate the resource number, fetch the native
 * limits, then squeeze them into the Solaris 32-bit encoding. */
asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs ();
	int (*sys_getrlimit)(unsigned int, struct rlimit *) =
		(int (*)(unsigned int, struct rlimit *))SYS(getrlimit);

	if (resource > RLIMIT_SOL_VMEM)
		return -EINVAL;	
	switch (resource) {
	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
	default: break;
	}
	set_fs (KERNEL_DS);
	ret = sys_getrlimit(resource, &r);
	set_fs (old_fs);
	if (!ret) {
		if (r.rlim_cur == RLIM_INFINITY)
			r.rlim_cur = RLIM_SOL_INFINITY32;
		else if ((u64)r.rlim_cur > RLIM_SOL_INFINITY32)
			r.rlim_cur = RLIM_SOL_SAVED_CUR32;
		if (r.rlim_max == RLIM_INFINITY)
			r.rlim_max = RLIM_SOL_INFINITY32;
		else if ((u64)r.rlim_max > RLIM_SOL_INFINITY32)
			r.rlim_max = RLIM_SOL_SAVED_MAX32;
		ret = put_user (r.rlim_cur, &rlim->rlim_cur);
		ret |= __put_user (r.rlim_max, &rlim->rlim_max);
	}
	return ret;
}
499
/* Solaris setrlimit(): decode the Solaris 32-bit special values
 * (infinity / saved-cur / saved-max, resolved against the current
 * limits) and forward to the native setrlimit. */
asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 __user *rlim)
{
	struct rlimit r, rold;
	int ret;
	mm_segment_t old_fs = get_fs ();
	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
	int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
		(int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);

	if (resource > RLIMIT_SOL_VMEM)
		return -EINVAL;	
	switch (resource) {
	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
	default: break;
	}
	if (get_user (r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user (r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	set_fs (KERNEL_DS);
	ret = sys_getrlimit(resource, &rold);
	if (!ret) {
		if (r.rlim_cur == RLIM_SOL_INFINITY32)
			r.rlim_cur = RLIM_INFINITY;
		else if (r.rlim_cur == RLIM_SOL_SAVED_CUR32)
			r.rlim_cur = rold.rlim_cur;
		else if (r.rlim_cur == RLIM_SOL_SAVED_MAX32)
			r.rlim_cur = rold.rlim_max;
		if (r.rlim_max == RLIM_SOL_INFINITY32)
			r.rlim_max = RLIM_INFINITY;
		else if (r.rlim_max == RLIM_SOL_SAVED_CUR32)
			r.rlim_max = rold.rlim_cur;
		else if (r.rlim_max == RLIM_SOL_SAVED_MAX32)
			r.rlim_max = rold.rlim_max;
		ret = sys_setrlimit(resource, &r);
	}
	set_fs (old_fs);
	return ret;
}
540
/* 64-bit flavour of Solaris getrlimit(): like solaris_getrlimit() but
 * only infinity needs re-encoding. */
asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs ();
	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);

	if (resource > RLIMIT_SOL_VMEM)
		return -EINVAL;	
	switch (resource) {
	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
	default: break;
	}
	set_fs (KERNEL_DS);
	ret = sys_getrlimit(resource, &r);
	set_fs (old_fs);
	if (!ret) {
		if (r.rlim_cur == RLIM_INFINITY)
			r.rlim_cur = RLIM_SOL_INFINITY;
		if (r.rlim_max == RLIM_INFINITY)
			r.rlim_max = RLIM_SOL_INFINITY;
		ret = put_user (r.rlim_cur, &rlim->rlim_cur);
		ret |= __put_user (r.rlim_max, &rlim->rlim_max);
	}
	return ret;
}
569
/* 64-bit flavour of Solaris setrlimit(): decode the 64-bit special
 * values against the current limits and forward to the native
 * setrlimit. */
asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit r, rold;
	int ret;
	mm_segment_t old_fs = get_fs ();
	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
	int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
		(int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);

	if (resource > RLIMIT_SOL_VMEM)
		return -EINVAL;	
	switch (resource) {
	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
	default: break;
	}
	if (get_user (r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user (r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	set_fs (KERNEL_DS);
	ret = sys_getrlimit(resource, &rold);
	if (!ret) {
		if (r.rlim_cur == RLIM_SOL_INFINITY)
			r.rlim_cur = RLIM_INFINITY;
		else if (r.rlim_cur == RLIM_SOL_SAVED_CUR)
			r.rlim_cur = rold.rlim_cur;
		else if (r.rlim_cur == RLIM_SOL_SAVED_MAX)
			r.rlim_cur = rold.rlim_max;
		if (r.rlim_max == RLIM_SOL_INFINITY)
			r.rlim_max = RLIM_INFINITY;
		else if (r.rlim_max == RLIM_SOL_SAVED_CUR)
			r.rlim_max = rold.rlim_cur;
		else if (r.rlim_max == RLIM_SOL_SAVED_MAX)
			r.rlim_max = rold.rlim_max;
		ret = sys_setrlimit(resource, &r);
	}
	set_fs (old_fs);
	return ret;
}
610
/* Solaris struct ntptimeval, 32-bit ABI. */
struct sol_ntptimeval {
	struct compat_timeval time;
	s32 maxerror;
	s32 esterror;
};

/* Solaris struct timex, 32-bit ABI (subset of the Linux fields). */
struct sol_timex {
	u32 modes;
	s32 offset;
	s32 freq;
	s32 maxerror;
	s32 esterror;
	s32 status;
	s32 constant;
	s32 precision;
	s32 tolerance;
	s32 ppsfreq;
	s32 jitter;
	s32 shift;
	s32 stabil;
	s32 jitcnt;
	s32 calcnt;
	s32 errcnt;
	s32 stbcnt;
};

/* Solaris ntp_gettime(): run a no-op adjtimex() to sample the clock
 * state and copy the time/error fields out to the user struct.
 * Returns the adjtimex clock state, a negative errno, or the
 * nonzero put_user result on fault. */
asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval __user *ntp)
{
	int (*sys_adjtimex)(struct timex __user *) =
		(int (*)(struct timex __user *))SYS(adjtimex);
	struct timex t;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	t.modes = 0;	/* read-only query */
	ret = sys_adjtimex(&t);
	set_fs(old_fs);
	if (ret < 0)
		return ret;
	ret = put_user (t.time.tv_sec, &ntp->time.tv_sec);
	ret |= __put_user (t.time.tv_usec, &ntp->time.tv_usec);
	ret |= __put_user (t.maxerror, &ntp->maxerror);
	ret |= __put_user (t.esterror, &ntp->esterror);
	return ret;
}
657
/* Solaris ntp_adjtime(): copy the 32-bit timex in, run the native
 * adjtimex() under KERNEL_DS, and copy the full result back out.
 * NOTE(review): the get_user error accumulator is overwritten by the
 * sys_adjtimex() result, so input faults are silently ignored —
 * confirm this is the intended (bug-compatible) behavior. */
asmlinkage int solaris_ntp_adjtime(struct sol_timex __user *txp)
{
	int (*sys_adjtimex)(struct timex __user *) =
		(int (*)(struct timex __user *))SYS(adjtimex);
	struct timex t;
	int ret, err;
	mm_segment_t old_fs = get_fs();

	ret = get_user (t.modes, &txp->modes);
	ret |= __get_user (t.offset, &txp->offset);
	ret |= __get_user (t.freq, &txp->freq);
	ret |= __get_user (t.maxerror, &txp->maxerror);
	ret |= __get_user (t.esterror, &txp->esterror);
	ret |= __get_user (t.status, &txp->status);
	ret |= __get_user (t.constant, &txp->constant);
	set_fs(KERNEL_DS);
	ret = sys_adjtimex(&t);
	set_fs(old_fs);
	if (ret < 0)
		return ret;
	err = put_user (t.offset, &txp->offset);
	err |= __put_user (t.freq, &txp->freq);
	err |= __put_user (t.maxerror, &txp->maxerror);
	err |= __put_user (t.esterror, &txp->esterror);
	err |= __put_user (t.status, &txp->status);
	err |= __put_user (t.constant, &txp->constant);
	err |= __put_user (t.precision, &txp->precision);
	err |= __put_user (t.tolerance, &txp->tolerance);
	err |= __put_user (t.ppsfreq, &txp->ppsfreq);
	err |= __put_user (t.jitter, &txp->jitter);
	err |= __put_user (t.shift, &txp->shift);
	err |= __put_user (t.stabil, &txp->stabil);
	err |= __put_user (t.jitcnt, &txp->jitcnt);
	err |= __put_user (t.calcnt, &txp->calcnt);
	err |= __put_user (t.errcnt, &txp->errcnt);
	err |= __put_user (t.stbcnt, &txp->stbcnt);
	if (err)
		return -EFAULT;
	return ret;
}
698
/* Catch-all for unimplemented Solaris syscalls: log the syscall number
 * (%g1) and its first four arguments, then fail with -ENOSYS. */
asmlinkage int do_sol_unimplemented(struct pt_regs *regs)
{
	printk ("Unimplemented Solaris syscall %d %08x %08x %08x %08x\n", 
		(int)regs->u_regs[UREG_G1], 
		(int)regs->u_regs[UREG_I0],
		(int)regs->u_regs[UREG_I1],
		(int)regs->u_regs[UREG_I2],
		(int)regs->u_regs[UREG_I3]);
	return -ENOSYS;
}
709
/* Called from the Solaris syscall entry path to switch the current
 * task into the SVR4 personality. */
asmlinkage void solaris_register(void)
{
	set_personality(PER_SVR4);
}
714
715extern long solaris_to_linux_signals[], linux_to_solaris_signals[];
716
/* Exec domain covering the SVR4 personality: routes signal numbers
 * through the Solaris<->Linux translation tables defined in signal.c. */
struct exec_domain solaris_exec_domain = {
	.name =		"Solaris",
	.handler =	NULL,		/* no lcall7-style handler needed */
	.pers_low =	1,		/* PER_SVR4 personality */
	.pers_high =	1,
	.signal_map =	solaris_to_linux_signals,
	.signal_invmap =linux_to_solaris_signals,
	.module =	THIS_MODULE,
	.next =		NULL
};
727
728extern int init_socksys(void);
729
730#ifdef MODULE
731
732MODULE_AUTHOR("Jakub Jelinek (jj@ultra.linux.cz), Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)");
733MODULE_DESCRIPTION("Solaris binary emulation module");
734MODULE_LICENSE("GPL");
735
736#ifdef __sparc_v9__
737extern u32 tl0_solaris[8];
738#define update_ttable(x) \
739 tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \
740 __asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3]))
741#else
742#endif
743
744extern u32 solaris_sparc_syscall[];
745extern u32 solaris_syscall[];
746extern void cleanup_socksys(void);
747
748extern u32 entry64_personality_patch;
749
/* Module init: register the Solaris exec domain and socksys device,
 * patch the trap table so trap 3 jumps to the Solaris syscall entry,
 * and patch the personality offset into the 64-bit entry stub.
 * Returns 0 or the init_socksys() error. */
int init_module(void)
{
	int ret;

	SOLDD(("Solaris module at %p\n", solaris_sparc_syscall));
	register_exec_domain(&solaris_exec_domain);
	if ((ret = init_socksys())) {
		unregister_exec_domain(&solaris_exec_domain);
		return ret;
	}
	/* live-patch the trap table branch, then flush the I-cache line */
	update_ttable(solaris_sparc_syscall);
	entry64_personality_patch |=
		(offsetof(struct task_struct, personality) +
		 (sizeof(unsigned long) - 1));
	__asm__ __volatile__("membar #StoreStore; flush %0"
			     : : "r" (&entry64_personality_patch));
	return 0;
}
768
/* Module exit: restore the stock trap table entry and unregister the
 * socksys device and exec domain. */
void cleanup_module(void)
{
	update_ttable(solaris_syscall);
	cleanup_socksys();
	unregister_exec_domain(&solaris_exec_domain);
}
775
776#else
/* Built-in (non-module) init: register the exec domain and socksys.
 * NOTE(review): the init_socksys() return value is ignored here,
 * unlike the module path above — confirm that is intentional. */
int init_solaris_emul(void)
{
	register_exec_domain(&solaris_exec_domain);
	init_socksys();
	return 0;
}
783#endif
784
diff --git a/arch/sparc64/solaris/signal.c b/arch/sparc64/solaris/signal.c
new file mode 100644
index 000000000000..7fa2634e2085
--- /dev/null
+++ b/arch/sparc64/solaris/signal.c
@@ -0,0 +1,430 @@
1/* $Id: signal.c,v 1.7 2000/09/05 21:44:54 davem Exp $
2 * signal.c: Signal emulation for Solaris
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/types.h>
8#include <linux/smp_lock.h>
9#include <linux/errno.h>
10
11#include <asm/uaccess.h>
12#include <asm/svr4.h>
13#include <asm/string.h>
14
15#include "conv.h"
16#include "signal.h"
17
18#define _S(nr) (1L<<((nr)-1))
19
20#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
21
/*
 * Indexed by Linux (sparc) signal number 1..32; value is the Solaris
 * signal number, or -1 when there is no Solaris counterpart.
 */
22long linux_to_solaris_signals[] = {
23 0,
24 SOLARIS_SIGHUP, SOLARIS_SIGINT,
25 SOLARIS_SIGQUIT, SOLARIS_SIGILL,
26 SOLARIS_SIGTRAP, SOLARIS_SIGIOT,
27 SOLARIS_SIGEMT, SOLARIS_SIGFPE,
28 SOLARIS_SIGKILL, SOLARIS_SIGBUS,
29 SOLARIS_SIGSEGV, SOLARIS_SIGSYS,
30 SOLARIS_SIGPIPE, SOLARIS_SIGALRM,
31 SOLARIS_SIGTERM, SOLARIS_SIGURG,
32 SOLARIS_SIGSTOP, SOLARIS_SIGTSTP,
33 SOLARIS_SIGCONT, SOLARIS_SIGCLD,
34 SOLARIS_SIGTTIN, SOLARIS_SIGTTOU,
35 SOLARIS_SIGPOLL, SOLARIS_SIGXCPU,
36 SOLARIS_SIGXFSZ, SOLARIS_SIGVTALRM,
37 SOLARIS_SIGPROF, SOLARIS_SIGWINCH,
	/* NOTE(review): SOLARIS_SIGUSR1 appears twice (slots 29 and 30).
	 * Linux/sparc signal 29 is SIGLOST, which has no SOLARIS_* macro in
	 * signal.h — presumably a deliberate approximation, but confirm. */
38 SOLARIS_SIGUSR1, SOLARIS_SIGUSR1,
39 SOLARIS_SIGUSR2, -1,
40};
41
/*
 * Indexed by Solaris signal number 1..SOLARIS_NSIGNALS (44); value is the
 * Linux signal number, or -1 for Solaris-only signals (SIGWAITING, SIGLWP,
 * SIGFREEZE, SIGTHAW, SIGCANCEL and the realtime range).
 */
42long solaris_to_linux_signals[] = {
43 0,
44 SIGHUP, SIGINT, SIGQUIT, SIGILL,
45 SIGTRAP, SIGIOT, SIGEMT, SIGFPE,
46 SIGKILL, SIGBUS, SIGSEGV, SIGSYS,
47 SIGPIPE, SIGALRM, SIGTERM, SIGUSR1,
48 SIGUSR2, SIGCHLD, -1, SIGWINCH,
49 SIGURG, SIGPOLL, SIGSTOP, SIGTSTP,
50 SIGCONT, SIGTTIN, SIGTTOU, SIGVTALRM,
51 SIGPROF, SIGXCPU, SIGXFSZ, -1,
52 -1, -1, -1, -1,
53 -1, -1, -1, -1,
54 -1, -1, -1, -1,
55};
56
57static inline long mapsig(long sig)
58{
59 if ((unsigned long)sig > SOLARIS_NSIGNALS)
60 return -EINVAL;
61 return solaris_to_linux_signals[sig];
62}
63
/*
 * Solaris kill(2): translate the signal number, then hand off to the
 * Linux sys_kill fetched from the syscall table via SYS().
 */
64asmlinkage int solaris_kill(int pid, int sig)
65{
66 int (*sys_kill)(int,int) =
67 (int (*)(int,int))SYS(kill);
68 int s = mapsig(sig);
69
70 if (s < 0) return s;
71 return sys_kill(pid, s);
72}
73
/*
 * Install handler 'arg' (a 32-bit user address) for Linux signal 'sig'
 * and return the previous handler as a 32-bit value (or a negative errno).
 * 'one_shot' selects SysV signal() semantics (SA_ONESHOT|SA_NOMASK).
 * The set_fs(KERNEL_DS) dance lets us call the user-pointer-taking
 * sys_sigaction with kernel-stack structures.
 */
74static long sig_handler(int sig, u32 arg, int one_shot)
75{
76 struct sigaction sa, old;
77 int ret;
78 mm_segment_t old_fs = get_fs();
79 int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
80 (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
81
82 sigemptyset(&sa.sa_mask);
83 sa.sa_restorer = NULL;
84 sa.sa_handler = (__sighandler_t)A(arg);
85 sa.sa_flags = 0;
86 if (one_shot) sa.sa_flags = SA_ONESHOT | SA_NOMASK;
87 set_fs (KERNEL_DS);
88 ret = sys_sigaction(sig, (void __user *)&sa, (void __user *)&old);
89 set_fs (old_fs);
90 if (ret < 0) return ret;
91 return (u32)(unsigned long)old.sa_handler;
92}
93
/* Solaris signal(3C): one-shot SysV semantics. */
94static inline long solaris_signal(int sig, u32 arg)
95{
96 return sig_handler (sig, arg, 1);
97}
98
/*
 * Solaris sigset(3C): arg == 2 means SIG_HOLD (block the signal without
 * changing its handler); anything else installs a persistent handler and
 * unblocks the signal first.
 */
99static long solaris_sigset(int sig, u32 arg)
100{
101 if (arg != 2) /* HOLD */ {
102 spin_lock_irq(&current->sighand->siglock);
103 sigdelsetmask(&current->blocked, _S(sig));
104 recalc_sigpending();
105 spin_unlock_irq(&current->sighand->siglock);
106 return sig_handler (sig, arg, 0);
107 } else {
		/* SIGKILL/SIGSTOP stay unblockable (_BLOCKABLE mask). */
108 spin_lock_irq(&current->sighand->siglock);
109 sigaddsetmask(&current->blocked, (_S(sig) & ~_BLOCKABLE));
110 recalc_sigpending();
111 spin_unlock_irq(&current->sighand->siglock);
112 return 0;
113 }
114}
115
/* Solaris sighold(3C): block 'sig' — implemented as sigset(sig, HOLD). */
116static inline long solaris_sighold(int sig)
117{
118 return solaris_sigset(sig, 2);
119}
120
/* Solaris sigrelse(3C): remove 'sig' from the blocked mask. */
121static inline long solaris_sigrelse(int sig)
122{
123 spin_lock_irq(&current->sighand->siglock);
124 sigdelsetmask(&current->blocked, _S(sig));
125 recalc_sigpending();
126 spin_unlock_irq(&current->sighand->siglock);
127 return 0;
128}
129
/* Solaris sigignore(3C): set the disposition of 'sig' to SIG_IGN. */
130static inline long solaris_sigignore(int sig)
131{
132 return sig_handler(sig, (u32)(unsigned long)SIG_IGN, 0);
133}
134
/* Solaris sigpause(3C): not implemented — logs and returns -ENOSYS. */
135static inline long solaris_sigpause(int sig)
136{
137 printk ("Need to support solaris sigpause\n");
138 return -ENOSYS;
139}
140
/*
 * Multiplexed Solaris signal syscall: the low byte of 'sig' is the signal
 * number, the high bits select which libc-level primitive to emulate.
 */
141asmlinkage long solaris_sigfunc(int sig, u32 arg)
142{
143 int func = sig & ~0xff;
144
145 sig = mapsig(sig & 0xff);
146 if (sig < 0) return sig;
147 switch (func) {
148 case 0: return solaris_signal(sig, arg);
149 case 0x100: return solaris_sigset(sig, arg);
150 case 0x200: return solaris_sighold(sig);
151 case 0x400: return solaris_sigrelse(sig);
152 case 0x800: return solaris_sigignore(sig);
153 case 0x1000: return solaris_sigpause(sig);
154 }
155 return -EINVAL;
156}
157
/* Solaris userspace sigset_t layout: four 32-bit words (128 bits). */
158typedef struct {
159 u32 __sigbits[4];
160} sol_sigset_t;
161
/*
 * Convert the first two words of a Solaris signal bitmask 'p' into a Linux
 * sigset_t 'q'.  Walks bits 1..SOLARIS_NSIGNALS, translating each set bit;
 * returns -EINVAL if any set bit has no Linux equivalent.
 */
162static inline int mapin(u32 *p, sigset_t *q)
163{
164 int i;
165 u32 x;
166 int sig;
167
168 sigemptyset(q);
169 x = p[0];
170 for (i = 1; i <= SOLARIS_NSIGNALS; i++) {
171 if (x & 1) {
172 sig = solaris_to_linux_signals[i];
173 if (sig == -1)
174 return -EINVAL;
175 sigaddsetmask(q, (1L << (sig - 1)));
176 }
177 x >>= 1;
		/* After consuming word 0 (signals 1..32) switch to word 1. */
178 if (i == 32)
179 x = p[1];
180 }
181 return 0;
182}
183
184static inline int mapout(sigset_t *q, u32 *p)
185{
186 int i;
187 int sig;
188
189 p[0] = 0;
190 p[1] = 0;
191 for (i = 1; i <= 32; i++) {
192 if (sigismember(q, sigmask(i))) {
193 sig = linux_to_solaris_signals[i];
194 if (sig == -1)
195 return -EINVAL;
196 if (sig > 32)
197 p[1] |= 1L << (sig - 33);
198 else
199 p[0] |= 1L << (sig - 1);
200 }
201 }
202 return 0;
203}
204
/*
 * Solaris sigprocmask(2).  'in'/'out' are 32-bit user addresses of
 * sol_sigset_t (or 0 for NULL).  Solaris 'how' value 3 is SIG_SETMASK.
 * Masks are converted via mapin/mapout and the real syscall is invoked
 * with kernel-space buffers under set_fs(KERNEL_DS).
 */
205asmlinkage int solaris_sigprocmask(int how, u32 in, u32 out)
206{
207 sigset_t in_s, *ins, out_s, *outs;
208 mm_segment_t old_fs = get_fs();
209 int ret;
210 int (*sys_sigprocmask)(int,sigset_t __user *,sigset_t __user *) =
211 (int (*)(int,sigset_t __user *,sigset_t __user *))SYS(sigprocmask);
212
213 ins = NULL; outs = NULL;
214 if (in) {
215 u32 tmp[2];
216
		/* Only the first two words of the Solaris set are meaningful. */
217 if (copy_from_user (tmp, (void __user *)A(in), 2*sizeof(u32)))
218 return -EFAULT;
219 ins = &in_s;
220 if (mapin (tmp, ins)) return -EINVAL;
221 }
222 if (out) outs = &out_s;
223 set_fs (KERNEL_DS);
224 ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how,
225 (void __user *)ins, (void __user *)outs);
226 set_fs (old_fs);
227 if (ret) return ret;
228 if (out) {
229 u32 tmp[4];
230
231 tmp[2] = 0; tmp[3] = 0;
232 if (mapout (outs, tmp)) return -EINVAL;
233 if (copy_to_user((void __user *)A(out), tmp, 4*sizeof(u32)))
234 return -EFAULT;
235 }
236 return 0;
237}
238
/*
 * Helper for the Solaris sigsuspend path: fetch and translate the user's
 * Solaris mask and return the first word of the resulting Linux sigset
 * (the assembly caller performs the actual suspend).
 */
239asmlinkage long do_sol_sigsuspend(u32 mask)
240{
241 sigset_t s;
242 u32 tmp[2];
243
244 if (copy_from_user (tmp, (sol_sigset_t __user *)A(mask), 2*sizeof(u32)))
245 return -EFAULT;
246 if (mapin (tmp, &s)) return -EINVAL;
247 return (long)s.sig[0];
248}
249
/* Solaris userspace struct sigaction layout (32-bit handler pointer). */
250struct sol_sigaction {
251 int sa_flags;
252 u32 sa_handler;
253 u32 sa_mask[4];
254 int sa_resv[2];
255};
256
257asmlinkage int solaris_sigaction(int sig, u32 act, u32 old)
258{
259 u32 tmp, tmp2[4];
260 struct sigaction s, s2;
261 int ret;
262 mm_segment_t old_fs = get_fs();
263 struct sol_sigaction __user *p = (void __user *)A(old);
264 int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
265 (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
266
267 sig = mapsig(sig);
268 if (sig < 0) {
269 /* We cheat a little bit for Solaris only signals */
270 if (old && clear_user(p, sizeof(struct sol_sigaction)))
271 return -EFAULT;
272 return 0;
273 }
274 if (act) {
275 if (get_user (tmp, &p->sa_flags))
276 return -EFAULT;
277 s.sa_flags = 0;
278 if (tmp & SOLARIS_SA_ONSTACK) s.sa_flags |= SA_STACK;
279 if (tmp & SOLARIS_SA_RESTART) s.sa_flags |= SA_RESTART;
280 if (tmp & SOLARIS_SA_NODEFER) s.sa_flags |= SA_NOMASK;
281 if (tmp & SOLARIS_SA_RESETHAND) s.sa_flags |= SA_ONESHOT;
282 if (tmp & SOLARIS_SA_NOCLDSTOP) s.sa_flags |= SA_NOCLDSTOP;
283 if (get_user (tmp, &p->sa_handler) ||
284 copy_from_user (tmp2, &p->sa_mask, 2*sizeof(u32)))
285 return -EFAULT;
286 s.sa_handler = (__sighandler_t)A(tmp);
287 if (mapin (tmp2, &s.sa_mask)) return -EINVAL;
288 s.sa_restorer = NULL;
289 }
290 set_fs(KERNEL_DS);
291 ret = sys_sigaction(sig, act ? (void __user *)&s : NULL,
292 old ? (void __user *)&s2 : NULL);
293 set_fs(old_fs);
294 if (ret) return ret;
295 if (old) {
296 if (mapout (&s2.sa_mask, tmp2)) return -EINVAL;
297 tmp = 0; tmp2[2] = 0; tmp2[3] = 0;
298 if (s2.sa_flags & SA_STACK) tmp |= SOLARIS_SA_ONSTACK;
299 if (s2.sa_flags & SA_RESTART) tmp |= SOLARIS_SA_RESTART;
300 if (s2.sa_flags & SA_NOMASK) tmp |= SOLARIS_SA_NODEFER;
301 if (s2.sa_flags & SA_ONESHOT) tmp |= SOLARIS_SA_RESETHAND;
302 if (s2.sa_flags & SA_NOCLDSTOP) tmp |= SOLARIS_SA_NOCLDSTOP;
303 if (put_user (tmp, &p->sa_flags) ||
304 __put_user ((u32)(unsigned long)s2.sa_handler, &p->sa_handler) ||
305 copy_to_user (&p->sa_mask, tmp2, 4*sizeof(u32)))
306 return -EFAULT;
307 }
308 return 0;
309}
310
/*
 * Solaris sigpending(2) multiplexer: which==1 returns the set of blocked
 * pending signals; which==2 emulates sigfillset (only signals that have
 * Linux equivalents survive the mapout conversion).
 */
311asmlinkage int solaris_sigpending(int which, u32 set)
312{
313 sigset_t s;
314 u32 tmp[4];
315 switch (which) {
316 case 1: /* sigpending */
317 spin_lock_irq(&current->sighand->siglock);
318 sigandsets(&s, &current->blocked, &current->pending.signal);
		/* NOTE(review): this recalc_sigpending() looks redundant —
		 * nothing in the mask was changed above; confirm. */
319 recalc_sigpending();
320 spin_unlock_irq(&current->sighand->siglock);
321 break;
322 case 2: /* sigfillset - I just set signals which have linux equivalents */
323 sigfillset(&s);
324 break;
325 default: return -EINVAL;
326 }
327 if (mapout (&s, tmp)) return -EINVAL;
328 tmp[2] = 0; tmp[3] = 0;
329 if (copy_to_user ((u32 __user *)A(set), tmp, sizeof(tmp)))
330 return -EFAULT;
331 return 0;
332}
333
/*
 * Solaris wait(2): wait for any child, then rewrite the signal number
 * embedded in the wait status word from Linux to Solaris numbering
 * (termination signal in the low bits, stop signal in bits 8..15).
 */
334asmlinkage int solaris_wait(u32 stat_loc)
335{
336 unsigned __user *p = (unsigned __user *)A(stat_loc);
337 int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
338 (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
339 int ret, status;
340
341 ret = sys_wait4(-1, p, WUNTRACED, NULL);
342 if (ret >= 0 && stat_loc) {
343 if (get_user (status, p))
344 return -EFAULT;
		/* Killed by a signal: translate the low 7 bits. */
345 if (((status - 1) & 0xffff) < 0xff)
346 status = linux_to_solaris_signals[status & 0x7f] & 0x7f;
		/* Stopped (0x7f marker): translate the stop signal byte. */
347 else if ((status & 0xff) == 0x7f)
348 status = (linux_to_solaris_signals[(status >> 8) & 0xff] << 8) | 0x7f;
349 if (__put_user (status, p))
350 return -EFAULT;
351 }
352 return ret;
353}
354
355asmlinkage int solaris_waitid(int idtype, s32 pid, u32 info, int options)
356{
357 int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
358 (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
359 int opts, status, ret;
360
361 switch (idtype) {
362 case 0: /* P_PID */ break;
363 case 1: /* P_PGID */ pid = -pid; break;
364 case 7: /* P_ALL */ pid = -1; break;
365 default: return -EINVAL;
366 }
367 opts = 0;
368 if (options & SOLARIS_WUNTRACED) opts |= WUNTRACED;
369 if (options & SOLARIS_WNOHANG) opts |= WNOHANG;
370 current->state = TASK_RUNNING;
371 ret = sys_wait4(pid, (unsigned int __user *)A(info), opts, NULL);
372 if (ret < 0) return ret;
373 if (info) {
374 struct sol_siginfo __user *s = (void __user *)A(info);
375
376 if (get_user (status, (unsigned int __user *)A(info)))
377 return -EFAULT;
378
379 if (__put_user (SOLARIS_SIGCLD, &s->si_signo) ||
380 __put_user (ret, &s->_data._proc._pid))
381 return -EFAULT;
382
383 switch (status & 0xff) {
384 case 0: ret = SOLARIS_CLD_EXITED;
385 status = (status >> 8) & 0xff;
386 break;
387 case 0x7f:
388 status = (status >> 8) & 0xff;
389 switch (status) {
390 case SIGSTOP:
391 case SIGTSTP: ret = SOLARIS_CLD_STOPPED;
392 default: ret = SOLARIS_CLD_EXITED;
393 }
394 status = linux_to_solaris_signals[status];
395 break;
396 default:
397 if (status & 0x80) ret = SOLARIS_CLD_DUMPED;
398 else ret = SOLARIS_CLD_KILLED;
399 status = linux_to_solaris_signals[status & 0x7f];
400 break;
401 }
402
403 if (__put_user (ret, &s->si_code) ||
404 __put_user (status, &s->_data._proc._pdata._cld._status))
405 return -EFAULT;
406 }
407 return 0;
408}
409
410extern int svr4_setcontext(svr4_ucontext_t *c, struct pt_regs *regs);
411extern int svr4_getcontext(svr4_ucontext_t *c, struct pt_regs *regs);
412
/*
 * Solaris context(2) multiplexer: %i0 selects getcontext/setcontext,
 * %i1 holds the 32-bit user address of the svr4_ucontext_t.
 */
413asmlinkage int solaris_context(struct pt_regs *regs)
414{
415 switch ((unsigned)regs->u_regs[UREG_I0]) {
416 case 0: /* getcontext */
417 return svr4_getcontext((svr4_ucontext_t *)(long)(u32)regs->u_regs[UREG_I1], regs);
418 case 1: /* setcontext */
419 return svr4_setcontext((svr4_ucontext_t *)(long)(u32)regs->u_regs[UREG_I1], regs);
420 default:
421 return -EINVAL;
422
423 }
424}
425
/* Solaris sigaltstack(2): stubbed out — silently reports success. */
426asmlinkage int solaris_sigaltstack(u32 ss, u32 oss)
427{
428/* XXX Implement this soon */
429 return 0;
430}
diff --git a/arch/sparc64/solaris/signal.h b/arch/sparc64/solaris/signal.h
new file mode 100644
index 000000000000..e91570803050
--- /dev/null
+++ b/arch/sparc64/solaris/signal.h
@@ -0,0 +1,108 @@
1/* $Id: signal.h,v 1.3 1998/04/12 06:20:33 davem Exp $
2 * signal.h: Signal emulation for Solaris
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#define SOLARIS_SIGHUP 1
8#define SOLARIS_SIGINT 2
9#define SOLARIS_SIGQUIT 3
10#define SOLARIS_SIGILL 4
11#define SOLARIS_SIGTRAP 5
12#define SOLARIS_SIGIOT 6
13#define SOLARIS_SIGEMT 7
14#define SOLARIS_SIGFPE 8
15#define SOLARIS_SIGKILL 9
16#define SOLARIS_SIGBUS 10
17#define SOLARIS_SIGSEGV 11
18#define SOLARIS_SIGSYS 12
19#define SOLARIS_SIGPIPE 13
20#define SOLARIS_SIGALRM 14
21#define SOLARIS_SIGTERM 15
22#define SOLARIS_SIGUSR1 16
23#define SOLARIS_SIGUSR2 17
24#define SOLARIS_SIGCLD 18
25#define SOLARIS_SIGPWR 19
26#define SOLARIS_SIGWINCH 20
27#define SOLARIS_SIGURG 21
28#define SOLARIS_SIGPOLL 22
29#define SOLARIS_SIGSTOP 23
30#define SOLARIS_SIGTSTP 24
31#define SOLARIS_SIGCONT 25
32#define SOLARIS_SIGTTIN 26
33#define SOLARIS_SIGTTOU 27
34#define SOLARIS_SIGVTALRM 28
35#define SOLARIS_SIGPROF 29
36#define SOLARIS_SIGXCPU 30
37#define SOLARIS_SIGXFSZ 31
38#define SOLARIS_SIGWAITING 32
39#define SOLARIS_SIGLWP 33
40#define SOLARIS_SIGFREEZE 34
41#define SOLARIS_SIGTHAW 35
42#define SOLARIS_SIGCANCEL 36
43#define SOLARIS_SIGRTMIN 37
44#define SOLARIS_SIGRTMAX 44
45#define SOLARIS_NSIGNALS 44
46
47
48#define SOLARIS_SA_ONSTACK 1
49#define SOLARIS_SA_RESETHAND 2
50#define SOLARIS_SA_RESTART 4
51#define SOLARIS_SA_SIGINFO 8
52#define SOLARIS_SA_NODEFER 16
53#define SOLARIS_SA_NOCLDWAIT 0x10000
54#define SOLARIS_SA_NOCLDSTOP 0x20000
55
/*
 * Solaris userspace siginfo_t layout (32-bit ABI): a 3-int header padded
 * to 128 bytes, with per-signal-class payload variants in the union.
 */
56struct sol_siginfo {
57 int si_signo;
58 int si_code;
59 int si_errno;
60 union {
61 char pad[128-3*sizeof(int)];
62 struct {
63 s32 _pid;
64 union {
65 struct {
66 s32 _uid;
67 s32 _value;
68 } _kill;
69 struct {
70 s32 _utime;
71 int _status;
72 s32 _stime;
73 } _cld;
74 } _pdata;
75 } _proc;
76 struct { /* SIGSEGV, SIGBUS, SIGILL and SIGFPE */
77 u32 _addr;
78 int _trapno;
79 } _fault;
80 struct { /* SIGPOLL, SIGXFSZ */
81 int _fd;
82 s32 _band;
83 } _file;
84 } _data;
85};
86
87#define SOLARIS_WUNTRACED 0x04
88#define SOLARIS_WNOHANG 0x40
89#define SOLARIS_WEXITED 0x01
90#define SOLARIS_WTRAPPED 0x02
91#define SOLARIS_WSTOPPED WUNTRACED
92#define SOLARIS_WCONTINUED 0x08
93#define SOLARIS_WNOWAIT 0x80
94
95#define SOLARIS_TRAP_BRKPT 1
96#define SOLARIS_TRAP_TRACE 2
97#define SOLARIS_CLD_EXITED 1
98#define SOLARIS_CLD_KILLED 2
99#define SOLARIS_CLD_DUMPED 3
100#define SOLARIS_CLD_TRAPPED 4
101#define SOLARIS_CLD_STOPPED 5
102#define SOLARIS_CLD_CONTINUED 6
103#define SOLARIS_POLL_IN 1
104#define SOLARIS_POLL_OUT 2
105#define SOLARIS_POLL_MSG 3
106#define SOLARIS_POLL_ERR 4
107#define SOLARIS_POLL_PRI 5
108#define SOLARIS_POLL_HUP 6
diff --git a/arch/sparc64/solaris/socket.c b/arch/sparc64/solaris/socket.c
new file mode 100644
index 000000000000..ec8e074c4eac
--- /dev/null
+++ b/arch/sparc64/solaris/socket.c
@@ -0,0 +1,415 @@
1/* $Id: socket.c,v 1.6 2002/02/08 03:57:14 davem Exp $
2 * socket.c: Socket syscall emulation for Solaris 2.6+
3 *
4 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
5 *
6 * 1999-08-19 Fixed socketpair code
7 * Jason Rappleye (rappleye@ccr.buffalo.edu)
8 */
9
10#include <linux/types.h>
11#include <linux/smp_lock.h>
12#include <linux/mm.h>
13#include <linux/slab.h>
14#include <linux/socket.h>
15#include <linux/file.h>
16#include <linux/net.h>
17#include <linux/compat.h>
18#include <net/compat.h>
19
20#include <asm/uaccess.h>
21#include <asm/string.h>
22#include <asm/oplib.h>
23#include <asm/idprom.h>
24
25#include "conv.h"
26
27#define SOCK_SOL_STREAM 2
28#define SOCK_SOL_DGRAM 1
29#define SOCK_SOL_RAW 4
30#define SOCK_SOL_RDM 5
31#define SOCK_SOL_SEQPACKET 6
32
33#define SOL_SO_SNDLOWAT 0x1003
34#define SOL_SO_RCVLOWAT 0x1004
35#define SOL_SO_SNDTIMEO 0x1005
36#define SOL_SO_RCVTIMEO 0x1006
37#define SOL_SO_STATE 0x2000
38
39#define SOL_SS_NDELAY 0x040
40#define SOL_SS_NONBLOCK 0x080
41#define SOL_SS_ASYNC 0x100
42
43#define SO_STATE 0x000e
44
45static int socket_check(int family, int type)
46{
47 if (family != PF_UNIX && family != PF_INET)
48 return -ESOCKTNOSUPPORT;
49 switch (type) {
50 case SOCK_SOL_STREAM: type = SOCK_STREAM; break;
51 case SOCK_SOL_DGRAM: type = SOCK_DGRAM; break;
52 case SOCK_SOL_RAW: type = SOCK_RAW; break;
53 case SOCK_SOL_RDM: type = SOCK_RDM; break;
54 case SOCK_SOL_SEQPACKET: type = SOCK_SEQPACKET; break;
55 default: return -EINVAL;
56 }
57 return type;
58}
59
60static int solaris_to_linux_sockopt(int optname)
61{
62 switch (optname) {
63 case SOL_SO_SNDLOWAT: optname = SO_SNDLOWAT; break;
64 case SOL_SO_RCVLOWAT: optname = SO_RCVLOWAT; break;
65 case SOL_SO_SNDTIMEO: optname = SO_SNDTIMEO; break;
66 case SOL_SO_RCVTIMEO: optname = SO_RCVTIMEO; break;
67 case SOL_SO_STATE: optname = SO_STATE; break;
68 };
69
70 return optname;
71}
72
/* Solaris socket(3SOCKET): translate the type code, then call sys_socket. */
73asmlinkage int solaris_socket(int family, int type, int protocol)
74{
75 int (*sys_socket)(int, int, int) =
76 (int (*)(int, int, int))SYS(socket);
77
78 type = socket_check (family, type);
79 if (type < 0) return type;
80 return sys_socket(family, type, protocol);
81}
82
/* Solaris socketpair(3SOCKET): single-argument form, always AF_UNIX/STREAM. */
83asmlinkage int solaris_socketpair(int *usockvec)
84{
85 int (*sys_socketpair)(int, int, int, int *) =
86 (int (*)(int, int, int, int *))SYS(socketpair);
87
88 /* solaris socketpair really only takes one arg at the syscall
89 * level, int * usockvec. The libs apparently take care of
90 * making sure that family==AF_UNIX and type==SOCK_STREAM. The
91 * pointer we really want ends up residing in the first (and
92 * supposedly only) argument.
93 */
94
95 return sys_socketpair(AF_UNIX, SOCK_STREAM, 0, (int *)usockvec);
96}
97
/* Solaris bind(3SOCKET): routed through the SunOS bind entry (SUNOS(104)). */
98asmlinkage int solaris_bind(int fd, struct sockaddr *addr, int addrlen)
99{
100 int (*sys_bind)(int, struct sockaddr *, int) =
101 (int (*)(int, struct sockaddr *, int))SUNOS(104);
102
103 return sys_bind(fd, addr, addrlen);
104}
105
/*
 * Solaris setsockopt(3SOCKET): translate the option name, swallow the
 * Solaris-internal SO_STATE option, and defer to the SunOS handler.
 */
106asmlinkage int solaris_setsockopt(int fd, int level, int optname, u32 optval, int optlen)
107{
108 int (*sunos_setsockopt)(int, int, int, u32, int) =
109 (int (*)(int, int, int, u32, int))SUNOS(105);
110
111 optname = solaris_to_linux_sockopt(optname);
112 if (optname < 0)
113 return optname;
114 if (optname == SO_STATE)
115 return 0;
116
117 return sunos_setsockopt(fd, level, optname, optval, optlen);
118}
119
/*
 * Solaris getsockopt(3SOCKET): translate the option name; SO_STATE is
 * mapped back to its Solaris encoding before calling the SunOS handler.
 */
120asmlinkage int solaris_getsockopt(int fd, int level, int optname, u32 optval, u32 optlen)
121{
122 int (*sunos_getsockopt)(int, int, int, u32, u32) =
123 (int (*)(int, int, int, u32, u32))SUNOS(118);
124
125 optname = solaris_to_linux_sockopt(optname);
126 if (optname < 0)
127 return optname;
128
129 if (optname == SO_STATE)
130 optname = SOL_SO_STATE;
131
132 return sunos_getsockopt(fd, level, optname, optval, optlen);
133}
134
/* Solaris connect(3SOCKET): identical semantics — straight pass-through. */
135asmlinkage int solaris_connect(int fd, struct sockaddr __user *addr, int addrlen)
136{
137 int (*sys_connect)(int, struct sockaddr __user *, int) =
138 (int (*)(int, struct sockaddr __user *, int))SYS(connect);
139
140 return sys_connect(fd, addr, addrlen);
141}
142
/* Solaris accept(3SOCKET): identical semantics — straight pass-through. */
143asmlinkage int solaris_accept(int fd, struct sockaddr __user *addr, int __user *addrlen)
144{
145 int (*sys_accept)(int, struct sockaddr __user *, int __user *) =
146 (int (*)(int, struct sockaddr __user *, int __user *))SYS(accept);
147
148 return sys_accept(fd, addr, addrlen);
149}
150
/* Solaris listen(3SOCKET): routed through the SunOS listen entry. */
151asmlinkage int solaris_listen(int fd, int backlog)
152{
153 int (*sys_listen)(int, int) =
154 (int (*)(int, int))SUNOS(106);
155
156 return sys_listen(fd, backlog);
157}
158
/* Solaris shutdown(3SOCKET): identical semantics — straight pass-through. */
159asmlinkage int solaris_shutdown(int fd, int how)
160{
161 int (*sys_shutdown)(int, int) =
162 (int (*)(int, int))SYS(shutdown);
163
164 return sys_shutdown(fd, how);
165}
166
167#define MSG_SOL_OOB 0x1
168#define MSG_SOL_PEEK 0x2
169#define MSG_SOL_DONTROUTE 0x4
170#define MSG_SOL_EOR 0x8
171#define MSG_SOL_CTRUNC 0x10
172#define MSG_SOL_TRUNC 0x20
173#define MSG_SOL_WAITALL 0x40
174#define MSG_SOL_DONTWAIT 0x80
175
/*
 * Translate Solaris MSG_* flag bits to Linux ones.  OOB/PEEK/DONTROUTE
 * share the same values on both systems and pass through unmodified.
 */
176static int solaris_to_linux_msgflags(int flags)
177{
178 int fl = flags & (MSG_OOB|MSG_PEEK|MSG_DONTROUTE);
179
180 if (flags & MSG_SOL_EOR) fl |= MSG_EOR;
181 if (flags & MSG_SOL_CTRUNC) fl |= MSG_CTRUNC;
182 if (flags & MSG_SOL_TRUNC) fl |= MSG_TRUNC;
183 if (flags & MSG_SOL_WAITALL) fl |= MSG_WAITALL;
184 if (flags & MSG_SOL_DONTWAIT) fl |= MSG_DONTWAIT;
185 return fl;
186}
187
/* Inverse of solaris_to_linux_msgflags: Linux MSG_* bits to Solaris ones. */
188static int linux_to_solaris_msgflags(int flags)
189{
190 int fl = flags & (MSG_OOB|MSG_PEEK|MSG_DONTROUTE);
191
192 if (flags & MSG_EOR) fl |= MSG_SOL_EOR;
193 if (flags & MSG_CTRUNC) fl |= MSG_SOL_CTRUNC;
194 if (flags & MSG_TRUNC) fl |= MSG_SOL_TRUNC;
195 if (flags & MSG_WAITALL) fl |= MSG_SOL_WAITALL;
196 if (flags & MSG_DONTWAIT) fl |= MSG_SOL_DONTWAIT;
197 return fl;
198}
199
/* Solaris recvfrom(3SOCKET): translate flags, widen 32-bit user pointers. */
200asmlinkage int solaris_recvfrom(int s, char __user *buf, int len, int flags, u32 from, u32 fromlen)
201{
202 int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
203 (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
204
205 return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), A(from), A(fromlen));
206}
207
/* Solaris recv(3SOCKET): recvfrom with no source address. */
208asmlinkage int solaris_recv(int s, char __user *buf, int len, int flags)
209{
210 int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
211 (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
212
213 return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
214}
215
/* Solaris sendto(3SOCKET): translate flags, widen 32-bit user pointers. */
216asmlinkage int solaris_sendto(int s, char __user *buf, int len, int flags, u32 to, u32 tolen)
217{
218 int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
219 (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(sendto);
220
221 return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), A(to), A(tolen));
222}
223
/* Solaris send(3SOCKET): sendto with no destination address. */
224asmlinkage int solaris_send(int s, char *buf, int len, int flags)
225{
226 int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
227 (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(sendto);
228
229 return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
230}
231
/* Solaris getpeername(3SOCKET): straight pass-through. */
232asmlinkage int solaris_getpeername(int fd, struct sockaddr *addr, int *addrlen)
233{
234 int (*sys_getpeername)(int, struct sockaddr *, int *) =
235 (int (*)(int, struct sockaddr *, int *))SYS(getpeername);
236
237 return sys_getpeername(fd, addr, addrlen);
238}
239
/* Solaris getsockname(3SOCKET): straight pass-through. */
240asmlinkage int solaris_getsockname(int fd, struct sockaddr *addr, int *addrlen)
241{
242 int (*sys_getsockname)(int, struct sockaddr *, int *) =
243 (int (*)(int, struct sockaddr *, int *))SYS(getsockname);
244
245 return sys_getsockname(fd, addr, addrlen);
246}
247
248/* XXX This really belongs in some header file... -DaveM */
249#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
250 16 for IP, 16 for IPX,
251 24 for IPv6,
252 about 80 for AX.25 */
253
/* Solaris 32-bit msghdr layout: all pointers narrowed to u32. */
254struct sol_nmsghdr {
255 u32 msg_name;
256 int msg_namelen;
257 u32 msg_iov;
258 u32 msg_iovlen;
259 u32 msg_control;
260 u32 msg_controllen;
261 u32 msg_flags;
262};
263
/* Solaris 32-bit cmsghdr: 32-bit length followed by level/type and data. */
264struct sol_cmsghdr {
265 u32 cmsg_len;
266 int cmsg_level;
267 int cmsg_type;
268 unsigned char cmsg_data[0];
269};
270
/*
 * Copy a Solaris 32-bit msghdr from userspace into a kernel struct msghdr,
 * widening the pointer fields and translating the flag bits.  Returns
 * -EFAULT (or a nonzero get_user error) on a faulting user access.
 */
271static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
272 struct sol_nmsghdr __user *umsg)
273{
274 u32 tmp1, tmp2, tmp3;
275 int err;
276
277 err = get_user(tmp1, &umsg->msg_name);
278 err |= __get_user(tmp2, &umsg->msg_iov);
279 err |= __get_user(tmp3, &umsg->msg_control);
280 if (err)
281 return -EFAULT;
282
	/* A() widens the 32-bit user addresses to full pointers. */
283 kmsg->msg_name = A(tmp1);
284 kmsg->msg_iov = A(tmp2);
285 kmsg->msg_control = A(tmp3);
286
287 err = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
288 err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
289 err |= get_user(kmsg->msg_flags, &umsg->msg_flags);
290
291 kmsg->msg_flags = solaris_to_linux_msgflags(kmsg->msg_flags);
292
293 return err;
294}
295
296asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned user_flags)
297{
298 struct socket *sock;
299 char address[MAX_SOCK_ADDR];
300 struct iovec iov[UIO_FASTIOV];
301 unsigned char ctl[sizeof(struct cmsghdr) + 20];
302 unsigned char *ctl_buf = ctl;
303 struct msghdr kern_msg;
304 int err, total_len;
305
306 if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
307 return -EFAULT;
308 if(kern_msg.msg_iovlen > UIO_MAXIOV)
309 return -EINVAL;
310 err = verify_compat_iovec(&kern_msg, iov, address, VERIFY_READ);
311 if (err < 0)
312 goto out;
313 total_len = err;
314
315 if(kern_msg.msg_controllen) {
316 struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
317 unsigned long *kcmsg;
318 compat_size_t cmlen;
319
320 if(kern_msg.msg_controllen > sizeof(ctl) &&
321 kern_msg.msg_controllen <= 256) {
322 err = -ENOBUFS;
323 ctl_buf = kmalloc(kern_msg.msg_controllen, GFP_KERNEL);
324 if(!ctl_buf)
325 goto out_freeiov;
326 }
327 __get_user(cmlen, &ucmsg->cmsg_len);
328 kcmsg = (unsigned long *) ctl_buf;
329 *kcmsg++ = (unsigned long)cmlen;
330 err = -EFAULT;
331 if(copy_from_user(kcmsg, &ucmsg->cmsg_level,
332 kern_msg.msg_controllen - sizeof(compat_size_t)))
333 goto out_freectl;
334 kern_msg.msg_control = ctl_buf;
335 }
336 kern_msg.msg_flags = solaris_to_linux_msgflags(user_flags);
337
338 lock_kernel();
339 sock = sockfd_lookup(fd, &err);
340 if (sock != NULL) {
341 if (sock->file->f_flags & O_NONBLOCK)
342 kern_msg.msg_flags |= MSG_DONTWAIT;
343 err = sock_sendmsg(sock, &kern_msg, total_len);
344 sockfd_put(sock);
345 }
346 unlock_kernel();
347
348out_freectl:
349 /* N.B. Use kfree here, as kern_msg.msg_controllen might change? */
350 if(ctl_buf != ctl)
351 kfree(ctl_buf);
352out_freeiov:
353 if(kern_msg.msg_iov != iov)
354 kfree(kern_msg.msg_iov);
355out:
356 return err;
357}
358
/*
 * Solaris recvmsg(3SOCKET): convert the 32-bit msghdr, receive via
 * sock_recvmsg(), then write the peer address, translated flags and the
 * consumed control-buffer length back into the user's msghdr.
 * Returns the byte count on success, negative errno on failure.
 */
359asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
360{
361 struct iovec iovstack[UIO_FASTIOV];
362 struct msghdr kern_msg;
363 char addr[MAX_SOCK_ADDR];
364 struct socket *sock;
365 struct iovec *iov = iovstack;
366 struct sockaddr __user *uaddr;
367 int __user *uaddr_len;
368 unsigned long cmsg_ptr;
369 int err, total_len, len = 0;
370
371 if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
372 return -EFAULT;
373 if(kern_msg.msg_iovlen > UIO_MAXIOV)
374 return -EINVAL;
375
	/* Remember the user's name pointer; verify_compat_iovec repoints
	 * msg_name at the kernel 'addr' buffer. */
376 uaddr = kern_msg.msg_name;
377 uaddr_len = &user_msg->msg_namelen;
378 err = verify_compat_iovec(&kern_msg, iov, addr, VERIFY_WRITE);
379 if (err < 0)
380 goto out;
381 total_len = err;
382
383 cmsg_ptr = (unsigned long) kern_msg.msg_control;
384 kern_msg.msg_flags = 0;
385
	/* NOTE(review): user_flags are passed through without
	 * solaris_to_linux_msgflags() translation here, unlike sendmsg —
	 * confirm this asymmetry is intentional. */
386 lock_kernel();
387 sock = sockfd_lookup(fd, &err);
388 if (sock != NULL) {
389 if (sock->file->f_flags & O_NONBLOCK)
390 user_flags |= MSG_DONTWAIT;
391 err = sock_recvmsg(sock, &kern_msg, total_len, user_flags);
392 if(err >= 0)
393 len = err;
394 sockfd_put(sock);
395 }
396 unlock_kernel();
397
398 if(uaddr != NULL && err >= 0)
399 err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len);
400 if(err >= 0) {
401 err = __put_user(linux_to_solaris_msgflags(kern_msg.msg_flags), &user_msg->msg_flags);
402 if(!err) {
403 /* XXX Convert cmsg back into userspace 32-bit format... */
404 err = __put_user((unsigned long)kern_msg.msg_control - cmsg_ptr,
405 &user_msg->msg_controllen);
406 }
407 }
408
409 if(kern_msg.msg_iov != iov)
410 kfree(kern_msg.msg_iov);
411out:
412 if(err < 0)
413 return err;
414 return len;
415}
diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c
new file mode 100644
index 000000000000..d7c1c76582cc
--- /dev/null
+++ b/arch/sparc64/solaris/socksys.c
@@ -0,0 +1,211 @@
1/* $Id: socksys.c,v 1.21 2002/02/08 03:57:14 davem Exp $
2 * socksys.c: /dev/inet/ stuff for Solaris emulation.
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997, 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
6 * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
7 */
8
9/*
10 * Dave, _please_ give me specifications on this fscking mess so that I
11 * could at least get it into the state when it wouldn't screw the rest of
12 * the kernel over. socksys.c and timod.c _stink_ and we are not talking
13 * H2S here, it's isopropilmercaptan in concentrations way over LD50. -- AV
14 */
15
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/ioctl.h>
22#include <linux/fs.h>
23#include <linux/file.h>
24#include <linux/init.h>
25#include <linux/poll.h>
26#include <linux/slab.h>
27#include <linux/syscalls.h>
28#include <linux/in.h>
29#include <linux/devfs_fs_kernel.h>
30
31#include <net/sock.h>
32
33#include <asm/uaccess.h>
34#include <asm/termios.h>
35
36#include "conv.h"
37#include "socksys.h"
38
/*
 * IP protocol selected by the low nibble of the /dev/inet minor number.
 * NOTE(review): minors 0 and 1 both map to IPPROTO_ICMP — presumably
 * minor 0 is the generic "ip" node approximated as ICMP; confirm.
 */
39static int af_inet_protocols[] = {
40IPPROTO_ICMP, IPPROTO_ICMP, IPPROTO_IGMP, IPPROTO_IPIP, IPPROTO_TCP,
41IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW,
420, 0, 0, 0, 0, 0,
43};
44
45#ifndef DEBUG_SOLARIS_KMALLOC
46
47#define mykmalloc kmalloc
48#define mykfree kfree
49
50#else
51
52extern void * mykmalloc(size_t s, int gfp);
53extern void mykfree(void *);
54
55#endif
56
/* Saved pointer to the real socket poll op; see init_socksys(). */
57static unsigned int (*sock_poll)(struct file *, poll_table *);
58
/* Filled in at init time with a copy of the socket file_operations. */
59static struct file_operations socksys_file_ops = {
60 /* Currently empty */
61};
62
/*
 * open() handler for the /dev/inet "socksys" character device.  Decodes
 * family/type/protocol from the minor number, creates a real socket via
 * the SunOS socket entry, then grafts that socket's dentry onto this
 * file and attaches the Solaris per-socket STREAMS state.
 */
63static int socksys_open(struct inode * inode, struct file * filp)
64{
65 int family, type, protocol, fd;
66 struct dentry *dentry;
67 int (*sys_socket)(int,int,int) =
68 (int (*)(int,int,int))SUNOS(97);
69 struct sol_socket_struct * sock;
70
	/* High nibble of the minor selects the address family. */
71 family = ((iminor(inode) >> 4) & 0xf);
72 switch (family) {
73 case AF_UNIX:
74 type = SOCK_STREAM;
75 protocol = 0;
76 break;
77 case AF_INET:
		/* Low nibble selects the IP protocol (table above). */
78 protocol = af_inet_protocols[iminor(inode) & 0xf];
79 switch (protocol) {
80 case IPPROTO_TCP: type = SOCK_STREAM; break;
81 case IPPROTO_UDP: type = SOCK_DGRAM; break;
82 default: type = SOCK_RAW; break;
83 }
84 break;
85 default:
86 type = SOCK_RAW;
87 protocol = 0;
88 break;
89 }
90
91 fd = sys_socket(family, type, protocol);
92 if (fd < 0)
93 return fd;
94 /*
95 * N.B. The following operations are not legal!
96 *
97 * No shit. WTF is it supposed to do, anyway?
98 *
99 * Try instead:
100 * d_delete(filp->f_dentry), then d_instantiate with sock inode
101 */
102 dentry = filp->f_dentry;
103 filp->f_dentry = dget(fcheck(fd)->f_dentry);
104 filp->f_dentry->d_inode->i_rdev = inode->i_rdev;
105 filp->f_dentry->d_inode->i_flock = inode->i_flock;
106 SOCKET_I(filp->f_dentry->d_inode)->file = filp;
107 filp->f_op = &socksys_file_ops;
108 sock = (struct sol_socket_struct*)
109 mykmalloc(sizeof(struct sol_socket_struct), GFP_KERNEL);
	/* NOTE(review): this error path leaks the grafted dentry reference
	 * and the temporary fd — confirm and fix upstream. */
110 if (!sock) return -ENOMEM;
111 SOLDD(("sock=%016lx(%016lx)\n", sock, filp));
112 sock->magic = SOLARIS_SOCKET_MAGIC;
113 sock->modcount = 0;
114 sock->state = TS_UNBND;
115 sock->offset = 0;
116 sock->pfirst = sock->plast = NULL;
117 filp->private_data = sock;
118 SOLDD(("filp->private_data %016lx\n", filp->private_data));
119
	/* The fd was only a vehicle for the socket; drop it and the old dentry. */
120 sys_close(fd);
121 dput(dentry);
122 return 0;
123}
124
/*
 * release() handler: free every queued STREAMS primitive message and the
 * per-socket Solaris state attached in socksys_open().
 */
125static int socksys_release(struct inode * inode, struct file * filp)
126{
127 struct sol_socket_struct * sock;
128 struct T_primsg *it;
129
130 /* XXX: check this */
131 sock = (struct sol_socket_struct *)filp->private_data;
132 SOLDD(("sock release %016lx(%016lx)\n", sock, filp));
133 it = sock->pfirst;
134 while (it) {
135 struct T_primsg *next = it->next;
136
137 SOLDD(("socksys_release %016lx->%016lx\n", it, next));
138 mykfree((char*)it);
139 it = next;
140 }
141 filp->private_data = NULL;
142 SOLDD(("socksys_release %016lx\n", sock));
143 mykfree((char*)sock);
144 return 0;
145}
146
/*
 * poll() handler: report readability for queued STREAMS primitives
 * (POLLPRI when the head message is high-priority), then OR in whatever
 * the saved real socket poll op reports.
 */
147static unsigned int socksys_poll(struct file * filp, poll_table * wait)
148{
149 struct inode *ino;
150 unsigned int mask = 0;
151
152 ino=filp->f_dentry->d_inode;
153 if (ino && S_ISSOCK(ino->i_mode)) {
154 struct sol_socket_struct *sock;
155 sock = (struct sol_socket_struct*)filp->private_data;
156 if (sock && sock->pfirst) {
157 mask |= POLLIN | POLLRDNORM;
158 if (sock->pfirst->pri == MSG_HIPRI)
159 mask |= POLLPRI;
160 }
161 }
162 if (sock_poll)
163 mask |= (*sock_poll)(filp, wait);
164 return mask;
165}
166
/* Ops for the registered char device; only open/release matter here —
 * socksys_open replaces f_op with the patched socket ops afterwards. */
167static struct file_operations socksys_fops = {
168 .open = socksys_open,
169 .release = socksys_release,
170};
171
/*
 * Register the socksys char device (major 30) and capture a copy of the
 * real socket file_operations by briefly creating a TCP socket, so that
 * socksys files can delegate to them with poll/release overridden.
 */
172int __init
173init_socksys(void)
174{
175 int ret;
176 struct file * file;
177 int (*sys_socket)(int,int,int) =
178 (int (*)(int,int,int))SUNOS(97);
179 int (*sys_close)(unsigned int) =
180 (int (*)(unsigned int))SYS(close);
181
182 ret = register_chrdev (30, "socksys", &socksys_fops);
183 if (ret < 0) {
184 printk ("Couldn't register socksys character device\n");
185 return ret;
186 }
	/* NOTE(review): on this failure path the chrdev registration is
	 * not rolled back — confirm whether that is acceptable. */
187 ret = sys_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
188 if (ret < 0) {
189 printk ("Couldn't create socket\n");
190 return ret;
191 }
192
193 devfs_mk_cdev(MKDEV(30, 0), S_IFCHR|S_IRUSR|S_IWUSR, "socksys");
194
	/* 'ret' is the fd of the scratch socket; snapshot its f_op. */
195 file = fcheck(ret);
196 /* N.B. Is this valid? Suppose the f_ops are in a module ... */
197 socksys_file_ops = *file->f_op;
198 sys_close(ret);
199 sock_poll = socksys_file_ops.poll;
200 socksys_file_ops.poll = socksys_poll;
201 socksys_file_ops.release = socksys_release;
202 return 0;
203}
204
/* Unregister the socksys char device and remove its devfs node. */
205void
206cleanup_socksys(void)
207{
208 if (unregister_chrdev(30, "socksys"))
209 printk ("Couldn't unregister socksys character device\n");
210 devfs_remove ("socksys");
211}
diff --git a/arch/sparc64/solaris/socksys.h b/arch/sparc64/solaris/socksys.h
new file mode 100644
index 000000000000..5d1b78ec1600
--- /dev/null
+++ b/arch/sparc64/solaris/socksys.h
@@ -0,0 +1,208 @@
1/* $Id: socksys.h,v 1.2 1998/03/26 08:46:07 jj Exp $
2 * socksys.h: Definitions for STREAMS modules emulation code.
3 *
4 * Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
5 */
6
/* getmsg()/putmsg() message priority flags. */
#define MSG_HIPRI	0x01
#define MSG_ANY		0x02
#define MSG_BAND	0x04

/* getmsg() completion flags: part of the control/data block remains. */
#define MORECTL		1
#define MOREDATA	2

/* TLI/XTI t_errno values, reported back in T_error_ack.TLI_error. */
#define	TBADADDR		1
#define	TBADOPT			2
#define	TACCES			3
#define TBADF			4
#define TNOADDR			5
#define TOUTSTATE	        6
#define TBADSEQ		        7
#define TSYSERR			8
#define TLOOK		        9
#define TBADDATA	       10
#define TBUFOVFLW	       11
#define TFLOW		       12
#define	TNODATA		       13
#define TNODIS		       14
#define TNOUDERR	       15
#define TBADFLAG	       16
#define TNOREL		       17
#define TNOTSUPPORT	       18
#define TSTATECHNG	       19
33
/* TPI primitive types.  Requests (0-10) are sent down by the user via
 * putmsg(); see timod_putmsg() for the ones actually emulated. */
#define T_CONN_REQ	0
#define T_CONN_RES	1
#define T_DISCON_REQ	2
#define T_DATA_REQ	3
#define T_EXDATA_REQ	4
#define T_INFO_REQ	5
#define T_BIND_REQ	6
#define T_UNBIND_REQ	7
#define T_UNITDATA_REQ	8
#define T_OPTMGMT_REQ	9
#define T_ORDREL_REQ	10

/* Indications and acknowledgements (11-23) travel up to the user and
 * are what the emulation queues as T_primsg control messages. */
#define T_CONN_IND	11
#define T_CONN_CON	12
#define T_DISCON_IND	13
#define T_DATA_IND	14
#define T_EXDATA_IND	15
#define T_INFO_ACK	16
#define T_BIND_ACK	17
#define T_ERROR_ACK	18
#define T_OK_ACK	19
#define T_UNITDATA_IND	20
#define T_UDERROR_IND	21
#define T_OPTMGMT_ACK	22
#define T_ORDREL_IND	23

/* MGMT_flags values for T_OPTMGMT_REQ/T_OPTMGMT_ACK. */
#define T_NEGOTIATE	0x0004
#define T_FAILURE	0x0040

/* Transport endpoint states, kept in sol_socket_struct.state. */
#define TS_UNBND	0	/* unbound */
#define	TS_WACK_BREQ	1	/* waiting for T_BIND_REQ ack  */
#define	TS_WACK_UREQ	2	/* waiting for T_UNBIND_REQ ack */
#define	TS_IDLE		3	/* idle */
#define	TS_WACK_OPTREQ	4	/* waiting for T_OPTMGMT_REQ ack */
#define	TS_WACK_CREQ	5	/* waiting for T_CONN_REQ ack */
#define	TS_WCON_CREQ	6	/* waiting for T_CONN_REQ confirmation */
#define	TS_WRES_CIND	7	/* waiting for T_CONN_IND */
#define	TS_WACK_CRES	8	/* waiting for T_CONN_RES ack */
#define	TS_DATA_XFER	9	/* data transfer */
#define	TS_WIND_ORDREL	10	/* releasing read but not write */
#define	TS_WREQ_ORDREL	11	/* wait to release write but not read */
#define	TS_WACK_DREQ6	12	/* waiting for T_DISCON_REQ ack */
#define	TS_WACK_DREQ7	13	/* waiting for T_DISCON_REQ ack */
#define	TS_WACK_DREQ9	14	/* waiting for T_DISCON_REQ ack */
#define	TS_WACK_DREQ10	15	/* waiting for T_DISCON_REQ ack */
#define	TS_WACK_DREQ11	16	/* waiting for T_DISCON_REQ ack */
#define	TS_NOSTATES	17
81
/* Control-part layouts for the TPI primitives above, as seen by the
 * 32-bit Solaris application (hence the fixed s32/u32 fields).  The
 * *_length/*_offset pairs locate variable-length data (addresses,
 * options) elsewhere in the same control buffer. */

/* T_CONN_REQ: connect to the address at DEST_offset/DEST_length. */
struct T_conn_req {
	s32 PRIM_type;
	s32 DEST_length;
	s32 DEST_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_BIND_REQ: bind a local address; a non-zero CONIND_number also
 * requests listening (see the T_BIND_REQ handling in timod.c). */
struct T_bind_req {
	s32 PRIM_type;
	s32 ADDR_length;
	s32 ADDR_offset;
	u32 CONIND_number;
};

/* T_UNITDATA_REQ: send a datagram to DEST. */
struct T_unitdata_req {
	s32 PRIM_type;
	s32 DEST_length;
	s32 DEST_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_OPTMGMT_REQ: get/set socket options (see timod_optmgmt()). */
struct T_optmgmt_req {
	s32 PRIM_type;
	s32 OPT_length;
	s32 OPT_offset;
	s32 MGMT_flags;
};

/* T_BIND_ACK: reply to T_BIND_REQ carrying the bound address. */
struct T_bind_ack {
	s32 PRIM_type;
	s32 ADDR_length;
	s32 ADDR_offset;
	u32 CONIND_number;
};

/* T_ERROR_ACK: failure reply; TLI_error holds a T* code above and
 * UNIX_error an errno value (see timod_error()). */
struct T_error_ack {
	s32 PRIM_type;
	s32 ERROR_prim;
	s32 TLI_error;
	s32 UNIX_error;
};

/* T_OK_ACK: success reply naming the acknowledged primitive. */
struct T_ok_ack {
	s32 PRIM_type;
	s32 CORRECT_prim;
};

/* T_CONN_IND: incoming connection indication. */
struct T_conn_ind {
	s32 PRIM_type;
	s32 SRC_length;
	s32 SRC_offset;
	s32 OPT_length;
	s32 OPT_offset;
	s32 SEQ_number;
};

/* T_CONN_CON: connect confirmation delivered after T_CONN_REQ. */
struct T_conn_con {
	s32 PRIM_type;
	s32 RES_length;
	s32 RES_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_DISCON_IND: disconnect indication. */
struct T_discon_ind {
	s32 PRIM_type;
	s32 DISCON_reason;
	s32 SEQ_number;
};

/* T_UNITDATA_IND: received datagram with its source address. */
struct T_unitdata_ind {
	s32 PRIM_type;
	s32 SRC_length;
	s32 SRC_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_OPTMGMT_ACK: reply to T_OPTMGMT_REQ; option data follows at
 * OPT_offset (see timod_optmgmt()). */
struct T_optmgmt_ack {
	s32 PRIM_type;
	s32 OPT_length;
	s32 OPT_offset;
	s32 MGMT_flags;
};

/* One socket option inside an optmgmt buffer; 'len' bytes of value
 * data immediately follow the header (zero-length array idiom). */
struct opthdr {
	s32 level;
	s32 name;
	s32 len;
	char value[0];
};

/* In-kernel wrapper used to queue a TPI control message on a socket;
 * the TPI payload starts at 'type' (first word = PRIM_type). */
struct T_primsg {
	struct T_primsg *next;
	unsigned char pri;
	unsigned char band;
	int length;
	s32 type;
};

/* User-visible getmsg()/putmsg() buffer descriptor; 'buf' is a 32-bit
 * user-space pointer. */
struct strbuf {
	s32 maxlen;
	s32 len;
	u32 buf;
} ;
189
/* Constants used by STREAMS modules emulation code */

/* Identifier of an emulated STREAMS module pushed onto a socket. */
typedef char sol_module;

#define MAX_NR_STREAM_MODULES	16

/* Private data structure assigned to sockets. */

struct sol_socket_struct {
	int magic;		/* SOLARIS_SOCKET_MAGIC when valid -- presumably; verify against allocation site */
	int modcount;		/* number of entries used in module[] */
	sol_module module[MAX_NR_STREAM_MODULES];	/* stack of pushed modules */
	long state;		/* TPI endpoint state (TS_* above) */
	int offset;		/* NOTE(review): purpose not visible in this chunk */
	struct T_primsg *pfirst, *plast;	/* head/tail of pending control messages
						 * (see timod_queue()/timod_queue_end()) */
};

#define SOLARIS_SOCKET_MAGIC	0xADDED
208
diff --git a/arch/sparc64/solaris/systbl.S b/arch/sparc64/solaris/systbl.S
new file mode 100644
index 000000000000..d25667eeae10
--- /dev/null
+++ b/arch/sparc64/solaris/systbl.S
@@ -0,0 +1,314 @@
1/* $Id: systbl.S,v 1.11 2000/03/13 21:57:35 davem Exp $
2 * systbl.S: System call entry point table for Solaris compatibility.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 */
7
8#include <asm/unistd.h>
9
/* Fall back to sys_call_table32 entry: expands to the raw Linux syscall
 * number instead of a handler address (presumably recognized as such by
 * the trap dispatcher -- confirm against the syscall entry path). */
#define CHAIN(name) __NR_##name

/* Pass pt_regs pointer as first argument: tags the handler address by
 * setting its low bit (name+1). */
#define REGS(name) name+1

/* Hack till all be implemented */
#define solaris_getpmsg solaris_unimplemented
#define solaris_hrtsys solaris_unimplemented
#define solaris_msgsys solaris_unimplemented
#define solaris_putpmsg solaris_unimplemented
#define solaris_semsys solaris_unimplemented
22
	.data
	.globl solaris_sys_table
	/* Solaris system call dispatch table, indexed by Solaris syscall
	 * number (0-283).  Each .word is either a native solaris_*
	 * handler, CHAIN(x) to reuse the 32-bit Linux syscall, or
	 * REGS(x) for handlers taking pt_regs (see the macros above).
	 * The letters in the comments encode the argument signature. */
solaris_sys_table:
	.word solaris_unimplemented	/* nosys 0 */
	.word CHAIN(exit)		/* exit d 1 */
	.word CHAIN(fork)		/* fork 2 */
	.word CHAIN(read)		/* read dpd 3 */
	.word CHAIN(write)		/* write dpd 4 */
	.word solaris_open		/* open soo 5 */
	.word CHAIN(close)		/* close d 6 */
	.word solaris_wait		/* wait xxx 7 */
	.word CHAIN(creat)		/* creat so 8 */
	.word CHAIN(link)		/* link ss 9 */
	.word CHAIN(unlink)		/* unlink s 10 */
	.word solaris_unimplemented	/* exec sxx 11 */
	.word CHAIN(chdir)		/* chdir s 12 */
	.word CHAIN(time)		/* time 13 */
	.word solaris_mknod		/* mknod sox 14 */
	.word CHAIN(chmod)		/* chmod so 15 */
	.word CHAIN(chown)		/* chown sdd 16 */
	.word solaris_brk		/* brk/break x 17 */
	.word solaris_stat		/* stat sp 18 */
	.word CHAIN(lseek)		/* seek/lseek ddd 19 */
	.word solaris_getpid		/* getpid 20 */
	.word solaris_unimplemented	/* mount 21 */
	.word CHAIN(umount)		/* umount s 22 */
	.word CHAIN(setuid)		/* setuid d 23 */
	.word solaris_getuid		/* getuid 24 */
	.word CHAIN(stime)		/* stime d 25 */
#if 0
	.word solaris_ptrace		/* ptrace xdxx 26 */
#else
	.word CHAIN(ptrace)		/* ptrace xdxx 26 */
#endif
	.word CHAIN(alarm)		/* alarm d 27 */
	.word solaris_fstat		/* fstat dp 28 */
	.word CHAIN(pause)		/* pause 29 */
	.word CHAIN(utime)		/* utime xx 30 */
	.word solaris_unimplemented	/* stty 31 */
	.word solaris_unimplemented	/* gtty 32 */
	.word solaris_access		/* access so 33 */
	.word CHAIN(nice)		/* nice d 34 */
	.word solaris_statfs		/* statfs spdd 35 */
	.word CHAIN(sync)		/* sync 36 */
	.word solaris_kill		/* kill dd 37 */
	.word solaris_fstatfs		/* fstatfs dpdd 38 */
	.word solaris_procids		/* pgrpsys ddd 39 */
	.word solaris_unimplemented	/* xenix 40 */
	.word CHAIN(dup)		/* dup d 41 */
	.word CHAIN(pipe)		/* pipe 42 */
	.word CHAIN(times)		/* times p 43 */
	.word 44 /*CHAIN(profil)*/	/* prof xxxx 44 */
	.word solaris_unimplemented	/* lock/plock 45 */
	.word CHAIN(setgid)		/* setgid d 46 */
	.word solaris_getgid		/* getgid 47 */
	.word solaris_sigfunc		/* sigfunc xx 48 */
	.word REGS(solaris_msgsys)	/* msgsys dxddd 49 */
	.word solaris_unimplemented	/* syssun/3b 50 */
	.word CHAIN(acct)		/* acct/sysacct x 51 */
	.word solaris_shmsys		/* shmsys ddxo 52 */
	.word REGS(solaris_semsys)	/* semsys dddx 53 */
	.word solaris_ioctl		/* ioctl dxx 54 */
	.word solaris_unimplemented	/* uadmin xxx 55 */
	.word solaris_unimplemented	/* reserved:exch 56 */
	.word solaris_utssys		/* utssys x 57 */
	.word CHAIN(fsync)		/* fsync d 58 */
	.word CHAIN(execve)		/* execv spp 59 */
	.word CHAIN(umask)		/* umask o 60 */
	.word CHAIN(chroot)		/* chroot s 61 */
	.word solaris_fcntl		/* fcntl dxx 62 */
	.word solaris_ulimit		/* ulimit xx 63 */
	.word solaris_unimplemented	/* ? 64 */
	.word solaris_unimplemented	/* ? 65 */
	.word solaris_unimplemented	/* ? 66 */
	.word solaris_unimplemented	/* ? 67 */
	.word solaris_unimplemented	/* ? 68 */
	.word solaris_unimplemented	/* ? 69 */
	.word solaris_unimplemented	/* advfs 70 */
	.word solaris_unimplemented	/* unadvfs 71 */
	.word solaris_unimplemented	/* rmount 72 */
	.word solaris_unimplemented	/* rumount 73 */
	.word solaris_unimplemented	/* rfstart 74 */
	.word solaris_unimplemented	/* ? 75 */
	.word solaris_unimplemented	/* rdebug 76 */
	.word solaris_unimplemented	/* rfstop 77 */
	.word solaris_unimplemented	/* rfsys 78 */
	.word CHAIN(rmdir)		/* rmdir s 79 */
	.word CHAIN(mkdir)		/* mkdir so 80 */
	.word CHAIN(getdents)		/* getdents dxd 81 */
	.word solaris_unimplemented	/* libattach 82 */
	.word solaris_unimplemented	/* libdetach 83 */
	.word CHAIN(sysfs)		/* sysfs dxx 84 */
	.word solaris_getmsg		/* getmsg dxxx 85 */
	.word solaris_putmsg		/* putmsg dxxd 86 */
	.word CHAIN(poll)		/* poll xdd 87 */
	.word solaris_lstat		/* lstat sp 88 */
	.word CHAIN(symlink)		/* symlink ss 89 */
	.word CHAIN(readlink)		/* readlink spd 90 */
	.word CHAIN(setgroups)		/* setgroups dp 91 */
	.word CHAIN(getgroups)		/* getgroups dp 92 */
	.word CHAIN(fchmod)		/* fchmod do 93 */
	.word CHAIN(fchown)		/* fchown ddd 94 */
	.word solaris_sigprocmask	/* sigprocmask dxx 95 */
	.word solaris_sigsuspend	/* sigsuspend x 96 */
	.word solaris_sigaltstack	/* sigaltstack xx 97 */
	.word solaris_sigaction		/* sigaction dxx 98 */
	.word solaris_sigpending	/* sigpending dd 99 */
	.word REGS(solaris_context)	/* context 100 */
	.word solaris_unimplemented	/* evsys 101 */
	.word solaris_unimplemented	/* evtrapret 102 */
	.word solaris_statvfs		/* statvfs sp 103 */
	.word solaris_fstatvfs		/* fstatvfs dp 104 */
	.word solaris_unimplemented	/* unknown 105 */
	.word solaris_unimplemented	/* nfssys 106 */
	.word solaris_waitid		/* waitid ddxd 107 */
	.word solaris_unimplemented	/* sigsendsys ddd 108 */
	.word REGS(solaris_hrtsys)	/* hrtsys xxx 109 */
	.word solaris_unimplemented	/* acancel dxd 110 */
	.word solaris_unimplemented	/* async 111 */
	.word solaris_unimplemented	/* priocntlsys 112 */
	.word solaris_pathconf		/* pathconf sd 113 */
	.word CHAIN(mincore)		/* mincore d 114 */
	.word solaris_mmap		/* mmap xxxxdx 115 */
	.word CHAIN(mprotect)		/* mprotect xdx 116 */
	.word CHAIN(munmap)		/* munmap xd 117 */
	.word solaris_fpathconf		/* fpathconf dd 118 */
	.word CHAIN(fork)		/* fork 119 */
	.word solaris_unimplemented	/* fchdir d 120 */
	.word CHAIN(readv)		/* readv dxd 121 */
	.word CHAIN(writev)		/* writev dxd 122 */
	.word solaris_xstat		/* xstat dsx 123 */
	.word solaris_lxstat		/* lxstat dsx 124 */
	.word solaris_fxstat		/* fxstat ddx 125 */
	.word solaris_xmknod		/* xmknod dsox 126 */
	.word solaris_unimplemented	/* syslocal d 127 */
	.word solaris_setrlimit		/* setrlimit dp 128 */
	.word solaris_getrlimit		/* getrlimit dp 129 */
	.word CHAIN(chown)		/* lchown sdd 130 */
	.word solaris_unimplemented	/* memcntl 131 */
	.word solaris_getpmsg		/* getpmsg dxxxx 132 */
	.word solaris_putpmsg		/* putpmsg dxxdd 133 */
	.word CHAIN(rename)		/* rename ss 134 */
	.word solaris_utsname		/* uname x 135 */
	.word solaris_unimplemented	/* setegid 136 */
	.word solaris_sysconf		/* sysconfig d 137 */
	.word solaris_unimplemented	/* adjtime 138 */
	.word solaris_sysinfo		/* systeminfo dsd 139 */
	.word solaris_unimplemented	/* ? 140 */
	.word solaris_unimplemented	/* seteuid 141 */
	.word solaris_unimplemented	/* ? 142 */
	.word solaris_unimplemented	/* ? 143 */
	.word solaris_unimplemented	/* secsys dx 144 */
	.word solaris_unimplemented	/* filepriv sdxd 145 */
	.word solaris_unimplemented	/* procpriv dxd 146 */
	.word solaris_unimplemented	/* devstat sdx 147 */
	.word solaris_unimplemented	/* aclipc ddddx 148 */
	.word solaris_unimplemented	/* fdevstat ddx 149 */
	.word solaris_unimplemented	/* flvlfile ddx 150 */
	.word solaris_unimplemented	/* lvlfile sdx 151 */
	.word solaris_unimplemented	/* ? 152 */
	.word solaris_unimplemented	/* fchroot d 153 */
	.word solaris_unimplemented	/* lvlproc dx 154 */
	.word solaris_unimplemented	/* ? 155 */
	.word solaris_gettimeofday	/* gettimeofday x 156 */
	.word CHAIN(getitimer)		/* getitimer dx 157 */
	.word CHAIN(setitimer)		/* setitimer dxx 158 */
	.word solaris_unimplemented	/* lwp-xxx 159 */
	.word solaris_unimplemented	/* lwp-xxx 160 */
	.word solaris_unimplemented	/* lwp-xxx 161 */
	.word solaris_unimplemented	/* lwp-xxx 162 */
	.word solaris_unimplemented	/* lwp-xxx 163 */
	.word solaris_unimplemented	/* lwp-xxx 164 */
	.word solaris_unimplemented	/* lwp-xxx 165 */
	.word solaris_unimplemented	/* lwp-xxx 166 */
	.word solaris_unimplemented	/* lwp-xxx 167 */
	.word solaris_unimplemented	/* lwp-xxx 168 */
	.word solaris_unimplemented	/* lwp-xxx 169 */
	.word solaris_unimplemented	/* lwp-xxx 170 */
	.word solaris_unimplemented	/* lwp-xxx 171 */
	.word solaris_unimplemented	/* lwp-xxx 172 */
	.word solaris_pread		/* pread dpdd 173 */
	.word solaris_pwrite		/* pwrite dpdd 174 */
	.word REGS(solaris_llseek)	/* llseek dLd 175 */
	.word solaris_unimplemented	/* lwpself 176 */
	.word solaris_unimplemented	/* lwpinfo 177 */
	.word solaris_unimplemented	/* lwpprivate 178 */
	.word solaris_unimplemented	/* processorbind 179 */
	.word solaris_unimplemented	/* processorexbind 180 */
	.word solaris_unimplemented	/* 181 */
	.word solaris_unimplemented	/* sync_mailbox 182 */
	.word solaris_unimplemented	/* prepblock 183 */
	.word solaris_unimplemented	/* block 184 */
	.word solaris_acl		/* acl sddp 185 */
	.word solaris_unimplemented	/* unblock 186 */
	.word solaris_unimplemented	/* cancelblock 187 */
	.word solaris_unimplemented	/* ? 188 */
	.word solaris_unimplemented	/* xxxxx 189 */
	.word solaris_unimplemented	/* xxxxxe 190 */
	.word solaris_unimplemented	/* 191 */
	.word solaris_unimplemented	/* 192 */
	.word solaris_unimplemented	/* 193 */
	.word solaris_unimplemented	/* 194 */
	.word solaris_unimplemented	/* 195 */
	.word solaris_unimplemented	/* 196 */
	.word solaris_unimplemented	/* 197 */
	.word solaris_unimplemented	/* 198 */
	.word CHAIN(nanosleep)		/* nanosleep dd 199 */
	.word solaris_facl		/* facl dddp 200 */
	.word solaris_unimplemented	/* 201 */
	.word CHAIN(setreuid)		/* setreuid dd 202 */
	.word CHAIN(setregid)		/* setregid dd 203 */
	.word solaris_unimplemented	/* 204 */
	.word solaris_unimplemented	/* 205 */
	.word solaris_unimplemented	/* 206 */
	.word solaris_unimplemented	/* 207 */
	.word solaris_unimplemented	/* 208 */
	.word solaris_unimplemented	/* 209 */
	.word solaris_unimplemented	/* 210 */
	.word solaris_unimplemented	/* 211 */
	.word solaris_unimplemented	/* 212 */
	.word solaris_getdents64	/* getdents64 dpd 213 */
	.word REGS(solaris_mmap64)	/* mmap64 xxxxdX 214 */
	.word solaris_stat64		/* stat64 sP 215 */
	.word solaris_lstat64		/* lstat64 sP 216 */
	.word solaris_fstat64		/* fstat64 dP 217 */
	.word solaris_statvfs64		/* statvfs64 sP 218 */
	.word solaris_fstatvfs64	/* fstatvfs64 dP 219 */
	.word solaris_setrlimit64	/* setrlimit64 dP 220 */
	.word solaris_getrlimit64	/* getrlimit64 dP 221 */
	.word CHAIN(pread64)		/* pread64 dpdD 222 */
	.word CHAIN(pwrite64)		/* pwrite64 dpdD 223 */
	.word CHAIN(creat)		/* creat64 so 224 */
	.word solaris_open		/* open64 soo 225 */
	.word solaris_unimplemented	/* 226 */
	.word solaris_unimplemented	/* 227 */
	.word solaris_unimplemented	/* 228 */
	.word solaris_unimplemented	/* 229 */
	.word solaris_socket		/* socket ddd 230 */
	.word solaris_socketpair	/* socketpair dddp 231 */
	.word solaris_bind		/* bind dpd 232 */
	.word solaris_listen		/* listen dd 233 */
	.word solaris_accept		/* accept dpp 234 */
	.word solaris_connect		/* connect dpd 235 */
	.word solaris_shutdown		/* shutdown dd 236 */
	.word solaris_recv		/* recv dpdd 237 */
	.word solaris_recvfrom		/* recvfrom dpddpp 238 */
	.word solaris_recvmsg		/* recvmsg dpd 239 */
	.word solaris_send		/* send dpdd 240 */
	.word solaris_sendmsg		/* sendmsg dpd 241 */
	.word solaris_sendto		/* sendto dpddpd 242 */
	.word solaris_getpeername	/* getpeername dpp 243 */
	.word solaris_getsockname	/* getsockname dpp 244 */
	.word solaris_getsockopt	/* getsockopt dddpp 245 */
	.word solaris_setsockopt	/* setsockopt dddpp 246 */
	.word solaris_unimplemented	/* 247 */
	.word solaris_ntp_gettime	/* ntp_gettime p 248 */
	.word solaris_ntp_adjtime	/* ntp_adjtime p 249 */
	.word solaris_unimplemented	/* 250 */
	.word solaris_unimplemented	/* 251 */
	.word solaris_unimplemented	/* 252 */
	.word solaris_unimplemented	/* 253 */
	.word solaris_unimplemented	/* 254 */
	.word solaris_unimplemented	/* 255 */
	.word solaris_unimplemented	/* 256 */
	.word solaris_unimplemented	/* 257 */
	.word solaris_unimplemented	/* 258 */
	.word solaris_unimplemented	/* 259 */
	.word solaris_unimplemented	/* 260 */
	.word solaris_unimplemented	/* 261 */
	.word solaris_unimplemented	/* 262 */
	.word solaris_unimplemented	/* 263 */
	.word solaris_unimplemented	/* 264 */
	.word solaris_unimplemented	/* 265 */
	.word solaris_unimplemented	/* 266 */
	.word solaris_unimplemented	/* 267 */
	.word solaris_unimplemented	/* 268 */
	.word solaris_unimplemented	/* 269 */
	.word solaris_unimplemented	/* 270 */
	.word solaris_unimplemented	/* 271 */
	.word solaris_unimplemented	/* 272 */
	.word solaris_unimplemented	/* 273 */
	.word solaris_unimplemented	/* 274 */
	.word solaris_unimplemented	/* 275 */
	.word solaris_unimplemented	/* 276 */
	.word solaris_unimplemented	/* 277 */
	.word solaris_unimplemented	/* 278 */
	.word solaris_unimplemented	/* 279 */
	.word solaris_unimplemented	/* 280 */
	.word solaris_unimplemented	/* 281 */
	.word solaris_unimplemented	/* 282 */
	.word solaris_unimplemented	/* 283 */
314
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c
new file mode 100644
index 000000000000..022c80f43392
--- /dev/null
+++ b/arch/sparc64/solaris/timod.c
@@ -0,0 +1,959 @@
1/* $Id: timod.c,v 1.19 2002/02/08 03:57:14 davem Exp $
2 * timod.c: timod emulation.
3 *
4 * Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
5 *
6 * Streams & timod emulation based on code
7 * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
8 *
9 */
10
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/ioctl.h>
17#include <linux/fs.h>
18#include <linux/file.h>
19#include <linux/netdevice.h>
20#include <linux/poll.h>
21
22#include <net/sock.h>
23
24#include <asm/uaccess.h>
25#include <asm/termios.h>
26
27#include "conv.h"
28#include "socksys.h"
29
30asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
31
/* One-page buffer cache used by getpage()/putpage(): 'page' holds at
 * most one spare page, protected by timod_pagelock. */
static DEFINE_SPINLOCK(timod_pagelock);
static char * page = NULL ;
34
#ifndef DEBUG_SOLARIS_KMALLOC

/* Production build: use the regular kernel allocator. */
#define mykmalloc kmalloc
#define mykfree kfree

#else

/* Debug build: dole out 64-byte-aligned chunks from throwaway pages.
 * NOTE(review): debug-only helper -- it never frees anything
 * (mykfree below is a no-op), does not check __get_free_page() for
 * NULL, and the static page/free state has no locking against
 * concurrent callers. */
void * mykmalloc(size_t s, int gfp)
{
	static char * page;
	static size_t free;
	void * r;
	s = ((s + 63) & ~63);
	if( s > PAGE_SIZE ) {
		SOLD("too big size, calling real kmalloc");
		return kmalloc(s, gfp);
	}
	if( s > free ) {
		/* we are wasting memory, but we don't care */
		page = (char *)__get_free_page(gfp);
		free = PAGE_SIZE;
	}
	r = page;
	page += s;
	free -= s;
	return r;
}

/* Matching no-op free for the debug allocator above. */
void mykfree(void *p)
{
}

#endif
68
#ifndef DEBUG_SOLARIS

/* Production build: buffers span the whole page and the magic-word
 * bracketing below compiles away. */
#define BUF_SIZE	PAGE_SIZE
#define PUT_MAGIC(a,m)
#define SCHECK_MAGIC(a,m)
#define BUF_OFFSET	0
#define MKCTL_TRAILER	0

#else

/* Debug build: reserve a u64 magic word at each end of every buffer
 * and complain loudly if either is found corrupted on release. */
#define BUF_SIZE	(PAGE_SIZE-2*sizeof(u64))
#define BUFPAGE_MAGIC	0xBADC0DEDDEADBABEL
#define MKCTL_MAGIC	0xDEADBABEBADC0DEDL
#define PUT_MAGIC(a,m)	do{(*(u64*)(a))=(m);}while(0)
#define SCHECK_MAGIC(a,m)	do{if((*(u64*)(a))!=(m))printk("%s,%u,%s(): magic %08x at %p corrupted!\n",\
				__FILE__,__LINE__,__FUNCTION__,(m),(a));}while(0)
#define BUF_OFFSET	sizeof(u64)
#define MKCTL_TRAILER	sizeof(u64)

#endif
89
/* Hand out a BUF_SIZE working buffer, reusing the single cached page
 * when one is available.  In debug builds the page is bracketed with
 * magic words and the returned pointer is offset past the leading one.
 * NOTE(review): __get_free_page() failure is not checked; with
 * BUF_OFFSET == 0 (non-debug) this returns NULL, which not all
 * callers handle. */
static char *getpage( void )
{
	char *r;
	SOLD("getting page");
	spin_lock(&timod_pagelock);
	if (page) {
		r = page;
		page = NULL;
		spin_unlock(&timod_pagelock);
		SOLD("got cached");
		return r + BUF_OFFSET;
	}
	spin_unlock(&timod_pagelock);
	SOLD("getting new");
	r = (char *)__get_free_page(GFP_KERNEL);
	PUT_MAGIC(r,BUFPAGE_MAGIC);
	PUT_MAGIC(r+PAGE_SIZE-sizeof(u64),BUFPAGE_MAGIC);
	return r + BUF_OFFSET;
}
109
110static void putpage(char *p)
111{
112 SOLD("putting page");
113 p = p - BUF_OFFSET;
114 SCHECK_MAGIC(p,BUFPAGE_MAGIC);
115 SCHECK_MAGIC(p+PAGE_SIZE-sizeof(u64),BUFPAGE_MAGIC);
116 spin_lock(&timod_pagelock);
117 if (page) {
118 spin_unlock(&timod_pagelock);
119 free_page((unsigned long)p);
120 SOLD("freed it");
121 } else {
122 page = p;
123 spin_unlock(&timod_pagelock);
124 SOLD("cached it");
125 }
126}
127
128static struct T_primsg *timod_mkctl(int size)
129{
130 struct T_primsg *it;
131
132 SOLD("creating primsg");
133 it = (struct T_primsg *)mykmalloc(size+sizeof(*it)-sizeof(s32)+2*MKCTL_TRAILER, GFP_KERNEL);
134 if (it) {
135 SOLD("got it");
136 it->pri = MSG_HIPRI;
137 it->length = size;
138 PUT_MAGIC((char*)((u64)(((char *)&it->type)+size+7)&~7),MKCTL_MAGIC);
139 }
140 return it;
141}
142
/* Wake readers/pollers of the socket behind 'fd' and deliver SIGIO to
 * async subscribers, as if new data had arrived on the socket.
 * NOTE(review): 'fd' is indexed into current->files->fd[] directly,
 * with no bounds/validity check and no locking of the fd table --
 * callers must guarantee fd refers to an open socksys socket. */
static void timod_wake_socket(unsigned int fd)
{
	struct socket *sock;

	SOLD("wakeing socket");
	sock = SOCKET_I(current->files->fd[fd]->f_dentry->d_inode);
	wake_up_interruptible(&sock->wait);
	read_lock(&sock->sk->sk_callback_lock);
	if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
		__kill_fasync(sock->fasync_list, SIGIO, POLL_IN);
	read_unlock(&sock->sk->sk_callback_lock);
	SOLD("done");
}
156
157static void timod_queue(unsigned int fd, struct T_primsg *it)
158{
159 struct sol_socket_struct *sock;
160
161 SOLD("queuing primsg");
162 sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data;
163 it->next = sock->pfirst;
164 sock->pfirst = it;
165 if (!sock->plast)
166 sock->plast = it;
167 timod_wake_socket(fd);
168 SOLD("done");
169}
170
171static void timod_queue_end(unsigned int fd, struct T_primsg *it)
172{
173 struct sol_socket_struct *sock;
174
175 SOLD("queuing primsg at end");
176 sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data;
177 it->next = NULL;
178 if (sock->plast)
179 sock->plast->next = it;
180 else
181 sock->pfirst = it;
182 sock->plast = it;
183 SOLD("done");
184}
185
186static void timod_error(unsigned int fd, int prim, int terr, int uerr)
187{
188 struct T_primsg *it;
189
190 SOLD("making error");
191 it = timod_mkctl(sizeof(struct T_error_ack));
192 if (it) {
193 struct T_error_ack *err = (struct T_error_ack *)&it->type;
194
195 SOLD("got it");
196 err->PRIM_type = T_ERROR_ACK;
197 err->ERROR_prim = prim;
198 err->TLI_error = terr;
199 err->UNIX_error = uerr; /* FIXME: convert this */
200 timod_queue(fd, it);
201 }
202 SOLD("done");
203}
204
205static void timod_ok(unsigned int fd, int prim)
206{
207 struct T_primsg *it;
208 struct T_ok_ack *ok;
209
210 SOLD("creating ok ack");
211 it = timod_mkctl(sizeof(*ok));
212 if (it) {
213 SOLD("got it");
214 ok = (struct T_ok_ack *)&it->type;
215 ok->PRIM_type = T_OK_ACK;
216 ok->CORRECT_prim = prim;
217 timod_queue(fd, it);
218 }
219 SOLD("done");
220}
221
222static int timod_optmgmt(unsigned int fd, int flag, char __user *opt_buf, int opt_len, int do_ret)
223{
224 int error, failed;
225 int ret_space, ret_len;
226 long args[5];
227 char *ret_pos,*ret_buf;
228 int (*sys_socketcall)(int, unsigned long *) =
229 (int (*)(int, unsigned long *))SYS(socketcall);
230 mm_segment_t old_fs = get_fs();
231
232 SOLD("entry");
233 SOLDD(("fd %u flg %u buf %p len %u doret %u",fd,flag,opt_buf,opt_len,do_ret));
234 if (!do_ret && (!opt_buf || opt_len <= 0))
235 return 0;
236 SOLD("getting page");
237 ret_pos = ret_buf = getpage();
238 ret_space = BUF_SIZE;
239 ret_len = 0;
240
241 error = failed = 0;
242 SOLD("looping");
243 while(opt_len >= sizeof(struct opthdr)) {
244 struct opthdr *opt;
245 int orig_opt_len;
246 SOLD("loop start");
247 opt = (struct opthdr *)ret_pos;
248 if (ret_space < sizeof(struct opthdr)) {
249 failed = TSYSERR;
250 break;
251 }
252 SOLD("getting opthdr");
253 if (copy_from_user(opt, opt_buf, sizeof(struct opthdr)) ||
254 opt->len > opt_len) {
255 failed = TBADOPT;
256 break;
257 }
258 SOLD("got opthdr");
259 if (flag == T_NEGOTIATE) {
260 char *buf;
261
262 SOLD("handling T_NEGOTIATE");
263 buf = ret_pos + sizeof(struct opthdr);
264 if (ret_space < opt->len + sizeof(struct opthdr) ||
265 copy_from_user(buf, opt_buf+sizeof(struct opthdr), opt->len)) {
266 failed = TSYSERR;
267 break;
268 }
269 SOLD("got optdata");
270 args[0] = fd;
271 args[1] = opt->level;
272 args[2] = opt->name;
273 args[3] = (long)buf;
274 args[4] = opt->len;
275 SOLD("calling SETSOCKOPT");
276 set_fs(KERNEL_DS);
277 error = sys_socketcall(SYS_SETSOCKOPT, args);
278 set_fs(old_fs);
279 if (error) {
280 failed = TBADOPT;
281 break;
282 }
283 SOLD("SETSOCKOPT ok");
284 }
285 orig_opt_len = opt->len;
286 opt->len = ret_space - sizeof(struct opthdr);
287 if (opt->len < 0) {
288 failed = TSYSERR;
289 break;
290 }
291 args[0] = fd;
292 args[1] = opt->level;
293 args[2] = opt->name;
294 args[3] = (long)(ret_pos+sizeof(struct opthdr));
295 args[4] = (long)&opt->len;
296 SOLD("calling GETSOCKOPT");
297 set_fs(KERNEL_DS);
298 error = sys_socketcall(SYS_GETSOCKOPT, args);
299 set_fs(old_fs);
300 if (error) {
301 failed = TBADOPT;
302 break;
303 }
304 SOLD("GETSOCKOPT ok");
305 ret_space -= sizeof(struct opthdr) + opt->len;
306 ret_len += sizeof(struct opthdr) + opt->len;
307 ret_pos += sizeof(struct opthdr) + opt->len;
308 opt_len -= sizeof(struct opthdr) + orig_opt_len;
309 opt_buf += sizeof(struct opthdr) + orig_opt_len;
310 SOLD("loop end");
311 }
312 SOLD("loop done");
313 if (do_ret) {
314 SOLD("generating ret msg");
315 if (failed)
316 timod_error(fd, T_OPTMGMT_REQ, failed, -error);
317 else {
318 struct T_primsg *it;
319 it = timod_mkctl(sizeof(struct T_optmgmt_ack) + ret_len);
320 if (it) {
321 struct T_optmgmt_ack *ack =
322 (struct T_optmgmt_ack *)&it->type;
323 SOLD("got primsg");
324 ack->PRIM_type = T_OPTMGMT_ACK;
325 ack->OPT_length = ret_len;
326 ack->OPT_offset = sizeof(struct T_optmgmt_ack);
327 ack->MGMT_flags = (failed ? T_FAILURE : flag);
328 memcpy(((char*)ack)+sizeof(struct T_optmgmt_ack),
329 ret_buf, ret_len);
330 timod_queue(fd, it);
331 }
332 }
333 }
334 SOLDD(("put_page %p\n", ret_buf));
335 putpage(ret_buf);
336 SOLD("done");
337 return 0;
338}
339
340int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
341 char __user *data_buf, int data_len, int flags)
342{
343 int ret, error, terror;
344 char *buf;
345 struct file *filp;
346 struct inode *ino;
347 struct sol_socket_struct *sock;
348 mm_segment_t old_fs = get_fs();
349 long args[6];
350 int (*sys_socketcall)(int, unsigned long __user *) =
351 (int (*)(int, unsigned long __user *))SYS(socketcall);
352 int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int) =
353 (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int))SYS(sendto);
354 filp = current->files->fd[fd];
355 ino = filp->f_dentry->d_inode;
356 sock = (struct sol_socket_struct *)filp->private_data;
357 SOLD("entry");
358 if (get_user(ret, (int __user *)A(ctl_buf)))
359 return -EFAULT;
360 switch (ret) {
361 case T_BIND_REQ:
362 {
363 struct T_bind_req req;
364
365 SOLDD(("bind %016lx(%016lx)\n", sock, filp));
366 SOLD("T_BIND_REQ");
367 if (sock->state != TS_UNBND) {
368 timod_error(fd, T_BIND_REQ, TOUTSTATE, 0);
369 return 0;
370 }
371 SOLD("state ok");
372 if (copy_from_user(&req, ctl_buf, sizeof(req))) {
373 timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
374 return 0;
375 }
376 SOLD("got ctl req");
377 if (req.ADDR_offset && req.ADDR_length) {
378 if (req.ADDR_length > BUF_SIZE) {
379 timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
380 return 0;
381 }
382 SOLD("req size ok");
383 buf = getpage();
384 if (copy_from_user(buf, ctl_buf + req.ADDR_offset, req.ADDR_length)) {
385 timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
386 putpage(buf);
387 return 0;
388 }
389 SOLD("got ctl data");
390 args[0] = fd;
391 args[1] = (long)buf;
392 args[2] = req.ADDR_length;
393 SOLD("calling BIND");
394 set_fs(KERNEL_DS);
395 error = sys_socketcall(SYS_BIND, args);
396 set_fs(old_fs);
397 putpage(buf);
398 SOLD("BIND returned");
399 } else
400 error = 0;
401 if (!error) {
402 struct T_primsg *it;
403 if (req.CONIND_number) {
404 args[0] = fd;
405 args[1] = req.CONIND_number;
406 SOLD("calling LISTEN");
407 set_fs(KERNEL_DS);
408 error = sys_socketcall(SYS_LISTEN, args);
409 set_fs(old_fs);
410 SOLD("LISTEN done");
411 }
412 it = timod_mkctl(sizeof(struct T_bind_ack)+sizeof(struct sockaddr));
413 if (it) {
414 struct T_bind_ack *ack;
415
416 ack = (struct T_bind_ack *)&it->type;
417 ack->PRIM_type = T_BIND_ACK;
418 ack->ADDR_offset = sizeof(*ack);
419 ack->ADDR_length = sizeof(struct sockaddr);
420 ack->CONIND_number = req.CONIND_number;
421 args[0] = fd;
422 args[1] = (long)(ack+sizeof(*ack));
423 args[2] = (long)&ack->ADDR_length;
424 set_fs(KERNEL_DS);
425 sys_socketcall(SYS_GETSOCKNAME,args);
426 set_fs(old_fs);
427 sock->state = TS_IDLE;
428 timod_ok(fd, T_BIND_REQ);
429 timod_queue_end(fd, it);
430 SOLD("BIND done");
431 return 0;
432 }
433 }
434 SOLD("some error");
435 switch (error) {
436 case -EINVAL:
437 terror = TOUTSTATE;
438 error = 0;
439 break;
440 case -EACCES:
441 terror = TACCES;
442 error = 0;
443 break;
444 case -EADDRNOTAVAIL:
445 case -EADDRINUSE:
446 terror = TNOADDR;
447 error = 0;
448 break;
449 default:
450 terror = TSYSERR;
451 break;
452 }
453 timod_error(fd, T_BIND_REQ, terror, -error);
454 SOLD("BIND done");
455 return 0;
456 }
457 case T_CONN_REQ:
458 {
459 struct T_conn_req req;
460 unsigned short oldflags;
461 struct T_primsg *it;
462 SOLD("T_CONN_REQ");
463 if (sock->state != TS_UNBND && sock->state != TS_IDLE) {
464 timod_error(fd, T_CONN_REQ, TOUTSTATE, 0);
465 return 0;
466 }
467 SOLD("state ok");
468 if (copy_from_user(&req, ctl_buf, sizeof(req))) {
469 timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
470 return 0;
471 }
472 SOLD("got ctl req");
473 if (ctl_len > BUF_SIZE) {
474 timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
475 return 0;
476 }
477 SOLD("req size ok");
478 buf = getpage();
479 if (copy_from_user(buf, ctl_buf, ctl_len)) {
480 timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
481 putpage(buf);
482 return 0;
483 }
484#ifdef DEBUG_SOLARIS
485 {
486 char * ptr = buf;
487 int len = ctl_len;
488 printk("returned data (%d bytes): ",len);
489 while( len-- ) {
490 if (!(len & 7))
491 printk(" ");
492 printk("%02x",(unsigned char)*ptr++);
493 }
494 printk("\n");
495 }
496#endif
497 SOLD("got ctl data");
498 args[0] = fd;
499 args[1] = (long)buf+req.DEST_offset;
500 args[2] = req.DEST_length;
501 oldflags = filp->f_flags;
502 filp->f_flags &= ~O_NONBLOCK;
503 SOLD("calling CONNECT");
504 set_fs(KERNEL_DS);
505 error = sys_socketcall(SYS_CONNECT, args);
506 set_fs(old_fs);
507 filp->f_flags = oldflags;
508 SOLD("CONNECT done");
509 if (!error) {
510 struct T_conn_con *con;
511 SOLD("no error");
512 it = timod_mkctl(ctl_len);
513 if (!it) {
514 putpage(buf);
515 return -ENOMEM;
516 }
517 con = (struct T_conn_con *)&it->type;
518#ifdef DEBUG_SOLARIS
519 {
520 char * ptr = buf;
521 int len = ctl_len;
522 printk("returned data (%d bytes): ",len);
523 while( len-- ) {
524 if (!(len & 7))
525 printk(" ");
526 printk("%02x",(unsigned char)*ptr++);
527 }
528 printk("\n");
529 }
530#endif
531 memcpy(con, buf, ctl_len);
532 SOLD("copied ctl_buf");
533 con->PRIM_type = T_CONN_CON;
534 sock->state = TS_DATA_XFER;
535 } else {
536 struct T_discon_ind *dis;
537 SOLD("some error");
538 it = timod_mkctl(sizeof(*dis));
539 if (!it) {
540 putpage(buf);
541 return -ENOMEM;
542 }
543 SOLD("got primsg");
544 dis = (struct T_discon_ind *)&it->type;
545 dis->PRIM_type = T_DISCON_IND;
546 dis->DISCON_reason = -error; /* FIXME: convert this as in iABI_errors() */
547 dis->SEQ_number = 0;
548 }
549 putpage(buf);
550 timod_ok(fd, T_CONN_REQ);
551 it->pri = 0;
552 timod_queue_end(fd, it);
553 SOLD("CONNECT done");
554 return 0;
555 }
556 case T_OPTMGMT_REQ:
557 {
558 struct T_optmgmt_req req;
559 SOLD("OPTMGMT_REQ");
560 if (copy_from_user(&req, ctl_buf, sizeof(req)))
561 return -EFAULT;
562 SOLD("got req");
563 return timod_optmgmt(fd, req.MGMT_flags,
564 req.OPT_offset > 0 ? ctl_buf + req.OPT_offset : NULL,
565 req.OPT_length, 1);
566 }
567 case T_UNITDATA_REQ:
568 {
569 struct T_unitdata_req req;
570
571 int err;
572 SOLD("T_UNITDATA_REQ");
573 if (sock->state != TS_IDLE && sock->state != TS_DATA_XFER) {
574 timod_error(fd, T_CONN_REQ, TOUTSTATE, 0);
575 return 0;
576 }
577 SOLD("state ok");
578 if (copy_from_user(&req, ctl_buf, sizeof(req))) {
579 timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
580 return 0;
581 }
582 SOLD("got ctl req");
583#ifdef DEBUG_SOLARIS
584 {
585 char * ptr = ctl_buf+req.DEST_offset;
586 int len = req.DEST_length;
587 printk("socket address (%d bytes): ",len);
588 while( len-- ) {
589 char c;
590 if (get_user(c,ptr))
591 printk("??");
592 else
593 printk("%02x",(unsigned char)c);
594 ptr++;
595 }
596 printk("\n");
597 }
598#endif
599 err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr __user *)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
600 if (err == data_len)
601 return 0;
602 if(err >= 0) {
603 printk("timod: sendto failed to send all the data\n");
604 return 0;
605 }
606 timod_error(fd, T_CONN_REQ, TSYSERR, -err);
607 return 0;
608 }
609 default:
610 printk(KERN_INFO "timod_putmsg: unsupported command %u.\n", ret);
611 break;
612 }
613 return -EINVAL;
614}
615
/*
 * timod_getmsg() - emulate Solaris/STREAMS getmsg(2) on top of a Linux
 * socket.  Delivers, in priority order:
 *   1. a queued control message (struct T_primsg) from sock->pfirst,
 *   2. a T_CONN_IND synthesized from a non-blocking accept() on an idle
 *      SOCK_STREAM socket,
 *   3. a data message fetched with recvfrom(), with the peer address and
 *      an optional T_UNITDATA_IND header placed in the control part.
 *
 * Returns 0 on success, MORECTL when a queued control message was only
 * partially consumed (remainder stays queued, sock->offset advanced),
 * -EINTR / -EAGAIN / -EFAULT / other negative errno on failure.
 * *flags_p is input (MSG_HIPRI filtering of the queue) and output (the
 * priority of the delivered control message).
 *
 * Caller (solaris_getmsg) holds the BKL and has already validated fd, so
 * the unchecked current->files->fd[fd] access below relies on that.
 */
int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, s32 __user *ctl_len,
			char __user *data_buf, int data_maxlen, s32 __user *data_len, int *flags_p)
{
	int error;
	int oldflags;
	struct file *filp;
	struct inode *ino;
	struct sol_socket_struct *sock;
	struct T_unitdata_ind udi;
	mm_segment_t old_fs = get_fs();
	long args[6];
	char __user *tmpbuf;
	int tmplen;
	/* Resolved through the syscall table so this module need not link
	 * against the socket syscalls directly. */
	int (*sys_socketcall)(int, unsigned long __user *) =
		(int (*)(int, unsigned long __user *))SYS(socketcall);
	int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *);

	SOLD("entry");
	SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
	filp = current->files->fd[fd];
	ino = filp->f_dentry->d_inode;
	sock = (struct sol_socket_struct *)filp->private_data;
	SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL));
	/* A getmsg() with a control part on an idle TCP socket implies the
	 * caller wants connection indications: put the socket into the
	 * listening state first (backlog -1 = kernel default clamp). */
	if ( ctl_maxlen > 0 && !sock->pfirst && SOCKET_I(ino)->type == SOCK_STREAM
	    && sock->state == TS_IDLE) {
		SOLD("calling LISTEN");
		args[0] = fd;
		args[1] = -1;
		set_fs(KERNEL_DS);
		sys_socketcall(SYS_LISTEN, args);
		set_fs(old_fs);
		SOLD("LISTEN done");
	}
	/* Blocking mode: hand-rolled wait loop until either a deliverable
	 * queued control message appears, the socket polls readable, or a
	 * signal arrives.  MSG_HIPRI requests never sleep (see "avoiding
	 * lockup" below). */
	if (!(filp->f_flags & O_NONBLOCK)) {
		struct poll_wqueues wait_table;
		poll_table *wait;

		poll_initwait(&wait_table);
		wait = &wait_table.pt;
		for(;;) {
			SOLD("loop");
			set_current_state(TASK_INTERRUPTIBLE);
			/* ! ( l<0 || ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( ! l<0 && ! ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( ! l>=0 || ! ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( l<0 || ( pfirst && ! (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( l<0 || ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) ) */
			/* ( l>=0 && ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) */
			if (ctl_maxlen >= 0 && sock->pfirst && (*flags_p != MSG_HIPRI || sock->pfirst->pri == MSG_HIPRI))
				break;
			SOLD("cond 1 passed");
			/* First poll() registers us on the waitqueue; the
			 * second (NULL table) re-checks readiness to close
			 * the race with data arriving in between. */
			if (
			#if 1
				*flags_p != MSG_HIPRI &&
			#endif
				((filp->f_op->poll(filp, wait) & POLLIN) ||
				 (filp->f_op->poll(filp, NULL) & POLLIN) ||
				 signal_pending(current))
			) {
				break;
			}
			if( *flags_p == MSG_HIPRI ) {
				SOLD("avoiding lockup");
				break ;
			}
			if(wait_table.error) {
				SOLD("wait-table error");
				poll_freewait(&wait_table);
				return wait_table.error;
			}
			SOLD("scheduling");
			schedule();
		}
		SOLD("loop done");
		current->state = TASK_RUNNING;
		poll_freewait(&wait_table);
		if (signal_pending(current)) {
			SOLD("signal pending");
			return -EINTR;
		}
	}
	/* Case 1: drain a queued control message.  A partial copy returns
	 * MORECTL and leaves the remainder queued at sock->offset. */
	if (ctl_maxlen >= 0 && sock->pfirst) {
		struct T_primsg *it = sock->pfirst;
		int l = min_t(int, ctl_maxlen, it->length);
		SCHECK_MAGIC((char*)((u64)(((char *)&it->type)+sock->offset+it->length+7)&~7),MKCTL_MAGIC);
		SOLD("purting ctl data");
		if(copy_to_user(ctl_buf,
			(char*)&it->type + sock->offset, l))
			return -EFAULT;
		SOLD("pur it");
		if(put_user(l, ctl_len))
			return -EFAULT;
		SOLD("set ctl_len");
		*flags_p = it->pri;
		it->length -= l;
		if (it->length) {
			SOLD("more ctl");
			sock->offset += l;
			return MORECTL;
		} else {
			SOLD("removing message");
			sock->pfirst = it->next;
			if (!sock->pfirst)
				sock->plast = NULL;
			SOLDD(("getmsg kfree %016lx->%016lx\n", it, sock->pfirst));
			mykfree(it);
			sock->offset = 0;
			SOLD("ctl done");
			return 0;
		}
	}
	*flags_p = 0;
	/* Case 2: no queued message - try to synthesize a T_CONN_IND from a
	 * pending connection on a listening socket (non-blocking accept). */
	if (ctl_maxlen >= 0) {
		SOLD("ACCEPT perhaps?");
		if (SOCKET_I(ino)->type == SOCK_STREAM && sock->state == TS_IDLE) {
			struct T_conn_ind ind;
			char *buf = getpage();
			int len = BUF_SIZE;

			SOLD("trying ACCEPT");
			if (put_user(ctl_maxlen - sizeof(ind), ctl_len))
				return -EFAULT;
			args[0] = fd;
			args[1] = (long)buf;
			args[2] = (long)&len;
			oldflags = filp->f_flags;
			/* Force non-blocking so an empty backlog falls
			 * through to the data path instead of sleeping. */
			filp->f_flags |= O_NONBLOCK;
			SOLD("calling ACCEPT");
			set_fs(KERNEL_DS);
			error = sys_socketcall(SYS_ACCEPT, args);
			set_fs(old_fs);
			filp->f_flags = oldflags;
			if (error < 0) {
				SOLD("some error");
				putpage(buf);
				return error;
			}
			if (error) {
				/* accept() succeeded: error is the new fd,
				 * reported to userland as SEQ_number. */
				SOLD("connect");
				putpage(buf);
				/* NOTE(review): this condition looks inverted -
				 * it builds the T_conn_ind only when it does
				 * NOT fit in ctl_maxlen; `sizeof(ind) <=
				 * ctl_maxlen` was probably intended.  Also the
				 * peer address in buf is freed above without
				 * ever being copied out, despite SRC_offset
				 * pointing past the header.  Confirm against
				 * Solaris getmsg semantics before changing. */
				if (sizeof(ind) > ctl_maxlen) {
					SOLD("generating CONN_IND");
					ind.PRIM_type = T_CONN_IND;
					ind.SRC_length = len;
					ind.SRC_offset = sizeof(ind);
					ind.OPT_length = ind.OPT_offset = 0;
					ind.SEQ_number = error;
					if(copy_to_user(ctl_buf, &ind, sizeof(ind))||
					   put_user(sizeof(ind)+ind.SRC_length,ctl_len))
						return -EFAULT;
					SOLD("CONN_IND created");
				}
				if (data_maxlen >= 0)
					put_user(0, data_len);
				SOLD("CONN_IND done");
				return 0;
			}
			if (len>ctl_maxlen) {
				SOLD("data don't fit");
				putpage(buf);
				return -EFAULT;	/* XXX - is this ok ? */
			}
			if(copy_to_user(ctl_buf,buf,len) || put_user(len,ctl_len)){
				SOLD("can't copy data");
				putpage(buf);
				return -EFAULT;
			}
			SOLD("ACCEPT done");
			putpage(buf);
		}
	}
	SOLD("checking data req");
	/* Caller asked for no data: report empty lengths and EAGAIN. */
	if (data_maxlen <= 0) {
		if (data_maxlen == 0)
			put_user(0, data_len);
		if (ctl_maxlen >= 0)
			put_user(0, ctl_len);
		return -EAGAIN;
	}
	SOLD("wants data");
	/* Case 3: real data via recvfrom().  If a T_unitdata_ind header plus
	 * the peer address fit in the control part, reserve room for the
	 * header and let recvfrom write the address right after it.
	 * NOTE(review): `ctl_maxlen > sizeof(udi)` compares int with size_t,
	 * so a negative ctl_maxlen (e.g. -1 for "no control part") converts
	 * to a huge unsigned value and takes this branch with a garbage
	 * ctl_buf - confirm and fix with an explicit (int)sizeof(udi) cast. */
	if (ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("udi fits");
		tmpbuf = ctl_buf + sizeof(udi);
		tmplen = ctl_maxlen - sizeof(udi);
	} else {
		SOLD("udi does not fit");
		tmpbuf = NULL;
		tmplen = 0;
	}
	if (put_user(tmplen, ctl_len))
		return -EFAULT;
	SOLD("set ctl_len");
	oldflags = filp->f_flags;
	/* Non-blocking receive: any waiting already happened in the poll
	 * loop above. */
	filp->f_flags |= O_NONBLOCK;
	SOLD("calling recvfrom");
	sys_recvfrom = (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
	/* ctl_len doubles as the recvfrom addrlen in/out parameter. */
	error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr __user *)tmpbuf, ctl_len);
	filp->f_flags = oldflags;
	if (error < 0)
		return error;
	SOLD("error >= 0" ) ;
	/* NOTE(review): same signed/unsigned comparison hazard as above. */
	if (error && ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("generating udi");
		udi.PRIM_type = T_UNITDATA_IND;
		/* recvfrom stored the address length through ctl_len. */
		if (get_user(udi.SRC_length, ctl_len))
			return -EFAULT;
		udi.SRC_offset = sizeof(udi);
		udi.OPT_length = udi.OPT_offset = 0;
		if (copy_to_user(ctl_buf, &udi, sizeof(udi)) ||
		    put_user(sizeof(udi)+udi.SRC_length, ctl_len))
			return -EFAULT;
		SOLD("udi done");
	} else {
		if (put_user(0, ctl_len))
			return -EFAULT;
	}
	put_user(error, data_len);
	SOLD("done");
	return 0;
}
836
837asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
838{
839 struct file *filp;
840 struct inode *ino;
841 struct strbuf __user *ctlptr;
842 struct strbuf __user *datptr;
843 struct strbuf ctl, dat;
844 int __user *flgptr;
845 int flags;
846 int error = -EBADF;
847
848 SOLD("entry");
849 lock_kernel();
850 if(fd >= NR_OPEN) goto out;
851
852 filp = current->files->fd[fd];
853 if(!filp) goto out;
854
855 ino = filp->f_dentry->d_inode;
856 if (!ino || !S_ISSOCK(ino->i_mode))
857 goto out;
858
859 ctlptr = (struct strbuf __user *)A(arg1);
860 datptr = (struct strbuf __user *)A(arg2);
861 flgptr = (int __user *)A(arg3);
862
863 error = -EFAULT;
864
865 if (ctlptr) {
866 if (copy_from_user(&ctl,ctlptr,sizeof(struct strbuf)) ||
867 put_user(-1,&ctlptr->len))
868 goto out;
869 } else
870 ctl.maxlen = -1;
871
872 if (datptr) {
873 if (copy_from_user(&dat,datptr,sizeof(struct strbuf)) ||
874 put_user(-1,&datptr->len))
875 goto out;
876 } else
877 dat.maxlen = -1;
878
879 if (get_user(flags,flgptr))
880 goto out;
881
882 switch (flags) {
883 case 0:
884 case MSG_HIPRI:
885 case MSG_ANY:
886 case MSG_BAND:
887 break;
888 default:
889 error = -EINVAL;
890 goto out;
891 }
892
893 error = timod_getmsg(fd,A(ctl.buf),ctl.maxlen,&ctlptr->len,
894 A(dat.buf),dat.maxlen,&datptr->len,&flags);
895
896 if (!error && put_user(flags,flgptr))
897 error = -EFAULT;
898out:
899 unlock_kernel();
900 SOLD("done");
901 return error;
902}
903
904asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
905{
906 struct file *filp;
907 struct inode *ino;
908 struct strbuf __user *ctlptr;
909 struct strbuf __user *datptr;
910 struct strbuf ctl, dat;
911 int flags = (int) arg3;
912 int error = -EBADF;
913
914 SOLD("entry");
915 lock_kernel();
916 if(fd >= NR_OPEN) goto out;
917
918 filp = current->files->fd[fd];
919 if(!filp) goto out;
920
921 ino = filp->f_dentry->d_inode;
922 if (!ino) goto out;
923
924 if (!S_ISSOCK(ino->i_mode) &&
925 (imajor(ino) != 30 || iminor(ino) != 1))
926 goto out;
927
928 ctlptr = A(arg1);
929 datptr = A(arg2);
930
931 error = -EFAULT;
932
933 if (ctlptr) {
934 if (copy_from_user(&ctl,ctlptr,sizeof(ctl)))
935 goto out;
936 if (ctl.len < 0 && flags) {
937 error = -EINVAL;
938 goto out;
939 }
940 } else {
941 ctl.len = 0;
942 ctl.buf = 0;
943 }
944
945 if (datptr) {
946 if (copy_from_user(&dat,datptr,sizeof(dat)))
947 goto out;
948 } else {
949 dat.len = 0;
950 dat.buf = 0;
951 }
952
953 error = timod_putmsg(fd,A(ctl.buf),ctl.len,
954 A(dat.buf),dat.len,flags);
955out:
956 unlock_kernel();
957 SOLD("done");
958 return error;
959}