Diffstat (limited to 'arch/s390')
76 files changed, 24818 insertions, 0 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
new file mode 100644
index 000000000000..ab79af84699a
--- /dev/null
+++ b/arch/s390/Kconfig
@@ -0,0 +1,480 @@
1 | # | ||
2 | # For a description of the syntax of this configuration file, | ||
3 | # see Documentation/kbuild/kconfig-language.txt. | ||
4 | # | ||
5 | |||
6 | config MMU | ||
7 | bool | ||
8 | default y | ||
9 | |||
10 | config RWSEM_GENERIC_SPINLOCK | ||
11 | bool | ||
12 | |||
13 | config RWSEM_XCHGADD_ALGORITHM | ||
14 | bool | ||
15 | default y | ||
16 | |||
17 | config GENERIC_CALIBRATE_DELAY | ||
18 | bool | ||
19 | default y | ||
20 | |||
21 | config GENERIC_BUST_SPINLOCK | ||
22 | bool | ||
23 | |||
24 | mainmenu "Linux Kernel Configuration" | ||
25 | |||
26 | config ARCH_S390 | ||
27 | bool | ||
28 | default y | ||
29 | |||
30 | config UID16 | ||
31 | bool | ||
32 | default y | ||
33 | depends on ARCH_S390X = 'n' | ||
34 | |||
35 | source "init/Kconfig" | ||
36 | |||
37 | menu "Base setup" | ||
38 | |||
39 | comment "Processor type and features" | ||
40 | |||
41 | config ARCH_S390X | ||
42 | bool "64 bit kernel" | ||
43 | help | ||
44 | Select this option if you have a 64 bit IBM zSeries machine | ||
45 | and want to use the 64 bit addressing mode. | ||
46 | |||
47 | config 64BIT | ||
48 | def_bool ARCH_S390X | ||
49 | |||
50 | config ARCH_S390_31 | ||
51 | bool | ||
52 | depends on ARCH_S390X = 'n' | ||
53 | default y | ||
54 | |||
55 | config SMP | ||
56 | bool "Symmetric multi-processing support" | ||
57 | ---help--- | ||
58 | This enables support for systems with more than one CPU. If you have | ||
59 | a system with only one CPU, like most personal computers, say N. If | ||
60 | you have a system with more than one CPU, say Y. | ||
61 | |||
62 | If you say N here, the kernel will run on single and multiprocessor | ||
63 | machines, but will use only one CPU of a multiprocessor machine. If | ||
64 | you say Y here, the kernel will run on many, but not all, | ||
65 | singleprocessor machines. On a singleprocessor machine, the kernel | ||
66 | will run faster if you say N here. | ||
67 | |||
68 | See also the <file:Documentation/smp.txt> and the SMP-HOWTO | ||
69 | available at <http://www.tldp.org/docs.html#howto>. | ||
70 | |||
71 | Even if you don't know what to do here, say Y. | ||
72 | |||
73 | config NR_CPUS | ||
74 | int "Maximum number of CPUs (2-64)" | ||
75 | range 2 64 | ||
76 | depends on SMP | ||
77 | default "32" | ||
78 | help | ||
79 | This allows you to specify the maximum number of CPUs which this | ||
80 | kernel will support. The maximum supported value is 64 and the | ||
81 | minimum value which makes sense is 2. | ||
82 | |||
83 | This is purely to save memory - each supported CPU adds | ||
84 | approximately sixteen kilobytes to the kernel image. | ||
85 | |||
86 | config HOTPLUG_CPU | ||
87 | bool "Support for hot-pluggable CPUs" | ||
88 | depends on SMP | ||
89 | select HOTPLUG | ||
90 | default n | ||
91 | help | ||
92 | Say Y here to be able to turn CPUs off and on. CPUs | ||
93 | can be controlled through /sys/devices/system/cpu/cpu#. | ||
94 | Say N if you want to disable CPU hotplug. | ||
95 | |||
96 | config MATHEMU | ||
97 | bool "IEEE FPU emulation" | ||
98 | depends on MARCH_G5 | ||
99 | help | ||
100 | This option is required for IEEE compliant floating point arithmetic | ||
101 | on older S/390 machines. Say Y unless you know your machine doesn't | ||
102 | need this. | ||
103 | |||
104 | config S390_SUPPORT | ||
105 | bool "Kernel support for 31 bit emulation" | ||
106 | depends on ARCH_S390X | ||
107 | help | ||
108 | Select this option if you want to enable your system kernel to | ||
109 | handle system-calls from ELF binaries for 31 bit ESA. This option | ||
110 | (and some other stuff like libraries and such) is needed for | ||
111 | executing 31 bit applications. It is safe to say "Y". | ||
112 | |||
113 | config COMPAT | ||
114 | bool | ||
115 | depends on S390_SUPPORT | ||
116 | default y | ||
117 | |||
118 | config SYSVIPC_COMPAT | ||
119 | bool | ||
120 | depends on COMPAT && SYSVIPC | ||
121 | default y | ||
122 | |||
123 | config BINFMT_ELF32 | ||
124 | tristate "Kernel support for 31 bit ELF binaries" | ||
125 | depends on S390_SUPPORT | ||
126 | help | ||
127 | This allows you to run 32-bit Linux/ELF binaries on your zSeries | ||
128 | in 64 bit mode. Everybody wants this; say Y. | ||
129 | |||
130 | comment "Code generation options" | ||
131 | |||
132 | choice | ||
133 | prompt "Processor type" | ||
134 | default MARCH_G5 | ||
135 | |||
136 | config MARCH_G5 | ||
137 | bool "S/390 model G5 and G6" | ||
138 | depends on ARCH_S390_31 | ||
139 | help | ||
140 | Select this to build a 31 bit kernel that works | ||
141 | on all S/390 and zSeries machines. | ||
142 | |||
143 | config MARCH_Z900 | ||
144 | bool "IBM eServer zSeries model z800 and z900" | ||
145 | help | ||
146 | Select this to optimize for zSeries machines. This | ||
147 | will enable some optimizations that are not available | ||
148 | on older 31 bit only CPUs. | ||
149 | |||
150 | config MARCH_Z990 | ||
151 | bool "IBM eServer zSeries model z890 and z990" | ||
152 | help | ||
153 | Select this to enable optimizations for model z890/z990. | ||
154 | This will be slightly faster but does not work on | ||
155 | older machines such as the z900. | ||
156 | |||
157 | endchoice | ||
158 | |||
159 | config PACK_STACK | ||
160 | bool "Pack kernel stack" | ||
161 | help | ||
162 | This option enables the compiler option -mkernel-backchain if it | ||
163 | is available. If the option is available the compiler supports | ||
164 | the new stack layout which dramatically reduces the minimum stack | ||
165 | frame size. With an old compiler a non-leaf function needs a | ||
166 | minimum of 96 bytes on 31 bit and 160 bytes on 64 bit. With | ||
167 | -mkernel-backchain the minimum size drops to 16 bytes on 31 bit | ||
168 | and 24 bytes on 64 bit. | ||
169 | |||
170 | Say Y if you are unsure. | ||
171 | |||
172 | config SMALL_STACK | ||
173 | bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb" | ||
174 | depends on PACK_STACK | ||
175 | help | ||
176 | If you say Y here and the compiler supports the -mkernel-backchain | ||
177 | option the kernel will use a smaller kernel stack size. For 31 bit | ||
178 | the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb | ||
179 | instead of 16kb. This allows you to run more threads on a system and | ||
180 | reduces the pressure on the memory management for higher order | ||
181 | page allocations. | ||
182 | |||
183 | Say N if you are unsure. | ||
184 | |||
185 | |||
186 | config CHECK_STACK | ||
187 | bool "Detect kernel stack overflow" | ||
188 | help | ||
189 | This option enables the compiler options -mstack-guard and | ||
190 | -mstack-size if they are available. If the compiler supports them | ||
191 | it will emit additional code in each function prologue to trigger | ||
192 | an illegal operation if the kernel stack is about to overflow. | ||
193 | |||
194 | Say N if you are unsure. | ||
195 | |||
196 | config STACK_GUARD | ||
197 | int "Size of the guard area (128-1024)" | ||
198 | range 128 1024 | ||
199 | depends on CHECK_STACK | ||
200 | default "256" | ||
201 | help | ||
202 | This allows you to specify the size of the guard area at the lower | ||
203 | end of the kernel stack. If the kernel stack points into the guard | ||
204 | area on function entry an illegal operation is triggered. The size | ||
205 | needs to be a power of 2. Please keep in mind that the size of an | ||
206 | interrupt frame is 184 bytes for 31 bit and 328 bytes on 64 bit. | ||
207 | The minimum size for the stack guard should be 256 for 31 bit and | ||
208 | 512 for 64 bit. | ||
209 | |||
210 | config WARN_STACK | ||
211 | bool "Emit compiler warnings for function with broken stack usage" | ||
212 | help | ||
213 | This option enables the compiler options -mwarn-framesize and | ||
214 | -mwarn-dynamicstack. If the compiler supports these options it | ||
215 | will generate warnings for functions which either use alloca or | ||
216 | create a stack frame bigger than CONFIG_WARN_STACK_SIZE. | ||
217 | |||
218 | Say N if you are unsure. | ||
219 | |||
220 | config WARN_STACK_SIZE | ||
221 | int "Maximum frame size considered safe (128-2048)" | ||
222 | range 128 2048 | ||
223 | depends on WARN_STACK | ||
224 | default "256" | ||
225 | help | ||
226 | This allows you to specify the maximum frame size a function may | ||
227 | have without the compiler complaining about it. | ||
228 | |||
229 | comment "I/O subsystem configuration" | ||
230 | |||
231 | config MACHCHK_WARNING | ||
232 | bool "Process warning machine checks" | ||
233 | help | ||
234 | Select this option if you want the machine check handler on IBM S/390 or | ||
235 | zSeries to process warning machine checks (e.g. on power failures). | ||
236 | If unsure, say "Y". | ||
237 | |||
238 | config QDIO | ||
239 | tristate "QDIO support" | ||
240 | ---help--- | ||
241 | This driver provides the Queued Direct I/O base support for the | ||
242 | IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990). | ||
243 | |||
244 | For details please refer to the documentation provided by IBM at | ||
245 | <http://www10.software.ibm.com/developerworks/opensource/linux390> | ||
246 | |||
247 | To compile this driver as a module, choose M here: the | ||
248 | module will be called qdio. | ||
249 | |||
250 | If unsure, say Y. | ||
251 | |||
252 | config QDIO_PERF_STATS | ||
253 | bool "Performance statistics in /proc" | ||
254 | depends on QDIO | ||
255 | help | ||
256 | Say Y here to get performance statistics in /proc/qdio_perf | ||
257 | |||
258 | If unsure, say N. | ||
259 | |||
260 | config QDIO_DEBUG | ||
261 | bool "Extended debugging information" | ||
262 | depends on QDIO | ||
263 | help | ||
264 | Say Y here to get extended debugging output in /proc/s390dbf/qdio... | ||
265 | Warning: this option reduces the performance of the QDIO module. | ||
266 | |||
267 | If unsure, say N. | ||
268 | |||
269 | comment "Misc" | ||
270 | |||
271 | config PREEMPT | ||
272 | bool "Preemptible Kernel" | ||
273 | help | ||
274 | This option reduces the latency of the kernel when reacting to | ||
275 | real-time or interactive events by allowing a low priority process to | ||
276 | be preempted even if it is in kernel mode executing a system call. | ||
277 | This allows applications to run more reliably even when the system is | ||
278 | under load. | ||
279 | |||
280 | Say N if you are unsure. | ||
281 | |||
282 | config IPL | ||
283 | bool "Builtin IPL record support" | ||
284 | help | ||
285 | If you want to use the produced kernel to IPL directly from a | ||
286 | device, you have to merge a bootsector specific to the device | ||
287 | into the first bytes of the kernel. You will have to select the | ||
288 | IPL device. | ||
289 | |||
290 | choice | ||
291 | prompt "IPL method generated into head.S" | ||
292 | depends on IPL | ||
293 | default IPL_TAPE | ||
294 | help | ||
295 | Select "tape" if you want to IPL the image from a Tape. | ||
296 | |||
297 | Select "vm_reader" if you are running under VM/ESA and want | ||
298 | to IPL the image from the emulated card reader. | ||
299 | |||
300 | config IPL_TAPE | ||
301 | bool "tape" | ||
302 | |||
303 | config IPL_VM | ||
304 | bool "vm_reader" | ||
305 | |||
306 | endchoice | ||
307 | |||
308 | source "fs/Kconfig.binfmt" | ||
309 | |||
310 | config PROCESS_DEBUG | ||
311 | bool "Show crashed user process info" | ||
312 | help | ||
313 | Say Y to print all process fault locations to the console. This is | ||
314 | a debugging option; you probably do not want to set it unless you | ||
315 | are an S390 port maintainer. | ||
316 | |||
317 | config PFAULT | ||
318 | bool "Pseudo page fault support" | ||
319 | help | ||
320 | Select this option, if you want to use PFAULT pseudo page fault | ||
321 | handling under VM. If running native or in LPAR, this option | ||
322 | has no effect. If your VM does not support PFAULT, PAGEEX | ||
323 | pseudo page fault handling will be used. | ||
324 | Note that VM 4.2 supports PFAULT but has a bug in its | ||
325 | implementation that causes some problems. | ||
326 | Everybody who wants to run Linux under VM != VM4.2 should select | ||
327 | this option. | ||
328 | |||
329 | config SHARED_KERNEL | ||
330 | bool "VM shared kernel support" | ||
331 | help | ||
332 | Select this option, if you want to share the text segment of the | ||
333 | Linux kernel between different VM guests. This reduces memory | ||
334 | usage with lots of guests but greatly increases kernel size. | ||
335 | You should only select this option if you know what you are | ||
336 | doing and want to exploit this feature. | ||
337 | |||
338 | config CMM | ||
339 | tristate "Cooperative memory management" | ||
340 | help | ||
341 | Select this option, if you want to enable the kernel interface | ||
342 | to reduce the memory size of the system. This is accomplished | ||
343 | by allocating pages of memory and putting them "on hold". This only | ||
344 | makes sense for a system running under VM where the unused pages | ||
345 | will be reused by VM for other guest systems. The interface | ||
346 | allows an external monitor to balance memory of many systems. | ||
347 | Everybody who wants to run Linux under VM should select this | ||
348 | option. | ||
349 | |||
350 | config CMM_PROC | ||
351 | bool "/proc interface to cooperative memory management" | ||
352 | depends on CMM | ||
353 | help | ||
354 | Select this option to enable the /proc interface to the | ||
355 | cooperative memory management. | ||
356 | |||
357 | config CMM_IUCV | ||
358 | bool "IUCV special message interface to cooperative memory management" | ||
359 | depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV) | ||
360 | help | ||
361 | Select this option to enable the special message interface to | ||
362 | the cooperative memory management. | ||
363 | |||
364 | config VIRT_TIMER | ||
365 | bool "Virtual CPU timer support" | ||
366 | help | ||
367 | This provides a kernel interface for virtual CPU timers. | ||
368 | Default is disabled. | ||
369 | |||
370 | config VIRT_CPU_ACCOUNTING | ||
371 | bool "Base user process accounting on virtual cpu timer" | ||
372 | depends on VIRT_TIMER | ||
373 | help | ||
374 | Select this option to use CPU timer deltas to do user | ||
375 | process accounting. | ||
376 | |||
377 | config APPLDATA_BASE | ||
378 | bool "Linux - VM Monitor Stream, base infrastructure" | ||
379 | depends on PROC_FS && VIRT_TIMER=y | ||
380 | help | ||
381 | This provides a kernel interface for creating and updating z/VM APPLDATA | ||
382 | monitor records. The monitor records are updated at certain time | ||
383 | intervals, once the timer is started. | ||
384 | Writing 1 or 0 to /proc/appldata/timer starts(1) or stops(0) the timer, | ||
385 | i.e. enables or disables monitoring on the Linux side. | ||
386 | A custom interval value (in seconds) can be written to | ||
387 | /proc/appldata/interval. | ||
388 | |||
389 | Defaults are 60 seconds interval and timer off. | ||
390 | The /proc entries can also be read from, showing the current settings. | ||
391 | |||
392 | config APPLDATA_MEM | ||
393 | tristate "Monitor memory management statistics" | ||
394 | depends on APPLDATA_BASE | ||
395 | help | ||
396 | This provides memory management related data to the Linux - VM Monitor | ||
397 | Stream, like paging/swapping rate, memory utilisation, etc. | ||
398 | Writing 1 or 0 to /proc/appldata/memory creates(1) or removes(0) a z/VM | ||
399 | APPLDATA monitor record, i.e. enables or disables monitoring this record | ||
400 | on the z/VM side. | ||
401 | |||
402 | Default is disabled. | ||
403 | The /proc entry can also be read from, showing the current settings. | ||
404 | |||
405 | This can also be compiled as a module, which will be called | ||
406 | appldata_mem.o. | ||
407 | |||
408 | config APPLDATA_OS | ||
409 | tristate "Monitor OS statistics" | ||
410 | depends on APPLDATA_BASE | ||
411 | help | ||
412 | This provides OS related data to the Linux - VM Monitor Stream, like | ||
413 | CPU utilisation, etc. | ||
414 | Writing 1 or 0 to /proc/appldata/os creates(1) or removes(0) a z/VM | ||
415 | APPLDATA monitor record, i.e. enables or disables monitoring this record | ||
416 | on the z/VM side. | ||
417 | |||
418 | Default is disabled. | ||
419 | This can also be compiled as a module, which will be called | ||
420 | appldata_os.o. | ||
421 | |||
422 | config APPLDATA_NET_SUM | ||
423 | tristate "Monitor overall network statistics" | ||
424 | depends on APPLDATA_BASE | ||
425 | help | ||
426 | This provides network related data to the Linux - VM Monitor Stream, | ||
427 | currently there is only a total sum of network I/O statistics, no | ||
428 | per-interface data. | ||
429 | Writing 1 or 0 to /proc/appldata/net_sum creates(1) or removes(0) a z/VM | ||
430 | APPLDATA monitor record, i.e. enables or disables monitoring this record | ||
431 | on the z/VM side. | ||
432 | |||
433 | Default is disabled. | ||
434 | This can also be compiled as a module, which will be called | ||
435 | appldata_net_sum.o. | ||
436 | |||
437 | config NO_IDLE_HZ | ||
438 | bool "No HZ timer ticks in idle" | ||
439 | help | ||
440 | Switches the regular HZ timer off when the system is going idle. | ||
441 | This helps z/VM to detect that the Linux system is idle. VM can | ||
442 | then "swap-out" this guest which reduces memory usage. It also | ||
443 | reduces the overhead of idle systems. | ||
444 | |||
445 | The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer. | ||
446 | hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ | ||
447 | timer is active. | ||
448 | |||
449 | config NO_IDLE_HZ_INIT | ||
450 | bool "HZ timer in idle off by default" | ||
451 | depends on NO_IDLE_HZ | ||
452 | help | ||
453 | The HZ timer is switched off in idle by default. That means the | ||
454 | HZ timer is already disabled at boot time. | ||
455 | |||
456 | endmenu | ||
457 | |||
458 | config PCMCIA | ||
459 | bool | ||
460 | default n | ||
461 | |||
462 | source "drivers/base/Kconfig" | ||
463 | |||
464 | source "drivers/scsi/Kconfig" | ||
465 | |||
466 | source "drivers/s390/Kconfig" | ||
467 | |||
468 | source "net/Kconfig" | ||
469 | |||
470 | source "fs/Kconfig" | ||
471 | |||
472 | source "arch/s390/oprofile/Kconfig" | ||
473 | |||
474 | source "arch/s390/Kconfig.debug" | ||
475 | |||
476 | source "security/Kconfig" | ||
477 | |||
478 | source "crypto/Kconfig" | ||
479 | |||
480 | source "lib/Kconfig" | ||
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
new file mode 100644
index 000000000000..f53b6d5300e5
--- /dev/null
+++ b/arch/s390/Kconfig.debug
@@ -0,0 +1,5 @@
1 | menu "Kernel hacking" | ||
2 | |||
3 | source "lib/Kconfig.debug" | ||
4 | |||
5 | endmenu | ||
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
new file mode 100644
index 000000000000..3cd8dd25c9d7
--- /dev/null
+++ b/arch/s390/Makefile
@@ -0,0 +1,116 @@
1 | # | ||
2 | # s390/Makefile | ||
3 | # | ||
4 | # This file is included by the global makefile so that you can add your own | ||
5 | # architecture-specific flags and dependencies. Remember to have actions | ||
6 | # for "archclean" and "archdep" for cleaning up and making dependencies for | ||
7 | # this architecture | ||
8 | # | ||
9 | # This file is subject to the terms and conditions of the GNU General Public | ||
10 | # License. See the file "COPYING" in the main directory of this archive | ||
11 | # for more details. | ||
12 | # | ||
13 | # Copyright (C) 1994 by Linus Torvalds | ||
14 | # | ||
15 | |||
16 | ifdef CONFIG_ARCH_S390_31 | ||
17 | LDFLAGS := -m elf_s390 | ||
18 | CFLAGS += -m31 | ||
19 | AFLAGS += -m31 | ||
20 | UTS_MACHINE := s390 | ||
21 | STACK_SIZE := 8192 | ||
22 | endif | ||
23 | |||
24 | ifdef CONFIG_ARCH_S390X | ||
25 | LDFLAGS := -m elf64_s390 | ||
26 | MODFLAGS += -fpic -D__PIC__ | ||
27 | CFLAGS += -m64 | ||
28 | AFLAGS += -m64 | ||
29 | UTS_MACHINE := s390x | ||
30 | STACK_SIZE := 16384 | ||
31 | endif | ||
32 | |||
33 | cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) | ||
34 | cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) | ||
35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) | ||
36 | |||
37 | # old style option for packed stacks | ||
38 | ifeq ($(call cc-option-yn,-mkernel-backchain),y) | ||
39 | cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK | ||
40 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK | ||
41 | cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
42 | aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
43 | ifdef CONFIG_SMALL_STACK | ||
44 | STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) | ||
45 | endif | ||
46 | endif | ||
47 | |||
48 | # new style option for packed stacks | ||
49 | ifeq ($(call cc-option-yn,-mpacked-stack),y) | ||
50 | cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK | ||
51 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK | ||
52 | cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
53 | aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
54 | ifdef CONFIG_SMALL_STACK | ||
55 | STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) | ||
56 | endif | ||
57 | endif | ||
58 | |||
59 | ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) | ||
60 | cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) | ||
61 | cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) | ||
62 | endif | ||
63 | |||
64 | ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) | ||
65 | cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack | ||
66 | cflags-$(CONFIG_WARN_STACK) += -mwarn-framesize=$(CONFIG_WARN_STACK_SIZE) | ||
67 | endif | ||
68 | |||
69 | CFLAGS += -mbackchain -msoft-float $(cflags-y) | ||
70 | CFLAGS += $(call cc-option,-finline-limit=10000) | ||
71 | CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare | ||
72 | AFLAGS += $(aflags-y) | ||
73 | |||
74 | OBJCOPYFLAGS := -O binary | ||
75 | LDFLAGS_vmlinux := -e start | ||
76 | |||
77 | head-$(CONFIG_ARCH_S390_31) += arch/$(ARCH)/kernel/head.o | ||
78 | head-$(CONFIG_ARCH_S390X) += arch/$(ARCH)/kernel/head64.o | ||
79 | head-y += arch/$(ARCH)/kernel/init_task.o | ||
80 | |||
81 | core-y += arch/$(ARCH)/mm/ arch/$(ARCH)/kernel/ arch/$(ARCH)/crypto/ \ | ||
82 | arch/$(ARCH)/appldata/ | ||
83 | libs-y += arch/$(ARCH)/lib/ | ||
84 | drivers-y += drivers/s390/ | ||
85 | drivers-$(CONFIG_MATHEMU) += arch/$(ARCH)/math-emu/ | ||
86 | |||
87 | # must be linked after kernel | ||
88 | drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/ | ||
89 | |||
90 | boot := arch/$(ARCH)/boot | ||
91 | |||
92 | all: image | ||
93 | |||
94 | install: vmlinux | ||
95 | $(Q)$(MAKE) $(build)=$(boot) $@ | ||
96 | |||
97 | image: vmlinux | ||
98 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
99 | |||
100 | archclean: | ||
101 | $(Q)$(MAKE) $(clean)=$(boot) | ||
102 | |||
103 | prepare: include/asm-$(ARCH)/offsets.h | ||
104 | |||
105 | arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \ | ||
106 | include/config/MARKER | ||
107 | |||
108 | include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s | ||
109 | $(call filechk,gen-asm-offsets) | ||
110 | |||
111 | CLEAN_FILES += include/asm-$(ARCH)/offsets.h | ||
112 | |||
113 | # Don't use tabs in echo arguments | ||
114 | define archhelp | ||
115 | echo '* image - Kernel image for IPL ($(boot)/image)' | ||
116 | endef | ||
diff --git a/arch/s390/appldata/Makefile b/arch/s390/appldata/Makefile
new file mode 100644
index 000000000000..99f1cf071304
--- /dev/null
+++ b/arch/s390/appldata/Makefile
@@ -0,0 +1,8 @@
1 | # | ||
2 | # Makefile for the Linux - z/VM Monitor Stream. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_APPLDATA_BASE) += appldata_base.o | ||
6 | obj-$(CONFIG_APPLDATA_MEM) += appldata_mem.o | ||
7 | obj-$(CONFIG_APPLDATA_OS) += appldata_os.o | ||
8 | obj-$(CONFIG_APPLDATA_NET_SUM) += appldata_net_sum.o | ||
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
new file mode 100644
index 000000000000..e806a8922bbb
--- /dev/null
+++ b/arch/s390/appldata/appldata.h
@@ -0,0 +1,59 @@
1 | /* | ||
2 | * arch/s390/appldata/appldata.h | ||
3 | * | ||
4 | * Definitions and interface for Linux - z/VM Monitor Stream. | ||
5 | * | ||
6 | * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH. | ||
7 | * | ||
8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
9 | */ | ||
10 | |||
11 | //#define APPLDATA_DEBUG /* Debug messages on/off */ | ||
12 | |||
13 | #define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ | ||
14 | /* data buffer */ | ||
15 | #define APPLDATA_MAX_PROCS 100 | ||
16 | |||
17 | #define APPLDATA_PROC_NAME_LENGTH 16 /* Max. length of /proc name */ | ||
18 | |||
19 | #define APPLDATA_RECORD_MEM_ID 0x01 /* IDs to identify the */ | ||
20 | #define APPLDATA_RECORD_OS_ID 0x02 /* individual records, */ | ||
21 | #define APPLDATA_RECORD_NET_SUM_ID 0x03 /* must be < 256 ! */ | ||
22 | #define APPLDATA_RECORD_PROC_ID 0x04 | ||
23 | |||
24 | #define CTL_APPLDATA 2120 /* sysctl IDs, must be unique */ | ||
25 | #define CTL_APPLDATA_TIMER 2121 | ||
26 | #define CTL_APPLDATA_INTERVAL 2122 | ||
27 | #define CTL_APPLDATA_MEM 2123 | ||
28 | #define CTL_APPLDATA_OS 2124 | ||
29 | #define CTL_APPLDATA_NET_SUM 2125 | ||
30 | #define CTL_APPLDATA_PROC 2126 | ||
31 | |||
32 | #define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x) | ||
33 | #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) | ||
34 | #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) | ||
35 | |||
36 | #ifdef APPLDATA_DEBUG | ||
37 | #define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x) | ||
38 | #else | ||
39 | #define P_DEBUG(x...) do {} while (0) | ||
40 | #endif | ||
41 | |||
42 | struct appldata_ops { | ||
43 | struct list_head list; | ||
44 | struct ctl_table_header *sysctl_header; | ||
45 | struct ctl_table *ctl_table; | ||
46 | int active; /* monitoring status */ | ||
47 | |||
48 | /* fill in from here */ | ||
49 | unsigned int ctl_nr; /* sysctl ID */ | ||
50 | char name[APPLDATA_PROC_NAME_LENGTH]; /* name of /proc fs node */ | ||
51 | unsigned char record_nr; /* Record Nr. for Product ID */ | ||
52 | void (*callback)(void *data); /* callback function */ | ||
53 | void *data; /* record data */ | ||
54 | unsigned int size; /* size of record */ | ||
55 | struct module *owner; /* THIS_MODULE */ | ||
56 | }; | ||
57 | |||
58 | extern int appldata_register_ops(struct appldata_ops *ops); | ||
59 | extern void appldata_unregister_ops(struct appldata_ops *ops); | ||
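
As a quick illustration of this interface, here is a minimal, hedged sketch of a data-gathering client in the style of appldata_mem.c further below; the record layout, the "example" name and the reuse of the PROC ctl/record IDs are illustrative assumptions, not part of this patch:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/types.h>
	#include "appldata.h"

	/* illustrative record; real records are binary data collected by z/VM */
	struct my_example_data {
		u64 sample_count;
	};
	static struct my_example_data my_example_data;

	/* called from the appldata tasklet each time the sampling timer expires */
	static void my_example_callback(void *data)
	{
		struct my_example_data *rec = data;

		rec->sample_count++;
	}

	static struct appldata_ops my_example_ops = {
		.ctl_nr     = CTL_APPLDATA_PROC,       /* assumed free; must be unique */
		.name       = "example",               /* /proc name, < 16 chars */
		.record_nr  = APPLDATA_RECORD_PROC_ID, /* record ID, must be < 256 */
		.size       = sizeof(struct my_example_data),
		.callback   = &my_example_callback,
		.data       = &my_example_data,
		.owner      = THIS_MODULE,
	};

	static int __init my_example_init(void)
	{
		return appldata_register_ops(&my_example_ops);
	}

	static void __exit my_example_exit(void)
	{
		appldata_unregister_ops(&my_example_ops);
	}

	module_init(my_example_init);
	module_exit(my_example_exit);
	MODULE_LICENSE("GPL");

Once such a module is loaded, writing '1' to the sysctl file created for "example" starts DIAG 0xDC via appldata_generic_handler() in appldata_base.c below.
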
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
new file mode 100644
index 000000000000..01ae1964c938
--- /dev/null
+++ b/arch/s390/appldata/appldata_base.c
@@ -0,0 +1,770 @@
1 | /* | ||
2 | * arch/s390/appldata/appldata_base.c | ||
3 | * | ||
4 | * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1. | ||
5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the | ||
6 | * data gathering modules. | ||
7 | * | ||
8 | * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH. | ||
9 | * | ||
10 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/smp.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/page-flags.h> | ||
24 | #include <linux/swap.h> | ||
25 | #include <linux/pagemap.h> | ||
26 | #include <linux/sysctl.h> | ||
27 | #include <asm/timer.h> | ||
28 | //#include <linux/kernel_stat.h> | ||
29 | #include <linux/notifier.h> | ||
30 | #include <linux/cpu.h> | ||
31 | |||
32 | #include "appldata.h" | ||
33 | |||
34 | |||
35 | #define MY_PRINT_NAME "appldata" /* for debug messages, etc. */ | ||
36 | #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for | ||
37 | sampling interval in | ||
38 | milliseconds */ | ||
39 | |||
40 | #define TOD_MICRO 0x01000 /* nr. of TOD clock units | ||
41 | for 1 microsecond */ | ||
42 | #ifndef CONFIG_ARCH_S390X | ||
43 | |||
44 | #define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ | ||
45 | #define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ | ||
46 | #define APPLDATA_GEN_EVENT_RECORD 0x02 | ||
47 | #define APPLDATA_START_CONFIG_REC 0x03 | ||
48 | |||
49 | #else | ||
50 | |||
51 | #define APPLDATA_START_INTERVAL_REC 0x80 | ||
52 | #define APPLDATA_STOP_REC 0x81 | ||
53 | #define APPLDATA_GEN_EVENT_RECORD 0x82 | ||
54 | #define APPLDATA_START_CONFIG_REC 0x83 | ||
55 | |||
56 | #endif /* CONFIG_ARCH_S390X */ | ||
57 | |||
58 | |||
59 | /* | ||
60 | * Parameter list for DIAGNOSE X'DC' | ||
61 | */ | ||
62 | #ifndef CONFIG_ARCH_S390X | ||
63 | struct appldata_parameter_list { | ||
64 | u16 diag; /* The DIAGNOSE code X'00DC' */ | ||
65 | u8 function; /* The function code for the DIAGNOSE */ | ||
66 | u8 parlist_length; /* Length of the parameter list */ | ||
67 | u32 product_id_addr; /* Address of the 16-byte product ID */ | ||
68 | u16 reserved; | ||
69 | u16 buffer_length; /* Length of the application data buffer */ | ||
70 | u32 buffer_addr; /* Address of the application data buffer */ | ||
71 | }; | ||
72 | #else | ||
73 | struct appldata_parameter_list { | ||
74 | u16 diag; | ||
75 | u8 function; | ||
76 | u8 parlist_length; | ||
77 | u32 unused01; | ||
78 | u16 reserved; | ||
79 | u16 buffer_length; | ||
80 | u32 unused02; | ||
81 | u64 product_id_addr; | ||
82 | u64 buffer_addr; | ||
83 | }; | ||
84 | #endif /* CONFIG_ARCH_S390X */ | ||
85 | |||
86 | /* | ||
87 | * /proc entries (sysctl) | ||
88 | */ | ||
89 | static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; | ||
90 | static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, | ||
91 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
92 | static int appldata_interval_handler(ctl_table *ctl, int write, | ||
93 | struct file *filp, | ||
94 | void __user *buffer, | ||
95 | size_t *lenp, loff_t *ppos); | ||
96 | |||
97 | static struct ctl_table_header *appldata_sysctl_header; | ||
98 | static struct ctl_table appldata_table[] = { | ||
99 | { | ||
100 | .ctl_name = CTL_APPLDATA_TIMER, | ||
101 | .procname = "timer", | ||
102 | .mode = S_IRUGO | S_IWUSR, | ||
103 | .proc_handler = &appldata_timer_handler, | ||
104 | }, | ||
105 | { | ||
106 | .ctl_name = CTL_APPLDATA_INTERVAL, | ||
107 | .procname = "interval", | ||
108 | .mode = S_IRUGO | S_IWUSR, | ||
109 | .proc_handler = &appldata_interval_handler, | ||
110 | }, | ||
111 | { .ctl_name = 0 } | ||
112 | }; | ||
113 | |||
114 | static struct ctl_table appldata_dir_table[] = { | ||
115 | { | ||
116 | .ctl_name = CTL_APPLDATA, | ||
117 | .procname = appldata_proc_name, | ||
118 | .maxlen = 0, | ||
119 | .mode = S_IRUGO | S_IXUGO, | ||
120 | .child = appldata_table, | ||
121 | }, | ||
122 | { .ctl_name = 0 } | ||
123 | }; | ||
124 | |||
125 | /* | ||
126 | * Timer | ||
127 | */ | ||
128 | DEFINE_PER_CPU(struct vtimer_list, appldata_timer); | ||
129 | static atomic_t appldata_expire_count = ATOMIC_INIT(0); | ||
130 | |||
131 | static DEFINE_SPINLOCK(appldata_timer_lock); | ||
132 | static int appldata_interval = APPLDATA_CPU_INTERVAL; | ||
133 | static int appldata_timer_active; | ||
134 | |||
135 | /* | ||
136 | * Tasklet | ||
137 | */ | ||
138 | static struct tasklet_struct appldata_tasklet_struct; | ||
139 | |||
140 | /* | ||
141 | * Ops list | ||
142 | */ | ||
143 | static DEFINE_SPINLOCK(appldata_ops_lock); | ||
144 | static LIST_HEAD(appldata_ops_list); | ||
145 | |||
146 | |||
147 | /************************* timer, tasklet, DIAG ******************************/ | ||
148 | /* | ||
149 | * appldata_timer_function() | ||
150 | * | ||
151 | * schedule tasklet and reschedule timer | ||
152 | */ | ||
153 | static void appldata_timer_function(unsigned long data, struct pt_regs *regs) | ||
154 | { | ||
155 | P_DEBUG(" -= Timer =-\n"); | ||
156 | P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(), | ||
157 | atomic_read(&appldata_expire_count)); | ||
158 | if (atomic_dec_and_test(&appldata_expire_count)) { | ||
159 | atomic_set(&appldata_expire_count, num_online_cpus()); | ||
160 | tasklet_schedule((struct tasklet_struct *) data); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * appldata_tasklet_function() | ||
166 | * | ||
167 | * call data gathering function for each (active) module | ||
168 | */ | ||
169 | static void appldata_tasklet_function(unsigned long data) | ||
170 | { | ||
171 | struct list_head *lh; | ||
172 | struct appldata_ops *ops; | ||
173 | int i; | ||
174 | |||
175 | P_DEBUG(" -= Tasklet =-\n"); | ||
176 | i = 0; | ||
177 | spin_lock(&appldata_ops_lock); | ||
178 | list_for_each(lh, &appldata_ops_list) { | ||
179 | ops = list_entry(lh, struct appldata_ops, list); | ||
180 | P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n", | ||
181 | ++i, ops->active, ops->name); | ||
182 | if (ops->active == 1) { | ||
183 | ops->callback(ops->data); | ||
184 | } | ||
185 | } | ||
186 | spin_unlock(&appldata_ops_lock); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * appldata_diag() | ||
191 | * | ||
192 | * prepare parameter list, issue DIAG 0xDC | ||
193 | */ | ||
194 | static int appldata_diag(char record_nr, u16 function, unsigned long buffer, | ||
195 | u16 length) | ||
196 | { | ||
197 | unsigned long ry; | ||
198 | struct appldata_product_id { | ||
199 | char prod_nr[7]; /* product nr. */ | ||
200 | char prod_fn[2]; /* product function */ | ||
201 | char record_nr; /* record nr. */ | ||
202 | char version_nr[2]; /* version */ | ||
203 | char release_nr[2]; /* release */ | ||
204 | char mod_lvl[2]; /* modification lvl. */ | ||
205 | } appldata_product_id = { | ||
206 | /* all strings are EBCDIC, record_nr is byte */ | ||
207 | .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, | ||
208 | 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ | ||
209 | .prod_fn = {0xD5, 0xD3}, /* "NL" */ | ||
210 | .record_nr = record_nr, | ||
211 | .version_nr = {0xF2, 0xF6}, /* "26" */ | ||
212 | .release_nr = {0xF0, 0xF1}, /* "01" */ | ||
213 | .mod_lvl = {0xF0, 0xF0}, /* "00" */ | ||
214 | }; | ||
215 | struct appldata_parameter_list appldata_parameter_list = { | ||
216 | .diag = 0xDC, | ||
217 | .function = function, | ||
218 | .parlist_length = | ||
219 | sizeof(appldata_parameter_list), | ||
220 | .buffer_length = length, | ||
221 | .product_id_addr = | ||
222 | (unsigned long) &appldata_product_id, | ||
223 | .buffer_addr = virt_to_phys((void *) buffer) | ||
224 | }; | ||
225 | |||
226 | if (!MACHINE_IS_VM) | ||
227 | return -ENOSYS; | ||
228 | ry = -1; | ||
229 | asm volatile( | ||
230 | "diag %1,%0,0xDC\n\t" | ||
231 | : "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc"); | ||
232 | return (int) ry; | ||
233 | } | ||
234 | /********************** timer, tasklet, DIAG <END> ***************************/ | ||
235 | |||
236 | |||
237 | /****************************** /proc stuff **********************************/ | ||
238 | |||
239 | /* | ||
240 | * appldata_mod_vtimer_wrap() | ||
241 | * | ||
242 | * wrapper function for mod_virt_timer(), because smp_call_function_on() | ||
243 | * accepts only one parameter. | ||
244 | */ | ||
245 | static void __appldata_mod_vtimer_wrap(void *p) { | ||
246 | struct { | ||
247 | struct vtimer_list *timer; | ||
248 | u64 expires; | ||
249 | } *args = p; | ||
250 | mod_virt_timer(args->timer, args->expires); | ||
251 | } | ||
252 | |||
253 | #define APPLDATA_ADD_TIMER 0 | ||
254 | #define APPLDATA_DEL_TIMER 1 | ||
255 | #define APPLDATA_MOD_TIMER 2 | ||
256 | |||
257 | /* | ||
258 | * __appldata_vtimer_setup() | ||
259 | * | ||
260 | * Add, delete or modify virtual timers on all online cpus. | ||
261 | * The caller needs to get the appldata_timer_lock spinlock. | ||
262 | */ | ||
263 | static void | ||
264 | __appldata_vtimer_setup(int cmd) | ||
265 | { | ||
266 | u64 per_cpu_interval; | ||
267 | int i; | ||
268 | |||
269 | switch (cmd) { | ||
270 | case APPLDATA_ADD_TIMER: | ||
271 | if (appldata_timer_active) | ||
272 | break; | ||
273 | per_cpu_interval = (u64) (appldata_interval*1000 / | ||
274 | num_online_cpus()) * TOD_MICRO; | ||
275 | for_each_online_cpu(i) { | ||
276 | per_cpu(appldata_timer, i).expires = per_cpu_interval; | ||
277 | smp_call_function_on(add_virt_timer_periodic, | ||
278 | &per_cpu(appldata_timer, i), | ||
279 | 0, 1, i); | ||
280 | } | ||
281 | appldata_timer_active = 1; | ||
282 | P_INFO("Monitoring timer started.\n"); | ||
283 | break; | ||
284 | case APPLDATA_DEL_TIMER: | ||
285 | for_each_online_cpu(i) | ||
286 | del_virt_timer(&per_cpu(appldata_timer, i)); | ||
287 | if (!appldata_timer_active) | ||
288 | break; | ||
289 | appldata_timer_active = 0; | ||
290 | atomic_set(&appldata_expire_count, num_online_cpus()); | ||
291 | P_INFO("Monitoring timer stopped.\n"); | ||
292 | break; | ||
293 | case APPLDATA_MOD_TIMER: | ||
294 | per_cpu_interval = (u64) (appldata_interval*1000 / | ||
295 | num_online_cpus()) * TOD_MICRO; | ||
296 | if (!appldata_timer_active) | ||
297 | break; | ||
298 | for_each_online_cpu(i) { | ||
299 | struct { | ||
300 | struct vtimer_list *timer; | ||
301 | u64 expires; | ||
302 | } args; | ||
303 | args.timer = &per_cpu(appldata_timer, i); | ||
304 | args.expires = per_cpu_interval; | ||
305 | smp_call_function_on(__appldata_mod_vtimer_wrap, | ||
306 | &args, 0, 1, i); | ||
307 | } | ||
308 | } | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * appldata_timer_handler() | ||
313 | * | ||
314 | * Start/Stop timer, show status of timer (0 = not active, 1 = active) | ||
315 | */ | ||
316 | static int | ||
317 | appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, | ||
318 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
319 | { | ||
320 | int len; | ||
321 | char buf[2]; | ||
322 | |||
323 | if (!*lenp || *ppos) { | ||
324 | *lenp = 0; | ||
325 | return 0; | ||
326 | } | ||
327 | if (!write) { | ||
328 | len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); | ||
329 | if (len > *lenp) | ||
330 | len = *lenp; | ||
331 | if (copy_to_user(buffer, buf, len)) | ||
332 | return -EFAULT; | ||
333 | goto out; | ||
334 | } | ||
335 | len = *lenp; | ||
336 | if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) | ||
337 | return -EFAULT; | ||
338 | spin_lock(&appldata_timer_lock); | ||
339 | if (buf[0] == '1') | ||
340 | __appldata_vtimer_setup(APPLDATA_ADD_TIMER); | ||
341 | else if (buf[0] == '0') | ||
342 | __appldata_vtimer_setup(APPLDATA_DEL_TIMER); | ||
343 | spin_unlock(&appldata_timer_lock); | ||
344 | out: | ||
345 | *lenp = len; | ||
346 | *ppos += len; | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * appldata_interval_handler() | ||
352 | * | ||
353 | * Set (CPU) timer interval for collection of data (in milliseconds), show | ||
354 | * current timer interval. | ||
355 | */ | ||
356 | static int | ||
357 | appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, | ||
358 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
359 | { | ||
360 | int len, interval; | ||
361 | char buf[16]; | ||
362 | |||
363 | if (!*lenp || *ppos) { | ||
364 | *lenp = 0; | ||
365 | return 0; | ||
366 | } | ||
367 | if (!write) { | ||
368 | len = sprintf(buf, "%i\n", appldata_interval); | ||
369 | if (len > *lenp) | ||
370 | len = *lenp; | ||
371 | if (copy_to_user(buffer, buf, len)) | ||
372 | return -EFAULT; | ||
373 | goto out; | ||
374 | } | ||
375 | len = *lenp; | ||
376 | if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) { | ||
377 | return -EFAULT; | ||
378 | } | ||
379 | sscanf(buf, "%i", &interval); | ||
380 | if (interval <= 0) { | ||
381 | P_ERROR("Timer CPU interval has to be > 0!\n"); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | |||
385 | spin_lock(&appldata_timer_lock); | ||
386 | appldata_interval = interval; | ||
387 | __appldata_vtimer_setup(APPLDATA_MOD_TIMER); | ||
388 | spin_unlock(&appldata_timer_lock); | ||
389 | |||
390 | P_INFO("Monitoring CPU interval set to %u milliseconds.\n", | ||
391 | interval); | ||
392 | out: | ||
393 | *lenp = len; | ||
394 | *ppos += len; | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * appldata_generic_handler() | ||
400 | * | ||
401 | * Generic start/stop monitoring and DIAG, show status of | ||
402 | * monitoring (0 = not in process, 1 = in process) | ||
403 | */ | ||
404 | static int | ||
405 | appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, | ||
406 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
407 | { | ||
408 | struct appldata_ops *ops = NULL, *tmp_ops; | ||
409 | int rc, len, found; | ||
410 | char buf[2]; | ||
411 | struct list_head *lh; | ||
412 | |||
413 | found = 0; | ||
414 | spin_lock_bh(&appldata_ops_lock); | ||
415 | list_for_each(lh, &appldata_ops_list) { | ||
416 | tmp_ops = list_entry(lh, struct appldata_ops, list); | ||
417 | if (&tmp_ops->ctl_table[2] == ctl) { | ||
418 | found = 1; | ||
419 | } | ||
420 | } | ||
421 | if (!found) { | ||
422 | spin_unlock_bh(&appldata_ops_lock); | ||
423 | return -ENODEV; | ||
424 | } | ||
425 | ops = ctl->data; | ||
426 | if (!try_module_get(ops->owner)) { // protect this function | ||
427 | spin_unlock_bh(&appldata_ops_lock); | ||
428 | return -ENODEV; | ||
429 | } | ||
430 | spin_unlock_bh(&appldata_ops_lock); | ||
431 | |||
432 | if (!*lenp || *ppos) { | ||
433 | *lenp = 0; | ||
434 | module_put(ops->owner); | ||
435 | return 0; | ||
436 | } | ||
437 | if (!write) { | ||
438 | len = sprintf(buf, ops->active ? "1\n" : "0\n"); | ||
439 | if (len > *lenp) | ||
440 | len = *lenp; | ||
441 | if (copy_to_user(buffer, buf, len)) { | ||
442 | module_put(ops->owner); | ||
443 | return -EFAULT; | ||
444 | } | ||
445 | goto out; | ||
446 | } | ||
447 | len = *lenp; | ||
448 | if (copy_from_user(buf, buffer, | ||
449 | len > sizeof(buf) ? sizeof(buf) : len)) { | ||
450 | module_put(ops->owner); | ||
451 | return -EFAULT; | ||
452 | } | ||
453 | |||
454 | spin_lock_bh(&appldata_ops_lock); | ||
455 | if ((buf[0] == '1') && (ops->active == 0)) { | ||
456 | if (!try_module_get(ops->owner)) { // protect tasklet | ||
457 | spin_unlock_bh(&appldata_ops_lock); | ||
458 | module_put(ops->owner); | ||
459 | return -ENODEV; | ||
460 | } | ||
461 | ops->active = 1; | ||
462 | ops->callback(ops->data); // init record | ||
463 | rc = appldata_diag(ops->record_nr, | ||
464 | APPLDATA_START_INTERVAL_REC, | ||
465 | (unsigned long) ops->data, ops->size); | ||
466 | if (rc != 0) { | ||
467 | P_ERROR("START DIAG 0xDC for %s failed, " | ||
468 | "return code: %d\n", ops->name, rc); | ||
469 | module_put(ops->owner); | ||
470 | ops->active = 0; | ||
471 | } else { | ||
472 | P_INFO("Monitoring %s data enabled, " | ||
473 | "DIAG 0xDC started.\n", ops->name); | ||
474 | } | ||
475 | } else if ((buf[0] == '0') && (ops->active == 1)) { | ||
476 | ops->active = 0; | ||
477 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | ||
478 | (unsigned long) ops->data, ops->size); | ||
479 | if (rc != 0) { | ||
480 | P_ERROR("STOP DIAG 0xDC for %s failed, " | ||
481 | "return code: %d\n", ops->name, rc); | ||
482 | } else { | ||
483 | P_INFO("Monitoring %s data disabled, " | ||
484 | "DIAG 0xDC stopped.\n", ops->name); | ||
485 | } | ||
486 | module_put(ops->owner); | ||
487 | } | ||
488 | spin_unlock_bh(&appldata_ops_lock); | ||
489 | out: | ||
490 | *lenp = len; | ||
491 | *ppos += len; | ||
492 | module_put(ops->owner); | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | /*************************** /proc stuff <END> *******************************/ | ||
497 | |||
498 | |||
499 | /************************* module-ops management *****************************/ | ||
500 | /* | ||
501 | * appldata_register_ops() | ||
502 | * | ||
503 | * update ops list, register /proc/sys entries | ||
504 | */ | ||
505 | int appldata_register_ops(struct appldata_ops *ops) | ||
506 | { | ||
507 | struct list_head *lh; | ||
508 | struct appldata_ops *tmp_ops; | ||
509 | int i; | ||
510 | |||
511 | i = 0; | ||
512 | |||
513 | if ((ops->size > APPLDATA_MAX_REC_SIZE) || | ||
514 | (ops->size < 0)){ | ||
515 | P_ERROR("Invalid size of %s record = %i, maximum = %i!\n", | ||
516 | ops->name, ops->size, APPLDATA_MAX_REC_SIZE); | ||
517 | return -ENOMEM; | ||
518 | } | ||
519 | if ((ops->ctl_nr == CTL_APPLDATA) || | ||
520 | (ops->ctl_nr == CTL_APPLDATA_TIMER) || | ||
521 | (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) { | ||
522 | P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr); | ||
523 | return -EBUSY; | ||
524 | } | ||
525 | ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL); | ||
526 | if (ops->ctl_table == NULL) { | ||
527 | P_ERROR("Not enough memory for %s ctl_table!\n", ops->name); | ||
528 | return -ENOMEM; | ||
529 | } | ||
530 | memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table)); | ||
531 | |||
532 | spin_lock_bh(&appldata_ops_lock); | ||
533 | list_for_each(lh, &appldata_ops_list) { | ||
534 | tmp_ops = list_entry(lh, struct appldata_ops, list); | ||
535 | P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n", | ||
536 | ++i, tmp_ops->name, tmp_ops->ctl_nr); | ||
537 | P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n", | ||
538 | tmp_ops->name, tmp_ops->ctl_nr, ops->name, | ||
539 | ops->ctl_nr); | ||
540 | if (strncmp(tmp_ops->name, ops->name, | ||
541 | APPLDATA_PROC_NAME_LENGTH) == 0) { | ||
542 | P_ERROR("Name \"%s\" already registered!\n", ops->name); | ||
543 | kfree(ops->ctl_table); | ||
544 | spin_unlock_bh(&appldata_ops_lock); | ||
545 | return -EBUSY; | ||
546 | } | ||
547 | if (tmp_ops->ctl_nr == ops->ctl_nr) { | ||
548 | P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr); | ||
549 | kfree(ops->ctl_table); | ||
550 | spin_unlock_bh(&appldata_ops_lock); | ||
551 | return -EBUSY; | ||
552 | } | ||
553 | } | ||
554 | list_add(&ops->list, &appldata_ops_list); | ||
555 | spin_unlock_bh(&appldata_ops_lock); | ||
556 | |||
557 | ops->ctl_table[0].ctl_name = CTL_APPLDATA; | ||
558 | ops->ctl_table[0].procname = appldata_proc_name; | ||
559 | ops->ctl_table[0].maxlen = 0; | ||
560 | ops->ctl_table[0].mode = S_IRUGO | S_IXUGO; | ||
561 | ops->ctl_table[0].child = &ops->ctl_table[2]; | ||
562 | |||
563 | ops->ctl_table[1].ctl_name = 0; | ||
564 | |||
565 | ops->ctl_table[2].ctl_name = ops->ctl_nr; | ||
566 | ops->ctl_table[2].procname = ops->name; | ||
567 | ops->ctl_table[2].mode = S_IRUGO | S_IWUSR; | ||
568 | ops->ctl_table[2].proc_handler = appldata_generic_handler; | ||
569 | ops->ctl_table[2].data = ops; | ||
570 | |||
571 | ops->ctl_table[3].ctl_name = 0; | ||
572 | |||
573 | ops->sysctl_header = register_sysctl_table(ops->ctl_table,1); | ||
574 | |||
575 | P_INFO("%s-ops registered!\n", ops->name); | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | /* | ||
580 | * appldata_unregister_ops() | ||
581 | * | ||
582 | * update ops list, unregister /proc entries, stop DIAG if necessary | ||
583 | */ | ||
584 | void appldata_unregister_ops(struct appldata_ops *ops) | ||
585 | { | ||
586 | spin_lock_bh(&appldata_ops_lock); | ||
587 | unregister_sysctl_table(ops->sysctl_header); | ||
588 | list_del(&ops->list); | ||
589 | kfree(ops->ctl_table); | ||
590 | ops->ctl_table = NULL; | ||
591 | spin_unlock_bh(&appldata_ops_lock); | ||
592 | P_INFO("%s-ops unregistered!\n", ops->name); | ||
593 | } | ||
594 | /********************** module-ops management <END> **************************/ | ||
595 | |||
596 | |||
597 | /******************************* init / exit *********************************/ | ||
598 | |||
599 | static void | ||
600 | appldata_online_cpu(int cpu) | ||
601 | { | ||
602 | init_virt_timer(&per_cpu(appldata_timer, cpu)); | ||
603 | per_cpu(appldata_timer, cpu).function = appldata_timer_function; | ||
604 | per_cpu(appldata_timer, cpu).data = (unsigned long) | ||
605 | &appldata_tasklet_struct; | ||
606 | atomic_inc(&appldata_expire_count); | ||
607 | spin_lock(&appldata_timer_lock); | ||
608 | __appldata_vtimer_setup(APPLDATA_MOD_TIMER); | ||
609 | spin_unlock(&appldata_timer_lock); | ||
610 | } | ||
611 | |||
612 | static void | ||
613 | appldata_offline_cpu(int cpu) | ||
614 | { | ||
615 | del_virt_timer(&per_cpu(appldata_timer, cpu)); | ||
616 | if (atomic_dec_and_test(&appldata_expire_count)) { | ||
617 | atomic_set(&appldata_expire_count, num_online_cpus()); | ||
618 | tasklet_schedule(&appldata_tasklet_struct); | ||
619 | } | ||
620 | spin_lock(&appldata_timer_lock); | ||
621 | __appldata_vtimer_setup(APPLDATA_MOD_TIMER); | ||
622 | spin_unlock(&appldata_timer_lock); | ||
623 | } | ||
624 | |||
625 | static int | ||
626 | appldata_cpu_notify(struct notifier_block *self, | ||
627 | unsigned long action, void *hcpu) | ||
628 | { | ||
629 | switch (action) { | ||
630 | case CPU_ONLINE: | ||
631 | appldata_online_cpu((long) hcpu); | ||
632 | break; | ||
633 | #ifdef CONFIG_HOTPLUG_CPU | ||
634 | case CPU_DEAD: | ||
635 | appldata_offline_cpu((long) hcpu); | ||
636 | break; | ||
637 | #endif | ||
638 | default: | ||
639 | break; | ||
640 | } | ||
641 | return NOTIFY_OK; | ||
642 | } | ||
643 | |||
644 | static struct notifier_block __devinitdata appldata_nb = { | ||
645 | .notifier_call = appldata_cpu_notify, | ||
646 | }; | ||
647 | |||
648 | /* | ||
649 | * appldata_init() | ||
650 | * | ||
651 | * init timer and tasklet, register /proc entries | ||
652 | */ | ||
653 | static int __init appldata_init(void) | ||
654 | { | ||
655 | int i; | ||
656 | |||
657 | P_DEBUG("sizeof(parameter_list) = %lu\n", | ||
658 | sizeof(struct appldata_parameter_list)); | ||
659 | |||
660 | for_each_online_cpu(i) | ||
661 | appldata_online_cpu(i); | ||
662 | |||
663 | /* Register cpu hotplug notifier */ | ||
664 | register_cpu_notifier(&appldata_nb); | ||
665 | |||
666 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1); | ||
667 | #ifdef MODULE | ||
668 | appldata_dir_table[0].de->owner = THIS_MODULE; | ||
669 | appldata_table[0].de->owner = THIS_MODULE; | ||
670 | appldata_table[1].de->owner = THIS_MODULE; | ||
671 | #endif | ||
672 | |||
673 | tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0); | ||
674 | P_DEBUG("Base interface initialized.\n"); | ||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * appldata_exit() | ||
680 | * | ||
681 | * stop timer and tasklet, unregister /proc entries | ||
682 | */ | ||
683 | static void __exit appldata_exit(void) | ||
684 | { | ||
685 | struct list_head *lh; | ||
686 | struct appldata_ops *ops; | ||
687 | int rc, i; | ||
688 | |||
689 | P_DEBUG("Unloading module ...\n"); | ||
690 | /* | ||
691 | * ops list should be empty, but just in case something went wrong... | ||
692 | */ | ||
693 | spin_lock_bh(&appldata_ops_lock); | ||
694 | list_for_each(lh, &appldata_ops_list) { | ||
695 | ops = list_entry(lh, struct appldata_ops, list); | ||
696 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | ||
697 | (unsigned long) ops->data, ops->size); | ||
698 | if (rc != 0) { | ||
699 | P_ERROR("STOP DIAG 0xDC for %s failed, " | ||
700 | "return code: %d\n", ops->name, rc); | ||
701 | } | ||
702 | } | ||
703 | spin_unlock_bh(&appldata_ops_lock); | ||
704 | |||
705 | for_each_online_cpu(i) | ||
706 | appldata_offline_cpu(i); | ||
707 | |||
708 | appldata_timer_active = 0; | ||
709 | |||
710 | unregister_sysctl_table(appldata_sysctl_header); | ||
711 | |||
712 | tasklet_kill(&appldata_tasklet_struct); | ||
713 | P_DEBUG("... module unloaded!\n"); | ||
714 | } | ||
715 | /**************************** init / exit <END> ******************************/ | ||
716 | |||
717 | |||
718 | module_init(appldata_init); | ||
719 | module_exit(appldata_exit); | ||
720 | MODULE_LICENSE("GPL"); | ||
721 | MODULE_AUTHOR("Gerald Schaefer"); | ||
722 | MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure"); | ||
723 | |||
724 | EXPORT_SYMBOL_GPL(appldata_register_ops); | ||
725 | EXPORT_SYMBOL_GPL(appldata_unregister_ops); | ||
726 | |||
727 | #ifdef MODULE | ||
728 | /* | ||
729 | * Kernel symbols needed by appldata_mem and appldata_os modules. | ||
730 | * However, if this file is compiled as a module (for testing only), these | ||
731 | * symbols are not exported. In this case, we define them locally and export | ||
732 | * those. | ||
733 | */ | ||
734 | void si_swapinfo(struct sysinfo *val) | ||
735 | { | ||
736 | val->freeswap = -1ul; | ||
737 | val->totalswap = -1ul; | ||
738 | } | ||
739 | |||
740 | unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200, | ||
741 | -1 - FIXED_1/200}; | ||
742 | int nr_threads = -1; | ||
743 | |||
744 | void get_full_page_state(struct page_state *ps) | ||
745 | { | ||
746 | memset(ps, -1, sizeof(struct page_state)); | ||
747 | } | ||
748 | |||
749 | unsigned long nr_running(void) | ||
750 | { | ||
751 | return -1; | ||
752 | } | ||
753 | |||
754 | unsigned long nr_iowait(void) | ||
755 | { | ||
756 | return -1; | ||
757 | } | ||
758 | |||
759 | /*unsigned long nr_context_switches(void) | ||
760 | { | ||
761 | return -1; | ||
762 | }*/ | ||
763 | #endif /* MODULE */ | ||
764 | EXPORT_SYMBOL_GPL(si_swapinfo); | ||
765 | EXPORT_SYMBOL_GPL(nr_threads); | ||
766 | EXPORT_SYMBOL_GPL(avenrun); | ||
767 | EXPORT_SYMBOL_GPL(get_full_page_state); | ||
768 | EXPORT_SYMBOL_GPL(nr_running); | ||
769 | EXPORT_SYMBOL_GPL(nr_iowait); | ||
770 | //EXPORT_SYMBOL_GPL(nr_context_switches); | ||
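
The effect of appldata_timer_handler() and appldata_generic_handler() can be sketched from userspace; the paths below are assumptions (register_sysctl_table() places appldata_dir_table under /proc/sys, so the timer control is assumed to be /proc/sys/appldata/timer, and the per-record "mem" file only appears once appldata_mem is loaded):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* write a single '0'/'1' flag, as the sysctl handlers above expect */
	static void write_flag(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return;
		}
		if (write(fd, val, 1) != 1)
			perror(path);
		close(fd);
	}

	int main(void)
	{
		write_flag("/proc/sys/appldata/timer", "1"); /* start sampling timer */
		write_flag("/proc/sys/appldata/mem", "1");   /* start DIAG 0xDC for the mem record */
		return 0;
	}
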
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
new file mode 100644
index 000000000000..462ee9a84e76
--- /dev/null
+++ b/arch/s390/appldata/appldata_mem.c
@@ -0,0 +1,195 @@
1 | /* | ||
2 | * arch/s390/appldata/appldata_mem.c | ||
3 | * | ||
4 | * Data gathering module for Linux-VM Monitor Stream, Stage 1. | ||
5 | * Collects data related to memory management. | ||
6 | * | ||
7 | * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH. | ||
8 | * | ||
9 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/kernel_stat.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/swap.h> | ||
21 | |||
22 | #include "appldata.h" | ||
23 | |||
24 | |||
25 | #define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */ | ||
26 | #define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */ | ||
27 | |||
28 | /* | ||
29 | * Memory data | ||
30 | * | ||
31 | * This is accessed as binary data by z/VM. If changes to it can't be avoided, | ||
32 | * the structure version (product ID, see appldata_base.c) needs to be changed | ||
33 | * as well and all documentation and z/VM applications using it must be | ||
34 | * updated. | ||
35 | * | ||
36 | * The record layout is documented in the Linux for zSeries Device Drivers | ||
37 | * book: | ||
38 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | ||
39 | */ | ||
40 | struct appldata_mem_data { | ||
41 | u64 timestamp; | ||
42 | u32 sync_count_1; /* after VM collected the record data, */ | ||
43 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | ||
44 | same. If not, the record has been updated on | ||
45 | the Linux side while VM was collecting the | ||
46 | (possibly corrupt) data */ | ||
47 | |||
48 | u64 pgpgin; /* data read from disk */ | ||
49 | u64 pgpgout; /* data written to disk */ | ||
50 | u64 pswpin; /* pages swapped in */ | ||
51 | u64 pswpout; /* pages swapped out */ | ||
52 | |||
53 | u64 sharedram; /* sharedram is currently set to 0 */ | ||
54 | |||
55 | u64 totalram; /* total main memory size */ | ||
56 | u64 freeram; /* free main memory size */ | ||
57 | u64 totalhigh; /* total high memory size */ | ||
58 | u64 freehigh; /* free high memory size */ | ||
59 | |||
60 | u64 bufferram; /* memory reserved for buffers, free cache */ | ||
61 | u64 cached; /* size of (used) cache, w/o buffers */ | ||
62 | u64 totalswap; /* total swap space size */ | ||
63 | u64 freeswap; /* free swap space */ | ||
64 | |||
65 | // New in 2.6 --> | ||
66 | u64 pgalloc; /* page allocations */ | ||
67 | u64 pgfault; /* page faults (major+minor) */ | ||
68 | u64 pgmajfault; /* page faults (major only) */ | ||
69 | // <-- New in 2.6 | ||
70 | |||
71 | } appldata_mem_data; | ||
72 | |||
73 | |||
74 | static inline void appldata_debug_print(struct appldata_mem_data *mem_data) | ||
75 | { | ||
76 | P_DEBUG("--- MEM - RECORD ---\n"); | ||
77 | P_DEBUG("pgpgin = %8lu KB\n", mem_data->pgpgin); | ||
78 | P_DEBUG("pgpgout = %8lu KB\n", mem_data->pgpgout); | ||
79 | P_DEBUG("pswpin = %8lu Pages\n", mem_data->pswpin); | ||
80 | P_DEBUG("pswpout = %8lu Pages\n", mem_data->pswpout); | ||
81 | P_DEBUG("pgalloc = %8lu \n", mem_data->pgalloc); | ||
82 | P_DEBUG("pgfault = %8lu \n", mem_data->pgfault); | ||
83 | P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault); | ||
84 | P_DEBUG("sharedram = %8lu KB\n", mem_data->sharedram); | ||
85 | P_DEBUG("totalram = %8lu KB\n", mem_data->totalram); | ||
86 | P_DEBUG("freeram = %8lu KB\n", mem_data->freeram); | ||
87 | P_DEBUG("totalhigh = %8lu KB\n", mem_data->totalhigh); | ||
88 | P_DEBUG("freehigh = %8lu KB\n", mem_data->freehigh); | ||
89 | P_DEBUG("bufferram = %8lu KB\n", mem_data->bufferram); | ||
90 | P_DEBUG("cached = %8lu KB\n", mem_data->cached); | ||
91 | P_DEBUG("totalswap = %8lu KB\n", mem_data->totalswap); | ||
92 | P_DEBUG("freeswap = %8lu KB\n", mem_data->freeswap); | ||
93 | P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1); | ||
94 | P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2); | ||
95 | P_DEBUG("timestamp = %lX\n", mem_data->timestamp); | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * appldata_get_mem_data() | ||
100 | * | ||
101 | * gather memory data | ||
102 | */ | ||
103 | static void appldata_get_mem_data(void *data) | ||
104 | { | ||
105 | /* | ||
106 | * don't put large structures on the stack, we are | ||
107 | * serialized through the appldata_ops_lock and can use static | ||
108 | */ | ||
109 | static struct sysinfo val; | ||
110 | static struct page_state ps; | ||
111 | struct appldata_mem_data *mem_data; | ||
112 | |||
113 | mem_data = data; | ||
114 | mem_data->sync_count_1++; | ||
115 | |||
116 | get_full_page_state(&ps); | ||
117 | mem_data->pgpgin = ps.pgpgin >> 1; | ||
118 | mem_data->pgpgout = ps.pgpgout >> 1; | ||
119 | mem_data->pswpin = ps.pswpin; | ||
120 | mem_data->pswpout = ps.pswpout; | ||
121 | mem_data->pgalloc = ps.pgalloc_high + ps.pgalloc_normal + | ||
122 | ps.pgalloc_dma; | ||
123 | mem_data->pgfault = ps.pgfault; | ||
124 | mem_data->pgmajfault = ps.pgmajfault; | ||
125 | |||
126 | si_meminfo(&val); | ||
127 | mem_data->sharedram = val.sharedram; | ||
128 | mem_data->totalram = P2K(val.totalram); | ||
129 | mem_data->freeram = P2K(val.freeram); | ||
130 | mem_data->totalhigh = P2K(val.totalhigh); | ||
131 | mem_data->freehigh = P2K(val.freehigh); | ||
132 | mem_data->bufferram = P2K(val.bufferram); | ||
133 | mem_data->cached = P2K(atomic_read(&nr_pagecache) - val.bufferram); | ||
134 | |||
135 | si_swapinfo(&val); | ||
136 | mem_data->totalswap = P2K(val.totalswap); | ||
137 | mem_data->freeswap = P2K(val.freeswap); | ||
138 | |||
139 | mem_data->timestamp = get_clock(); | ||
140 | mem_data->sync_count_2++; | ||
141 | #ifdef APPLDATA_DEBUG | ||
142 | appldata_debug_print(mem_data); | ||
143 | #endif | ||
144 | } | ||
145 | |||
146 | |||
147 | static struct appldata_ops ops = { | ||
148 | .ctl_nr = CTL_APPLDATA_MEM, | ||
149 | .name = "mem", | ||
150 | .record_nr = APPLDATA_RECORD_MEM_ID, | ||
151 | .size = sizeof(struct appldata_mem_data), | ||
152 | .callback = &appldata_get_mem_data, | ||
153 | .data = &appldata_mem_data, | ||
154 | .owner = THIS_MODULE, | ||
155 | }; | ||
156 | |||
157 | |||
158 | /* | ||
159 | * appldata_mem_init() | ||
160 | * | ||
161 | * init data, register ops | ||
162 | */ | ||
163 | static int __init appldata_mem_init(void) | ||
164 | { | ||
165 | int rc; | ||
166 | |||
167 | P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data)); | ||
168 | |||
169 | rc = appldata_register_ops(&ops); | ||
170 | if (rc != 0) { | ||
171 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
172 | } else { | ||
173 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
174 | } | ||
175 | return rc; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * appldata_mem_exit() | ||
180 | * | ||
181 | * unregister ops | ||
182 | */ | ||
183 | static void __exit appldata_mem_exit(void) | ||
184 | { | ||
185 | appldata_unregister_ops(&ops); | ||
186 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
187 | } | ||
188 | |||
189 | |||
190 | module_init(appldata_mem_init); | ||
191 | module_exit(appldata_mem_exit); | ||
192 | |||
193 | MODULE_LICENSE("GPL"); | ||
194 | MODULE_AUTHOR("Gerald Schaefer"); | ||
195 | MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics"); | ||
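
The sync_count_1/sync_count_2 pair in the record above works like a sequence counter: the collector bumps sync_count_1 before filling the record and sync_count_2 afterwards, so a reader that sees the two values differ knows it sampled the record while it was being updated. A minimal, self-contained sketch of how a consumer could validate a snapshot (plain C; the reader-side struct and helper are hypothetical and only mirror the header layout documented above):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative reader-side view of the record header; field order and
     * widths follow struct appldata_mem_data above (assumption: the consumer
     * sees the same binary layout that z/VM does). */
    struct mem_record_hdr {
            uint64_t timestamp;
            uint32_t sync_count_1;
            uint32_t sync_count_2;
            /* ... payload fields follow ... */
    };

    /* Returns 1 if the snapshot is consistent, 0 if it was caught mid-update
     * and should be re-read or discarded. */
    static int snapshot_is_consistent(const void *raw)
    {
            struct mem_record_hdr hdr;

            memcpy(&hdr, raw, sizeof(hdr)); /* copy first, don't alias the live buffer */
            return hdr.sync_count_1 == hdr.sync_count_2;
    }
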
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c new file mode 100644 index 000000000000..dd61638d3027 --- /dev/null +++ b/arch/s390/appldata/appldata_net_sum.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * arch/s390/appldata/appldata_net_sum.c | ||
3 | * | ||
4 | * Data gathering module for Linux-VM Monitor Stream, Stage 1. | ||
5 | * Collects accumulated network statistics (Packets received/transmitted, | ||
6 | * dropped, errors, ...). | ||
7 | * | ||
8 | * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH. | ||
9 | * | ||
10 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/kernel_stat.h> | ||
19 | #include <linux/netdevice.h> | ||
20 | |||
21 | #include "appldata.h" | ||
22 | |||
23 | |||
24 | #define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */ | ||
25 | |||
26 | |||
27 | /* | ||
28 | * Network data | ||
29 | * | ||
30 | * This is accessed as binary data by z/VM. If changes to it can't be avoided, | ||
31 | * the structure version (product ID, see appldata_base.c) needs to be changed | ||
32 | * as well and all documentation and z/VM applications using it must be updated. | ||
33 | * | ||
34 | * The record layout is documented in the Linux for zSeries Device Drivers | ||
35 | * book: | ||
36 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | ||
37 | */ | ||
38 | struct appldata_net_sum_data { | ||
39 | u64 timestamp; | ||
40 | u32 sync_count_1; /* after VM collected the record data, */ | ||
41 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | ||
42 | same. If not, the record has been updated on | ||
43 | the Linux side while VM was collecting the | ||
44 | (possibly corrupt) data */ | ||
45 | |||
46 | u32 nr_interfaces; /* nr. of network interfaces being monitored */ | ||
47 | |||
48 | u32 padding; /* next value is 64-bit aligned, so these */ | ||
49 | /* 4 bytes would otherwise be inserted as padding by the compiler */ | ||
50 | |||
51 | u64 rx_packets; /* total packets received */ | ||
52 | u64 tx_packets; /* total packets transmitted */ | ||
53 | u64 rx_bytes; /* total bytes received */ | ||
54 | u64 tx_bytes; /* total bytes transmitted */ | ||
55 | u64 rx_errors; /* bad packets received */ | ||
56 | u64 tx_errors; /* packet transmit problems */ | ||
57 | u64 rx_dropped; /* no space in linux buffers */ | ||
58 | u64 tx_dropped; /* no space available in linux */ | ||
59 | u64 collisions; /* collisions while transmitting */ | ||
60 | } appldata_net_sum_data; | ||
61 | |||
62 | |||
63 | static inline void appldata_print_debug(struct appldata_net_sum_data *net_data) | ||
64 | { | ||
65 | P_DEBUG("--- NET - RECORD ---\n"); | ||
66 | |||
67 | P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces); | ||
68 | P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets); | ||
69 | P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets); | ||
70 | P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes); | ||
71 | P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes); | ||
72 | P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors); | ||
73 | P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors); | ||
74 | P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped); | ||
75 | P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped); | ||
76 | P_DEBUG("collisions = %8lu\n", net_data->collisions); | ||
77 | |||
78 | P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1); | ||
79 | P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2); | ||
80 | P_DEBUG("timestamp = %lX\n", net_data->timestamp); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * appldata_get_net_sum_data() | ||
85 | * | ||
86 | * gather accumulated network statistics | ||
87 | */ | ||
88 | static void appldata_get_net_sum_data(void *data) | ||
89 | { | ||
90 | int i; | ||
91 | struct appldata_net_sum_data *net_data; | ||
92 | struct net_device *dev; | ||
93 | struct net_device_stats *stats; | ||
94 | unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors, | ||
95 | tx_errors, rx_dropped, tx_dropped, collisions; | ||
96 | |||
97 | net_data = data; | ||
98 | net_data->sync_count_1++; | ||
99 | |||
100 | i = 0; | ||
101 | rx_packets = 0; | ||
102 | tx_packets = 0; | ||
103 | rx_bytes = 0; | ||
104 | tx_bytes = 0; | ||
105 | rx_errors = 0; | ||
106 | tx_errors = 0; | ||
107 | rx_dropped = 0; | ||
108 | tx_dropped = 0; | ||
109 | collisions = 0; | ||
110 | read_lock(&dev_base_lock); | ||
111 | for (dev = dev_base; dev != NULL; dev = dev->next) { | ||
112 | if (dev->get_stats == NULL) { | ||
113 | continue; | ||
114 | } | ||
115 | stats = dev->get_stats(dev); | ||
116 | rx_packets += stats->rx_packets; | ||
117 | tx_packets += stats->tx_packets; | ||
118 | rx_bytes += stats->rx_bytes; | ||
119 | tx_bytes += stats->tx_bytes; | ||
120 | rx_errors += stats->rx_errors; | ||
121 | tx_errors += stats->tx_errors; | ||
122 | rx_dropped += stats->rx_dropped; | ||
123 | tx_dropped += stats->tx_dropped; | ||
124 | collisions += stats->collisions; | ||
125 | i++; | ||
126 | } | ||
127 | read_unlock(&dev_base_lock); | ||
128 | net_data->nr_interfaces = i; | ||
129 | net_data->rx_packets = rx_packets; | ||
130 | net_data->tx_packets = tx_packets; | ||
131 | net_data->rx_bytes = rx_bytes; | ||
132 | net_data->tx_bytes = tx_bytes; | ||
133 | net_data->rx_errors = rx_errors; | ||
134 | net_data->tx_errors = tx_errors; | ||
135 | net_data->rx_dropped = rx_dropped; | ||
136 | net_data->tx_dropped = tx_dropped; | ||
137 | net_data->collisions = collisions; | ||
138 | |||
139 | net_data->timestamp = get_clock(); | ||
140 | net_data->sync_count_2++; | ||
141 | #ifdef APPLDATA_DEBUG | ||
142 | appldata_print_debug(net_data); | ||
143 | #endif | ||
144 | } | ||
145 | |||
146 | |||
147 | static struct appldata_ops ops = { | ||
148 | .ctl_nr = CTL_APPLDATA_NET_SUM, | ||
149 | .name = "net_sum", | ||
150 | .record_nr = APPLDATA_RECORD_NET_SUM_ID, | ||
151 | .size = sizeof(struct appldata_net_sum_data), | ||
152 | .callback = &appldata_get_net_sum_data, | ||
153 | .data = &appldata_net_sum_data, | ||
154 | .owner = THIS_MODULE, | ||
155 | }; | ||
156 | |||
157 | |||
158 | /* | ||
159 | * appldata_net_init() | ||
160 | * | ||
161 | * init data, register ops | ||
162 | */ | ||
163 | static int __init appldata_net_init(void) | ||
164 | { | ||
165 | int rc; | ||
166 | |||
167 | P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data)); | ||
168 | |||
169 | rc = appldata_register_ops(&ops); | ||
170 | if (rc != 0) { | ||
171 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
172 | } else { | ||
173 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
174 | } | ||
175 | return rc; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * appldata_net_exit() | ||
180 | * | ||
181 | * unregister ops | ||
182 | */ | ||
183 | static void __exit appldata_net_exit(void) | ||
184 | { | ||
185 | appldata_unregister_ops(&ops); | ||
186 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
187 | } | ||
188 | |||
189 | |||
190 | module_init(appldata_net_init); | ||
191 | module_exit(appldata_net_exit); | ||
192 | |||
193 | MODULE_LICENSE("GPL"); | ||
194 | MODULE_AUTHOR("Gerald Schaefer"); | ||
195 | MODULE_DESCRIPTION("Linux-VM Monitor Stream, accumulated network statistics"); | ||
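
The explicit padding member in the record above exists because z/VM consumes the record as raw binary: the first u64 counter must start on an 8-byte boundary, and spelling the filler out keeps the documented layout and the compiled layout identical. A small compile-time sketch of the kind of check that pins this down (standalone C11 with static_assert; the struct is a local illustrative copy of the record header, not the kernel definition):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct net_sum_layout {         /* illustrative copy of the record header */
            uint64_t timestamp;
            uint32_t sync_count_1;
            uint32_t sync_count_2;
            uint32_t nr_interfaces;
            uint32_t padding;       /* keeps rx_packets 8-byte aligned */
            uint64_t rx_packets;    /* first 64-bit counter */
    };

    /* If the padding member were dropped, the compiler would insert the same
     * 4 bytes implicitly, but the documented offset of rx_packets would no
     * longer be visible in the source. */
    static_assert(offsetof(struct net_sum_layout, rx_packets) == 24,
                  "rx_packets must start at byte 24 of the record");
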
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c new file mode 100644 index 000000000000..b83f07484551 --- /dev/null +++ b/arch/s390/appldata/appldata_os.c | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * arch/s390/appldata/appldata_os.c | ||
3 | * | ||
4 | * Data gathering module for Linux-VM Monitor Stream, Stage 1. | ||
5 | * Collects misc. OS related data (CPU utilization, running processes). | ||
6 | * | ||
7 | * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH. | ||
8 | * | ||
9 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/kernel_stat.h> | ||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <asm/smp.h> | ||
21 | |||
22 | #include "appldata.h" | ||
23 | |||
24 | |||
25 | #define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */ | ||
26 | #define LOAD_INT(x) ((x) >> FSHIFT) | ||
27 | #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) | ||
28 | |||
29 | /* | ||
30 | * OS data | ||
31 | * | ||
32 | * This is accessed as binary data by z/VM. If changes to it can't be avoided, | ||
33 | * the structure version (product ID, see appldata_base.c) needs to be changed | ||
34 | * as well and all documentation and z/VM applications using it must be | ||
35 | * updated. | ||
36 | * | ||
37 | * The record layout is documented in the Linux for zSeries Device Drivers | ||
38 | * book: | ||
39 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | ||
40 | */ | ||
41 | struct appldata_os_per_cpu { | ||
42 | u32 per_cpu_user; /* timer ticks spent in user mode */ | ||
43 | u32 per_cpu_nice; /* ... spent with modified priority */ | ||
44 | u32 per_cpu_system; /* ... spent in kernel mode */ | ||
45 | u32 per_cpu_idle; /* ... spent in idle mode */ | ||
46 | |||
47 | // New in 2.6 --> | ||
48 | u32 per_cpu_irq; /* ... spent in interrupts */ | ||
49 | u32 per_cpu_softirq; /* ... spent in softirqs */ | ||
50 | u32 per_cpu_iowait; /* ... spent while waiting for I/O */ | ||
51 | // <-- New in 2.6 | ||
52 | }; | ||
53 | |||
54 | struct appldata_os_data { | ||
55 | u64 timestamp; | ||
56 | u32 sync_count_1; /* after VM collected the record data, */ | ||
57 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | ||
58 | same. If not, the record has been updated on | ||
59 | the Linux side while VM was collecting the | ||
60 | (possibly corrupt) data */ | ||
61 | |||
62 | u32 nr_cpus; /* number of (virtual) CPUs */ | ||
63 | u32 per_cpu_size; /* size of the per-cpu data struct */ | ||
64 | u32 cpu_offset; /* offset of the first per-cpu data struct */ | ||
65 | |||
66 | u32 nr_running; /* number of runnable threads */ | ||
67 | u32 nr_threads; /* number of threads */ | ||
68 | u32 avenrun[3]; /* average nr. of running processes during */ | ||
69 | /* the last 1, 5 and 15 minutes */ | ||
70 | |||
71 | // New in 2.6 --> | ||
72 | u32 nr_iowait; /* number of blocked threads | ||
73 | (waiting for I/O) */ | ||
74 | // <-- New in 2.6 | ||
75 | |||
76 | /* per cpu data */ | ||
77 | struct appldata_os_per_cpu os_cpu[0]; | ||
78 | }; | ||
79 | |||
80 | static struct appldata_os_data *appldata_os_data; | ||
81 | |||
82 | |||
83 | static inline void appldata_print_debug(struct appldata_os_data *os_data) | ||
84 | { | ||
85 | int a0, a1, a2, i; | ||
86 | |||
87 | P_DEBUG("--- OS - RECORD ---\n"); | ||
88 | P_DEBUG("nr_threads = %u\n", os_data->nr_threads); | ||
89 | P_DEBUG("nr_running = %u\n", os_data->nr_running); | ||
90 | P_DEBUG("nr_iowait = %u\n", os_data->nr_iowait); | ||
91 | P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0], | ||
92 | os_data->avenrun[1], os_data->avenrun[2]); | ||
93 | a0 = os_data->avenrun[0]; | ||
94 | a1 = os_data->avenrun[1]; | ||
95 | a2 = os_data->avenrun[2]; | ||
96 | P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n", | ||
97 | LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1), | ||
98 | LOAD_INT(a2), LOAD_FRAC(a2)); | ||
99 | |||
100 | P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus); | ||
101 | for (i = 0; i < os_data->nr_cpus; i++) { | ||
102 | P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, " | ||
103 | "idle = %u, irq = %u, softirq = %u, iowait = %u\n", | ||
104 | i, | ||
105 | os_data->os_cpu[i].per_cpu_user, | ||
106 | os_data->os_cpu[i].per_cpu_nice, | ||
107 | os_data->os_cpu[i].per_cpu_system, | ||
108 | os_data->os_cpu[i].per_cpu_idle, | ||
109 | os_data->os_cpu[i].per_cpu_irq, | ||
110 | os_data->os_cpu[i].per_cpu_softirq, | ||
111 | os_data->os_cpu[i].per_cpu_iowait); | ||
112 | } | ||
113 | |||
114 | P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1); | ||
115 | P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2); | ||
116 | P_DEBUG("timestamp = %lX\n", os_data->timestamp); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * appldata_get_os_data() | ||
121 | * | ||
122 | * gather OS data | ||
123 | */ | ||
124 | static void appldata_get_os_data(void *data) | ||
125 | { | ||
126 | int i, j; | ||
127 | struct appldata_os_data *os_data; | ||
128 | |||
129 | os_data = data; | ||
130 | os_data->sync_count_1++; | ||
131 | |||
132 | os_data->nr_cpus = num_online_cpus(); | ||
133 | |||
134 | os_data->nr_threads = nr_threads; | ||
135 | os_data->nr_running = nr_running(); | ||
136 | os_data->nr_iowait = nr_iowait(); | ||
137 | os_data->avenrun[0] = avenrun[0] + (FIXED_1/200); | ||
138 | os_data->avenrun[1] = avenrun[1] + (FIXED_1/200); | ||
139 | os_data->avenrun[2] = avenrun[2] + (FIXED_1/200); | ||
140 | |||
141 | j = 0; | ||
142 | for_each_online_cpu(i) { | ||
143 | os_data->os_cpu[j].per_cpu_user = | ||
144 | kstat_cpu(i).cpustat.user; | ||
145 | os_data->os_cpu[j].per_cpu_nice = | ||
146 | kstat_cpu(i).cpustat.nice; | ||
147 | os_data->os_cpu[j].per_cpu_system = | ||
148 | kstat_cpu(i).cpustat.system; | ||
149 | os_data->os_cpu[j].per_cpu_idle = | ||
150 | kstat_cpu(i).cpustat.idle; | ||
151 | os_data->os_cpu[j].per_cpu_irq = | ||
152 | kstat_cpu(i).cpustat.irq; | ||
153 | os_data->os_cpu[j].per_cpu_softirq = | ||
154 | kstat_cpu(i).cpustat.softirq; | ||
155 | os_data->os_cpu[j].per_cpu_iowait = | ||
156 | kstat_cpu(i).cpustat.iowait; | ||
157 | j++; | ||
158 | } | ||
159 | |||
160 | os_data->timestamp = get_clock(); | ||
161 | os_data->sync_count_2++; | ||
162 | #ifdef APPLDATA_DEBUG | ||
163 | appldata_print_debug(os_data); | ||
164 | #endif | ||
165 | } | ||
166 | |||
167 | |||
168 | static struct appldata_ops ops = { | ||
169 | .ctl_nr = CTL_APPLDATA_OS, | ||
170 | .name = "os", | ||
171 | .record_nr = APPLDATA_RECORD_OS_ID, | ||
172 | .callback = &appldata_get_os_data, | ||
173 | .owner = THIS_MODULE, | ||
174 | }; | ||
175 | |||
176 | |||
177 | /* | ||
178 | * appldata_os_init() | ||
179 | * | ||
180 | * init data, register ops | ||
181 | */ | ||
182 | static int __init appldata_os_init(void) | ||
183 | { | ||
184 | int rc, size; | ||
185 | |||
186 | size = sizeof(struct appldata_os_data) + | ||
187 | (NR_CPUS * sizeof(struct appldata_os_per_cpu)); | ||
188 | if (size > APPLDATA_MAX_REC_SIZE) { | ||
189 | P_ERROR("Size of record = %i, bigger than maximum (%i)!\n", | ||
190 | size, APPLDATA_MAX_REC_SIZE); | ||
191 | rc = -ENOMEM; | ||
192 | goto out; | ||
193 | } | ||
194 | P_DEBUG("sizeof(os) = %i, sizeof(os_cpu) = %lu\n", size, | ||
195 | sizeof(struct appldata_os_per_cpu)); | ||
196 | |||
197 | appldata_os_data = kmalloc(size, GFP_DMA); | ||
198 | if (appldata_os_data == NULL) { | ||
199 | P_ERROR("No memory for %s!\n", ops.name); | ||
200 | rc = -ENOMEM; | ||
201 | goto out; | ||
202 | } | ||
203 | memset(appldata_os_data, 0, size); | ||
204 | |||
205 | appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu); | ||
206 | appldata_os_data->cpu_offset = offsetof(struct appldata_os_data, | ||
207 | os_cpu); | ||
208 | P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset); | ||
209 | |||
210 | ops.data = appldata_os_data; | ||
211 | ops.size = size; | ||
212 | rc = appldata_register_ops(&ops); | ||
213 | if (rc != 0) { | ||
214 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
215 | kfree(appldata_os_data); | ||
216 | } else { | ||
217 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
218 | } | ||
219 | out: | ||
220 | return rc; | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * appldata_os_exit() | ||
225 | * | ||
226 | * unregister ops | ||
227 | */ | ||
228 | static void __exit appldata_os_exit(void) | ||
229 | { | ||
230 | appldata_unregister_ops(&ops); | ||
231 | kfree(appldata_os_data); | ||
232 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
233 | } | ||
234 | |||
235 | |||
236 | module_init(appldata_os_init); | ||
237 | module_exit(appldata_os_exit); | ||
238 | |||
239 | MODULE_LICENSE("GPL"); | ||
240 | MODULE_AUTHOR("Gerald Schaefer"); | ||
241 | MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics"); | ||
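
Because the number of (virtual) CPUs is only known at runtime, the OS record carries per_cpu_size and cpu_offset so that a consumer can walk the trailing appldata_os_per_cpu array without hard-coding the structure layout. An illustrative reader-side walk (plain C; the view struct mirrors appldata_os_per_cpu above, but the helper itself is hypothetical):

    #include <stdint.h>

    struct os_per_cpu_view {        /* mirrors struct appldata_os_per_cpu above */
            uint32_t user, nice, system, idle;
            uint32_t irq, softirq, iowait;
    };

    /* Sum the idle ticks over all CPUs in one OS record, using the offsets
     * stored in the record itself rather than compile-time layout knowledge. */
    static uint64_t total_idle_ticks(const unsigned char *record,
                                     uint32_t nr_cpus,
                                     uint32_t cpu_offset,
                                     uint32_t per_cpu_size)
    {
            uint64_t idle = 0;
            uint32_t i;

            for (i = 0; i < nr_cpus; i++) {
                    const struct os_per_cpu_view *cpu = (const void *)
                            (record + cpu_offset + (uint64_t)i * per_cpu_size);
                    idle += cpu->idle;
            }
            return idle;
    }
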
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile new file mode 100644 index 000000000000..4d97eef36b8d --- /dev/null +++ b/arch/s390/boot/Makefile | |||
@@ -0,0 +1,18 @@ | |||
1 | # | ||
2 | # Makefile for the linux s390-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | COMPILE_VERSION := __linux_compile_version_id__`hostname | \ | ||
6 | tr -c '[0-9A-Za-z]' '_'`__`date | \ | ||
7 | tr -c '[0-9A-Za-z]' '_'`_t | ||
8 | |||
9 | EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. | ||
10 | |||
11 | targets := image | ||
12 | |||
13 | $(obj)/image: vmlinux FORCE | ||
14 | $(call if_changed,objcopy) | ||
15 | |||
16 | install: $(CONFIGURE) $(obj)/image | ||
17 | sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ | ||
18 | System.map Kerntypes "$(INSTALL_PATH)" | ||
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh new file mode 100644 index 000000000000..278a8139cb18 --- /dev/null +++ b/arch/s390/boot/install.sh | |||
@@ -0,0 +1,38 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # arch/s390x/boot/install.sh | ||
4 | # | ||
5 | # This file is subject to the terms and conditions of the GNU General Public | ||
6 | # License. See the file "COPYING" in the main directory of this archive | ||
7 | # for more details. | ||
8 | # | ||
9 | # Copyright (C) 1995 by Linus Torvalds | ||
10 | # | ||
11 | # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin | ||
12 | # | ||
13 | # "make install" script for s390 architecture | ||
14 | # | ||
15 | # Arguments: | ||
16 | # $1 - kernel version | ||
17 | # $2 - kernel image file | ||
18 | # $3 - kernel map file | ||
19 | # $4 - default install path (blank if root directory) | ||
20 | # | ||
21 | |||
22 | # User may have a custom install script | ||
23 | |||
24 | if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi | ||
25 | if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi | ||
26 | |||
27 | # Default install - same as make zlilo | ||
28 | |||
29 | if [ -f $4/vmlinuz ]; then | ||
30 | mv $4/vmlinuz $4/vmlinuz.old | ||
31 | fi | ||
32 | |||
33 | if [ -f $4/System.map ]; then | ||
34 | mv $4/System.map $4/System.old | ||
35 | fi | ||
36 | |||
37 | cat $2 > $4/vmlinuz | ||
38 | cp $3 $4/System.map | ||
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile new file mode 100644 index 000000000000..96a05e6b51e0 --- /dev/null +++ b/arch/s390/crypto/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Cryptographic API | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o | ||
6 | obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o | ||
7 | |||
8 | obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o | ||
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_z990.h new file mode 100644 index 000000000000..4df660b99e5a --- /dev/null +++ b/arch/s390/crypto/crypt_z990.h | |||
@@ -0,0 +1,374 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for z990 cryptographic instructions. | ||
5 | * | ||
6 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | ||
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | */ | ||
15 | #ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H | ||
16 | #define _CRYPTO_ARCH_S390_CRYPT_Z990_H | ||
17 | |||
18 | #include <asm/errno.h> | ||
19 | |||
20 | #define CRYPT_Z990_OP_MASK 0xFF00 | ||
21 | #define CRYPT_Z990_FUNC_MASK 0x00FF | ||
22 | |||
23 | |||
24 | /*z990 cryptographic operations*/ | ||
25 | enum crypt_z990_operations { | ||
26 | CRYPT_Z990_KM = 0x0100, | ||
27 | CRYPT_Z990_KMC = 0x0200, | ||
28 | CRYPT_Z990_KIMD = 0x0300, | ||
29 | CRYPT_Z990_KLMD = 0x0400, | ||
30 | CRYPT_Z990_KMAC = 0x0500 | ||
31 | }; | ||
32 | |||
33 | /*function codes for KM (CIPHER MESSAGE) instruction*/ | ||
34 | enum crypt_z990_km_func { | ||
35 | KM_QUERY = CRYPT_Z990_KM | 0, | ||
36 | KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1, | ||
37 | KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher | ||
38 | KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2, | ||
39 | KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80, | ||
40 | KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3, | ||
41 | KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80, | ||
42 | }; | ||
43 | |||
44 | /*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/ | ||
45 | enum crypt_z990_kmc_func { | ||
46 | KMC_QUERY = CRYPT_Z990_KMC | 0, | ||
47 | KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1, | ||
48 | KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher | ||
49 | KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2, | ||
50 | KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80, | ||
51 | KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3, | ||
52 | KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80, | ||
53 | }; | ||
54 | |||
55 | /*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/ | ||
56 | enum crypt_z990_kimd_func { | ||
57 | KIMD_QUERY = CRYPT_Z990_KIMD | 0, | ||
58 | KIMD_SHA_1 = CRYPT_Z990_KIMD | 1, | ||
59 | }; | ||
60 | |||
61 | /*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/ | ||
62 | enum crypt_z990_klmd_func { | ||
63 | KLMD_QUERY = CRYPT_Z990_KLMD | 0, | ||
64 | KLMD_SHA_1 = CRYPT_Z990_KLMD | 1, | ||
65 | }; | ||
66 | |||
67 | /*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/ | ||
68 | enum crypt_z990_kmac_func { | ||
69 | KMAC_QUERY = CRYPT_Z990_KMAC | 0, | ||
70 | KMAC_DEA = CRYPT_Z990_KMAC | 1, | ||
71 | KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2, | ||
72 | KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3 | ||
73 | }; | ||
74 | |||
75 | /*status word for z990 crypto instructions' QUERY functions*/ | ||
76 | struct crypt_z990_query_status { | ||
77 | u64 high; | ||
78 | u64 low; | ||
79 | }; | ||
80 | |||
81 | /* | ||
82 | * Standard fixup and ex_table sections for crypt_z990 inline functions. | ||
83 | * label 0: the z990 crypto operation | ||
84 | * label 1: just after the operation (label 0), to catch the illegal operation exception on non-z990 | ||
85 | * label 6: the return point after fixup | ||
86 | * label 7: set error value if exception _in_ crypto operation | ||
87 | * label 8: set error value if illegal operation exception | ||
88 | * [ret] is the variable to receive the error code | ||
89 | * [ERR] is the error code value | ||
90 | */ | ||
91 | #ifndef __s390x__ | ||
92 | #define __crypt_z990_fixup \ | ||
93 | ".section .fixup,\"ax\" \n" \ | ||
94 | "7: lhi %0,%h[e1] \n" \ | ||
95 | " bras 1,9f \n" \ | ||
96 | " .long 6b \n" \ | ||
97 | "8: lhi %0,%h[e2] \n" \ | ||
98 | " bras 1,9f \n" \ | ||
99 | " .long 6b \n" \ | ||
100 | "9: l 1,0(1) \n" \ | ||
101 | " br 1 \n" \ | ||
102 | ".previous \n" \ | ||
103 | ".section __ex_table,\"a\" \n" \ | ||
104 | " .align 4 \n" \ | ||
105 | " .long 0b,7b \n" \ | ||
106 | " .long 1b,8b \n" \ | ||
107 | ".previous" | ||
108 | #else /* __s390x__ */ | ||
109 | #define __crypt_z990_fixup \ | ||
110 | ".section .fixup,\"ax\" \n" \ | ||
111 | "7: lhi %0,%h[e1] \n" \ | ||
112 | " jg 6b \n" \ | ||
113 | "8: lhi %0,%h[e2] \n" \ | ||
114 | " jg 6b \n" \ | ||
115 | ".previous\n" \ | ||
116 | ".section __ex_table,\"a\" \n" \ | ||
117 | " .align 8 \n" \ | ||
118 | " .quad 0b,7b \n" \ | ||
119 | " .quad 1b,8b \n" \ | ||
120 | ".previous" | ||
121 | #endif /* __s390x__ */ | ||
122 | |||
123 | /* | ||
124 | * Standard code for setting the result of z990 crypto instructions. | ||
125 | * %0: the register which will receive the result | ||
126 | * [result]: the register containing the result (e.g. the remaining second | ||
127 | * operand length, used to compute the number of processed bytes). | ||
128 | */ | ||
129 | #ifndef __s390x__ | ||
130 | #define __crypt_z990_set_result \ | ||
131 | " lr %0,%[result] \n" | ||
132 | #else /* __s390x__ */ | ||
133 | #define __crypt_z990_set_result \ | ||
134 | " lgr %0,%[result] \n" | ||
135 | #endif | ||
136 | |||
137 | /* | ||
138 | * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU. | ||
139 | * @param func: the function code passed to KM; see crypt_z990_km_func | ||
140 | * @param param: address of parameter block; see POP for details on each func | ||
141 | * @param dest: address of destination memory area | ||
142 | * @param src: address of source memory area | ||
143 | * @param src_len: length of src operand in bytes | ||
144 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
145 | * for encryption/decryption funcs | ||
146 | */ | ||
147 | static inline int | ||
148 | crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) | ||
149 | { | ||
150 | register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; | ||
151 | register void* __param asm("1") = param; | ||
152 | register u8* __dest asm("4") = dest; | ||
153 | register const u8* __src asm("2") = src; | ||
154 | register long __src_len asm("3") = src_len; | ||
155 | int ret; | ||
156 | |||
157 | ret = 0; | ||
158 | __asm__ __volatile__ ( | ||
159 | "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode | ||
160 | "1: brc 1,0b \n" //handle partial completion | ||
161 | __crypt_z990_set_result | ||
162 | "6: \n" | ||
163 | __crypt_z990_fixup | ||
164 | : "+d" (ret), "+a" (__dest), "+a" (__src), | ||
165 | [result] "+d" (__src_len) | ||
166 | : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), | ||
167 | "a" (__param) | ||
168 | : "cc", "memory" | ||
169 | ); | ||
170 | if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ | ||
171 | ret = src_len - ret; | ||
172 | } | ||
173 | return ret; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU. | ||
178 | * @param func: the function code passed to KM; see crypt_z990_kmc_func | ||
179 | * @param param: address of parameter block; see POP for details on each func | ||
180 | * @param dest: address of destination memory area | ||
181 | * @param src: address of source memory area | ||
182 | * @param src_len: length of src operand in bytes | ||
183 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
184 | * for encryption/decryption funcs | ||
185 | */ | ||
186 | static inline int | ||
187 | crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) | ||
188 | { | ||
189 | register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; | ||
190 | register void* __param asm("1") = param; | ||
191 | register u8* __dest asm("4") = dest; | ||
192 | register const u8* __src asm("2") = src; | ||
193 | register long __src_len asm("3") = src_len; | ||
194 | int ret; | ||
195 | |||
196 | ret = 0; | ||
197 | __asm__ __volatile__ ( | ||
198 | "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode | ||
199 | "1: brc 1,0b \n" //handle partial completion | ||
200 | __crypt_z990_set_result | ||
201 | "6: \n" | ||
202 | __crypt_z990_fixup | ||
203 | : "+d" (ret), "+a" (__dest), "+a" (__src), | ||
204 | [result] "+d" (__src_len) | ||
205 | : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), | ||
206 | "a" (__param) | ||
207 | : "cc", "memory" | ||
208 | ); | ||
209 | if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ | ||
210 | ret = src_len - ret; | ||
211 | } | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation | ||
217 | * of the z990 CPU. | ||
218 | * @param func: the function code passed to KM; see crypt_z990_kimd_func | ||
219 | * @param param: address of parameter block; see POP for details on each func | ||
220 | * @param src: address of source memory area | ||
221 | * @param src_len: length of src operand in bytes | ||
222 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
223 | * for digest funcs | ||
224 | */ | ||
225 | static inline int | ||
226 | crypt_z990_kimd(long func, void* param, const u8* src, long src_len) | ||
227 | { | ||
228 | register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; | ||
229 | register void* __param asm("1") = param; | ||
230 | register const u8* __src asm("2") = src; | ||
231 | register long __src_len asm("3") = src_len; | ||
232 | int ret; | ||
233 | |||
234 | ret = 0; | ||
235 | __asm__ __volatile__ ( | ||
236 | "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode | ||
237 | "1: brc 1,0b \n" /*handle partical completion of kimd*/ | ||
238 | __crypt_z990_set_result | ||
239 | "6: \n" | ||
240 | __crypt_z990_fixup | ||
241 | : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) | ||
242 | : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), | ||
243 | "a" (__param) | ||
244 | : "cc", "memory" | ||
245 | ); | ||
246 | if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){ | ||
247 | ret = src_len - ret; | ||
248 | } | ||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU. | ||
254 | * @param func: the function code passed to KM; see crypt_z990_klmd_func | ||
255 | * @param param: address of parameter block; see POP for details on each func | ||
256 | * @param src: address of source memory area | ||
257 | * @param src_len: length of src operand in bytes | ||
258 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
259 | * for digest funcs | ||
260 | */ | ||
261 | static inline int | ||
262 | crypt_z990_klmd(long func, void* param, const u8* src, long src_len) | ||
263 | { | ||
264 | register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; | ||
265 | register void* __param asm("1") = param; | ||
266 | register const u8* __src asm("2") = src; | ||
267 | register long __src_len asm("3") = src_len; | ||
268 | int ret; | ||
269 | |||
270 | ret = 0; | ||
271 | __asm__ __volatile__ ( | ||
272 | "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode | ||
273 | "1: brc 1,0b \n" /*handle partical completion of klmd*/ | ||
274 | __crypt_z990_set_result | ||
275 | "6: \n" | ||
276 | __crypt_z990_fixup | ||
277 | : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) | ||
278 | : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), | ||
279 | "a" (__param) | ||
280 | : "cc", "memory" | ||
281 | ); | ||
282 | if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ | ||
283 | ret = src_len - ret; | ||
284 | } | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation | ||
290 | * of the z990 CPU. | ||
291 | * @param func: the function code passed to KM; see crypt_z990_klmd_func | ||
292 | * @param param: address of parameter block; see POP for details on each func | ||
293 | * @param src: address of source memory area | ||
294 | * @param src_len: length of src operand in bytes | ||
295 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
296 | * for digest funcs | ||
297 | */ | ||
298 | static inline int | ||
299 | crypt_z990_kmac(long func, void* param, const u8* src, long src_len) | ||
300 | { | ||
301 | register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; | ||
302 | register void* __param asm("1") = param; | ||
303 | register const u8* __src asm("2") = src; | ||
304 | register long __src_len asm("3") = src_len; | ||
305 | int ret; | ||
306 | |||
307 | ret = 0; | ||
308 | __asm__ __volatile__ ( | ||
309 | "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode | ||
310 | "1: brc 1,0b \n" /*handle partical completion of klmd*/ | ||
311 | __crypt_z990_set_result | ||
312 | "6: \n" | ||
313 | __crypt_z990_fixup | ||
314 | : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) | ||
315 | : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), | ||
316 | "a" (__param) | ||
317 | : "cc", "memory" | ||
318 | ); | ||
319 | if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ | ||
320 | ret = src_len - ret; | ||
321 | } | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * Tests if a specific z990 crypto function is implemented on the machine. | ||
327 | * @param func: the function code of the specific function; pass 0 to test the operation in general | ||
328 | * @return 1 if func available; 0 if func or op in general not available | ||
329 | */ | ||
330 | static inline int | ||
331 | crypt_z990_func_available(int func) | ||
332 | { | ||
333 | int ret; | ||
334 | |||
335 | struct crypt_z990_query_status status = { | ||
336 | .high = 0, | ||
337 | .low = 0 | ||
338 | }; | ||
339 | switch (func & CRYPT_Z990_OP_MASK){ | ||
340 | case CRYPT_Z990_KM: | ||
341 | ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); | ||
342 | break; | ||
343 | case CRYPT_Z990_KMC: | ||
344 | ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); | ||
345 | break; | ||
346 | case CRYPT_Z990_KIMD: | ||
347 | ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); | ||
348 | break; | ||
349 | case CRYPT_Z990_KLMD: | ||
350 | ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); | ||
351 | break; | ||
352 | case CRYPT_Z990_KMAC: | ||
353 | ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); | ||
354 | break; | ||
355 | default: | ||
356 | ret = 0; | ||
357 | return ret; | ||
358 | } | ||
359 | if (ret >= 0){ | ||
360 | func &= CRYPT_Z990_FUNC_MASK; | ||
361 | func &= 0x7f; //mask modifier bit | ||
362 | if (func < 64){ | ||
363 | ret = (status.high >> (64 - func - 1)) & 0x1; | ||
364 | } else { | ||
365 | ret = (status.low >> (128 - func - 1)) & 0x1; | ||
366 | } | ||
367 | } else { | ||
368 | ret = 0; | ||
369 | } | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | |||
374 | #endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H | ||
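
The QUERY functions above return a 128-bit status word in which bit n, counted from the leftmost bit, is set when function code n is installed; crypt_z990_func_available() masks off the modifier bit and then indexes into status.high or status.low accordingly. A standalone illustration of that bit arithmetic (the helper name is made up for the example; only the indexing mirrors the code above):

    #include <stdint.h>

    /* Test bit 'func' (0..127, counted from the most significant bit of the
     * 128-bit status word) in a query result, the same way
     * crypt_z990_func_available() does after masking the 0x80 modifier bit. */
    static int query_bit_set(uint64_t high, uint64_t low, unsigned int func)
    {
            func &= 0x7f;                   /* drop the decipher modifier bit */
            if (func < 64)
                    return (high >> (64 - func - 1)) & 0x1;
            return (low >> (128 - func - 1)) & 0x1;
    }

    /* Example: for KM_DEA_ENCRYPT the function code is 1, so the second bit
     * from the left of the high word reports DEA availability. */
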
diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c new file mode 100644 index 000000000000..7133983d1384 --- /dev/null +++ b/arch/s390/crypto/crypt_z990_query.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for z990 cryptographic instructions. | ||
5 | * Testing module for querying processor crypto capabilities. | ||
6 | * | ||
7 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
8 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include "crypt_z990.h" | ||
21 | |||
22 | static void | ||
23 | query_available_functions(void) | ||
24 | { | ||
25 | printk(KERN_INFO "#####################\n"); | ||
26 | //query available KM functions | ||
27 | printk(KERN_INFO "KM_QUERY: %d\n", | ||
28 | crypt_z990_func_available(KM_QUERY)); | ||
29 | printk(KERN_INFO "KM_DEA: %d\n", | ||
30 | crypt_z990_func_available(KM_DEA_ENCRYPT)); | ||
31 | printk(KERN_INFO "KM_TDEA_128: %d\n", | ||
32 | crypt_z990_func_available(KM_TDEA_128_ENCRYPT)); | ||
33 | printk(KERN_INFO "KM_TDEA_192: %d\n", | ||
34 | crypt_z990_func_available(KM_TDEA_192_ENCRYPT)); | ||
35 | //query available KMC functions | ||
36 | printk(KERN_INFO "KMC_QUERY: %d\n", | ||
37 | crypt_z990_func_available(KMC_QUERY)); | ||
38 | printk(KERN_INFO "KMC_DEA: %d\n", | ||
39 | crypt_z990_func_available(KMC_DEA_ENCRYPT)); | ||
40 | printk(KERN_INFO "KMC_TDEA_128: %d\n", | ||
41 | crypt_z990_func_available(KMC_TDEA_128_ENCRYPT)); | ||
42 | printk(KERN_INFO "KMC_TDEA_192: %d\n", | ||
43 | crypt_z990_func_available(KMC_TDEA_192_ENCRYPT)); | ||
44 | //query available KIMD functions | ||
45 | printk(KERN_INFO "KIMD_QUERY: %d\n", | ||
46 | crypt_z990_func_available(KIMD_QUERY)); | ||
47 | printk(KERN_INFO "KIMD_SHA_1: %d\n", | ||
48 | crypt_z990_func_available(KIMD_SHA_1)); | ||
49 | //query available KLMD functions | ||
50 | printk(KERN_INFO "KLMD_QUERY: %d\n", | ||
51 | crypt_z990_func_available(KLMD_QUERY)); | ||
52 | printk(KERN_INFO "KLMD_SHA_1: %d\n", | ||
53 | crypt_z990_func_available(KLMD_SHA_1)); | ||
54 | //query available KMAC functions | ||
55 | printk(KERN_INFO "KMAC_QUERY: %d\n", | ||
56 | crypt_z990_func_available(KMAC_QUERY)); | ||
57 | printk(KERN_INFO "KMAC_DEA: %d\n", | ||
58 | crypt_z990_func_available(KMAC_DEA)); | ||
59 | printk(KERN_INFO "KMAC_TDEA_128: %d\n", | ||
60 | crypt_z990_func_available(KMAC_TDEA_128)); | ||
61 | printk(KERN_INFO "KMAC_TDEA_192: %d\n", | ||
62 | crypt_z990_func_available(KMAC_TDEA_192)); | ||
63 | } | ||
64 | |||
65 | static int | ||
66 | init(void) | ||
67 | { | ||
68 | struct crypt_z990_query_status status = { | ||
69 | .high = 0, | ||
70 | .low = 0 | ||
71 | }; | ||
72 | |||
73 | printk(KERN_INFO "crypt_z990: querying available crypto functions\n"); | ||
74 | crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); | ||
75 | printk(KERN_INFO "KM: %016llx %016llx\n", | ||
76 | (unsigned long long) status.high, | ||
77 | (unsigned long long) status.low); | ||
78 | status.high = status.low = 0; | ||
79 | crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); | ||
80 | printk(KERN_INFO "KMC: %016llx %016llx\n", | ||
81 | (unsigned long long) status.high, | ||
82 | (unsigned long long) status.low); | ||
83 | status.high = status.low = 0; | ||
84 | crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); | ||
85 | printk(KERN_INFO "KIMD: %016llx %016llx\n", | ||
86 | (unsigned long long) status.high, | ||
87 | (unsigned long long) status.low); | ||
88 | status.high = status.low = 0; | ||
89 | crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); | ||
90 | printk(KERN_INFO "KLMD: %016llx %016llx\n", | ||
91 | (unsigned long long) status.high, | ||
92 | (unsigned long long) status.low); | ||
93 | status.high = status.low = 0; | ||
94 | crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); | ||
95 | printk(KERN_INFO "KMAC: %016llx %016llx\n", | ||
96 | (unsigned long long) status.high, | ||
97 | (unsigned long long) status.low); | ||
98 | |||
99 | query_available_functions(); | ||
100 | return -1; /* intentional: query-only module, never stays loaded */ | ||
101 | } | ||
102 | |||
103 | static void __exit | ||
104 | cleanup(void) | ||
105 | { | ||
106 | } | ||
107 | |||
108 | module_init(init); | ||
109 | module_exit(cleanup); | ||
110 | |||
111 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/s390/crypto/crypto_des.h b/arch/s390/crypto/crypto_des.h new file mode 100644 index 000000000000..c964b64111dd --- /dev/null +++ b/arch/s390/crypto/crypto_des.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Function for checking keys for the DES and Triple DES Encryption | ||
5 | * algorithms. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef __CRYPTO_DES_H__ | ||
14 | #define __CRYPTO_DES_H__ | ||
15 | |||
16 | extern int crypto_des_check_key(const u8*, unsigned int, u32*); | ||
17 | |||
18 | #endif //__CRYPTO_DES_H__ | ||
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c new file mode 100644 index 000000000000..e3f5c5f238fe --- /dev/null +++ b/arch/s390/crypto/des_check_key.c | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Function for checking keys for the DES and Triple DES Encryption | ||
5 | * algorithms. | ||
6 | * | ||
7 | * Originally released as descore by Dana L. How <how@isl.stanford.edu>. | ||
8 | * Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel. | ||
9 | * Derived from Cryptoapi and Nettle implementations, adapted for in-place | ||
10 | * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. | ||
11 | * | ||
12 | * s390 Version: | ||
13 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | ||
14 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
15 | * | ||
16 | * Derived from "crypto/des.c" | ||
17 | * Copyright (c) 1992 Dana L. How. | ||
18 | * Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de> | ||
19 | * Copyright (c) Gisle Sælensminde <gisle@ii.uib.no> | ||
20 | * Copyright (C) 2001 Niels Möller. | ||
21 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | ||
22 | * | ||
23 | * This program is free software; you can redistribute it and/or modify | ||
24 | * it under the terms of the GNU General Public License as published by | ||
25 | * the Free Software Foundation; either version 2 of the License, or | ||
26 | * (at your option) any later version. | ||
27 | * | ||
28 | */ | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/crypto.h> | ||
33 | |||
34 | #define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) | ||
35 | |||
36 | static const u8 parity[] = { | ||
37 | 8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3, | ||
38 | 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8, | ||
39 | 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8, | ||
40 | 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0, | ||
41 | 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8, | ||
42 | 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0, | ||
43 | 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0, | ||
44 | 4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8, | ||
45 | }; | ||
46 | |||
47 | /* | ||
48 | * RFC2451: Weak key checks SHOULD be performed. | ||
49 | */ | ||
50 | int | ||
51 | crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags) | ||
52 | { | ||
53 | u32 n, w; | ||
54 | |||
55 | n = parity[key[0]]; n <<= 4; | ||
56 | n |= parity[key[1]]; n <<= 4; | ||
57 | n |= parity[key[2]]; n <<= 4; | ||
58 | n |= parity[key[3]]; n <<= 4; | ||
59 | n |= parity[key[4]]; n <<= 4; | ||
60 | n |= parity[key[5]]; n <<= 4; | ||
61 | n |= parity[key[6]]; n <<= 4; | ||
62 | n |= parity[key[7]]; | ||
63 | w = 0x88888888L; | ||
64 | |||
65 | if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) | ||
66 | && !((n - (w >> 3)) & w)) { /* 1 in 10^10 keys passes this test */ | ||
67 | if (n < 0x41415151) { | ||
68 | if (n < 0x31312121) { | ||
69 | if (n < 0x14141515) { | ||
70 | /* 01 01 01 01 01 01 01 01 */ | ||
71 | if (n == 0x11111111) goto weak; | ||
72 | /* 01 1F 01 1F 01 0E 01 0E */ | ||
73 | if (n == 0x13131212) goto weak; | ||
74 | } else { | ||
75 | /* 01 E0 01 E0 01 F1 01 F1 */ | ||
76 | if (n == 0x14141515) goto weak; | ||
77 | /* 01 FE 01 FE 01 FE 01 FE */ | ||
78 | if (n == 0x16161616) goto weak; | ||
79 | } | ||
80 | } else { | ||
81 | if (n < 0x34342525) { | ||
82 | /* 1F 01 1F 01 0E 01 0E 01 */ | ||
83 | if (n == 0x31312121) goto weak; | ||
84 | /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */ | ||
85 | if (n == 0x33332222) goto weak; | ||
86 | } else { | ||
87 | /* 1F E0 1F E0 0E F1 0E F1 */ | ||
88 | if (n == 0x34342525) goto weak; | ||
89 | /* 1F FE 1F FE 0E FE 0E FE */ | ||
90 | if (n == 0x36362626) goto weak; | ||
91 | } | ||
92 | } | ||
93 | } else { | ||
94 | if (n < 0x61616161) { | ||
95 | if (n < 0x44445555) { | ||
96 | /* E0 01 E0 01 F1 01 F1 01 */ | ||
97 | if (n == 0x41415151) goto weak; | ||
98 | /* E0 1F E0 1F F1 0E F1 0E */ | ||
99 | if (n == 0x43435252) goto weak; | ||
100 | } else { | ||
101 | /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */ | ||
102 | if (n == 0x44445555) goto weak; | ||
103 | /* E0 FE E0 FE F1 FE F1 FE */ | ||
104 | if (n == 0x46465656) goto weak; | ||
105 | } | ||
106 | } else { | ||
107 | if (n < 0x64646565) { | ||
108 | /* FE 01 FE 01 FE 01 FE 01 */ | ||
109 | if (n == 0x61616161) goto weak; | ||
110 | /* FE 1F FE 1F FE 0E FE 0E */ | ||
111 | if (n == 0x63636262) goto weak; | ||
112 | } else { | ||
113 | /* FE E0 FE E0 FE F1 FE F1 */ | ||
114 | if (n == 0x64646565) goto weak; | ||
115 | /* FE FE FE FE FE FE FE FE */ | ||
116 | if (n == 0x66666666) goto weak; | ||
117 | } | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | return 0; | ||
122 | weak: | ||
123 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | EXPORT_SYMBOL(crypto_des_check_key); | ||
128 | |||
129 | MODULE_LICENSE("GPL"); | ||
130 | MODULE_DESCRIPTION("Key Check function for DES & DES3 Cipher Algorithms"); | ||
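
crypto_des_check_key() folds each key byte through the parity[] table into a 32-bit signature n and compares it against the signatures of the known weak and semi-weak DES keys listed in the inline comments; the weak-key comparison only runs when the caller sets CRYPTO_TFM_REQ_WEAK_KEY in *flags. A hedged usage sketch (kernel-style C; the flag and return conventions follow the code above, but the surrounding helper is hypothetical):

    #include <linux/crypto.h>
    #include <linux/errno.h>
    #include <linux/types.h>
    #include "crypto_des.h"     /* declares crypto_des_check_key() */

    /* Hypothetical caller: request the RFC2451 weak-key test explicitly,
     * mirroring how des_setkey() in des_z990.c forwards the check. */
    static int accept_des_key(const u8 *key, unsigned int keylen, u32 *flags)
    {
            *flags |= CRYPTO_TFM_REQ_WEAK_KEY;  /* enable the weak-key comparison */
            return crypto_des_check_key(key, keylen, flags);
            /* For the classic weak key 01 01 01 01 01 01 01 01 this returns
             * -EINVAL and sets CRYPTO_TFM_RES_WEAK_KEY in *flags. */
    }
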
diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_z990.c new file mode 100644 index 000000000000..813cf37b1177 --- /dev/null +++ b/arch/s390/crypto/des_z990.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * z990 implementation of the DES Cipher Algorithm. | ||
5 | * | ||
6 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
8 | * | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <asm/scatterlist.h> | ||
21 | #include <linux/crypto.h> | ||
22 | #include "crypt_z990.h" | ||
23 | #include "crypto_des.h" | ||
24 | |||
25 | #define DES_BLOCK_SIZE 8 | ||
26 | #define DES_KEY_SIZE 8 | ||
27 | |||
28 | #define DES3_128_KEY_SIZE (2 * DES_KEY_SIZE) | ||
29 | #define DES3_128_BLOCK_SIZE DES_BLOCK_SIZE | ||
30 | |||
31 | #define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE) | ||
32 | #define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE | ||
33 | |||
34 | struct crypt_z990_des_ctx { | ||
35 | u8 iv[DES_BLOCK_SIZE]; | ||
36 | u8 key[DES_KEY_SIZE]; | ||
37 | }; | ||
38 | |||
39 | struct crypt_z990_des3_128_ctx { | ||
40 | u8 iv[DES_BLOCK_SIZE]; | ||
41 | u8 key[DES3_128_KEY_SIZE]; | ||
42 | }; | ||
43 | |||
44 | struct crypt_z990_des3_192_ctx { | ||
45 | u8 iv[DES_BLOCK_SIZE]; | ||
46 | u8 key[DES3_192_KEY_SIZE]; | ||
47 | }; | ||
48 | |||
49 | static int | ||
50 | des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) | ||
51 | { | ||
52 | struct crypt_z990_des_ctx *dctx; | ||
53 | int ret; | ||
54 | |||
55 | dctx = ctx; | ||
56 | //test if key is valid (not a weak key) | ||
57 | ret = crypto_des_check_key(key, keylen, flags); | ||
58 | if (ret == 0){ | ||
59 | memcpy(dctx->key, key, keylen); | ||
60 | } | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | |||
65 | static void | ||
66 | des_encrypt(void *ctx, u8 *dst, const u8 *src) | ||
67 | { | ||
68 | struct crypt_z990_des_ctx *dctx; | ||
69 | |||
70 | dctx = ctx; | ||
71 | crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); | ||
72 | } | ||
73 | |||
74 | static void | ||
75 | des_decrypt(void *ctx, u8 *dst, const u8 *src) | ||
76 | { | ||
77 | struct crypt_z990_des_ctx *dctx; | ||
78 | |||
79 | dctx = ctx; | ||
80 | crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); | ||
81 | } | ||
82 | |||
83 | static struct crypto_alg des_alg = { | ||
84 | .cra_name = "des", | ||
85 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
86 | .cra_blocksize = DES_BLOCK_SIZE, | ||
87 | .cra_ctxsize = sizeof(struct crypt_z990_des_ctx), | ||
88 | .cra_module = THIS_MODULE, | ||
89 | .cra_list = LIST_HEAD_INIT(des_alg.cra_list), | ||
90 | .cra_u = { .cipher = { | ||
91 | .cia_min_keysize = DES_KEY_SIZE, | ||
92 | .cia_max_keysize = DES_KEY_SIZE, | ||
93 | .cia_setkey = des_setkey, | ||
94 | .cia_encrypt = des_encrypt, | ||
95 | .cia_decrypt = des_decrypt } } | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * RFC2451: | ||
100 | * | ||
101 | * For DES-EDE3, there is no known need to reject weak or | ||
102 | * complementation keys. Any weakness is obviated by the use of | ||
103 | * multiple keys. | ||
104 | * | ||
105 | * However, if the two independent 64-bit keys are equal, | ||
106 | * then the DES3 operation is simply the same as DES. | ||
107 | * Implementers MUST reject keys that exhibit this property. | ||
108 | * | ||
109 | */ | ||
110 | static int | ||
111 | des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) | ||
112 | { | ||
113 | int i, ret; | ||
114 | struct crypt_z990_des3_128_ctx *dctx; | ||
115 | const u8* temp_key = key; | ||
116 | |||
117 | dctx = ctx; | ||
118 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { | ||
119 | |||
120 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | ||
121 | return -EINVAL; | ||
122 | } | ||
123 | for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { | ||
124 | ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags); | ||
125 | if (ret < 0) | ||
126 | return ret; | ||
127 | } | ||
128 | memcpy(dctx->key, key, keylen); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static void | ||
133 | des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) | ||
134 | { | ||
135 | struct crypt_z990_des3_128_ctx *dctx; | ||
136 | |||
137 | dctx = ctx; | ||
138 | crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, | ||
139 | DES3_128_BLOCK_SIZE); | ||
140 | } | ||
141 | |||
142 | static void | ||
143 | des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) | ||
144 | { | ||
145 | struct crypt_z990_des3_128_ctx *dctx; | ||
146 | |||
147 | dctx = ctx; | ||
148 | crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, | ||
149 | DES3_128_BLOCK_SIZE); | ||
150 | } | ||
151 | |||
152 | static struct crypto_alg des3_128_alg = { | ||
153 | .cra_name = "des3_ede128", | ||
154 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
155 | .cra_blocksize = DES3_128_BLOCK_SIZE, | ||
156 | .cra_ctxsize = sizeof(struct crypt_z990_des3_128_ctx), | ||
157 | .cra_module = THIS_MODULE, | ||
158 | .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list), | ||
159 | .cra_u = { .cipher = { | ||
160 | .cia_min_keysize = DES3_128_KEY_SIZE, | ||
161 | .cia_max_keysize = DES3_128_KEY_SIZE, | ||
162 | .cia_setkey = des3_128_setkey, | ||
163 | .cia_encrypt = des3_128_encrypt, | ||
164 | .cia_decrypt = des3_128_decrypt } } | ||
165 | }; | ||
166 | |||
167 | /* | ||
168 | * RFC2451: | ||
169 | * | ||
170 | * For DES-EDE3, there is no known need to reject weak or | ||
171 | * complementation keys. Any weakness is obviated by the use of | ||
172 | * multiple keys. | ||
173 | * | ||
174 | * However, if the first two or last two independent 64-bit keys are | ||
175 | * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the | ||
176 | * same as DES. Implementers MUST reject keys that exhibit this | ||
177 | * property. | ||
178 | * | ||
179 | */ | ||
180 | static int | ||
181 | des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) | ||
182 | { | ||
183 | int i, ret; | ||
184 | struct crypt_z990_des3_192_ctx *dctx; | ||
185 | const u8* temp_key; | ||
186 | |||
187 | dctx = ctx; | ||
188 | temp_key = key; | ||
189 | if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && | ||
190 | memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], | ||
191 | DES_KEY_SIZE))) { | ||
192 | |||
193 | *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; | ||
194 | return -EINVAL; | ||
195 | } | ||
196 | for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { | ||
197 | ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags); | ||
198 | if (ret < 0){ | ||
199 | return ret; | ||
200 | } | ||
201 | } | ||
202 | memcpy(dctx->key, key, keylen); | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static void | ||
207 | des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) | ||
208 | { | ||
209 | struct crypt_z990_des3_192_ctx *dctx; | ||
210 | |||
211 | dctx = ctx; | ||
212 | crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, | ||
213 | DES3_192_BLOCK_SIZE); | ||
214 | } | ||
215 | |||
216 | static void | ||
217 | des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) | ||
218 | { | ||
219 | struct crypt_z990_des3_192_ctx *dctx; | ||
220 | |||
221 | dctx = ctx; | ||
222 | crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, | ||
223 | DES3_192_BLOCK_SIZE); | ||
224 | } | ||
225 | |||
226 | static struct crypto_alg des3_192_alg = { | ||
227 | .cra_name = "des3_ede", | ||
228 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
229 | .cra_blocksize = DES3_192_BLOCK_SIZE, | ||
230 | .cra_ctxsize = sizeof(struct crypt_z990_des3_192_ctx), | ||
231 | .cra_module = THIS_MODULE, | ||
232 | .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list), | ||
233 | .cra_u = { .cipher = { | ||
234 | .cia_min_keysize = DES3_192_KEY_SIZE, | ||
235 | .cia_max_keysize = DES3_192_KEY_SIZE, | ||
236 | .cia_setkey = des3_192_setkey, | ||
237 | .cia_encrypt = des3_192_encrypt, | ||
238 | .cia_decrypt = des3_192_decrypt } } | ||
239 | }; | ||
240 | |||
241 | |||
242 | |||
243 | static int | ||
244 | init(void) | ||
245 | { | ||
246 | int ret; | ||
247 | |||
248 | if (!crypt_z990_func_available(KM_DEA_ENCRYPT) || | ||
249 | !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) || | ||
250 | !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){ | ||
251 | return -ENOSYS; | ||
252 | } | ||
253 | |||
254 | ret = 0; | ||
255 | ret |= (crypto_register_alg(&des_alg) == 0)? 0:1; | ||
256 | ret |= (crypto_register_alg(&des3_128_alg) == 0)? 0:2; | ||
257 | ret |= (crypto_register_alg(&des3_192_alg) == 0)? 0:4; | ||
258 | if (ret){ | ||
259 | crypto_unregister_alg(&des3_192_alg); | ||
260 | crypto_unregister_alg(&des3_128_alg); | ||
261 | crypto_unregister_alg(&des_alg); | ||
262 | return -EEXIST; | ||
263 | } | ||
264 | |||
265 | printk(KERN_INFO "crypt_z990: des_z990 loaded.\n"); | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static void __exit | ||
270 | fini(void) | ||
271 | { | ||
272 | crypto_unregister_alg(&des3_192_alg); | ||
273 | crypto_unregister_alg(&des3_128_alg); | ||
274 | crypto_unregister_alg(&des_alg); | ||
275 | } | ||
276 | |||
277 | module_init(init); | ||
278 | module_exit(fini); | ||
279 | |||
280 | MODULE_ALIAS("des"); | ||
281 | MODULE_ALIAS("des3_ede"); | ||
282 | |||
283 | MODULE_LICENSE("GPL"); | ||
284 | MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); | ||
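
The RFC 2451 comment above describes the only key sanity check the triple-DES setkey routines add beyond the per-part parity/weak-key test: keys whose adjacent 64-bit parts are equal collapse EDE to single DES and are rejected. A minimal userspace sketch of that check, outside the crypto API and with illustrative key values only:

#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE 8

/* Reject a 3DES-EDE key whose adjacent 64-bit parts are equal
 * (k1 == k2 or k2 == k3), mirroring des3_192_setkey() above. */
static int des3_key_is_degenerate(const unsigned char *key)
{
	return !(memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) &&
		 memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
			DES_KEY_SIZE));
}

int main(void)
{
	const unsigned char good[] = "abcdefghABCDEFGH01234567";
	const unsigned char bad[]  = "abcdefghabcdefgh01234567";

	printf("good key degenerate: %d\n", des3_key_is_degenerate(good)); /* 0 */
	printf("bad key degenerate:  %d\n", des3_key_is_degenerate(bad));  /* 1 */
	return 0;
}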
diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_z990.c new file mode 100644 index 000000000000..298174ddf5b1 --- /dev/null +++ b/arch/s390/crypto/sha1_z990.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * z990 implementation of the SHA1 Secure Hash Algorithm. | ||
5 | * | ||
6 | * Derived from cryptoapi implementation, adapted for in-place | ||
7 | * scatterlist interface. Originally based on the public domain | ||
8 | * implementation written by Steve Reid. | ||
9 | * | ||
10 | * s390 Version: | ||
11 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | ||
12 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
13 | * | ||
14 | * Derived from "crypto/sha1.c" | ||
15 | * Copyright (c) Alan Smithee. | ||
16 | * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> | ||
17 | * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify it | ||
20 | * under the terms of the GNU General Public License as published by the Free | ||
21 | * Software Foundation; either version 2 of the License, or (at your option) | ||
22 | * any later version. | ||
23 | * | ||
24 | */ | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/crypto.h> | ||
29 | #include <asm/scatterlist.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include "crypt_z990.h" | ||
32 | |||
33 | #define SHA1_DIGEST_SIZE 20 | ||
34 | #define SHA1_BLOCK_SIZE 64 | ||
35 | |||
36 | struct crypt_z990_sha1_ctx { | ||
37 | u64 count; | ||
38 | u32 state[5]; | ||
39 | u32 buf_len; | ||
40 | u8 buffer[2 * SHA1_BLOCK_SIZE]; | ||
41 | }; | ||
42 | |||
43 | static void | ||
44 | sha1_init(void *ctx) | ||
45 | { | ||
46 | static const struct crypt_z990_sha1_ctx initstate = { | ||
47 | .state = { | ||
48 | 0x67452301, | ||
49 | 0xEFCDAB89, | ||
50 | 0x98BADCFE, | ||
51 | 0x10325476, | ||
52 | 0xC3D2E1F0 | ||
53 | }, | ||
54 | }; | ||
55 | memcpy(ctx, &initstate, sizeof(initstate)); | ||
56 | } | ||
57 | |||
58 | static void | ||
59 | sha1_update(void *ctx, const u8 *data, unsigned int len) | ||
60 | { | ||
61 | struct crypt_z990_sha1_ctx *sctx; | ||
62 | long imd_len; | ||
63 | |||
64 | sctx = ctx; | ||
65 | sctx->count += len * 8; //message bit length | ||
66 | |||
67 | //anything in buffer yet? -> must be completed | ||
68 | if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { | ||
69 | //complete full block and hash | ||
70 | memcpy(sctx->buffer + sctx->buf_len, data, | ||
71 | SHA1_BLOCK_SIZE - sctx->buf_len); | ||
72 | crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, | ||
73 | SHA1_BLOCK_SIZE); | ||
74 | data += SHA1_BLOCK_SIZE - sctx->buf_len; | ||
75 | len -= SHA1_BLOCK_SIZE - sctx->buf_len; | ||
76 | sctx->buf_len = 0; | ||
77 | } | ||
78 | |||
79 | //rest of data contains full blocks? | ||
80 | imd_len = len & ~0x3ful; | ||
81 | if (imd_len){ | ||
82 | crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len); | ||
83 | data += imd_len; | ||
84 | len -= imd_len; | ||
85 | } | ||
86 | //anything left? store in buffer | ||
87 | if (len){ | ||
88 | memcpy(sctx->buffer + sctx->buf_len , data, len); | ||
89 | sctx->buf_len += len; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | |||
94 | static void | ||
95 | pad_message(struct crypt_z990_sha1_ctx* sctx) | ||
96 | { | ||
97 | int index; | ||
98 | |||
99 | index = sctx->buf_len; | ||
100 | sctx->buf_len = (sctx->buf_len < 56)? | ||
101 | SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; | ||
102 | //start pad with 1 | ||
103 | sctx->buffer[index] = 0x80; | ||
104 | //pad with zeros | ||
105 | index++; | ||
106 | memset(sctx->buffer + index, 0x00, sctx->buf_len - index); | ||
107 | //append length | ||
108 | memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, | ||
109 | sizeof sctx->count); | ||
110 | } | ||
111 | |||
112 | /* Add padding and return the message digest. */ | ||
113 | static void | ||
114 | sha1_final(void* ctx, u8 *out) | ||
115 | { | ||
116 | struct crypt_z990_sha1_ctx *sctx = ctx; | ||
117 | |||
118 | //must perform manual padding | ||
119 | pad_message(sctx); | ||
120 | crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); | ||
121 | //copy digest to out | ||
122 | memcpy(out, sctx->state, SHA1_DIGEST_SIZE); | ||
123 | /* Wipe context */ | ||
124 | memset(sctx, 0, sizeof *sctx); | ||
125 | } | ||
126 | |||
127 | static struct crypto_alg alg = { | ||
128 | .cra_name = "sha1", | ||
129 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST, | ||
130 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
131 | .cra_ctxsize = sizeof(struct crypt_z990_sha1_ctx), | ||
132 | .cra_module = THIS_MODULE, | ||
133 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | ||
134 | .cra_u = { .digest = { | ||
135 | .dia_digestsize = SHA1_DIGEST_SIZE, | ||
136 | .dia_init = sha1_init, | ||
137 | .dia_update = sha1_update, | ||
138 | .dia_final = sha1_final } } | ||
139 | }; | ||
140 | |||
141 | static int | ||
142 | init(void) | ||
143 | { | ||
144 | int ret = -ENOSYS; | ||
145 | |||
146 | if (crypt_z990_func_available(KIMD_SHA_1)){ | ||
147 | ret = crypto_register_alg(&alg); | ||
148 | if (ret == 0){ | ||
149 | printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n"); | ||
150 | } | ||
151 | } | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | static void __exit | ||
156 | fini(void) | ||
157 | { | ||
158 | crypto_unregister_alg(&alg); | ||
159 | } | ||
160 | |||
161 | module_init(init); | ||
162 | module_exit(fini); | ||
163 | |||
164 | MODULE_ALIAS("sha1"); | ||
165 | |||
166 | MODULE_LICENSE("GPL"); | ||
167 | MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); | ||
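
pad_message() above performs the standard SHA-1 finalization: append a 0x80 byte, zero-fill up to the last eight bytes of a 64-byte block (spilling into a second block when fewer than nine bytes remain free), and store the message length in bits as a 64-bit big-endian value. The driver can memcpy the counter directly because s390 is big-endian; the following userspace sketch, with illustrative names, stores it explicitly:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHA1_BLOCK_SIZE 64

/*
 * Pad a partially filled SHA-1 block the same way pad_message() does:
 * one 0x80 byte, zeros, then the total message length in bits as a
 * 64-bit big-endian value in the last 8 bytes.  Returns the padded
 * length (64 or 128 bytes).
 */
static size_t sha1_pad(uint8_t buf[2 * SHA1_BLOCK_SIZE],
		       size_t buf_len, uint64_t bit_count)
{
	size_t padded = (buf_len < 56) ? SHA1_BLOCK_SIZE : 2 * SHA1_BLOCK_SIZE;
	int i;

	buf[buf_len] = 0x80;
	memset(buf + buf_len + 1, 0, padded - buf_len - 1);
	for (i = 0; i < 8; i++)
		buf[padded - 8 + i] = (uint8_t)(bit_count >> (56 - 8 * i));
	return padded;
}

int main(void)
{
	uint8_t buf[2 * SHA1_BLOCK_SIZE];

	/* 3-byte message "abc": 24 bits, fits in a single padded block. */
	memcpy(buf, "abc", 3);
	size_t n = sha1_pad(buf, 3, 24);
	printf("padded length: %zu, last byte: 0x%02x\n", n, buf[n - 1]); /* 64, 0x18 */
	return 0;
}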
diff --git a/arch/s390/defconfig b/arch/s390/defconfig new file mode 100644 index 000000000000..1358b4201701 --- /dev/null +++ b/arch/s390/defconfig | |||
@@ -0,0 +1,589 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.11 | ||
4 | # Wed Mar 2 16:57:55 2005 | ||
5 | # | ||
6 | CONFIG_MMU=y | ||
7 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
8 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
9 | CONFIG_ARCH_S390=y | ||
10 | CONFIG_UID16=y | ||
11 | |||
12 | # | ||
13 | # Code maturity level options | ||
14 | # | ||
15 | CONFIG_EXPERIMENTAL=y | ||
16 | CONFIG_CLEAN_COMPILE=y | ||
17 | CONFIG_LOCK_KERNEL=y | ||
18 | |||
19 | # | ||
20 | # General setup | ||
21 | # | ||
22 | CONFIG_LOCALVERSION="" | ||
23 | CONFIG_SWAP=y | ||
24 | CONFIG_SYSVIPC=y | ||
25 | # CONFIG_POSIX_MQUEUE is not set | ||
26 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
27 | CONFIG_SYSCTL=y | ||
28 | # CONFIG_AUDIT is not set | ||
29 | CONFIG_LOG_BUF_SHIFT=17 | ||
30 | CONFIG_HOTPLUG=y | ||
31 | CONFIG_KOBJECT_UEVENT=y | ||
32 | CONFIG_IKCONFIG=y | ||
33 | CONFIG_IKCONFIG_PROC=y | ||
34 | # CONFIG_EMBEDDED is not set | ||
35 | CONFIG_KALLSYMS=y | ||
36 | # CONFIG_KALLSYMS_ALL is not set | ||
37 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
38 | CONFIG_FUTEX=y | ||
39 | CONFIG_EPOLL=y | ||
40 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
41 | CONFIG_SHMEM=y | ||
42 | CONFIG_CC_ALIGN_FUNCTIONS=0 | ||
43 | CONFIG_CC_ALIGN_LABELS=0 | ||
44 | CONFIG_CC_ALIGN_LOOPS=0 | ||
45 | CONFIG_CC_ALIGN_JUMPS=0 | ||
46 | # CONFIG_TINY_SHMEM is not set | ||
47 | |||
48 | # | ||
49 | # Loadable module support | ||
50 | # | ||
51 | CONFIG_MODULES=y | ||
52 | # CONFIG_MODULE_UNLOAD is not set | ||
53 | CONFIG_OBSOLETE_MODPARM=y | ||
54 | # CONFIG_MODVERSIONS is not set | ||
55 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
56 | CONFIG_KMOD=y | ||
57 | CONFIG_STOP_MACHINE=y | ||
58 | |||
59 | # | ||
60 | # Base setup | ||
61 | # | ||
62 | |||
63 | # | ||
64 | # Processor type and features | ||
65 | # | ||
66 | # CONFIG_ARCH_S390X is not set | ||
67 | # CONFIG_64BIT is not set | ||
68 | CONFIG_ARCH_S390_31=y | ||
69 | CONFIG_SMP=y | ||
70 | CONFIG_NR_CPUS=32 | ||
71 | CONFIG_HOTPLUG_CPU=y | ||
72 | CONFIG_MATHEMU=y | ||
73 | |||
74 | # | ||
75 | # Code generation options | ||
76 | # | ||
77 | CONFIG_MARCH_G5=y | ||
78 | # CONFIG_MARCH_Z900 is not set | ||
79 | # CONFIG_MARCH_Z990 is not set | ||
80 | CONFIG_PACK_STACK=y | ||
81 | # CONFIG_SMALL_STACK is not set | ||
82 | # CONFIG_CHECK_STACK is not set | ||
83 | # CONFIG_WARN_STACK is not set | ||
84 | |||
85 | # | ||
86 | # I/O subsystem configuration | ||
87 | # | ||
88 | CONFIG_MACHCHK_WARNING=y | ||
89 | CONFIG_QDIO=y | ||
90 | # CONFIG_QDIO_PERF_STATS is not set | ||
91 | # CONFIG_QDIO_DEBUG is not set | ||
92 | |||
93 | # | ||
94 | # Misc | ||
95 | # | ||
96 | # CONFIG_PREEMPT is not set | ||
97 | CONFIG_IPL=y | ||
98 | # CONFIG_IPL_TAPE is not set | ||
99 | CONFIG_IPL_VM=y | ||
100 | CONFIG_BINFMT_ELF=y | ||
101 | CONFIG_BINFMT_MISC=m | ||
102 | # CONFIG_PROCESS_DEBUG is not set | ||
103 | CONFIG_PFAULT=y | ||
104 | # CONFIG_SHARED_KERNEL is not set | ||
105 | # CONFIG_CMM is not set | ||
106 | # CONFIG_VIRT_TIMER is not set | ||
107 | CONFIG_NO_IDLE_HZ=y | ||
108 | CONFIG_NO_IDLE_HZ_INIT=y | ||
109 | # CONFIG_PCMCIA is not set | ||
110 | |||
111 | # | ||
112 | # Generic Driver Options | ||
113 | # | ||
114 | CONFIG_STANDALONE=y | ||
115 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
116 | # CONFIG_FW_LOADER is not set | ||
117 | # CONFIG_DEBUG_DRIVER is not set | ||
118 | |||
119 | # | ||
120 | # SCSI device support | ||
121 | # | ||
122 | CONFIG_SCSI=y | ||
123 | CONFIG_SCSI_PROC_FS=y | ||
124 | |||
125 | # | ||
126 | # SCSI support type (disk, tape, CD-ROM) | ||
127 | # | ||
128 | CONFIG_BLK_DEV_SD=y | ||
129 | CONFIG_CHR_DEV_ST=y | ||
130 | # CONFIG_CHR_DEV_OSST is not set | ||
131 | CONFIG_BLK_DEV_SR=y | ||
132 | CONFIG_BLK_DEV_SR_VENDOR=y | ||
133 | CONFIG_CHR_DEV_SG=y | ||
134 | |||
135 | # | ||
136 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
137 | # | ||
138 | CONFIG_SCSI_MULTI_LUN=y | ||
139 | CONFIG_SCSI_CONSTANTS=y | ||
140 | CONFIG_SCSI_LOGGING=y | ||
141 | |||
142 | # | ||
143 | # SCSI Transport Attributes | ||
144 | # | ||
145 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
146 | CONFIG_SCSI_FC_ATTRS=y | ||
147 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
148 | |||
149 | # | ||
150 | # SCSI low-level drivers | ||
151 | # | ||
152 | # CONFIG_SCSI_SATA is not set | ||
153 | # CONFIG_SCSI_DEBUG is not set | ||
154 | CONFIG_ZFCP=y | ||
155 | CONFIG_CCW=y | ||
156 | |||
157 | # | ||
158 | # Block devices | ||
159 | # | ||
160 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
161 | CONFIG_BLK_DEV_LOOP=m | ||
162 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
163 | CONFIG_BLK_DEV_NBD=m | ||
164 | CONFIG_BLK_DEV_RAM=y | ||
165 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
166 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
167 | CONFIG_BLK_DEV_INITRD=y | ||
168 | CONFIG_INITRAMFS_SOURCE="" | ||
169 | # CONFIG_LBD is not set | ||
170 | # CONFIG_CDROM_PKTCDVD is not set | ||
171 | |||
172 | # | ||
173 | # S/390 block device drivers | ||
174 | # | ||
175 | CONFIG_BLK_DEV_XPRAM=m | ||
176 | # CONFIG_DCSSBLK is not set | ||
177 | CONFIG_DASD=y | ||
178 | CONFIG_DASD_PROFILE=y | ||
179 | CONFIG_DASD_ECKD=y | ||
180 | CONFIG_DASD_FBA=y | ||
181 | CONFIG_DASD_DIAG=y | ||
182 | # CONFIG_DASD_CMB is not set | ||
183 | |||
184 | # | ||
185 | # IO Schedulers | ||
186 | # | ||
187 | CONFIG_IOSCHED_NOOP=y | ||
188 | CONFIG_IOSCHED_AS=y | ||
189 | CONFIG_IOSCHED_DEADLINE=y | ||
190 | CONFIG_IOSCHED_CFQ=y | ||
191 | # CONFIG_ATA_OVER_ETH is not set | ||
192 | |||
193 | # | ||
194 | # Multi-device support (RAID and LVM) | ||
195 | # | ||
196 | CONFIG_MD=y | ||
197 | CONFIG_BLK_DEV_MD=y | ||
198 | CONFIG_MD_LINEAR=m | ||
199 | CONFIG_MD_RAID0=m | ||
200 | CONFIG_MD_RAID1=m | ||
201 | # CONFIG_MD_RAID10 is not set | ||
202 | CONFIG_MD_RAID5=m | ||
203 | # CONFIG_MD_RAID6 is not set | ||
204 | CONFIG_MD_MULTIPATH=m | ||
205 | # CONFIG_MD_FAULTY is not set | ||
206 | # CONFIG_BLK_DEV_DM is not set | ||
207 | |||
208 | # | ||
209 | # Character device drivers | ||
210 | # | ||
211 | CONFIG_UNIX98_PTYS=y | ||
212 | CONFIG_UNIX98_PTY_COUNT=2048 | ||
213 | |||
214 | # | ||
215 | # Watchdog Cards | ||
216 | # | ||
217 | # CONFIG_WATCHDOG is not set | ||
218 | |||
219 | # | ||
220 | # S/390 character device drivers | ||
221 | # | ||
222 | CONFIG_TN3270=y | ||
223 | CONFIG_TN3270_TTY=y | ||
224 | CONFIG_TN3270_FS=m | ||
225 | CONFIG_TN3270_CONSOLE=y | ||
226 | CONFIG_TN3215=y | ||
227 | CONFIG_TN3215_CONSOLE=y | ||
228 | CONFIG_CCW_CONSOLE=y | ||
229 | CONFIG_SCLP=y | ||
230 | CONFIG_SCLP_TTY=y | ||
231 | CONFIG_SCLP_CONSOLE=y | ||
232 | # CONFIG_SCLP_VT220_TTY is not set | ||
233 | CONFIG_SCLP_CPI=m | ||
234 | CONFIG_S390_TAPE=m | ||
235 | |||
236 | # | ||
237 | # S/390 tape interface support | ||
238 | # | ||
239 | CONFIG_S390_TAPE_BLOCK=y | ||
240 | |||
241 | # | ||
242 | # S/390 tape hardware support | ||
243 | # | ||
244 | CONFIG_S390_TAPE_34XX=m | ||
245 | # CONFIG_VMLOGRDR is not set | ||
246 | # CONFIG_MONREADER is not set | ||
247 | # CONFIG_DCSS_SHM is not set | ||
248 | |||
249 | # | ||
250 | # Cryptographic devices | ||
251 | # | ||
252 | CONFIG_Z90CRYPT=m | ||
253 | |||
254 | # | ||
255 | # Networking support | ||
256 | # | ||
257 | CONFIG_NET=y | ||
258 | |||
259 | # | ||
260 | # Networking options | ||
261 | # | ||
262 | CONFIG_PACKET=y | ||
263 | # CONFIG_PACKET_MMAP is not set | ||
264 | # CONFIG_NETLINK_DEV is not set | ||
265 | CONFIG_UNIX=y | ||
266 | CONFIG_NET_KEY=y | ||
267 | CONFIG_INET=y | ||
268 | CONFIG_IP_MULTICAST=y | ||
269 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
270 | # CONFIG_IP_PNP is not set | ||
271 | # CONFIG_NET_IPIP is not set | ||
272 | # CONFIG_NET_IPGRE is not set | ||
273 | # CONFIG_IP_MROUTE is not set | ||
274 | # CONFIG_ARPD is not set | ||
275 | # CONFIG_SYN_COOKIES is not set | ||
276 | # CONFIG_INET_AH is not set | ||
277 | # CONFIG_INET_ESP is not set | ||
278 | # CONFIG_INET_IPCOMP is not set | ||
279 | # CONFIG_INET_TUNNEL is not set | ||
280 | CONFIG_IP_TCPDIAG=y | ||
281 | CONFIG_IP_TCPDIAG_IPV6=y | ||
282 | CONFIG_IPV6=y | ||
283 | # CONFIG_IPV6_PRIVACY is not set | ||
284 | # CONFIG_INET6_AH is not set | ||
285 | # CONFIG_INET6_ESP is not set | ||
286 | # CONFIG_INET6_IPCOMP is not set | ||
287 | # CONFIG_INET6_TUNNEL is not set | ||
288 | # CONFIG_IPV6_TUNNEL is not set | ||
289 | # CONFIG_NETFILTER is not set | ||
290 | CONFIG_XFRM=y | ||
291 | # CONFIG_XFRM_USER is not set | ||
292 | |||
293 | # | ||
294 | # SCTP Configuration (EXPERIMENTAL) | ||
295 | # | ||
296 | # CONFIG_IP_SCTP is not set | ||
297 | # CONFIG_ATM is not set | ||
298 | # CONFIG_BRIDGE is not set | ||
299 | # CONFIG_VLAN_8021Q is not set | ||
300 | # CONFIG_DECNET is not set | ||
301 | # CONFIG_LLC2 is not set | ||
302 | # CONFIG_IPX is not set | ||
303 | # CONFIG_ATALK is not set | ||
304 | # CONFIG_X25 is not set | ||
305 | # CONFIG_LAPB is not set | ||
306 | # CONFIG_NET_DIVERT is not set | ||
307 | # CONFIG_ECONET is not set | ||
308 | # CONFIG_WAN_ROUTER is not set | ||
309 | |||
310 | # | ||
311 | # QoS and/or fair queueing | ||
312 | # | ||
313 | CONFIG_NET_SCHED=y | ||
314 | CONFIG_NET_SCH_CLK_JIFFIES=y | ||
315 | # CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set | ||
316 | # CONFIG_NET_SCH_CLK_CPU is not set | ||
317 | CONFIG_NET_SCH_CBQ=m | ||
318 | # CONFIG_NET_SCH_HTB is not set | ||
319 | # CONFIG_NET_SCH_HFSC is not set | ||
320 | CONFIG_NET_SCH_PRIO=m | ||
321 | CONFIG_NET_SCH_RED=m | ||
322 | CONFIG_NET_SCH_SFQ=m | ||
323 | CONFIG_NET_SCH_TEQL=m | ||
324 | CONFIG_NET_SCH_TBF=m | ||
325 | CONFIG_NET_SCH_GRED=m | ||
326 | CONFIG_NET_SCH_DSMARK=m | ||
327 | # CONFIG_NET_SCH_NETEM is not set | ||
328 | # CONFIG_NET_SCH_INGRESS is not set | ||
329 | CONFIG_NET_QOS=y | ||
330 | CONFIG_NET_ESTIMATOR=y | ||
331 | CONFIG_NET_CLS=y | ||
332 | CONFIG_NET_CLS_TCINDEX=m | ||
333 | CONFIG_NET_CLS_ROUTE4=m | ||
334 | CONFIG_NET_CLS_ROUTE=y | ||
335 | CONFIG_NET_CLS_FW=m | ||
336 | CONFIG_NET_CLS_U32=m | ||
337 | # CONFIG_CLS_U32_PERF is not set | ||
338 | # CONFIG_NET_CLS_IND is not set | ||
339 | CONFIG_NET_CLS_RSVP=m | ||
340 | CONFIG_NET_CLS_RSVP6=m | ||
341 | # CONFIG_NET_CLS_ACT is not set | ||
342 | CONFIG_NET_CLS_POLICE=y | ||
343 | |||
344 | # | ||
345 | # Network testing | ||
346 | # | ||
347 | # CONFIG_NET_PKTGEN is not set | ||
348 | # CONFIG_NETPOLL is not set | ||
349 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
350 | # CONFIG_HAMRADIO is not set | ||
351 | # CONFIG_IRDA is not set | ||
352 | # CONFIG_BT is not set | ||
353 | CONFIG_NETDEVICES=y | ||
354 | CONFIG_DUMMY=m | ||
355 | CONFIG_BONDING=m | ||
356 | CONFIG_EQUALIZER=m | ||
357 | CONFIG_TUN=m | ||
358 | |||
359 | # | ||
360 | # Ethernet (10 or 100Mbit) | ||
361 | # | ||
362 | CONFIG_NET_ETHERNET=y | ||
363 | # CONFIG_MII is not set | ||
364 | |||
365 | # | ||
366 | # Ethernet (1000 Mbit) | ||
367 | # | ||
368 | |||
369 | # | ||
370 | # Ethernet (10000 Mbit) | ||
371 | # | ||
372 | |||
373 | # | ||
374 | # Token Ring devices | ||
375 | # | ||
376 | # CONFIG_TR is not set | ||
377 | |||
378 | # | ||
379 | # Wireless LAN (non-hamradio) | ||
380 | # | ||
381 | # CONFIG_NET_RADIO is not set | ||
382 | |||
383 | # | ||
384 | # Wan interfaces | ||
385 | # | ||
386 | # CONFIG_WAN is not set | ||
387 | |||
388 | # | ||
389 | # S/390 network device drivers | ||
390 | # | ||
391 | CONFIG_LCS=m | ||
392 | CONFIG_CTC=m | ||
393 | CONFIG_IUCV=m | ||
394 | # CONFIG_NETIUCV is not set | ||
395 | # CONFIG_SMSGIUCV is not set | ||
396 | CONFIG_QETH=y | ||
397 | |||
398 | # | ||
399 | # Gigabit Ethernet default settings | ||
400 | # | ||
401 | # CONFIG_QETH_IPV6 is not set | ||
402 | # CONFIG_QETH_PERF_STATS is not set | ||
403 | CONFIG_CCWGROUP=y | ||
404 | # CONFIG_PPP is not set | ||
405 | # CONFIG_SLIP is not set | ||
406 | # CONFIG_SHAPER is not set | ||
407 | # CONFIG_NETCONSOLE is not set | ||
408 | |||
409 | # | ||
410 | # File systems | ||
411 | # | ||
412 | CONFIG_EXT2_FS=y | ||
413 | # CONFIG_EXT2_FS_XATTR is not set | ||
414 | CONFIG_EXT3_FS=y | ||
415 | CONFIG_EXT3_FS_XATTR=y | ||
416 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
417 | # CONFIG_EXT3_FS_SECURITY is not set | ||
418 | CONFIG_JBD=y | ||
419 | # CONFIG_JBD_DEBUG is not set | ||
420 | CONFIG_FS_MBCACHE=y | ||
421 | # CONFIG_REISERFS_FS is not set | ||
422 | # CONFIG_JFS_FS is not set | ||
423 | |||
424 | # | ||
425 | # XFS support | ||
426 | # | ||
427 | # CONFIG_XFS_FS is not set | ||
428 | # CONFIG_MINIX_FS is not set | ||
429 | # CONFIG_ROMFS_FS is not set | ||
430 | # CONFIG_QUOTA is not set | ||
431 | CONFIG_DNOTIFY=y | ||
432 | # CONFIG_AUTOFS_FS is not set | ||
433 | # CONFIG_AUTOFS4_FS is not set | ||
434 | |||
435 | # | ||
436 | # CD-ROM/DVD Filesystems | ||
437 | # | ||
438 | # CONFIG_ISO9660_FS is not set | ||
439 | # CONFIG_UDF_FS is not set | ||
440 | |||
441 | # | ||
442 | # DOS/FAT/NT Filesystems | ||
443 | # | ||
444 | # CONFIG_MSDOS_FS is not set | ||
445 | # CONFIG_VFAT_FS is not set | ||
446 | # CONFIG_NTFS_FS is not set | ||
447 | |||
448 | # | ||
449 | # Pseudo filesystems | ||
450 | # | ||
451 | CONFIG_PROC_FS=y | ||
452 | CONFIG_PROC_KCORE=y | ||
453 | CONFIG_SYSFS=y | ||
454 | # CONFIG_DEVFS_FS is not set | ||
455 | # CONFIG_DEVPTS_FS_XATTR is not set | ||
456 | CONFIG_TMPFS=y | ||
457 | # CONFIG_TMPFS_XATTR is not set | ||
458 | # CONFIG_HUGETLB_PAGE is not set | ||
459 | CONFIG_RAMFS=y | ||
460 | |||
461 | # | ||
462 | # Miscellaneous filesystems | ||
463 | # | ||
464 | # CONFIG_ADFS_FS is not set | ||
465 | # CONFIG_AFFS_FS is not set | ||
466 | # CONFIG_HFS_FS is not set | ||
467 | # CONFIG_HFSPLUS_FS is not set | ||
468 | # CONFIG_BEFS_FS is not set | ||
469 | # CONFIG_BFS_FS is not set | ||
470 | # CONFIG_EFS_FS is not set | ||
471 | # CONFIG_CRAMFS is not set | ||
472 | # CONFIG_VXFS_FS is not set | ||
473 | # CONFIG_HPFS_FS is not set | ||
474 | # CONFIG_QNX4FS_FS is not set | ||
475 | # CONFIG_SYSV_FS is not set | ||
476 | # CONFIG_UFS_FS is not set | ||
477 | |||
478 | # | ||
479 | # Network File Systems | ||
480 | # | ||
481 | CONFIG_NFS_FS=y | ||
482 | CONFIG_NFS_V3=y | ||
483 | # CONFIG_NFS_V4 is not set | ||
484 | # CONFIG_NFS_DIRECTIO is not set | ||
485 | CONFIG_NFSD=y | ||
486 | CONFIG_NFSD_V3=y | ||
487 | # CONFIG_NFSD_V4 is not set | ||
488 | CONFIG_NFSD_TCP=y | ||
489 | CONFIG_LOCKD=y | ||
490 | CONFIG_LOCKD_V4=y | ||
491 | CONFIG_EXPORTFS=y | ||
492 | CONFIG_SUNRPC=y | ||
493 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
494 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
495 | # CONFIG_SMB_FS is not set | ||
496 | # CONFIG_CIFS is not set | ||
497 | # CONFIG_NCP_FS is not set | ||
498 | # CONFIG_CODA_FS is not set | ||
499 | # CONFIG_AFS_FS is not set | ||
500 | |||
501 | # | ||
502 | # Partition Types | ||
503 | # | ||
504 | CONFIG_PARTITION_ADVANCED=y | ||
505 | # CONFIG_ACORN_PARTITION is not set | ||
506 | # CONFIG_OSF_PARTITION is not set | ||
507 | # CONFIG_AMIGA_PARTITION is not set | ||
508 | # CONFIG_ATARI_PARTITION is not set | ||
509 | CONFIG_IBM_PARTITION=y | ||
510 | # CONFIG_MAC_PARTITION is not set | ||
511 | CONFIG_MSDOS_PARTITION=y | ||
512 | # CONFIG_BSD_DISKLABEL is not set | ||
513 | # CONFIG_MINIX_SUBPARTITION is not set | ||
514 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
515 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
516 | # CONFIG_LDM_PARTITION is not set | ||
517 | # CONFIG_SGI_PARTITION is not set | ||
518 | # CONFIG_ULTRIX_PARTITION is not set | ||
519 | # CONFIG_SUN_PARTITION is not set | ||
520 | # CONFIG_EFI_PARTITION is not set | ||
521 | |||
522 | # | ||
523 | # Native Language Support | ||
524 | # | ||
525 | # CONFIG_NLS is not set | ||
526 | |||
527 | # | ||
528 | # Profiling support | ||
529 | # | ||
530 | # CONFIG_PROFILING is not set | ||
531 | |||
532 | # | ||
533 | # Kernel hacking | ||
534 | # | ||
535 | CONFIG_DEBUG_KERNEL=y | ||
536 | CONFIG_MAGIC_SYSRQ=y | ||
537 | # CONFIG_SCHEDSTATS is not set | ||
538 | # CONFIG_DEBUG_SLAB is not set | ||
539 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
540 | # CONFIG_DEBUG_KOBJECT is not set | ||
541 | # CONFIG_DEBUG_INFO is not set | ||
542 | # CONFIG_DEBUG_FS is not set | ||
543 | |||
544 | # | ||
545 | # Security options | ||
546 | # | ||
547 | # CONFIG_KEYS is not set | ||
548 | # CONFIG_SECURITY is not set | ||
549 | |||
550 | # | ||
551 | # Cryptographic options | ||
552 | # | ||
553 | CONFIG_CRYPTO=y | ||
554 | # CONFIG_CRYPTO_HMAC is not set | ||
555 | # CONFIG_CRYPTO_NULL is not set | ||
556 | # CONFIG_CRYPTO_MD4 is not set | ||
557 | # CONFIG_CRYPTO_MD5 is not set | ||
558 | # CONFIG_CRYPTO_SHA1 is not set | ||
559 | # CONFIG_CRYPTO_SHA1_Z990 is not set | ||
560 | # CONFIG_CRYPTO_SHA256 is not set | ||
561 | # CONFIG_CRYPTO_SHA512 is not set | ||
562 | # CONFIG_CRYPTO_WP512 is not set | ||
563 | # CONFIG_CRYPTO_DES is not set | ||
564 | # CONFIG_CRYPTO_DES_Z990 is not set | ||
565 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
566 | # CONFIG_CRYPTO_TWOFISH is not set | ||
567 | # CONFIG_CRYPTO_SERPENT is not set | ||
568 | # CONFIG_CRYPTO_AES is not set | ||
569 | # CONFIG_CRYPTO_CAST5 is not set | ||
570 | # CONFIG_CRYPTO_CAST6 is not set | ||
571 | # CONFIG_CRYPTO_TEA is not set | ||
572 | # CONFIG_CRYPTO_ARC4 is not set | ||
573 | # CONFIG_CRYPTO_KHAZAD is not set | ||
574 | # CONFIG_CRYPTO_ANUBIS is not set | ||
575 | # CONFIG_CRYPTO_DEFLATE is not set | ||
576 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
577 | # CONFIG_CRYPTO_CRC32C is not set | ||
578 | # CONFIG_CRYPTO_TEST is not set | ||
579 | |||
580 | # | ||
581 | # Hardware crypto devices | ||
582 | # | ||
583 | |||
584 | # | ||
585 | # Library routines | ||
586 | # | ||
587 | # CONFIG_CRC_CCITT is not set | ||
588 | CONFIG_CRC32=m | ||
589 | # CONFIG_LIBCRC32C is not set | ||
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile new file mode 100644 index 000000000000..b41e0e199a7c --- /dev/null +++ b/arch/s390/kernel/Makefile | |||
@@ -0,0 +1,31 @@ | |||
1 | # | ||
2 | # Makefile for the linux kernel. | ||
3 | # | ||
4 | |||
5 | EXTRA_AFLAGS := -traditional | ||
6 | |||
7 | obj-y := bitmap.o traps.o time.o process.o \ | ||
8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | ||
9 | semaphore.o s390_ext.o debug.o profile.o irq.o | ||
10 | |||
11 | extra-$(CONFIG_ARCH_S390_31) += head.o | ||
12 | extra-$(CONFIG_ARCH_S390X) += head64.o | ||
13 | extra-y += init_task.o vmlinux.lds | ||
14 | |||
15 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | ||
16 | obj-$(CONFIG_SMP) += smp.o | ||
17 | |||
18 | obj-$(CONFIG_S390_SUPPORT) += compat_linux.o compat_signal.o \ | ||
19 | compat_ioctl.o compat_wrapper.o \ | ||
20 | compat_exec_domain.o | ||
21 | obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o | ||
22 | |||
23 | obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o | ||
24 | obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o | ||
25 | |||
26 | obj-$(CONFIG_VIRT_TIMER) += vtime.o | ||
27 | |||
28 | # | ||
29 | # This is just to get the dependencies... | ||
30 | # | ||
31 | binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c new file mode 100644 index 000000000000..3f7018e9dbe4 --- /dev/null +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/sched.h> | ||
9 | |||
10 | /* Use marker if you need to separate the values later */ | ||
11 | |||
12 | #define DEFINE(sym, val, marker) \ | ||
13 | asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val)) | ||
14 | |||
15 | #define BLANK() asm volatile("\n->" : : ) | ||
16 | |||
17 | int main(void) | ||
18 | { | ||
19 | DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),); | ||
20 | DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),); | ||
21 | DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),); | ||
22 | DEFINE(__THREAD_mm_segment, | ||
23 | offsetof(struct task_struct, thread.mm_segment),); | ||
24 | BLANK(); | ||
25 | DEFINE(__TASK_pid, offsetof(struct task_struct, pid),); | ||
26 | BLANK(); | ||
27 | DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),); | ||
28 | DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),); | ||
29 | DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),); | ||
30 | BLANK(); | ||
31 | DEFINE(__TI_task, offsetof(struct thread_info, task),); | ||
32 | DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),); | ||
33 | DEFINE(__TI_flags, offsetof(struct thread_info, flags),); | ||
34 | DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),); | ||
35 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),); | ||
36 | BLANK(); | ||
37 | DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),); | ||
38 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),); | ||
39 | DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),); | ||
40 | DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),); | ||
41 | DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),); | ||
42 | DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),); | ||
43 | DEFINE(__PT_SIZE, sizeof(struct pt_regs),); | ||
44 | BLANK(); | ||
45 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),); | ||
46 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),); | ||
47 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),); | ||
48 | return 0; | ||
49 | } | ||
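
asm-offsets.c is never linked into the kernel; it is only compiled to assembler output, where each DEFINE() leaves a "->" marker line that a build-time text pass rewrites into #define lines usable from entry.S and friends. A small standalone sketch of the same trick, with a made-up struct, meant to be compiled with "gcc -S" and inspected rather than assembled or run (the marker lines are not valid assembler statements):

#include <stddef.h>

#define DEFINE(sym, val, marker) \
	asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

struct toy_frame {
	long back_chain;
	long gprs[16];
};

int main(void)
{
	/* In the .s output this appears as something like
	 *   ->__TOY_BACKCHAIN 0 offsetof(struct toy_frame, back_chain)
	 * which post-processing can turn into
	 *   #define __TOY_BACKCHAIN 0
	 */
	DEFINE(__TOY_BACKCHAIN, offsetof(struct toy_frame, back_chain),);
	DEFINE(__TOY_GPRS, offsetof(struct toy_frame, gprs),);
	BLANK();
	DEFINE(__TOY_FRAME_SIZE, sizeof(struct toy_frame),);
	return 0;
}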
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c new file mode 100644 index 000000000000..03ba5893f17b --- /dev/null +++ b/arch/s390/kernel/binfmt_elf32.c | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * Support for 32-bit Linux for S390 ELF binaries. | ||
3 | * | ||
4 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Gerhard Tonn (ton@de.ibm.com) | ||
6 | * | ||
7 | * Heavily inspired by the 32-bit Sparc compat code which is | ||
8 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) | ||
9 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
10 | */ | ||
11 | |||
12 | #define __ASMS390_ELF_H | ||
13 | |||
14 | #include <linux/time.h> | ||
15 | |||
16 | /* | ||
17 | * These are used to set parameters in the core dumps. | ||
18 | */ | ||
19 | #define ELF_CLASS ELFCLASS32 | ||
20 | #define ELF_DATA ELFDATA2MSB | ||
21 | #define ELF_ARCH EM_S390 | ||
22 | |||
23 | /* | ||
24 | * This is used to ensure we don't load something for the wrong architecture. | ||
25 | */ | ||
26 | #define elf_check_arch(x) \ | ||
27 | (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ | ||
28 | && (x)->e_ident[EI_CLASS] == ELF_CLASS) | ||
29 | |||
30 | /* ELF register definitions */ | ||
31 | #define NUM_GPRS 16 | ||
32 | #define NUM_FPRS 16 | ||
33 | #define NUM_ACRS 16 | ||
34 | |||
35 | /* For SVR4/S390 the function pointer to be registered with `atexit` is | ||
36 | passed in R14. */ | ||
37 | #define ELF_PLAT_INIT(_r, load_addr) \ | ||
38 | do { \ | ||
39 | _r->gprs[14] = 0; \ | ||
40 | } while(0) | ||
41 | |||
42 | #define USE_ELF_CORE_DUMP | ||
43 | #define ELF_EXEC_PAGESIZE 4096 | ||
44 | |||
45 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
46 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
47 | the loader. We need to make sure that it is out of the way of the program | ||
48 | that it will "exec", and that there is sufficient room for the brk. */ | ||
49 | |||
50 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
51 | |||
52 | /* Wow, the "main" arch needs arch dependent functions too.. :) */ | ||
53 | |||
54 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | ||
55 | now struct_user_regs, they are different) */ | ||
56 | |||
57 | #define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg); | ||
58 | |||
59 | #define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs) | ||
60 | |||
61 | #define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs) | ||
62 | |||
63 | /* This yields a mask that user programs can use to figure out what | ||
64 | instruction set this CPU supports. */ | ||
65 | |||
66 | #define ELF_HWCAP (0) | ||
67 | |||
68 | /* This yields a string that ld.so will use to load implementation | ||
69 | specific libraries for optimization. This is more specific in | ||
70 | intent than poking at uname or /proc/cpuinfo. | ||
71 | |||
72 | For the moment, we have only optimizations for the Intel generations, | ||
73 | but that could change... */ | ||
74 | |||
75 | #define ELF_PLATFORM (NULL) | ||
76 | |||
77 | #define SET_PERSONALITY(ex, ibcs2) \ | ||
78 | do { \ | ||
79 | if (ibcs2) \ | ||
80 | set_personality(PER_SVR4); \ | ||
81 | else if (current->personality != PER_LINUX32) \ | ||
82 | set_personality(PER_LINUX); \ | ||
83 | set_thread_flag(TIF_31BIT); \ | ||
84 | } while (0) | ||
85 | |||
86 | #include "compat_linux.h" | ||
87 | |||
88 | typedef _s390_fp_regs32 elf_fpregset_t; | ||
89 | |||
90 | typedef struct | ||
91 | { | ||
92 | |||
93 | _psw_t32 psw; | ||
94 | __u32 gprs[__NUM_GPRS]; | ||
95 | __u32 acrs[__NUM_ACRS]; | ||
96 | __u32 orig_gpr2; | ||
97 | } s390_regs32; | ||
98 | typedef s390_regs32 elf_gregset_t; | ||
99 | |||
100 | static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs) | ||
101 | { | ||
102 | int i; | ||
103 | |||
104 | memcpy(®s->psw.mask, &ptregs->psw.mask, 4); | ||
105 | memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4); | ||
106 | for (i = 0; i < NUM_GPRS; i++) | ||
107 | regs->gprs[i] = ptregs->gprs[i]; | ||
108 | save_access_regs(regs->acrs); | ||
109 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
110 | return 1; | ||
111 | } | ||
112 | |||
113 | static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs) | ||
114 | { | ||
115 | struct pt_regs *ptregs = __KSTK_PTREGS(tsk); | ||
116 | int i; | ||
117 | |||
118 | memcpy(®s->psw.mask, &ptregs->psw.mask, 4); | ||
119 | memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4); | ||
120 | for (i = 0; i < NUM_GPRS; i++) | ||
121 | regs->gprs[i] = ptregs->gprs[i]; | ||
122 | memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs)); | ||
123 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
128 | { | ||
129 | if (tsk == current) | ||
130 | save_fp_regs((s390_fp_regs *) fpregs); | ||
131 | else | ||
132 | memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t)); | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | #include <asm/processor.h> | ||
137 | #include <linux/module.h> | ||
138 | #include <linux/config.h> | ||
139 | #include <linux/elfcore.h> | ||
140 | #include <linux/binfmts.h> | ||
141 | #include <linux/compat.h> | ||
142 | |||
143 | #define elf_prstatus elf_prstatus32 | ||
144 | struct elf_prstatus32 | ||
145 | { | ||
146 | struct elf_siginfo pr_info; /* Info associated with signal */ | ||
147 | short pr_cursig; /* Current signal */ | ||
148 | u32 pr_sigpend; /* Set of pending signals */ | ||
149 | u32 pr_sighold; /* Set of held signals */ | ||
150 | pid_t pr_pid; | ||
151 | pid_t pr_ppid; | ||
152 | pid_t pr_pgrp; | ||
153 | pid_t pr_sid; | ||
154 | struct compat_timeval pr_utime; /* User time */ | ||
155 | struct compat_timeval pr_stime; /* System time */ | ||
156 | struct compat_timeval pr_cutime; /* Cumulative user time */ | ||
157 | struct compat_timeval pr_cstime; /* Cumulative system time */ | ||
158 | elf_gregset_t pr_reg; /* GP registers */ | ||
159 | int pr_fpvalid; /* True if math co-processor being used. */ | ||
160 | }; | ||
161 | |||
162 | #define elf_prpsinfo elf_prpsinfo32 | ||
163 | struct elf_prpsinfo32 | ||
164 | { | ||
165 | char pr_state; /* numeric process state */ | ||
166 | char pr_sname; /* char for pr_state */ | ||
167 | char pr_zomb; /* zombie */ | ||
168 | char pr_nice; /* nice val */ | ||
169 | u32 pr_flag; /* flags */ | ||
170 | u16 pr_uid; | ||
171 | u16 pr_gid; | ||
172 | pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | ||
173 | /* Lots missing */ | ||
174 | char pr_fname[16]; /* filename of executable */ | ||
175 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | ||
176 | }; | ||
177 | |||
178 | #include <linux/highuid.h> | ||
179 | |||
180 | #undef NEW_TO_OLD_UID | ||
181 | #undef NEW_TO_OLD_GID | ||
182 | #define NEW_TO_OLD_UID(uid) (((uid) > 65535) ? (u16)overflowuid : (u16)(uid)) | ||
183 | #define NEW_TO_OLD_GID(gid) (((gid) > 65535) ? (u16)overflowgid : (u16)(gid)) | ||
184 | |||
185 | #define elf_addr_t u32 | ||
186 | /* | ||
187 | #define init_elf_binfmt init_elf32_binfmt | ||
188 | */ | ||
189 | |||
190 | #undef start_thread | ||
191 | #define start_thread start_thread31 | ||
192 | |||
193 | MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries," | ||
194 | " Copyright 2000 IBM Corporation"); | ||
195 | MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>"); | ||
196 | |||
197 | #undef MODULE_DESCRIPTION | ||
198 | #undef MODULE_AUTHOR | ||
199 | |||
200 | #undef cputime_to_timeval | ||
201 | #define cputime_to_timeval cputime_to_compat_timeval | ||
202 | static __inline__ void | ||
203 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | ||
204 | { | ||
205 | value->tv_usec = cputime % 1000000; | ||
206 | value->tv_sec = cputime / 1000000; | ||
207 | } | ||
208 | |||
209 | #include "../../../fs/binfmt_elf.c" | ||
210 | |||
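
elf_check_arch() near the top of this file accepts a binary only when e_machine is EM_S390 (or the pre-official EM_S390_OLD value) and the ELF class is ELFCLASS32. A userspace sketch of the same test on a file's ELF header; the EM_S390_OLD value is assumed from the kernel headers, and header byte order is ignored for brevity (the in-kernel check runs on s390, where host and file order match):

#include <elf.h>
#include <stdio.h>

#ifndef EM_S390
#define EM_S390 22
#endif
#ifndef EM_S390_OLD
#define EM_S390_OLD 0xA390	/* assumed legacy machine number */
#endif

static int is_s390_31bit(const Elf32_Ehdr *hdr)
{
	/* Note: e_machine is read in file byte order; a big-endian s390
	 * binary inspected on a little-endian host would need swapping. */
	return (hdr->e_machine == EM_S390 || hdr->e_machine == EM_S390_OLD) &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

int main(int argc, char **argv)
{
	Elf32_Ehdr hdr;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&hdr, sizeof(hdr), 1, f) == 1)
		printf("%s: %s\n", argv[1],
		       is_s390_31bit(&hdr) ? "31-bit s390 ELF" : "rejected");
	fclose(f);
	return 0;
}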
diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S new file mode 100644 index 000000000000..dfb41f946e23 --- /dev/null +++ b/arch/s390/kernel/bitmap.S | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/bitmap.S | ||
3 | * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... | ||
4 | * See include/asm-s390/{bitops.h|posix_types.h} for details | ||
5 | * | ||
6 | * S390 version | ||
7 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
8 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
9 | */ | ||
10 | |||
11 | .globl _oi_bitmap | ||
12 | _oi_bitmap: | ||
13 | .byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 | ||
14 | |||
15 | .globl _ni_bitmap | ||
16 | _ni_bitmap: | ||
17 | .byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F | ||
18 | |||
19 | .globl _zb_findmap | ||
20 | _zb_findmap: | ||
21 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
22 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 | ||
23 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
24 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 | ||
25 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
26 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 | ||
27 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
28 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7 | ||
29 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
30 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 | ||
31 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
32 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 | ||
33 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
34 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 | ||
35 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 | ||
36 | .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 | ||
37 | |||
38 | .globl _sb_findmap | ||
39 | _sb_findmap: | ||
40 | .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
41 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
42 | .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
43 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
44 | .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
45 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
46 | .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
47 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
48 | .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
49 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
50 | .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
51 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
52 | .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
53 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
54 | .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
55 | .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 | ||
56 | |||
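
_zb_findmap and _sb_findmap are 256-entry lookup tables: entry i holds the index of the lowest zero bit (respectively the lowest set bit) of byte value i, with 8 as the sentinel when no such bit exists (0xff and 0x00). A small generator sketch whose output can be compared against the .byte lists above:

#include <stdio.h>

/* Index of the lowest zero bit of v, or 8 if v == 0xff. */
static int first_zero_bit(unsigned v)
{
	int i;

	for (i = 0; i < 8; i++)
		if (!(v & (1u << i)))
			return i;
	return 8;
}

/* Index of the lowest set bit of v, or 8 if v == 0. */
static int first_set_bit(unsigned v)
{
	int i;

	for (i = 0; i < 8; i++)
		if (v & (1u << i))
			return i;
	return 8;
}

int main(void)
{
	int i;

	printf("_zb_findmap:");
	for (i = 0; i < 256; i++)
		printf("%s%d", (i % 16) ? "," : "\n\t.byte ", first_zero_bit(i));
	printf("\n_sb_findmap:");
	for (i = 0; i < 256; i++)
		printf("%s%d", (i % 16) ? "," : "\n\t.byte ", first_set_bit(i));
	printf("\n");
	return 0;
}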
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c new file mode 100644 index 000000000000..71d27c493568 --- /dev/null +++ b/arch/s390/kernel/compat_exec_domain.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Support for 32-bit Linux for S390 personality. | ||
3 | * | ||
4 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Gerhard Tonn (ton@de.ibm.com) | ||
6 | * | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/personality.h> | ||
13 | #include <linux/sched.h> | ||
14 | |||
15 | struct exec_domain s390_exec_domain; | ||
16 | |||
17 | static int __init | ||
18 | s390_init (void) | ||
19 | { | ||
20 | s390_exec_domain.name = "Linux/s390"; | ||
21 | s390_exec_domain.handler = NULL; | ||
22 | s390_exec_domain.pers_low = PER_LINUX32; | ||
23 | s390_exec_domain.pers_high = PER_LINUX32; | ||
24 | s390_exec_domain.signal_map = default_exec_domain.signal_map; | ||
25 | s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap; | ||
26 | register_exec_domain(&s390_exec_domain); | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | __initcall(s390_init); | ||
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c new file mode 100644 index 000000000000..96571ff7115d --- /dev/null +++ b/arch/s390/kernel/compat_ioctl.c | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * ioctl32.c: Conversion between 32bit and 64bit native ioctls. | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Gerhard Tonn (ton@de.ibm.com) | ||
7 | * Arnd Bergmann (arndb@de.ibm.com) | ||
8 | * | ||
9 | * Original implementation from 32-bit Sparc compat code which is | ||
10 | * Copyright (C) 2000 Silicon Graphics, Inc. | ||
11 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) | ||
12 | */ | ||
13 | |||
14 | #include "compat_linux.h" | ||
15 | #define INCLUDES | ||
16 | #define CODE | ||
17 | #include "../../../fs/compat_ioctl.c" | ||
18 | #include <asm/dasd.h> | ||
19 | #include <asm/tape390.h> | ||
20 | |||
21 | static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, | ||
22 | unsigned long arg, struct file *f) | ||
23 | { | ||
24 | return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg)); | ||
25 | } | ||
26 | |||
27 | static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd, | ||
28 | unsigned long arg, struct file *f) | ||
29 | { | ||
30 | return sys_ioctl(fd, cmd, arg); | ||
31 | } | ||
32 | |||
33 | #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer) | ||
34 | #define ULONG_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong) | ||
35 | #define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL }, | ||
36 | |||
37 | struct ioctl_trans ioctl_start[] = { | ||
38 | /* architecture independent ioctls */ | ||
39 | #include <linux/compat_ioctl.h> | ||
40 | #define DECLARES | ||
41 | #include "../../../fs/compat_ioctl.c" | ||
42 | |||
43 | /* s390 only ioctls */ | ||
44 | #if defined(CONFIG_DASD) || defined(CONFIG_DASD_MODULE) | ||
45 | COMPATIBLE_IOCTL(DASDAPIVER) | ||
46 | COMPATIBLE_IOCTL(BIODASDDISABLE) | ||
47 | COMPATIBLE_IOCTL(BIODASDENABLE) | ||
48 | COMPATIBLE_IOCTL(BIODASDRSRV) | ||
49 | COMPATIBLE_IOCTL(BIODASDRLSE) | ||
50 | COMPATIBLE_IOCTL(BIODASDSLCK) | ||
51 | COMPATIBLE_IOCTL(BIODASDINFO) | ||
52 | COMPATIBLE_IOCTL(BIODASDINFO2) | ||
53 | COMPATIBLE_IOCTL(BIODASDFMT) | ||
54 | COMPATIBLE_IOCTL(BIODASDPRRST) | ||
55 | COMPATIBLE_IOCTL(BIODASDQUIESCE) | ||
56 | COMPATIBLE_IOCTL(BIODASDRESUME) | ||
57 | COMPATIBLE_IOCTL(BIODASDPRRD) | ||
58 | COMPATIBLE_IOCTL(BIODASDPSRD) | ||
59 | COMPATIBLE_IOCTL(BIODASDGATTR) | ||
60 | COMPATIBLE_IOCTL(BIODASDSATTR) | ||
61 | |||
62 | #endif | ||
63 | |||
64 | #if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE) | ||
65 | COMPATIBLE_IOCTL(TAPE390_DISPLAY) | ||
66 | #endif | ||
67 | |||
68 | /* s390 doesn't need handlers here */ | ||
69 | COMPATIBLE_IOCTL(TIOCGSERIAL) | ||
70 | COMPATIBLE_IOCTL(TIOCSSERIAL) | ||
71 | }; | ||
72 | |||
73 | int ioctl_table_size = ARRAY_SIZE(ioctl_start); | ||
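
The table above is built almost entirely by macro expansion: each COMPATIBLE_IOCTL()/HANDLE_IOCTL() line becomes one { cmd, handler } initializer, and fs/compat_ioctl.c is re-included under DECLARES to contribute the architecture-independent entries. A toy sketch of the same table-of-initializers pattern, with invented command numbers and a single pass-through handler:

#include <stdio.h>

typedef int (*handler_t)(unsigned int cmd, unsigned long arg);

struct ioctl_trans {
	unsigned int cmd;
	handler_t handler;
};

static int pass_through(unsigned int cmd, unsigned long arg)
{
	printf("cmd 0x%x passed through with arg %lu\n", cmd, arg);
	return 0;
}

/* Each use expands to one table entry, as in the real table above. */
#define COMPATIBLE_IOCTL(cmd) { (cmd), pass_through },

#define TOY_GETVER 0x1001
#define TOY_RESET  0x1002

static struct ioctl_trans table[] = {
	COMPATIBLE_IOCTL(TOY_GETVER)
	COMPATIBLE_IOCTL(TOY_RESET)
};

int main(void)
{
	unsigned int i, cmd = TOY_RESET;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].cmd == cmd)
			return table[i].handler(cmd, 42);
	return -1;
}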
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c new file mode 100644 index 000000000000..614056222875 --- /dev/null +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -0,0 +1,1045 @@ | |||
1 | /* | ||
2 | * arch/s390x/kernel/linux32.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Gerhard Tonn (ton@de.ibm.com) | ||
8 | * Thomas Spatzier (tspat@de.ibm.com) | ||
9 | * | ||
10 | * Conversion between 31bit and 64bit native syscalls. | ||
11 | * | ||
12 | * Heavily inspired by the 32-bit Sparc compat code which is | ||
13 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
14 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/file.h> | ||
25 | #include <linux/signal.h> | ||
26 | #include <linux/resource.h> | ||
27 | #include <linux/times.h> | ||
28 | #include <linux/utsname.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <linux/smp.h> | ||
31 | #include <linux/smp_lock.h> | ||
32 | #include <linux/sem.h> | ||
33 | #include <linux/msg.h> | ||
34 | #include <linux/shm.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/uio.h> | ||
37 | #include <linux/nfs_fs.h> | ||
38 | #include <linux/quota.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/sunrpc/svc.h> | ||
41 | #include <linux/nfsd/nfsd.h> | ||
42 | #include <linux/nfsd/cache.h> | ||
43 | #include <linux/nfsd/xdr.h> | ||
44 | #include <linux/nfsd/syscall.h> | ||
45 | #include <linux/poll.h> | ||
46 | #include <linux/personality.h> | ||
47 | #include <linux/stat.h> | ||
48 | #include <linux/filter.h> | ||
49 | #include <linux/highmem.h> | ||
50 | #include <linux/highuid.h> | ||
51 | #include <linux/mman.h> | ||
52 | #include <linux/ipv6.h> | ||
53 | #include <linux/in.h> | ||
54 | #include <linux/icmpv6.h> | ||
55 | #include <linux/syscalls.h> | ||
56 | #include <linux/sysctl.h> | ||
57 | #include <linux/binfmts.h> | ||
58 | #include <linux/compat.h> | ||
59 | #include <linux/vfs.h> | ||
60 | #include <linux/ptrace.h> | ||
61 | |||
62 | #include <asm/types.h> | ||
63 | #include <asm/ipc.h> | ||
64 | #include <asm/uaccess.h> | ||
65 | #include <asm/semaphore.h> | ||
66 | |||
67 | #include <net/scm.h> | ||
68 | #include <net/sock.h> | ||
69 | |||
70 | #include "compat_linux.h" | ||
71 | |||
72 | |||
73 | /* For this source file, we want overflow handling. */ | ||
74 | |||
75 | #undef high2lowuid | ||
76 | #undef high2lowgid | ||
77 | #undef low2highuid | ||
78 | #undef low2highgid | ||
79 | #undef SET_UID16 | ||
80 | #undef SET_GID16 | ||
81 | #undef NEW_TO_OLD_UID | ||
82 | #undef NEW_TO_OLD_GID | ||
83 | #undef SET_OLDSTAT_UID | ||
84 | #undef SET_OLDSTAT_GID | ||
85 | #undef SET_STAT_UID | ||
86 | #undef SET_STAT_GID | ||
87 | |||
88 | #define high2lowuid(uid) (((uid) > 65535) ? (u16)overflowuid : (u16)(uid)) | ||
89 | #define high2lowgid(gid) (((gid) > 65535) ? (u16)overflowgid : (u16)(gid)) | ||
90 | #define low2highuid(uid) (((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)) | ||
91 | #define low2highgid(gid) (((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)) | ||
92 | #define SET_UID16(var, uid) var = high2lowuid(uid) | ||
93 | #define SET_GID16(var, gid) var = high2lowgid(gid) | ||
94 | #define NEW_TO_OLD_UID(uid) high2lowuid(uid) | ||
95 | #define NEW_TO_OLD_GID(gid) high2lowgid(gid) | ||
96 | #define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) | ||
97 | #define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) | ||
98 | #define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) | ||
99 | #define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) | ||
100 | |||
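
The high2lowuid()/high2lowgid() macros above clamp a 32-bit id into the 16-bit ABI, substituting the overflow id when it does not fit, while low2highuid()/low2highgid() widen a 16-bit id and keep -1 ("no change") intact. A small sketch of both directions; overflowuid is fixed at 65534 here purely for illustration (the kernel takes it from linux/highuid.h):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the value normally provided by linux/highuid.h. */
static const uint32_t overflowuid = 65534;

#define high2lowuid(uid) (((uid) > 65535) ? (uint16_t)overflowuid : (uint16_t)(uid))
#define low2highuid(uid) (((uid) == (uint16_t)-1) ? (uint32_t)-1 : (uint32_t)(uid))

int main(void)
{
	printf("%u\n", (unsigned)high2lowuid(1000u));   /* 1000: fits, unchanged */
	printf("%u\n", (unsigned)high2lowuid(100000u)); /* 65534: clamped to overflowuid */
	printf("%u\n", (unsigned)low2highuid(1000u));   /* 1000 */
	printf("%u\n", (unsigned)low2highuid(0xffffu)); /* 4294967295: -1 preserved */
	return 0;
}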
101 | asmlinkage long sys32_chown16(const char * filename, u16 user, u16 group) | ||
102 | { | ||
103 | return sys_chown(filename, low2highuid(user), low2highgid(group)); | ||
104 | } | ||
105 | |||
106 | asmlinkage long sys32_lchown16(const char * filename, u16 user, u16 group) | ||
107 | { | ||
108 | return sys_lchown(filename, low2highuid(user), low2highgid(group)); | ||
109 | } | ||
110 | |||
111 | asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group) | ||
112 | { | ||
113 | return sys_fchown(fd, low2highuid(user), low2highgid(group)); | ||
114 | } | ||
115 | |||
116 | asmlinkage long sys32_setregid16(u16 rgid, u16 egid) | ||
117 | { | ||
118 | return sys_setregid(low2highgid(rgid), low2highgid(egid)); | ||
119 | } | ||
120 | |||
121 | asmlinkage long sys32_setgid16(u16 gid) | ||
122 | { | ||
123 | return sys_setgid((gid_t)gid); | ||
124 | } | ||
125 | |||
126 | asmlinkage long sys32_setreuid16(u16 ruid, u16 euid) | ||
127 | { | ||
128 | return sys_setreuid(low2highuid(ruid), low2highuid(euid)); | ||
129 | } | ||
130 | |||
131 | asmlinkage long sys32_setuid16(u16 uid) | ||
132 | { | ||
133 | return sys_setuid((uid_t)uid); | ||
134 | } | ||
135 | |||
136 | asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid) | ||
137 | { | ||
138 | return sys_setresuid(low2highuid(ruid), low2highuid(euid), | ||
139 | low2highuid(suid)); | ||
140 | } | ||
141 | |||
142 | asmlinkage long sys32_getresuid16(u16 *ruid, u16 *euid, u16 *suid) | ||
143 | { | ||
144 | int retval; | ||
145 | |||
146 | if (!(retval = put_user(high2lowuid(current->uid), ruid)) && | ||
147 | !(retval = put_user(high2lowuid(current->euid), euid))) | ||
148 | retval = put_user(high2lowuid(current->suid), suid); | ||
149 | |||
150 | return retval; | ||
151 | } | ||
152 | |||
153 | asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid) | ||
154 | { | ||
155 | return sys_setresgid(low2highgid(rgid), low2highgid(egid), | ||
156 | low2highgid(sgid)); | ||
157 | } | ||
158 | |||
159 | asmlinkage long sys32_getresgid16(u16 *rgid, u16 *egid, u16 *sgid) | ||
160 | { | ||
161 | int retval; | ||
162 | |||
163 | if (!(retval = put_user(high2lowgid(current->gid), rgid)) && | ||
164 | !(retval = put_user(high2lowgid(current->egid), egid))) | ||
165 | retval = put_user(high2lowgid(current->sgid), sgid); | ||
166 | |||
167 | return retval; | ||
168 | } | ||
169 | |||
170 | asmlinkage long sys32_setfsuid16(u16 uid) | ||
171 | { | ||
172 | return sys_setfsuid((uid_t)uid); | ||
173 | } | ||
174 | |||
175 | asmlinkage long sys32_setfsgid16(u16 gid) | ||
176 | { | ||
177 | return sys_setfsgid((gid_t)gid); | ||
178 | } | ||
179 | |||
180 | static int groups16_to_user(u16 *grouplist, struct group_info *group_info) | ||
181 | { | ||
182 | int i; | ||
183 | u16 group; | ||
184 | |||
185 | for (i = 0; i < group_info->ngroups; i++) { | ||
186 | group = (u16)GROUP_AT(group_info, i); | ||
187 | if (put_user(group, grouplist+i)) | ||
188 | return -EFAULT; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static int groups16_from_user(struct group_info *group_info, u16 *grouplist) | ||
195 | { | ||
196 | int i; | ||
197 | u16 group; | ||
198 | |||
199 | for (i = 0; i < group_info->ngroups; i++) { | ||
200 | if (get_user(group, grouplist+i)) | ||
201 | return -EFAULT; | ||
202 | GROUP_AT(group_info, i) = (gid_t)group; | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | asmlinkage long sys32_getgroups16(int gidsetsize, u16 *grouplist) | ||
209 | { | ||
210 | int i; | ||
211 | |||
212 | if (gidsetsize < 0) | ||
213 | return -EINVAL; | ||
214 | |||
215 | get_group_info(current->group_info); | ||
216 | i = current->group_info->ngroups; | ||
217 | if (gidsetsize) { | ||
218 | if (i > gidsetsize) { | ||
219 | i = -EINVAL; | ||
220 | goto out; | ||
221 | } | ||
222 | if (groups16_to_user(grouplist, current->group_info)) { | ||
223 | i = -EFAULT; | ||
224 | goto out; | ||
225 | } | ||
226 | } | ||
227 | out: | ||
228 | put_group_info(current->group_info); | ||
229 | return i; | ||
230 | } | ||
231 | |||
232 | asmlinkage long sys32_setgroups16(int gidsetsize, u16 *grouplist) | ||
233 | { | ||
234 | struct group_info *group_info; | ||
235 | int retval; | ||
236 | |||
237 | if (!capable(CAP_SETGID)) | ||
238 | return -EPERM; | ||
239 | if ((unsigned)gidsetsize > NGROUPS_MAX) | ||
240 | return -EINVAL; | ||
241 | |||
242 | group_info = groups_alloc(gidsetsize); | ||
243 | if (!group_info) | ||
244 | return -ENOMEM; | ||
245 | retval = groups16_from_user(group_info, grouplist); | ||
246 | if (retval) { | ||
247 | put_group_info(group_info); | ||
248 | return retval; | ||
249 | } | ||
250 | |||
251 | retval = set_current_groups(group_info); | ||
252 | put_group_info(group_info); | ||
253 | |||
254 | return retval; | ||
255 | } | ||
256 | |||
257 | asmlinkage long sys32_getuid16(void) | ||
258 | { | ||
259 | return high2lowuid(current->uid); | ||
260 | } | ||
261 | |||
262 | asmlinkage long sys32_geteuid16(void) | ||
263 | { | ||
264 | return high2lowuid(current->euid); | ||
265 | } | ||
266 | |||
267 | asmlinkage long sys32_getgid16(void) | ||
268 | { | ||
269 | return high2lowgid(current->gid); | ||
270 | } | ||
271 | |||
272 | asmlinkage long sys32_getegid16(void) | ||
273 | { | ||
274 | return high2lowgid(current->egid); | ||
275 | } | ||
276 | |||
277 | /* 32-bit timeval and related flotsam. */ | ||
278 | |||
279 | static inline long get_tv32(struct timeval *o, struct compat_timeval *i) | ||
280 | { | ||
281 | 	return (!access_ok(VERIFY_READ, i, sizeof(*i)) || | ||
282 | (__get_user(o->tv_sec, &i->tv_sec) || | ||
283 | __get_user(o->tv_usec, &i->tv_usec))); | ||
284 | } | ||
285 | |||
286 | static inline long put_tv32(struct compat_timeval *o, struct timeval *i) | ||
287 | { | ||
288 | return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || | ||
289 | (__put_user(i->tv_sec, &o->tv_sec) || | ||
290 | __put_user(i->tv_usec, &o->tv_usec))); | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation. | ||
295 | * | ||
296 | * This is really horribly ugly. | ||
297 | */ | ||
298 | asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) | ||
299 | { | ||
300 | if (call >> 16) /* hack for backward compatibility */ | ||
301 | return -EINVAL; | ||
302 | |||
303 | call &= 0xffff; | ||
304 | |||
305 | switch (call) { | ||
306 | case SEMTIMEDOP: | ||
307 | return compat_sys_semtimedop(first, compat_ptr(ptr), | ||
308 | second, compat_ptr(third)); | ||
309 | case SEMOP: | ||
310 | /* struct sembuf is the same on 32 and 64bit :)) */ | ||
311 | return sys_semtimedop(first, compat_ptr(ptr), | ||
312 | second, NULL); | ||
313 | case SEMGET: | ||
314 | return sys_semget(first, second, third); | ||
315 | case SEMCTL: | ||
316 | return compat_sys_semctl(first, second, third, | ||
317 | compat_ptr(ptr)); | ||
318 | case MSGSND: | ||
319 | return compat_sys_msgsnd(first, second, third, | ||
320 | compat_ptr(ptr)); | ||
321 | case MSGRCV: | ||
322 | return compat_sys_msgrcv(first, second, 0, third, | ||
323 | 0, compat_ptr(ptr)); | ||
324 | case MSGGET: | ||
325 | return sys_msgget((key_t) first, second); | ||
326 | case MSGCTL: | ||
327 | return compat_sys_msgctl(first, second, compat_ptr(ptr)); | ||
328 | case SHMAT: | ||
329 | return compat_sys_shmat(first, second, third, | ||
330 | 0, compat_ptr(ptr)); | ||
331 | case SHMDT: | ||
332 | return sys_shmdt(compat_ptr(ptr)); | ||
333 | case SHMGET: | ||
334 | return sys_shmget(first, (unsigned)second, third); | ||
335 | case SHMCTL: | ||
336 | return compat_sys_shmctl(first, second, compat_ptr(ptr)); | ||
337 | } | ||
338 | |||
339 | return -ENOSYS; | ||
340 | } | ||
341 | |||
342 | asmlinkage long sys32_truncate64(const char * path, unsigned long high, unsigned long low) | ||
343 | { | ||
344 | if ((int)high < 0) | ||
345 | return -EINVAL; | ||
346 | else | ||
347 | return sys_truncate(path, (high << 32) | low); | ||
348 | } | ||
349 | |||
350 | asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low) | ||
351 | { | ||
352 | if ((int)high < 0) | ||
353 | return -EINVAL; | ||
354 | else | ||
355 | return sys_ftruncate(fd, (high << 32) | low); | ||
356 | } | ||
357 | |||
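
sys32_truncate64() and sys32_ftruncate64() above receive a 64-bit length from 31-bit user space as two 32-bit register halves and recombine them as (high << 32) | low after rejecting a negative high word. A minimal sketch of the split and recombination, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static int64_t combine_len(uint32_t high, uint32_t low)
{
	if ((int32_t)high < 0)
		return -1;	/* stands in for -EINVAL */
	return ((int64_t)high << 32) | low;
}

int main(void)
{
	uint64_t len = 5ull * 1024 * 1024 * 1024;	/* 5 GiB */
	uint32_t high = len >> 32, low = (uint32_t)len;

	printf("high=%u low=%u recombined=%lld\n",
	       (unsigned)high, (unsigned)low, (long long)combine_len(high, low));
	return 0;
}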
358 | int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf) | ||
359 | { | ||
360 | int err; | ||
361 | |||
362 | if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) | ||
363 | return -EOVERFLOW; | ||
364 | |||
365 | err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev); | ||
366 | err |= put_user(stat->ino, &statbuf->st_ino); | ||
367 | err |= put_user(stat->mode, &statbuf->st_mode); | ||
368 | err |= put_user(stat->nlink, &statbuf->st_nlink); | ||
369 | err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid); | ||
370 | err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid); | ||
371 | err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev); | ||
372 | err |= put_user(stat->size, &statbuf->st_size); | ||
373 | err |= put_user(stat->atime.tv_sec, &statbuf->st_atime); | ||
374 | err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec); | ||
375 | err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime); | ||
376 | err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec); | ||
377 | err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime); | ||
378 | err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec); | ||
379 | err |= put_user(stat->blksize, &statbuf->st_blksize); | ||
380 | err |= put_user(stat->blocks, &statbuf->st_blocks); | ||
381 | /* fixme | ||
382 | err |= put_user(0, &statbuf->__unused4[0]); | ||
383 | err |= put_user(0, &statbuf->__unused4[1]); | ||
384 | */ | ||
385 | return err; | ||
386 | } | ||
387 | |||
388 | struct sysinfo32 { | ||
389 | s32 uptime; | ||
390 | u32 loads[3]; | ||
391 | u32 totalram; | ||
392 | u32 freeram; | ||
393 | u32 sharedram; | ||
394 | u32 bufferram; | ||
395 | u32 totalswap; | ||
396 | u32 freeswap; | ||
397 | unsigned short procs; | ||
398 | unsigned short pads; | ||
399 | u32 totalhigh; | ||
400 | u32 freehigh; | ||
401 | unsigned int mem_unit; | ||
402 | char _f[8]; | ||
403 | }; | ||
404 | |||
405 | asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) | ||
406 | { | ||
407 | struct sysinfo s; | ||
408 | int ret, err; | ||
409 | mm_segment_t old_fs = get_fs (); | ||
410 | |||
411 | set_fs (KERNEL_DS); | ||
412 | ret = sys_sysinfo(&s); | ||
413 | set_fs (old_fs); | ||
414 | err = put_user (s.uptime, &info->uptime); | ||
415 | err |= __put_user (s.loads[0], &info->loads[0]); | ||
416 | err |= __put_user (s.loads[1], &info->loads[1]); | ||
417 | err |= __put_user (s.loads[2], &info->loads[2]); | ||
418 | err |= __put_user (s.totalram, &info->totalram); | ||
419 | err |= __put_user (s.freeram, &info->freeram); | ||
420 | err |= __put_user (s.sharedram, &info->sharedram); | ||
421 | err |= __put_user (s.bufferram, &info->bufferram); | ||
422 | err |= __put_user (s.totalswap, &info->totalswap); | ||
423 | err |= __put_user (s.freeswap, &info->freeswap); | ||
424 | err |= __put_user (s.procs, &info->procs); | ||
425 | err |= __put_user (s.totalhigh, &info->totalhigh); | ||
426 | err |= __put_user (s.freehigh, &info->freehigh); | ||
427 | err |= __put_user (s.mem_unit, &info->mem_unit); | ||
428 | if (err) | ||
429 | return -EFAULT; | ||
430 | return ret; | ||
431 | } | ||
432 | |||
433 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, | ||
434 | struct compat_timespec __user *interval) | ||
435 | { | ||
436 | struct timespec t; | ||
437 | int ret; | ||
438 | mm_segment_t old_fs = get_fs (); | ||
439 | |||
440 | set_fs (KERNEL_DS); | ||
441 | ret = sys_sched_rr_get_interval(pid, &t); | ||
442 | set_fs (old_fs); | ||
443 | if (put_compat_timespec(&t, interval)) | ||
444 | return -EFAULT; | ||
445 | return ret; | ||
446 | } | ||
447 | |||
448 | asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | ||
449 | compat_sigset_t __user *oset, size_t sigsetsize) | ||
450 | { | ||
451 | sigset_t s; | ||
452 | compat_sigset_t s32; | ||
453 | int ret; | ||
454 | mm_segment_t old_fs = get_fs(); | ||
455 | |||
456 | if (set) { | ||
457 | if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) | ||
458 | return -EFAULT; | ||
459 | switch (_NSIG_WORDS) { | ||
460 | case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); | ||
461 | case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); | ||
462 | case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); | ||
463 | case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); | ||
464 | } | ||
465 | } | ||
466 | set_fs (KERNEL_DS); | ||
467 | ret = sys_rt_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL, sigsetsize); | ||
468 | set_fs (old_fs); | ||
469 | if (ret) return ret; | ||
470 | if (oset) { | ||
471 | switch (_NSIG_WORDS) { | ||
472 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | ||
473 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
474 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
475 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
476 | } | ||
477 | if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) | ||
478 | return -EFAULT; | ||
479 | } | ||
480 | return 0; | ||
481 | } | ||
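The _NSIG_WORDS switches above rely on deliberate case fall-through to pack each pair of 32-bit compat sigset words into one 64-bit word on entry and to split it back on exit. A hedged stand-alone sketch of that packing (helper names are illustrative, not kernel symbols):

    #include <stdint.h>

    /* Pack two 32-bit sigset words into one 64-bit word (low word first),
     * as the fall-through switch does for each sig[] element. */
    static inline uint64_t sigword_pack(uint32_t lo, uint32_t hi)
    {
            return (uint64_t)lo | ((uint64_t)hi << 32);
    }

    /* Split a 64-bit sigset word back into the two 32-bit compat words. */
    static inline void sigword_unpack(uint64_t w, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)w;
            *hi = (uint32_t)(w >> 32);
    }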
482 | |||
483 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, | ||
484 | size_t sigsetsize) | ||
485 | { | ||
486 | sigset_t s; | ||
487 | compat_sigset_t s32; | ||
488 | int ret; | ||
489 | mm_segment_t old_fs = get_fs(); | ||
490 | |||
491 | set_fs (KERNEL_DS); | ||
492 | ret = sys_rt_sigpending(&s, sigsetsize); | ||
493 | set_fs (old_fs); | ||
494 | if (!ret) { | ||
495 | switch (_NSIG_WORDS) { | ||
496 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | ||
497 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
498 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
499 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
500 | } | ||
501 | if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) | ||
502 | return -EFAULT; | ||
503 | } | ||
504 | return ret; | ||
505 | } | ||
506 | |||
507 | asmlinkage long | ||
508 | sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) | ||
509 | { | ||
510 | siginfo_t info; | ||
511 | int ret; | ||
512 | mm_segment_t old_fs = get_fs(); | ||
513 | |||
514 | if (copy_siginfo_from_user32(&info, uinfo)) | ||
515 | return -EFAULT; | ||
516 | set_fs (KERNEL_DS); | ||
517 | ret = sys_rt_sigqueueinfo(pid, sig, &info); | ||
518 | set_fs (old_fs); | ||
519 | return ret; | ||
520 | } | ||
521 | |||
522 | /* | ||
523 | * sys32_execve() executes a new program after the asm stub has set | ||
524 | * things up for us; only the execve call and the FPU state reset remain. | ||
525 | */ | ||
526 | asmlinkage long | ||
527 | sys32_execve(struct pt_regs regs) | ||
528 | { | ||
529 | int error; | ||
530 | char * filename; | ||
531 | |||
532 | filename = getname(compat_ptr(regs.orig_gpr2)); | ||
533 | error = PTR_ERR(filename); | ||
534 | if (IS_ERR(filename)) | ||
535 | goto out; | ||
536 | error = compat_do_execve(filename, compat_ptr(regs.gprs[3]), | ||
537 | compat_ptr(regs.gprs[4]), ®s); | ||
538 | if (error == 0) | ||
539 | { | ||
540 | task_lock(current); | ||
541 | current->ptrace &= ~PT_DTRACE; | ||
542 | task_unlock(current); | ||
543 | current->thread.fp_regs.fpc=0; | ||
544 | __asm__ __volatile__ | ||
545 | ("sr 0,0\n\t" | ||
546 | "sfpc 0,0\n\t" | ||
547 | : : :"0"); | ||
548 | } | ||
549 | putname(filename); | ||
550 | out: | ||
551 | return error; | ||
552 | } | ||
553 | |||
554 | |||
555 | #ifdef CONFIG_MODULES | ||
556 | |||
557 | asmlinkage long | ||
558 | sys32_init_module(void __user *umod, unsigned long len, | ||
559 | const char __user *uargs) | ||
560 | { | ||
561 | return sys_init_module(umod, len, uargs); | ||
562 | } | ||
563 | |||
564 | asmlinkage long | ||
565 | sys32_delete_module(const char __user *name_user, unsigned int flags) | ||
566 | { | ||
567 | return sys_delete_module(name_user, flags); | ||
568 | } | ||
569 | |||
570 | #else /* CONFIG_MODULES */ | ||
571 | |||
572 | asmlinkage long | ||
573 | sys32_init_module(void __user *umod, unsigned long len, | ||
574 | const char __user *uargs) | ||
575 | { | ||
576 | return -ENOSYS; | ||
577 | } | ||
578 | |||
579 | asmlinkage long | ||
580 | sys32_delete_module(const char __user *name_user, unsigned int flags) | ||
581 | { | ||
582 | return -ENOSYS; | ||
583 | } | ||
584 | |||
585 | #endif /* CONFIG_MODULES */ | ||
586 | |||
587 | /* Translations due to time_t size differences, which affect all | ||
588 | sorts of things, like timeval and itimerval. */ | ||
589 | |||
590 | extern struct timezone sys_tz; | ||
591 | |||
592 | asmlinkage long sys32_gettimeofday(struct compat_timeval *tv, struct timezone *tz) | ||
593 | { | ||
594 | if (tv) { | ||
595 | struct timeval ktv; | ||
596 | do_gettimeofday(&ktv); | ||
597 | if (put_tv32(tv, &ktv)) | ||
598 | return -EFAULT; | ||
599 | } | ||
600 | if (tz) { | ||
601 | if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) | ||
602 | return -EFAULT; | ||
603 | } | ||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static inline long get_ts32(struct timespec *o, struct compat_timeval *i) | ||
608 | { | ||
609 | long usec; | ||
610 | |||
611 | if (!access_ok(VERIFY_READ, i, sizeof(*i))) | ||
612 | return -EFAULT; | ||
613 | if (__get_user(o->tv_sec, &i->tv_sec)) | ||
614 | return -EFAULT; | ||
615 | if (__get_user(usec, &i->tv_usec)) | ||
616 | return -EFAULT; | ||
617 | o->tv_nsec = usec * 1000; | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | asmlinkage long sys32_settimeofday(struct compat_timeval *tv, struct timezone *tz) | ||
622 | { | ||
623 | struct timespec kts; | ||
624 | struct timezone ktz; | ||
625 | |||
626 | if (tv) { | ||
627 | if (get_ts32(&kts, tv)) | ||
628 | return -EFAULT; | ||
629 | } | ||
630 | if (tz) { | ||
631 | if (copy_from_user(&ktz, tz, sizeof(ktz))) | ||
632 | return -EFAULT; | ||
633 | } | ||
634 | |||
635 | return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); | ||
636 | } | ||
637 | |||
638 | /* These are here just in case some old 31 bit binary calls it. */ | ||
639 | asmlinkage long sys32_pause(void) | ||
640 | { | ||
641 | current->state = TASK_INTERRUPTIBLE; | ||
642 | schedule(); | ||
643 | return -ERESTARTNOHAND; | ||
644 | } | ||
645 | |||
646 | asmlinkage long sys32_pread64(unsigned int fd, char *ubuf, | ||
647 | size_t count, u32 poshi, u32 poslo) | ||
648 | { | ||
649 | if ((compat_ssize_t) count < 0) | ||
650 | return -EINVAL; | ||
651 | return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); | ||
652 | } | ||
653 | |||
654 | asmlinkage long sys32_pwrite64(unsigned int fd, const char *ubuf, | ||
655 | size_t count, u32 poshi, u32 poslo) | ||
656 | { | ||
657 | if ((compat_ssize_t) count < 0) | ||
658 | return -EINVAL; | ||
659 | return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); | ||
660 | } | ||
661 | |||
662 | asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count) | ||
663 | { | ||
664 | return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count); | ||
665 | } | ||
666 | |||
667 | asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t *offset, size_t count) | ||
668 | { | ||
669 | mm_segment_t old_fs = get_fs(); | ||
670 | int ret; | ||
671 | off_t of; | ||
672 | |||
673 | if (offset && get_user(of, offset)) | ||
674 | return -EFAULT; | ||
675 | |||
676 | set_fs(KERNEL_DS); | ||
677 | ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count); | ||
678 | set_fs(old_fs); | ||
679 | |||
680 | if (!ret && offset && put_user(of, offset)) | ||
681 | return -EFAULT; | ||
682 | |||
683 | return ret; | ||
684 | } | ||
685 | |||
686 | asmlinkage long sys32_sendfile64(int out_fd, int in_fd, | ||
687 | compat_loff_t *offset, s32 count) | ||
688 | { | ||
689 | mm_segment_t old_fs = get_fs(); | ||
690 | int ret; | ||
691 | loff_t lof; | ||
692 | |||
693 | if (offset && get_user(lof, offset)) | ||
694 | return -EFAULT; | ||
695 | |||
696 | set_fs(KERNEL_DS); | ||
697 | ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count); | ||
698 | set_fs(old_fs); | ||
699 | |||
700 | if (offset && put_user(lof, offset)) | ||
701 | return -EFAULT; | ||
702 | |||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | /* Handle adjtimex compatibility. */ | ||
707 | |||
708 | struct timex32 { | ||
709 | u32 modes; | ||
710 | s32 offset, freq, maxerror, esterror; | ||
711 | s32 status, constant, precision, tolerance; | ||
712 | struct compat_timeval time; | ||
713 | s32 tick; | ||
714 | s32 ppsfreq, jitter, shift, stabil; | ||
715 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
716 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
717 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
718 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
719 | }; | ||
720 | |||
721 | extern int do_adjtimex(struct timex *); | ||
722 | |||
723 | asmlinkage long sys32_adjtimex(struct timex32 *utp) | ||
724 | { | ||
725 | struct timex txc; | ||
726 | int ret; | ||
727 | |||
728 | memset(&txc, 0, sizeof(struct timex)); | ||
729 | |||
730 | if(get_user(txc.modes, &utp->modes) || | ||
731 | __get_user(txc.offset, &utp->offset) || | ||
732 | __get_user(txc.freq, &utp->freq) || | ||
733 | __get_user(txc.maxerror, &utp->maxerror) || | ||
734 | __get_user(txc.esterror, &utp->esterror) || | ||
735 | __get_user(txc.status, &utp->status) || | ||
736 | __get_user(txc.constant, &utp->constant) || | ||
737 | __get_user(txc.precision, &utp->precision) || | ||
738 | __get_user(txc.tolerance, &utp->tolerance) || | ||
739 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
740 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
741 | __get_user(txc.tick, &utp->tick) || | ||
742 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
743 | __get_user(txc.jitter, &utp->jitter) || | ||
744 | __get_user(txc.shift, &utp->shift) || | ||
745 | __get_user(txc.stabil, &utp->stabil) || | ||
746 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
747 | __get_user(txc.calcnt, &utp->calcnt) || | ||
748 | __get_user(txc.errcnt, &utp->errcnt) || | ||
749 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
750 | return -EFAULT; | ||
751 | |||
752 | ret = do_adjtimex(&txc); | ||
753 | |||
754 | if(put_user(txc.modes, &utp->modes) || | ||
755 | __put_user(txc.offset, &utp->offset) || | ||
756 | __put_user(txc.freq, &utp->freq) || | ||
757 | __put_user(txc.maxerror, &utp->maxerror) || | ||
758 | __put_user(txc.esterror, &utp->esterror) || | ||
759 | __put_user(txc.status, &utp->status) || | ||
760 | __put_user(txc.constant, &utp->constant) || | ||
761 | __put_user(txc.precision, &utp->precision) || | ||
762 | __put_user(txc.tolerance, &utp->tolerance) || | ||
763 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
764 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
765 | __put_user(txc.tick, &utp->tick) || | ||
766 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
767 | __put_user(txc.jitter, &utp->jitter) || | ||
768 | __put_user(txc.shift, &utp->shift) || | ||
769 | __put_user(txc.stabil, &utp->stabil) || | ||
770 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
771 | __put_user(txc.calcnt, &utp->calcnt) || | ||
772 | __put_user(txc.errcnt, &utp->errcnt) || | ||
773 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
774 | ret = -EFAULT; | ||
775 | |||
776 | return ret; | ||
777 | } | ||
778 | |||
779 | #ifdef CONFIG_SYSCTL | ||
780 | struct __sysctl_args32 { | ||
781 | u32 name; | ||
782 | int nlen; | ||
783 | u32 oldval; | ||
784 | u32 oldlenp; | ||
785 | u32 newval; | ||
786 | u32 newlen; | ||
787 | u32 __unused[4]; | ||
788 | }; | ||
789 | |||
790 | asmlinkage long sys32_sysctl(struct __sysctl_args32 *args) | ||
791 | { | ||
792 | struct __sysctl_args32 tmp; | ||
793 | int error; | ||
794 | size_t oldlen, *oldlenp = NULL; | ||
795 | unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7; | ||
796 | |||
797 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
798 | return -EFAULT; | ||
799 | |||
800 | if (tmp.oldval && tmp.oldlenp) { | ||
801 | /* Duh, this is ugly and might not work if sysctl_args | ||
802 | is in read-only memory, but do_sysctl indirectly does | ||
803 | a lot of uaccess in both directions and we'd have to | ||
804 | basically copy the whole sysctl.c here, and | ||
805 | glibc's __sysctl uses rw memory for the structure | ||
806 | anyway. */ | ||
807 | if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) || | ||
808 | put_user(oldlen, (size_t *)addr)) | ||
809 | return -EFAULT; | ||
810 | oldlenp = (size_t *)addr; | ||
811 | } | ||
812 | |||
813 | lock_kernel(); | ||
814 | error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval), | ||
815 | oldlenp, (void *)A(tmp.newval), tmp.newlen); | ||
816 | unlock_kernel(); | ||
817 | if (oldlenp) { | ||
818 | if (!error) { | ||
819 | if (get_user(oldlen, (size_t *)addr) || | ||
820 | put_user(oldlen, (u32 *)A(tmp.oldlenp))) | ||
821 | error = -EFAULT; | ||
822 | } | ||
823 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | ||
824 | } | ||
825 | return error; | ||
826 | } | ||
827 | #endif | ||
828 | |||
829 | struct stat64_emu31 { | ||
830 | unsigned long long st_dev; | ||
831 | unsigned int __pad1; | ||
832 | #define STAT64_HAS_BROKEN_ST_INO 1 | ||
833 | u32 __st_ino; | ||
834 | unsigned int st_mode; | ||
835 | unsigned int st_nlink; | ||
836 | u32 st_uid; | ||
837 | u32 st_gid; | ||
838 | unsigned long long st_rdev; | ||
839 | unsigned int __pad3; | ||
840 | long st_size; | ||
841 | u32 st_blksize; | ||
842 | unsigned char __pad4[4]; | ||
843 | u32 __pad5; /* future possible st_blocks high bits */ | ||
844 | u32 st_blocks; /* Number of 512-byte blocks allocated. */ | ||
845 | u32 st_atime; | ||
846 | u32 __pad6; | ||
847 | u32 st_mtime; | ||
848 | u32 __pad7; | ||
849 | u32 st_ctime; | ||
850 | u32 __pad8; /* will be high 32 bits of ctime someday */ | ||
851 | unsigned long st_ino; | ||
852 | }; | ||
853 | |||
854 | static int cp_stat64(struct stat64_emu31 *ubuf, struct kstat *stat) | ||
855 | { | ||
856 | struct stat64_emu31 tmp; | ||
857 | |||
858 | memset(&tmp, 0, sizeof(tmp)); | ||
859 | |||
860 | tmp.st_dev = huge_encode_dev(stat->dev); | ||
861 | tmp.st_ino = stat->ino; | ||
862 | tmp.__st_ino = (u32)stat->ino; | ||
863 | tmp.st_mode = stat->mode; | ||
864 | tmp.st_nlink = (unsigned int)stat->nlink; | ||
865 | tmp.st_uid = stat->uid; | ||
866 | tmp.st_gid = stat->gid; | ||
867 | tmp.st_rdev = huge_encode_dev(stat->rdev); | ||
868 | tmp.st_size = stat->size; | ||
869 | tmp.st_blksize = (u32)stat->blksize; | ||
870 | tmp.st_blocks = (u32)stat->blocks; | ||
871 | tmp.st_atime = (u32)stat->atime.tv_sec; | ||
872 | tmp.st_mtime = (u32)stat->mtime.tv_sec; | ||
873 | tmp.st_ctime = (u32)stat->ctime.tv_sec; | ||
874 | |||
875 | return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; | ||
876 | } | ||
877 | |||
878 | asmlinkage long sys32_stat64(char * filename, struct stat64_emu31 * statbuf) | ||
879 | { | ||
880 | struct kstat stat; | ||
881 | int ret = vfs_stat(filename, &stat); | ||
882 | if (!ret) | ||
883 | ret = cp_stat64(statbuf, &stat); | ||
884 | return ret; | ||
885 | } | ||
886 | |||
887 | asmlinkage long sys32_lstat64(char * filename, struct stat64_emu31 * statbuf) | ||
888 | { | ||
889 | struct kstat stat; | ||
890 | int ret = vfs_lstat(filename, &stat); | ||
891 | if (!ret) | ||
892 | ret = cp_stat64(statbuf, &stat); | ||
893 | return ret; | ||
894 | } | ||
895 | |||
896 | asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 * statbuf) | ||
897 | { | ||
898 | struct kstat stat; | ||
899 | int ret = vfs_fstat(fd, &stat); | ||
900 | if (!ret) | ||
901 | ret = cp_stat64(statbuf, &stat); | ||
902 | return ret; | ||
903 | } | ||
904 | |||
905 | /* | ||
906 | * Linux/i386 originally could not handle more than | ||
907 | * 4 system call parameters, so these system calls used a memory | ||
908 | * block for parameter passing. | ||
909 | */ | ||
910 | |||
911 | struct mmap_arg_struct_emu31 { | ||
912 | u32 addr; | ||
913 | u32 len; | ||
914 | u32 prot; | ||
915 | u32 flags; | ||
916 | u32 fd; | ||
917 | u32 offset; | ||
918 | }; | ||
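As the comment above notes, the six mmap parameters travel in a single user-space argument block whose address is the only system call argument. A hypothetical sketch of the caller side, purely for illustration (the struct and function names here are not part of any real ABI, and the actual system call invocation is omitted):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical 31-bit caller: pack the six mmap parameters into one block. */
    struct mmap_arg_block {
            uint32_t addr, len, prot, flags, fd, offset;
    };

    static void fill_mmap_args(struct mmap_arg_block *a, uint32_t len,
                               uint32_t prot, uint32_t flags, uint32_t fd)
    {
            memset(a, 0, sizeof(*a));   /* addr = 0 (let the kernel pick), offset = 0 */
            a->len = len;
            a->prot = prot;
            a->flags = flags;
            a->fd = fd;
            /* the address of *a would then be the single syscall argument */
    }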
919 | |||
920 | /* common code for old and new mmaps */ | ||
921 | static inline long do_mmap2( | ||
922 | unsigned long addr, unsigned long len, | ||
923 | unsigned long prot, unsigned long flags, | ||
924 | unsigned long fd, unsigned long pgoff) | ||
925 | { | ||
926 | struct file * file = NULL; | ||
927 | unsigned long error = -EBADF; | ||
928 | |||
929 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
930 | if (!(flags & MAP_ANONYMOUS)) { | ||
931 | file = fget(fd); | ||
932 | if (!file) | ||
933 | goto out; | ||
934 | } | ||
935 | |||
936 | down_write(¤t->mm->mmap_sem); | ||
937 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
938 | if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { | ||
939 | /* Result is out of bounds. */ | ||
940 | do_munmap(current->mm, addr, len); | ||
941 | error = -ENOMEM; | ||
942 | } | ||
943 | up_write(¤t->mm->mmap_sem); | ||
944 | |||
945 | if (file) | ||
946 | fput(file); | ||
947 | out: | ||
948 | return error; | ||
949 | } | ||
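The bound check in do_mmap2() rejects any mapping whose end would reach the 2 GB line, since a 31-bit process cannot address memory at or above 0x80000000. An equivalent stand-alone predicate, written to avoid wrap-around (the function name is illustrative):

    #include <stdint.h>

    /* Illustrative only: a compat mapping [start, start + len) must lie
     * entirely below the 31-bit limit of 0x80000000. */
    static inline int fits_31bit(uint64_t start, uint64_t len)
    {
            return start < 0x80000000ULL && len < 0x80000000ULL - start;
    }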
950 | |||
951 | |||
952 | asmlinkage unsigned long | ||
953 | old32_mmap(struct mmap_arg_struct_emu31 *arg) | ||
954 | { | ||
955 | struct mmap_arg_struct_emu31 a; | ||
956 | int error = -EFAULT; | ||
957 | |||
958 | if (copy_from_user(&a, arg, sizeof(a))) | ||
959 | goto out; | ||
960 | |||
961 | error = -EINVAL; | ||
962 | if (a.offset & ~PAGE_MASK) | ||
963 | goto out; | ||
964 | |||
965 | error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
966 | out: | ||
967 | return error; | ||
968 | } | ||
969 | |||
970 | asmlinkage long | ||
971 | sys32_mmap2(struct mmap_arg_struct_emu31 *arg) | ||
972 | { | ||
973 | struct mmap_arg_struct_emu31 a; | ||
974 | int error = -EFAULT; | ||
975 | |||
976 | if (copy_from_user(&a, arg, sizeof(a))) | ||
977 | goto out; | ||
978 | error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
979 | out: | ||
980 | return error; | ||
981 | } | ||
982 | |||
983 | asmlinkage long sys32_read(unsigned int fd, char * buf, size_t count) | ||
984 | { | ||
985 | if ((compat_ssize_t) count < 0) | ||
986 | return -EINVAL; | ||
987 | |||
988 | return sys_read(fd, buf, count); | ||
989 | } | ||
990 | |||
991 | asmlinkage long sys32_write(unsigned int fd, char * buf, size_t count) | ||
992 | { | ||
993 | if ((compat_ssize_t) count < 0) | ||
994 | return -EINVAL; | ||
995 | |||
996 | return sys_write(fd, buf, count); | ||
997 | } | ||
998 | |||
999 | asmlinkage long sys32_clone(struct pt_regs regs) | ||
1000 | { | ||
1001 | unsigned long clone_flags; | ||
1002 | unsigned long newsp; | ||
1003 | int *parent_tidptr, *child_tidptr; | ||
1004 | |||
1005 | clone_flags = regs.gprs[3] & 0xffffffffUL; | ||
1006 | newsp = regs.orig_gpr2 & 0x7fffffffUL; | ||
1007 | parent_tidptr = (int *) (regs.gprs[4] & 0x7fffffffUL); | ||
1008 | child_tidptr = (int *) (regs.gprs[5] & 0x7fffffffUL); | ||
1009 | if (!newsp) | ||
1010 | newsp = regs.gprs[15]; | ||
1011 | return do_fork(clone_flags, newsp, ®s, 0, | ||
1012 | parent_tidptr, child_tidptr); | ||
1013 | } | ||
1014 | |||
1015 | /* | ||
1016 | * Wrapper function for sys_timer_create. | ||
1017 | */ | ||
1018 | extern asmlinkage long | ||
1019 | sys_timer_create(clockid_t, struct sigevent *, timer_t *); | ||
1020 | |||
1021 | asmlinkage long | ||
1022 | sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32, | ||
1023 | timer_t *timer_id) | ||
1024 | { | ||
1025 | struct sigevent se; | ||
1026 | timer_t ktimer_id; | ||
1027 | mm_segment_t old_fs; | ||
1028 | long ret; | ||
1029 | |||
1030 | if (se32 == NULL) | ||
1031 | return sys_timer_create(which_clock, NULL, timer_id); | ||
1032 | |||
1033 | if (get_compat_sigevent(&se, se32)) | ||
1034 | return -EFAULT; | ||
1035 | |||
1036 | old_fs = get_fs(); | ||
1037 | set_fs(KERNEL_DS); | ||
1038 | ret = sys_timer_create(which_clock, &se, &ktimer_id); | ||
1039 | set_fs(old_fs); | ||
1040 | |||
1041 | if (!ret) | ||
1042 | ret = put_user (ktimer_id, timer_id); | ||
1043 | |||
1044 | return ret; | ||
1045 | } | ||
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h new file mode 100644 index 000000000000..bf33dcfec7db --- /dev/null +++ b/arch/s390/kernel/compat_linux.h | |||
@@ -0,0 +1,197 @@ | |||
1 | #ifndef _ASM_S390X_S390_H | ||
2 | #define _ASM_S390X_S390_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/compat.h> | ||
6 | #include <linux/socket.h> | ||
7 | #include <linux/syscalls.h> | ||
8 | #include <linux/nfs_fs.h> | ||
9 | #include <linux/sunrpc/svc.h> | ||
10 | #include <linux/nfsd/nfsd.h> | ||
11 | #include <linux/nfsd/export.h> | ||
12 | |||
13 | /* Macro that masks the high order bit of a 32 bit pointer and converts it */ | ||
14 | /* to a 64 bit pointer */ | ||
15 | #define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) | ||
16 | #define AA(__x) \ | ||
17 | ((unsigned long)(__x)) | ||
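The distinction between the two macros matters: A() clears the 31-bit address-mode bit before widening a user pointer, while AA() is a plain zero-extension for values that are not addresses. A small illustrative restatement of A() as a function (the name is not a kernel symbol):

    #include <stdint.h>

    /* Widen a 31-bit user address to 64 bits, dropping the high-order
     * address-mode bit, exactly as the A() macro above does. */
    static inline uint64_t widen_31bit_ptr(uint32_t uptr)
    {
            return (uint64_t)(uptr & 0x7FFFFFFFu);
    }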
18 | |||
19 | /* Now 32bit compatibility types */ | ||
20 | struct ipc_kludge_32 { | ||
21 | __u32 msgp; /* pointer */ | ||
22 | __s32 msgtyp; | ||
23 | }; | ||
24 | |||
25 | struct old_sigaction32 { | ||
26 | __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */ | ||
27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | ||
28 | __u32 sa_flags; | ||
29 | __u32 sa_restorer; /* Another 32 bit pointer */ | ||
30 | }; | ||
31 | |||
32 | typedef struct compat_siginfo { | ||
33 | int si_signo; | ||
34 | int si_errno; | ||
35 | int si_code; | ||
36 | |||
37 | union { | ||
38 | int _pad[((128/sizeof(int)) - 3)]; | ||
39 | |||
40 | /* kill() */ | ||
41 | struct { | ||
42 | pid_t _pid; /* sender's pid */ | ||
43 | uid_t _uid; /* sender's uid */ | ||
44 | } _kill; | ||
45 | |||
46 | /* POSIX.1b timers */ | ||
47 | struct { | ||
48 | timer_t _tid; /* timer id */ | ||
49 | int _overrun; /* overrun count */ | ||
50 | compat_sigval_t _sigval; /* same as below */ | ||
51 | int _sys_private; /* not to be passed to user */ | ||
52 | } _timer; | ||
53 | |||
54 | /* POSIX.1b signals */ | ||
55 | struct { | ||
56 | pid_t _pid; /* sender's pid */ | ||
57 | uid_t _uid; /* sender's uid */ | ||
58 | compat_sigval_t _sigval; | ||
59 | } _rt; | ||
60 | |||
61 | /* SIGCHLD */ | ||
62 | struct { | ||
63 | pid_t _pid; /* which child */ | ||
64 | uid_t _uid; /* sender's uid */ | ||
65 | int _status;/* exit code */ | ||
66 | compat_clock_t _utime; | ||
67 | compat_clock_t _stime; | ||
68 | } _sigchld; | ||
69 | |||
70 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
71 | struct { | ||
72 | __u32 _addr; /* faulting insn/memory ref. - pointer */ | ||
73 | } _sigfault; | ||
74 | |||
75 | /* SIGPOLL */ | ||
76 | struct { | ||
77 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
78 | int _fd; | ||
79 | } _sigpoll; | ||
80 | } _sifields; | ||
81 | } compat_siginfo_t; | ||
82 | |||
83 | /* | ||
84 | * How these fields are to be accessed. | ||
85 | */ | ||
86 | #define si_pid _sifields._kill._pid | ||
87 | #define si_uid _sifields._kill._uid | ||
88 | #define si_status _sifields._sigchld._status | ||
89 | #define si_utime _sifields._sigchld._utime | ||
90 | #define si_stime _sifields._sigchld._stime | ||
91 | #define si_value _sifields._rt._sigval | ||
92 | #define si_int _sifields._rt._sigval.sival_int | ||
93 | #define si_ptr _sifields._rt._sigval.sival_ptr | ||
94 | #define si_addr _sifields._sigfault._addr | ||
95 | #define si_band _sifields._sigpoll._band | ||
96 | #define si_fd _sifields._sigpoll._fd | ||
97 | #define si_tid _sifields._timer._tid | ||
98 | #define si_overrun _sifields._timer._overrun | ||
99 | |||
100 | /* asm/sigcontext.h */ | ||
101 | typedef union | ||
102 | { | ||
103 | __u64 d; | ||
104 | __u32 f; | ||
105 | } freg_t32; | ||
106 | |||
107 | typedef struct | ||
108 | { | ||
109 | unsigned int fpc; | ||
110 | freg_t32 fprs[__NUM_FPRS]; | ||
111 | } _s390_fp_regs32; | ||
112 | |||
113 | typedef struct | ||
114 | { | ||
115 | __u32 mask; | ||
116 | __u32 addr; | ||
117 | } _psw_t32 __attribute__ ((aligned(8))); | ||
118 | |||
119 | #define PSW32_MASK_PER 0x40000000UL | ||
120 | #define PSW32_MASK_DAT 0x04000000UL | ||
121 | #define PSW32_MASK_IO 0x02000000UL | ||
122 | #define PSW32_MASK_EXT 0x01000000UL | ||
123 | #define PSW32_MASK_KEY 0x00F00000UL | ||
124 | #define PSW32_MASK_MCHECK 0x00040000UL | ||
125 | #define PSW32_MASK_WAIT 0x00020000UL | ||
126 | #define PSW32_MASK_PSTATE 0x00010000UL | ||
127 | #define PSW32_MASK_ASC 0x0000C000UL | ||
128 | #define PSW32_MASK_CC 0x00003000UL | ||
129 | #define PSW32_MASK_PM 0x00000f00UL | ||
130 | |||
131 | #define PSW32_ADDR_AMODE31 0x80000000UL | ||
132 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL | ||
133 | |||
134 | #define PSW32_BASE_BITS 0x00080000UL | ||
135 | |||
136 | #define PSW32_ASC_PRIMARY 0x00000000UL | ||
137 | #define PSW32_ASC_ACCREG 0x00004000UL | ||
138 | #define PSW32_ASC_SECONDARY 0x00008000UL | ||
139 | #define PSW32_ASC_HOME 0x0000C000UL | ||
140 | |||
141 | #define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \ | ||
142 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \ | ||
143 | PSW32_MASK_PSTATE) | ||
144 | |||
145 | #define PSW32_MASK_MERGE(CURRENT,NEW) \ | ||
146 | (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \ | ||
147 | ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM))) | ||
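PSW32_MASK_MERGE keeps every bit of the current mask except the condition code and program mask fields, which are taken from the new value. A stand-alone restatement as a function, for illustration only (the two constants are copied from the definitions above):

    #include <stdint.h>

    #define CC_BITS 0x00003000u     /* PSW32_MASK_CC */
    #define PM_BITS 0x00000f00u     /* PSW32_MASK_PM */

    /* Keep all bits of cur except CC/PM, which come from upd. */
    static inline uint32_t psw32_mask_merge(uint32_t cur, uint32_t upd)
    {
            const uint32_t cc_pm = CC_BITS | PM_BITS;
            return (cur & ~cc_pm) | (upd & cc_pm);
    }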
148 | |||
149 | |||
150 | typedef struct | ||
151 | { | ||
152 | _psw_t32 psw; | ||
153 | __u32 gprs[__NUM_GPRS]; | ||
154 | __u32 acrs[__NUM_ACRS]; | ||
155 | } _s390_regs_common32; | ||
156 | |||
157 | typedef struct | ||
158 | { | ||
159 | _s390_regs_common32 regs; | ||
160 | _s390_fp_regs32 fpregs; | ||
161 | } _sigregs32; | ||
162 | |||
163 | #define _SIGCONTEXT_NSIG32 64 | ||
164 | #define _SIGCONTEXT_NSIG_BPW32 32 | ||
165 | #define __SIGNAL_FRAMESIZE32 96 | ||
166 | #define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2) | ||
167 | |||
168 | struct sigcontext32 | ||
169 | { | ||
170 | __u32 oldmask[_COMPAT_NSIG_WORDS]; | ||
171 | __u32 sregs; /* pointer */ | ||
172 | }; | ||
173 | |||
174 | /* asm/signal.h */ | ||
175 | struct sigaction32 { | ||
176 | __u32 sa_handler; /* pointer */ | ||
177 | __u32 sa_flags; | ||
178 | __u32 sa_restorer; /* pointer */ | ||
179 | compat_sigset_t sa_mask; /* mask last for extensibility */ | ||
180 | }; | ||
181 | |||
182 | typedef struct { | ||
183 | __u32 ss_sp; /* pointer */ | ||
184 | int ss_flags; | ||
185 | compat_size_t ss_size; | ||
186 | } stack_t32; | ||
187 | |||
188 | /* asm/ucontext.h */ | ||
189 | struct ucontext32 { | ||
190 | __u32 uc_flags; | ||
191 | __u32 uc_link; /* pointer */ | ||
192 | stack_t32 uc_stack; | ||
193 | _sigregs32 uc_mcontext; | ||
194 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
195 | }; | ||
196 | |||
197 | #endif /* _ASM_S390X_S390_H */ | ||
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h new file mode 100644 index 000000000000..419aef913ee1 --- /dev/null +++ b/arch/s390/kernel/compat_ptrace.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef _PTRACE32_H | ||
2 | #define _PTRACE32_H | ||
3 | |||
4 | #include "compat_linux.h" /* needed for _psw_t32 */ | ||
5 | |||
6 | typedef struct { | ||
7 | __u32 cr[3]; | ||
8 | } per_cr_words32; | ||
9 | |||
10 | typedef struct { | ||
11 | __u16 perc_atmid; /* 0x096 */ | ||
12 | __u32 address; /* 0x098 */ | ||
13 | __u8 access_id; /* 0x0a1 */ | ||
14 | } per_lowcore_words32; | ||
15 | |||
16 | typedef struct { | ||
17 | union { | ||
18 | per_cr_words32 words; | ||
19 | } control_regs; | ||
20 | /* | ||
21 | * Use these flags instead of setting em_instruction_fetch | ||
22 | * directly; they are used so that single stepping can be | ||
23 | * switched on & off without affecting other tracing. | ||
24 | */ | ||
25 | unsigned single_step : 1; | ||
26 | unsigned instruction_fetch : 1; | ||
27 | unsigned : 30; | ||
28 | /* | ||
29 | * These addresses are copied into cr10 & cr11 if single | ||
30 | * stepping is switched off | ||
31 | */ | ||
32 | __u32 starting_addr; | ||
33 | __u32 ending_addr; | ||
34 | union { | ||
35 | per_lowcore_words32 words; | ||
36 | } lowcore; | ||
37 | } per_struct32; | ||
38 | |||
39 | struct user_regs_struct32 | ||
40 | { | ||
41 | _psw_t32 psw; | ||
42 | u32 gprs[NUM_GPRS]; | ||
43 | u32 acrs[NUM_ACRS]; | ||
44 | u32 orig_gpr2; | ||
45 | s390_fp_regs fp_regs; | ||
46 | /* | ||
47 | * These per registers are in here so that gdb can modify them | ||
48 | * itself as there is no "official" ptrace interface for hardware | ||
49 | * watchpoints. This is the way intel does it. | ||
50 | */ | ||
51 | per_struct32 per_info; | ||
52 | u32 ieee_instruction_pointer; | ||
53 | /* Used to give failing instruction back to user for ieee exceptions */ | ||
54 | }; | ||
55 | |||
56 | struct user32 { | ||
57 | /* We start with the registers, to mimic the way that "memory" | ||
58 | is returned from the ptrace(3,...) function. */ | ||
59 | struct user_regs_struct32 regs; /* Where the registers are actually stored */ | ||
60 | /* The rest of this junk is to help gdb figure out what goes where */ | ||
61 | u32 u_tsize; /* Text segment size (pages). */ | ||
62 | u32 u_dsize; /* Data segment size (pages). */ | ||
63 | u32 u_ssize; /* Stack segment size (pages). */ | ||
64 | u32 start_code; /* Starting virtual address of text. */ | ||
65 | u32 start_stack; /* Starting virtual address of stack area. | ||
66 | This is actually the bottom of the stack, | ||
67 | the top of the stack is always found in the | ||
68 | esp register. */ | ||
69 | s32 signal; /* Signal that caused the core dump. */ | ||
70 | u32 u_ar0; /* Used by gdb to help find the values for */ | ||
71 | /* the registers. */ | ||
72 | u32 magic; /* To uniquely identify a core file */ | ||
73 | char u_comm[32]; /* User command that was responsible */ | ||
74 | }; | ||
75 | |||
76 | typedef struct | ||
77 | { | ||
78 | __u32 len; | ||
79 | __u32 kernel_addr; | ||
80 | __u32 process_addr; | ||
81 | } ptrace_area_emu31; | ||
82 | |||
83 | #endif /* _PTRACE32_H */ | ||
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c new file mode 100644 index 000000000000..d05d65ac9694 --- /dev/null +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -0,0 +1,648 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/signal32.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | ||
7 | * Gerhard Tonn (ton@de.ibm.com) | ||
8 | * | ||
9 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
10 | * | ||
11 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/compat.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/tty.h> | ||
28 | #include <linux/personality.h> | ||
29 | #include <linux/binfmts.h> | ||
30 | #include <asm/ucontext.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/lowcore.h> | ||
33 | #include "compat_linux.h" | ||
34 | #include "compat_ptrace.h" | ||
35 | |||
36 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
37 | |||
38 | typedef struct | ||
39 | { | ||
40 | __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; | ||
41 | struct sigcontext32 sc; | ||
42 | _sigregs32 sregs; | ||
43 | int signo; | ||
44 | __u8 retcode[S390_SYSCALL_SIZE]; | ||
45 | } sigframe32; | ||
46 | |||
47 | typedef struct | ||
48 | { | ||
49 | __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; | ||
50 | __u8 retcode[S390_SYSCALL_SIZE]; | ||
51 | compat_siginfo_t info; | ||
52 | struct ucontext32 uc; | ||
53 | } rt_sigframe32; | ||
54 | |||
55 | asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); | ||
56 | |||
57 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | ||
58 | { | ||
59 | int err; | ||
60 | |||
61 | if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t))) | ||
62 | return -EFAULT; | ||
63 | |||
64 | /* If you change siginfo_t structure, please be sure | ||
65 | this code is fixed accordingly. | ||
66 | It should never copy any pad contained in the structure | ||
67 | to avoid security leaks, but must copy the generic | ||
68 | 3 ints plus the relevant union member. | ||
69 | This routine must convert siginfo from 64bit to 32bit as well | ||
70 | at the same time. */ | ||
71 | err = __put_user(from->si_signo, &to->si_signo); | ||
72 | err |= __put_user(from->si_errno, &to->si_errno); | ||
73 | err |= __put_user((short)from->si_code, &to->si_code); | ||
74 | if (from->si_code < 0) | ||
75 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); | ||
76 | else { | ||
77 | switch (from->si_code >> 16) { | ||
78 | case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ | ||
79 | case __SI_MESGQ >> 16: | ||
80 | err |= __put_user(from->si_int, &to->si_int); | ||
81 | /* fallthrough */ | ||
82 | case __SI_KILL >> 16: | ||
83 | err |= __put_user(from->si_pid, &to->si_pid); | ||
84 | err |= __put_user(from->si_uid, &to->si_uid); | ||
85 | break; | ||
86 | case __SI_CHLD >> 16: | ||
87 | err |= __put_user(from->si_pid, &to->si_pid); | ||
88 | err |= __put_user(from->si_uid, &to->si_uid); | ||
89 | err |= __put_user(from->si_utime, &to->si_utime); | ||
90 | err |= __put_user(from->si_stime, &to->si_stime); | ||
91 | err |= __put_user(from->si_status, &to->si_status); | ||
92 | break; | ||
93 | case __SI_FAULT >> 16: | ||
94 | err |= __put_user((unsigned long) from->si_addr, | ||
95 | &to->si_addr); | ||
96 | break; | ||
97 | case __SI_POLL >> 16: | ||
98 | err |= __put_user(from->si_band, &to->si_band); | ||
99 | err |= __put_user(from->si_fd, &to->si_fd); | ||
100 | break; | ||
101 | case __SI_TIMER >> 16: | ||
102 | err |= __put_user(from->si_tid, &to->si_tid); | ||
103 | err |= __put_user(from->si_overrun, &to->si_overrun); | ||
104 | err |= __put_user(from->si_int, &to->si_int); | ||
105 | break; | ||
106 | default: | ||
107 | break; | ||
108 | } | ||
109 | } | ||
110 | return err; | ||
111 | } | ||
112 | |||
113 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | ||
114 | { | ||
115 | int err; | ||
116 | u32 tmp; | ||
117 | |||
118 | if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t))) | ||
119 | return -EFAULT; | ||
120 | |||
121 | err = __get_user(to->si_signo, &from->si_signo); | ||
122 | err |= __get_user(to->si_errno, &from->si_errno); | ||
123 | err |= __get_user(to->si_code, &from->si_code); | ||
124 | |||
125 | if (to->si_code < 0) | ||
126 | err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); | ||
127 | else { | ||
128 | switch (to->si_code >> 16) { | ||
129 | case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ | ||
130 | case __SI_MESGQ >> 16: | ||
131 | err |= __get_user(to->si_int, &from->si_int); | ||
132 | /* fallthrough */ | ||
133 | case __SI_KILL >> 16: | ||
134 | err |= __get_user(to->si_pid, &from->si_pid); | ||
135 | err |= __get_user(to->si_uid, &from->si_uid); | ||
136 | break; | ||
137 | case __SI_CHLD >> 16: | ||
138 | err |= __get_user(to->si_pid, &from->si_pid); | ||
139 | err |= __get_user(to->si_uid, &from->si_uid); | ||
140 | err |= __get_user(to->si_utime, &from->si_utime); | ||
141 | err |= __get_user(to->si_stime, &from->si_stime); | ||
142 | err |= __get_user(to->si_status, &from->si_status); | ||
143 | break; | ||
144 | case __SI_FAULT >> 16: | ||
145 | err |= __get_user(tmp, &from->si_addr); | ||
146 | to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN); | ||
147 | break; | ||
148 | case __SI_POLL >> 16: | ||
149 | err |= __get_user(to->si_band, &from->si_band); | ||
150 | err |= __get_user(to->si_fd, &from->si_fd); | ||
151 | break; | ||
152 | case __SI_TIMER >> 16: | ||
153 | err |= __get_user(to->si_tid, &from->si_tid); | ||
154 | err |= __get_user(to->si_overrun, &from->si_overrun); | ||
155 | err |= __get_user(to->si_int, &from->si_int); | ||
156 | break; | ||
157 | default: | ||
158 | break; | ||
159 | } | ||
160 | } | ||
161 | return err; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Atomically swap in the new signal mask, and wait for a signal. | ||
166 | */ | ||
167 | asmlinkage int | ||
168 | sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask) | ||
169 | { | ||
170 | sigset_t saveset; | ||
171 | |||
172 | mask &= _BLOCKABLE; | ||
173 | spin_lock_irq(¤t->sighand->siglock); | ||
174 | saveset = current->blocked; | ||
175 | siginitset(¤t->blocked, mask); | ||
176 | recalc_sigpending(); | ||
177 | spin_unlock_irq(¤t->sighand->siglock); | ||
178 | regs->gprs[2] = -EINTR; | ||
179 | |||
180 | while (1) { | ||
181 | set_current_state(TASK_INTERRUPTIBLE); | ||
182 | schedule(); | ||
183 | if (do_signal(regs, &saveset)) | ||
184 | return -EINTR; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | asmlinkage int | ||
189 | sys32_rt_sigsuspend(struct pt_regs * regs, compat_sigset_t __user *unewset, | ||
190 | size_t sigsetsize) | ||
191 | { | ||
192 | sigset_t saveset, newset; | ||
193 | compat_sigset_t set32; | ||
194 | |||
195 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
196 | if (sigsetsize != sizeof(sigset_t)) | ||
197 | return -EINVAL; | ||
198 | |||
199 | if (copy_from_user(&set32, unewset, sizeof(set32))) | ||
200 | return -EFAULT; | ||
201 | switch (_NSIG_WORDS) { | ||
202 | case 4: newset.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32); | ||
203 | case 3: newset.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32); | ||
204 | case 2: newset.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32); | ||
205 | case 1: newset.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32); | ||
206 | } | ||
207 | sigdelsetmask(&newset, ~_BLOCKABLE); | ||
208 | |||
209 | spin_lock_irq(¤t->sighand->siglock); | ||
210 | saveset = current->blocked; | ||
211 | current->blocked = newset; | ||
212 | recalc_sigpending(); | ||
213 | spin_unlock_irq(¤t->sighand->siglock); | ||
214 | regs->gprs[2] = -EINTR; | ||
215 | |||
216 | while (1) { | ||
217 | set_current_state(TASK_INTERRUPTIBLE); | ||
218 | schedule(); | ||
219 | if (do_signal(regs, &saveset)) | ||
220 | return -EINTR; | ||
221 | } | ||
222 | } | ||
223 | |||
224 | asmlinkage long | ||
225 | sys32_sigaction(int sig, const struct old_sigaction32 __user *act, | ||
226 | struct old_sigaction32 __user *oact) | ||
227 | { | ||
228 | struct k_sigaction new_ka, old_ka; | ||
229 | unsigned long sa_handler, sa_restorer; | ||
230 | int ret; | ||
231 | |||
232 | if (act) { | ||
233 | compat_old_sigset_t mask; | ||
234 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
235 | __get_user(sa_handler, &act->sa_handler) || | ||
236 | __get_user(sa_restorer, &act->sa_restorer)) | ||
237 | return -EFAULT; | ||
238 | new_ka.sa.sa_handler = (__sighandler_t) sa_handler; | ||
239 | new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer; | ||
240 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
241 | __get_user(mask, &act->sa_mask); | ||
242 | siginitset(&new_ka.sa.sa_mask, mask); | ||
243 | } | ||
244 | |||
245 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
246 | |||
247 | if (!ret && oact) { | ||
248 | sa_handler = (unsigned long) old_ka.sa.sa_handler; | ||
249 | sa_restorer = (unsigned long) old_ka.sa.sa_restorer; | ||
250 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
251 | __put_user(sa_handler, &oact->sa_handler) || | ||
252 | __put_user(sa_restorer, &oact->sa_restorer)) | ||
253 | return -EFAULT; | ||
254 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
255 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
256 | } | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | int | ||
262 | do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact); | ||
263 | |||
264 | asmlinkage long | ||
265 | sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | ||
266 | struct sigaction32 __user *oact, size_t sigsetsize) | ||
267 | { | ||
268 | struct k_sigaction new_ka, old_ka; | ||
269 | unsigned long sa_handler; | ||
270 | int ret; | ||
271 | compat_sigset_t set32; | ||
272 | |||
273 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
274 | if (sigsetsize != sizeof(compat_sigset_t)) | ||
275 | return -EINVAL; | ||
276 | |||
277 | if (act) { | ||
278 | ret = get_user(sa_handler, &act->sa_handler); | ||
279 | ret |= __copy_from_user(&set32, &act->sa_mask, | ||
280 | sizeof(compat_sigset_t)); | ||
281 | switch (_NSIG_WORDS) { | ||
282 | case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | ||
283 | | (((long)set32.sig[7]) << 32); | ||
284 | case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | ||
285 | | (((long)set32.sig[5]) << 32); | ||
286 | case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | ||
287 | | (((long)set32.sig[3]) << 32); | ||
288 | case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | ||
289 | | (((long)set32.sig[1]) << 32); | ||
290 | } | ||
291 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
292 | |||
293 | if (ret) | ||
294 | return -EFAULT; | ||
295 | new_ka.sa.sa_handler = (__sighandler_t) sa_handler; | ||
296 | } | ||
297 | |||
298 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
299 | |||
300 | if (!ret && oact) { | ||
301 | switch (_NSIG_WORDS) { | ||
302 | case 4: | ||
303 | set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); | ||
304 | set32.sig[6] = old_ka.sa.sa_mask.sig[3]; | ||
305 | case 3: | ||
306 | set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); | ||
307 | set32.sig[4] = old_ka.sa.sa_mask.sig[2]; | ||
308 | case 2: | ||
309 | set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); | ||
310 | set32.sig[2] = old_ka.sa.sa_mask.sig[1]; | ||
311 | case 1: | ||
312 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); | ||
313 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; | ||
314 | } | ||
315 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); | ||
316 | ret |= __copy_to_user(&oact->sa_mask, &set32, | ||
317 | sizeof(compat_sigset_t)); | ||
318 | ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
319 | } | ||
320 | |||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | asmlinkage long | ||
325 | sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, | ||
326 | struct pt_regs *regs) | ||
327 | { | ||
328 | stack_t kss, koss; | ||
329 | unsigned long ss_sp; | ||
330 | int ret, err = 0; | ||
331 | mm_segment_t old_fs = get_fs(); | ||
332 | |||
333 | if (uss) { | ||
334 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) | ||
335 | return -EFAULT; | ||
336 | err |= __get_user(ss_sp, &uss->ss_sp); | ||
337 | err |= __get_user(kss.ss_size, &uss->ss_size); | ||
338 | err |= __get_user(kss.ss_flags, &uss->ss_flags); | ||
339 | if (err) | ||
340 | return -EFAULT; | ||
341 | kss.ss_sp = (void *) ss_sp; | ||
342 | } | ||
343 | |||
344 | set_fs (KERNEL_DS); | ||
345 | ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), | ||
346 | (stack_t __user *) (uoss ? &koss : NULL), | ||
347 | regs->gprs[15]); | ||
348 | set_fs (old_fs); | ||
349 | |||
350 | if (!ret && uoss) { | ||
351 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) | ||
352 | return -EFAULT; | ||
353 | ss_sp = (unsigned long) koss.ss_sp; | ||
354 | err |= __put_user(ss_sp, &uoss->ss_sp); | ||
355 | err |= __put_user(koss.ss_size, &uoss->ss_size); | ||
356 | err |= __put_user(koss.ss_flags, &uoss->ss_flags); | ||
357 | if (err) | ||
358 | return -EFAULT; | ||
359 | } | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | ||
364 | { | ||
365 | _s390_regs_common32 regs32; | ||
366 | int err, i; | ||
367 | |||
368 | regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, | ||
369 | (__u32)(regs->psw.mask >> 32)); | ||
370 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | ||
371 | for (i = 0; i < NUM_GPRS; i++) | ||
372 | regs32.gprs[i] = (__u32) regs->gprs[i]; | ||
373 | save_access_regs(current->thread.acrs); | ||
374 | memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs)); | ||
375 | err = __copy_to_user(&sregs->regs, ®s32, sizeof(regs32)); | ||
376 | if (err) | ||
377 | return err; | ||
378 | save_fp_regs(¤t->thread.fp_regs); | ||
379 | /* s390_fp_regs and _s390_fp_regs32 are the same ! */ | ||
380 | return __copy_to_user(&sregs->fpregs, ¤t->thread.fp_regs, | ||
381 | sizeof(_s390_fp_regs32)); | ||
382 | } | ||
383 | |||
384 | static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | ||
385 | { | ||
386 | _s390_regs_common32 regs32; | ||
387 | int err, i; | ||
388 | |||
389 | /* Always make any pending restarted system call return -EINTR */ | ||
390 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
391 | |||
392 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); | ||
393 | if (err) | ||
394 | return err; | ||
395 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | ||
396 | (__u64)regs32.psw.mask << 32); | ||
397 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); | ||
398 | for (i = 0; i < NUM_GPRS; i++) | ||
399 | regs->gprs[i] = (__u64) regs32.gprs[i]; | ||
400 | memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs)); | ||
401 | restore_access_regs(current->thread.acrs); | ||
402 | |||
403 | err = __copy_from_user(¤t->thread.fp_regs, &sregs->fpregs, | ||
404 | sizeof(_s390_fp_regs32)); | ||
405 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; | ||
406 | if (err) | ||
407 | return err; | ||
408 | |||
409 | restore_fp_regs(¤t->thread.fp_regs); | ||
410 | regs->trap = -1; /* disable syscall checks */ | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | asmlinkage long sys32_sigreturn(struct pt_regs *regs) | ||
415 | { | ||
416 | sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; | ||
417 | sigset_t set; | ||
418 | |||
419 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
420 | goto badframe; | ||
421 | if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) | ||
422 | goto badframe; | ||
423 | |||
424 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
425 | spin_lock_irq(¤t->sighand->siglock); | ||
426 | current->blocked = set; | ||
427 | recalc_sigpending(); | ||
428 | spin_unlock_irq(¤t->sighand->siglock); | ||
429 | |||
430 | if (restore_sigregs32(regs, &frame->sregs)) | ||
431 | goto badframe; | ||
432 | |||
433 | return regs->gprs[2]; | ||
434 | |||
435 | badframe: | ||
436 | force_sig(SIGSEGV, current); | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) | ||
441 | { | ||
442 | rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; | ||
443 | sigset_t set; | ||
444 | stack_t st; | ||
445 | __u32 ss_sp; | ||
446 | int err; | ||
447 | mm_segment_t old_fs = get_fs(); | ||
448 | |||
449 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
450 | goto badframe; | ||
451 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
452 | goto badframe; | ||
453 | |||
454 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
455 | spin_lock_irq(¤t->sighand->siglock); | ||
456 | current->blocked = set; | ||
457 | recalc_sigpending(); | ||
458 | spin_unlock_irq(¤t->sighand->siglock); | ||
459 | |||
460 | if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) | ||
461 | goto badframe; | ||
462 | |||
463 | err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); | ||
464 | st.ss_sp = (void *) A((unsigned long)ss_sp); | ||
465 | err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); | ||
466 | err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); | ||
467 | if (err) | ||
468 | goto badframe; | ||
469 | |||
470 | /* It is more difficult to avoid calling this function than to | ||
471 | call it and ignore errors. */ | ||
472 | set_fs (KERNEL_DS); | ||
473 | do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); | ||
474 | set_fs (old_fs); | ||
475 | |||
476 | return regs->gprs[2]; | ||
477 | |||
478 | badframe: | ||
479 | force_sig(SIGSEGV, current); | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Set up a signal frame. | ||
485 | */ | ||
486 | |||
487 | |||
488 | /* | ||
489 | * Determine which stack to use.. | ||
490 | */ | ||
491 | static inline void __user * | ||
492 | get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | ||
493 | { | ||
494 | unsigned long sp; | ||
495 | |||
496 | /* Default to using normal stack */ | ||
497 | sp = (unsigned long) A(regs->gprs[15]); | ||
498 | |||
499 | /* This is the X/Open sanctioned signal stack switching. */ | ||
500 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
501 | if (! on_sig_stack(sp)) | ||
502 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
503 | } | ||
504 | |||
505 | /* This is the legacy signal stack switching. */ | ||
506 | else if (!user_mode(regs) && | ||
507 | !(ka->sa.sa_flags & SA_RESTORER) && | ||
508 | ka->sa.sa_restorer) { | ||
509 | sp = (unsigned long) ka->sa.sa_restorer; | ||
510 | } | ||
511 | |||
512 | return (void __user *)((sp - frame_size) & -8ul); | ||
513 | } | ||
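The final expression in get_sigframe() reserves frame_size bytes below the chosen stack pointer and rounds the result down to an 8-byte boundary, as the s390 ABI requires for stack frames. An equivalent stand-alone helper (illustrative name):

    #include <stdint.h>

    /* Reserve frame_size bytes below sp and align the result down to 8 bytes,
     * matching the ((sp - frame_size) & -8ul) expression above. */
    static inline uint64_t align_sigframe(uint64_t sp, uint64_t frame_size)
    {
            return (sp - frame_size) & ~7ULL;
    }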
514 | |||
515 | static inline int map_signal(int sig) | ||
516 | { | ||
517 | if (current_thread_info()->exec_domain | ||
518 | && current_thread_info()->exec_domain->signal_invmap | ||
519 | && sig < 32) | ||
520 | return current_thread_info()->exec_domain->signal_invmap[sig]; | ||
521 | else | ||
522 | return sig; | ||
523 | } | ||
524 | |||
525 | static void setup_frame32(int sig, struct k_sigaction *ka, | ||
526 | sigset_t *set, struct pt_regs * regs) | ||
527 | { | ||
528 | sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32)); | ||
529 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) | ||
530 | goto give_sigsegv; | ||
531 | |||
532 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) | ||
533 | goto give_sigsegv; | ||
534 | |||
535 | if (save_sigregs32(regs, &frame->sregs)) | ||
536 | goto give_sigsegv; | ||
537 | if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs)) | ||
538 | goto give_sigsegv; | ||
539 | |||
540 | /* Set up to return from userspace. If provided, use a stub | ||
541 | already in userspace. */ | ||
542 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
543 | regs->gprs[14] = (__u64) ka->sa.sa_restorer; | ||
544 | } else { | ||
545 | regs->gprs[14] = (__u64) frame->retcode; | ||
546 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | ||
547 | (u16 __user *)(frame->retcode))) | ||
548 | goto give_sigsegv; | ||
549 | } | ||
550 | |||
551 | /* Set up backchain. */ | ||
552 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | ||
553 | goto give_sigsegv; | ||
554 | |||
555 | /* Set up registers for signal handler */ | ||
556 | regs->gprs[15] = (__u64) frame; | ||
557 | regs->psw.addr = (__u64) ka->sa.sa_handler; | ||
558 | |||
559 | regs->gprs[2] = map_signal(sig); | ||
560 | regs->gprs[3] = (__u64) &frame->sc; | ||
561 | |||
562 | /* We forgot to include these in the sigcontext. | ||
563 | To avoid breaking binary compatibility, they are passed as args. */ | ||
564 | regs->gprs[4] = current->thread.trap_no; | ||
565 | regs->gprs[5] = current->thread.prot_addr; | ||
566 | |||
567 | /* Place signal number on stack to allow backtrace from handler. */ | ||
568 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | ||
569 | goto give_sigsegv; | ||
570 | return; | ||
571 | |||
572 | give_sigsegv: | ||
573 | force_sigsegv(sig, current); | ||
574 | } | ||
575 | |||
576 | static void setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
577 | sigset_t *set, struct pt_regs * regs) | ||
578 | { | ||
579 | int err = 0; | ||
580 | rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32)); | ||
581 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) | ||
582 | goto give_sigsegv; | ||
583 | |||
584 | if (copy_siginfo_to_user32(&frame->info, info)) | ||
585 | goto give_sigsegv; | ||
586 | |||
587 | /* Create the ucontext. */ | ||
588 | err |= __put_user(0, &frame->uc.uc_flags); | ||
589 | err |= __put_user(0, &frame->uc.uc_link); | ||
590 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
591 | err |= __put_user(sas_ss_flags(regs->gprs[15]), | ||
592 | &frame->uc.uc_stack.ss_flags); | ||
593 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
594 | err |= save_sigregs32(regs, &frame->uc.uc_mcontext); | ||
595 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
596 | if (err) | ||
597 | goto give_sigsegv; | ||
598 | |||
599 | /* Set up to return from userspace. If provided, use a stub | ||
600 | already in userspace. */ | ||
601 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
602 | regs->gprs[14] = (__u64) ka->sa.sa_restorer; | ||
603 | } else { | ||
604 | regs->gprs[14] = (__u64) frame->retcode; | ||
605 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | ||
606 | (u16 __user *)(frame->retcode)); | ||
607 | } | ||
608 | |||
609 | /* Set up backchain. */ | ||
610 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | ||
611 | goto give_sigsegv; | ||
612 | |||
613 | /* Set up registers for signal handler */ | ||
614 | regs->gprs[15] = (__u64) frame; | ||
615 | regs->psw.addr = (__u64) ka->sa.sa_handler; | ||
616 | |||
617 | regs->gprs[2] = map_signal(sig); | ||
618 | regs->gprs[3] = (__u64) &frame->info; | ||
619 | regs->gprs[4] = (__u64) &frame->uc; | ||
620 | return; | ||
621 | |||
622 | give_sigsegv: | ||
623 | force_sigsegv(sig, current); | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * OK, we're invoking a handler | ||
628 | */ | ||
629 | |||
630 | void | ||
631 | handle_signal32(unsigned long sig, struct k_sigaction *ka, | ||
632 | siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) | ||
633 | { | ||
634 | /* Set up the stack frame */ | ||
635 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
636 | setup_rt_frame32(sig, ka, info, oldset, regs); | ||
637 | else | ||
638 | setup_frame32(sig, ka, oldset, regs); | ||
639 | |||
640 | if (!(ka->sa.sa_flags & SA_NODEFER)) { | ||
641 | spin_lock_irq(¤t->sighand->siglock); | ||
642 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
643 | sigaddset(¤t->blocked,sig); | ||
644 | recalc_sigpending(); | ||
645 | spin_unlock_irq(¤t->sighand->siglock); | ||
646 | } | ||
647 | } | ||
648 | |||
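Editor's note: a minimal, hypothetical 31-bit user-space sketch (not part of this patch) showing which of the two frame builders above a handler ends up with; the register comments mirror the gprs[2..4] assignments in the code.

#include <signal.h>
#include <string.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	/* as set up above: sig arrives in %r2, info in %r3, ucontext in %r4 */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* handle_signal32() then picks setup_rt_frame32() */
	sigaction(SIGUSR1, &sa, NULL);	/* without SA_SIGINFO it would use setup_frame32() */
	raise(SIGUSR1);			/* deliver the signal to ourselves */
	return 0;
}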
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S new file mode 100644 index 000000000000..7a607b1d0380 --- /dev/null +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -0,0 +1,1443 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/compat_wrapper.S | ||
3 | * wrapper for 31 bit compatible system calls. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Gerhard Tonn (ton@de.ibm.com), | ||
8 | * Thomas Spatzier (tspat@de.ibm.com) | ||
9 | */ | ||
10 | |||
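Editor's note: every wrapper below follows the same pattern — each 31-bit argument register is widened to 64 bits before branching to the native system call. A rough C sketch of what the three load instructions do (illustration only; the helper names are invented, not kernel APIs):

#include <stdint.h>

/* lgfr: sign-extend a signed 32-bit argument (int, pid_t, ...) */
static inline int64_t widen_lgfr(int32_t v)
{
	return (int64_t)v;
}

/* llgfr: zero-extend an unsigned 32-bit argument (unsigned int, size_t, ...) */
static inline uint64_t widen_llgfr(uint32_t v)
{
	return (uint64_t)v;
}

/* llgtr: load a 31-bit user pointer - the top bit is cleared and the
 * remaining 31 bits are zero-extended */
static inline uint64_t widen_llgtr(uint32_t v)
{
	return (uint64_t)(v & 0x7fffffffU);
}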
11 | .globl sys32_exit_wrapper | ||
12 | sys32_exit_wrapper: | ||
13 | lgfr %r2,%r2 # int | ||
14 | jg sys_exit # branch to sys_exit | ||
15 | |||
16 | .globl sys32_read_wrapper | ||
17 | sys32_read_wrapper: | ||
18 | llgfr %r2,%r2 # unsigned int | ||
19 | llgtr %r3,%r3 # char * | ||
20 | llgfr %r4,%r4 # size_t | ||
21 | jg sys32_read # branch to sys_read | ||
22 | |||
23 | .globl sys32_write_wrapper | ||
24 | sys32_write_wrapper: | ||
25 | llgfr %r2,%r2 # unsigned int | ||
26 | llgtr %r3,%r3 # const char * | ||
27 | llgfr %r4,%r4 # size_t | ||
28 | jg sys32_write # branch to system call | ||
29 | |||
30 | .globl sys32_open_wrapper | ||
31 | sys32_open_wrapper: | ||
32 | llgtr %r2,%r2 # const char * | ||
33 | lgfr %r3,%r3 # int | ||
34 | lgfr %r4,%r4 # int | ||
35 | jg sys_open # branch to system call | ||
36 | |||
37 | .globl sys32_close_wrapper | ||
38 | sys32_close_wrapper: | ||
39 | llgfr %r2,%r2 # unsigned int | ||
40 | jg sys_close # branch to system call | ||
41 | |||
42 | .globl sys32_creat_wrapper | ||
43 | sys32_creat_wrapper: | ||
44 | llgtr %r2,%r2 # const char * | ||
45 | lgfr %r3,%r3 # int | ||
46 | jg sys_creat # branch to system call | ||
47 | |||
48 | .globl sys32_link_wrapper | ||
49 | sys32_link_wrapper: | ||
50 | llgtr %r2,%r2 # const char * | ||
51 | llgtr %r3,%r3 # const char * | ||
52 | jg sys_link # branch to system call | ||
53 | |||
54 | .globl sys32_unlink_wrapper | ||
55 | sys32_unlink_wrapper: | ||
56 | llgtr %r2,%r2 # const char * | ||
57 | jg sys_unlink # branch to system call | ||
58 | |||
59 | .globl sys32_chdir_wrapper | ||
60 | sys32_chdir_wrapper: | ||
61 | llgtr %r2,%r2 # const char * | ||
62 | jg sys_chdir # branch to system call | ||
63 | |||
64 | .globl sys32_time_wrapper | ||
65 | sys32_time_wrapper: | ||
66 | llgtr %r2,%r2 # int * | ||
67 | jg compat_sys_time # branch to system call | ||
68 | |||
69 | .globl sys32_mknod_wrapper | ||
70 | sys32_mknod_wrapper: | ||
71 | llgtr %r2,%r2 # const char * | ||
72 | lgfr %r3,%r3 # int | ||
73 | llgfr %r4,%r4 # dev | ||
74 | jg sys_mknod # branch to system call | ||
75 | |||
76 | .globl sys32_chmod_wrapper | ||
77 | sys32_chmod_wrapper: | ||
78 | llgtr %r2,%r2 # const char * | ||
79 | llgfr %r3,%r3 # mode_t | ||
80 | jg sys_chmod # branch to system call | ||
81 | |||
82 | .globl sys32_lchown16_wrapper | ||
83 | sys32_lchown16_wrapper: | ||
84 | llgtr %r2,%r2 # const char * | ||
85 | llgfr %r3,%r3 # __kernel_old_uid_emu31_t | ||
86 | llgfr %r4,%r4 # __kernel_old_uid_emu31_t | ||
87 | jg sys32_lchown16 # branch to system call | ||
88 | |||
89 | .globl sys32_lseek_wrapper | ||
90 | sys32_lseek_wrapper: | ||
91 | llgfr %r2,%r2 # unsigned int | ||
92 | lgfr %r3,%r3 # off_t | ||
93 | llgfr %r4,%r4 # unsigned int | ||
94 | jg sys_lseek # branch to system call | ||
95 | |||
96 | #sys32_getpid_wrapper # void | ||
97 | |||
98 | .globl sys32_mount_wrapper | ||
99 | sys32_mount_wrapper: | ||
100 | llgtr %r2,%r2 # char * | ||
101 | llgtr %r3,%r3 # char * | ||
102 | llgtr %r4,%r4 # char * | ||
103 | llgfr %r5,%r5 # unsigned long | ||
104 | llgtr %r6,%r6 # void * | ||
105 | jg compat_sys_mount # branch to system call | ||
106 | |||
107 | .globl sys32_oldumount_wrapper | ||
108 | sys32_oldumount_wrapper: | ||
109 | llgtr %r2,%r2 # char * | ||
110 | jg sys_oldumount # branch to system call | ||
111 | |||
112 | .globl sys32_setuid16_wrapper | ||
113 | sys32_setuid16_wrapper: | ||
114 | llgfr %r2,%r2 # __kernel_old_uid_emu31_t | ||
115 | jg sys32_setuid16 # branch to system call | ||
116 | |||
117 | #sys32_getuid16_wrapper # void | ||
118 | |||
119 | .globl sys32_ptrace_wrapper | ||
120 | sys32_ptrace_wrapper: | ||
121 | lgfr %r2,%r2 # long | ||
122 | lgfr %r3,%r3 # long | ||
123 | llgtr %r4,%r4 # long | ||
124 | llgfr %r5,%r5 # long | ||
125 | jg sys_ptrace # branch to system call | ||
126 | |||
127 | .globl sys32_alarm_wrapper | ||
128 | sys32_alarm_wrapper: | ||
129 | llgfr %r2,%r2 # unsigned int | ||
130 | jg sys_alarm # branch to system call | ||
131 | |||
132 | #sys32_pause_wrapper # void | ||
133 | |||
134 | .globl compat_sys_utime_wrapper | ||
135 | compat_sys_utime_wrapper: | ||
136 | llgtr %r2,%r2 # char * | ||
137 | llgtr %r3,%r3 # struct compat_utimbuf * | ||
138 | jg compat_sys_utime # branch to system call | ||
139 | |||
140 | .globl sys32_access_wrapper | ||
141 | sys32_access_wrapper: | ||
142 | llgtr %r2,%r2 # const char * | ||
143 | lgfr %r3,%r3 # int | ||
144 | jg sys_access # branch to system call | ||
145 | |||
146 | .globl sys32_nice_wrapper | ||
147 | sys32_nice_wrapper: | ||
148 | lgfr %r2,%r2 # int | ||
149 | jg sys_nice # branch to system call | ||
150 | |||
151 | #sys32_sync_wrapper # void | ||
152 | |||
153 | .globl sys32_kill_wrapper | ||
154 | sys32_kill_wrapper: | ||
155 | lgfr %r2,%r2 # int | ||
156 | lgfr %r3,%r3 # int | ||
157 | jg sys_kill # branch to system call | ||
158 | |||
159 | .globl sys32_rename_wrapper | ||
160 | sys32_rename_wrapper: | ||
161 | llgtr %r2,%r2 # const char * | ||
162 | llgtr %r3,%r3 # const char * | ||
163 | jg sys_rename # branch to system call | ||
164 | |||
165 | .globl sys32_mkdir_wrapper | ||
166 | sys32_mkdir_wrapper: | ||
167 | llgtr %r2,%r2 # const char * | ||
168 | lgfr %r3,%r3 # int | ||
169 | jg sys_mkdir # branch to system call | ||
170 | |||
171 | .globl sys32_rmdir_wrapper | ||
172 | sys32_rmdir_wrapper: | ||
173 | llgtr %r2,%r2 # const char * | ||
174 | jg sys_rmdir # branch to system call | ||
175 | |||
176 | .globl sys32_dup_wrapper | ||
177 | sys32_dup_wrapper: | ||
178 | llgfr %r2,%r2 # unsigned int | ||
179 | jg sys_dup # branch to system call | ||
180 | |||
181 | .globl sys32_pipe_wrapper | ||
182 | sys32_pipe_wrapper: | ||
183 | llgtr %r2,%r2 # u32 * | ||
184 | jg sys_pipe # branch to system call | ||
185 | |||
186 | .globl compat_sys_times_wrapper | ||
187 | compat_sys_times_wrapper: | ||
188 | llgtr %r2,%r2 # struct compat_tms * | ||
189 | jg compat_sys_times # branch to system call | ||
190 | |||
191 | .globl sys32_brk_wrapper | ||
192 | sys32_brk_wrapper: | ||
193 | llgtr %r2,%r2 # unsigned long | ||
194 | jg sys_brk # branch to system call | ||
195 | |||
196 | .globl sys32_setgid16_wrapper | ||
197 | sys32_setgid16_wrapper: | ||
198 | llgfr %r2,%r2 # __kernel_old_gid_emu31_t | ||
199 | jg sys32_setgid16 # branch to system call | ||
200 | |||
201 | #sys32_getgid16_wrapper # void | ||
202 | |||
203 | .globl sys32_signal_wrapper | ||
204 | sys32_signal_wrapper: | ||
205 | lgfr %r2,%r2 # int | ||
206 | llgtr %r3,%r3 # __sighandler_t | ||
207 | jg sys_signal | ||
208 | |||
209 | #sys32_geteuid16_wrapper # void | ||
210 | |||
211 | #sys32_getegid16_wrapper # void | ||
212 | |||
213 | .globl sys32_acct_wrapper | ||
214 | sys32_acct_wrapper: | ||
215 | llgtr %r2,%r2 # char * | ||
216 | jg sys_acct # branch to system call | ||
217 | |||
218 | .globl sys32_umount_wrapper | ||
219 | sys32_umount_wrapper: | ||
220 | llgtr %r2,%r2 # char * | ||
221 | lgfr %r3,%r3 # int | ||
222 | jg sys_umount # branch to system call | ||
223 | |||
224 | .globl compat_sys_ioctl_wrapper | ||
225 | compat_sys_ioctl_wrapper: | ||
226 | llgfr %r2,%r2 # unsigned int | ||
227 | llgfr %r3,%r3 # unsigned int | ||
228 | llgfr %r4,%r4 # unsigned int | ||
229 | jg compat_sys_ioctl # branch to system call | ||
230 | |||
231 | .globl compat_sys_fcntl_wrapper | ||
232 | compat_sys_fcntl_wrapper: | ||
233 | llgfr %r2,%r2 # unsigned int | ||
234 | llgfr %r3,%r3 # unsigned int | ||
235 | llgfr %r4,%r4 # unsigned long | ||
236 | jg compat_sys_fcntl # branch to system call | ||
237 | |||
238 | .globl sys32_setpgid_wrapper | ||
239 | sys32_setpgid_wrapper: | ||
240 | lgfr %r2,%r2 # pid_t | ||
241 | lgfr %r3,%r3 # pid_t | ||
242 | jg sys_setpgid # branch to system call | ||
243 | |||
244 | .globl sys32_umask_wrapper | ||
245 | sys32_umask_wrapper: | ||
246 | lgfr %r2,%r2 # int | ||
247 | jg sys_umask # branch to system call | ||
248 | |||
249 | .globl sys32_chroot_wrapper | ||
250 | sys32_chroot_wrapper: | ||
251 | llgtr %r2,%r2 # char * | ||
252 | jg sys_chroot # branch to system call | ||
253 | |||
254 | .globl sys32_ustat_wrapper | ||
255 | sys32_ustat_wrapper: | ||
256 | llgfr %r2,%r2 # dev_t | ||
257 | llgtr %r3,%r3 # struct ustat * | ||
258 | jg sys_ustat | ||
259 | |||
260 | .globl sys32_dup2_wrapper | ||
261 | sys32_dup2_wrapper: | ||
262 | llgfr %r2,%r2 # unsigned int | ||
263 | llgfr %r3,%r3 # unsigned int | ||
264 | jg sys_dup2 # branch to system call | ||
265 | |||
266 | #sys32_getppid_wrapper # void | ||
267 | |||
268 | #sys32_getpgrp_wrapper # void | ||
269 | |||
270 | #sys32_setsid_wrapper # void | ||
271 | |||
272 | .globl sys32_sigaction_wrapper | ||
273 | sys32_sigaction_wrapper: | ||
274 | lgfr %r2,%r2 # int | ||
275 | llgtr %r3,%r3 # const struct old_sigaction * | ||
276 | llgtr %r4,%r4 # struct old_sigaction32 * | ||
277 | jg sys32_sigaction # branch to system call | ||
278 | |||
279 | .globl sys32_setreuid16_wrapper | ||
280 | sys32_setreuid16_wrapper: | ||
281 | llgfr %r2,%r2 # __kernel_old_uid_emu31_t | ||
282 | llgfr %r3,%r3 # __kernel_old_uid_emu31_t | ||
283 | jg sys32_setreuid16 # branch to system call | ||
284 | |||
285 | .globl sys32_setregid16_wrapper | ||
286 | sys32_setregid16_wrapper: | ||
287 | llgfr %r2,%r2 # __kernel_old_gid_emu31_t | ||
288 | llgfr %r3,%r3 # __kernel_old_gid_emu31_t | ||
289 | jg sys32_setregid16 # branch to system call | ||
290 | |||
291 | #sys32_sigsuspend_wrapper # done in sigsuspend_glue | ||
292 | |||
293 | .globl compat_sys_sigpending_wrapper | ||
294 | compat_sys_sigpending_wrapper: | ||
295 | llgtr %r2,%r2 # compat_old_sigset_t * | ||
296 | jg compat_sys_sigpending # branch to system call | ||
297 | |||
298 | .globl sys32_sethostname_wrapper | ||
299 | sys32_sethostname_wrapper: | ||
300 | llgtr %r2,%r2 # char * | ||
301 | lgfr %r3,%r3 # int | ||
302 | jg sys_sethostname # branch to system call | ||
303 | |||
304 | .globl compat_sys_setrlimit_wrapper | ||
305 | compat_sys_setrlimit_wrapper: | ||
306 | llgfr %r2,%r2 # unsigned int | ||
307 | llgtr %r3,%r3 # struct rlimit_emu31 * | ||
308 | jg compat_sys_setrlimit # branch to system call | ||
309 | |||
310 | .globl compat_sys_old_getrlimit_wrapper | ||
311 | compat_sys_old_getrlimit_wrapper: | ||
312 | llgfr %r2,%r2 # unsigned int | ||
313 | llgtr %r3,%r3 # struct rlimit_emu31 * | ||
314 | jg compat_sys_old_getrlimit # branch to system call | ||
315 | |||
316 | .globl compat_sys_getrlimit_wrapper | ||
317 | compat_sys_getrlimit_wrapper: | ||
318 | llgfr %r2,%r2 # unsigned int | ||
319 | llgtr %r3,%r3 # struct rlimit_emu31 * | ||
320 | jg compat_sys_getrlimit # branch to system call | ||
321 | |||
322 | .globl sys32_mmap2_wrapper | ||
323 | sys32_mmap2_wrapper: | ||
324 | llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * | ||
325 | jg sys32_mmap2 # branch to system call | ||
326 | |||
327 | .globl compat_sys_getrusage_wrapper | ||
328 | compat_sys_getrusage_wrapper: | ||
329 | lgfr %r2,%r2 # int | ||
330 | llgtr %r3,%r3 # struct rusage_emu31 * | ||
331 | jg compat_sys_getrusage # branch to system call | ||
332 | |||
333 | .globl sys32_gettimeofday_wrapper | ||
334 | sys32_gettimeofday_wrapper: | ||
335 | llgtr %r2,%r2 # struct timeval_emu31 * | ||
336 | llgtr %r3,%r3 # struct timezone * | ||
337 | jg sys32_gettimeofday # branch to system call | ||
338 | |||
339 | .globl sys32_settimeofday_wrapper | ||
340 | sys32_settimeofday_wrapper: | ||
341 | llgtr %r2,%r2 # struct timeval_emu31 * | ||
342 | llgtr %r3,%r3 # struct timezone * | ||
343 | jg sys32_settimeofday # branch to system call | ||
344 | |||
345 | .globl sys32_getgroups16_wrapper | ||
346 | sys32_getgroups16_wrapper: | ||
347 | lgfr %r2,%r2 # int | ||
348 | llgtr %r3,%r3 # __kernel_old_gid_emu31_t * | ||
349 | jg sys32_getgroups16 # branch to system call | ||
350 | |||
351 | .globl sys32_setgroups16_wrapper | ||
352 | sys32_setgroups16_wrapper: | ||
353 | lgfr %r2,%r2 # int | ||
354 | llgtr %r3,%r3 # __kernel_old_gid_emu31_t * | ||
355 | jg sys32_setgroups16 # branch to system call | ||
356 | |||
357 | .globl sys32_symlink_wrapper | ||
358 | sys32_symlink_wrapper: | ||
359 | llgtr %r2,%r2 # const char * | ||
360 | llgtr %r3,%r3 # const char * | ||
361 | jg sys_symlink # branch to system call | ||
362 | |||
363 | .globl sys32_readlink_wrapper | ||
364 | sys32_readlink_wrapper: | ||
365 | llgtr %r2,%r2 # const char * | ||
366 | llgtr %r3,%r3 # char * | ||
367 | lgfr %r4,%r4 # int | ||
368 | jg sys_readlink # branch to system call | ||
369 | |||
370 | .globl sys32_uselib_wrapper | ||
371 | sys32_uselib_wrapper: | ||
372 | llgtr %r2,%r2 # const char * | ||
373 | jg sys_uselib # branch to system call | ||
374 | |||
375 | .globl sys32_swapon_wrapper | ||
376 | sys32_swapon_wrapper: | ||
377 | llgtr %r2,%r2 # const char * | ||
378 | lgfr %r3,%r3 # int | ||
379 | jg sys_swapon # branch to system call | ||
380 | |||
381 | .globl sys32_reboot_wrapper | ||
382 | sys32_reboot_wrapper: | ||
383 | lgfr %r2,%r2 # int | ||
384 | lgfr %r3,%r3 # int | ||
385 | llgfr %r4,%r4 # unsigned int | ||
386 | llgtr %r5,%r5 # void * | ||
387 | jg sys_reboot # branch to system call | ||
388 | |||
389 | .globl old32_readdir_wrapper | ||
390 | old32_readdir_wrapper: | ||
391 | llgfr %r2,%r2 # unsigned int | ||
392 | llgtr %r3,%r3 # void * | ||
393 | llgfr %r4,%r4 # unsigned int | ||
394 | jg compat_sys_old_readdir # branch to system call | ||
395 | |||
396 | .globl old32_mmap_wrapper | ||
397 | old32_mmap_wrapper: | ||
398 | llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * | ||
399 | jg old32_mmap # branch to system call | ||
400 | |||
401 | .globl sys32_munmap_wrapper | ||
402 | sys32_munmap_wrapper: | ||
403 | llgfr %r2,%r2 # unsigned long | ||
404 | llgfr %r3,%r3 # size_t | ||
405 | jg sys_munmap # branch to system call | ||
406 | |||
407 | .globl sys32_truncate_wrapper | ||
408 | sys32_truncate_wrapper: | ||
409 | llgtr %r2,%r2 # const char * | ||
410 | llgfr %r3,%r3 # unsigned long | ||
411 | jg sys_truncate # branch to system call | ||
412 | |||
413 | .globl sys32_ftruncate_wrapper | ||
414 | sys32_ftruncate_wrapper: | ||
415 | llgfr %r2,%r2 # unsigned int | ||
416 | llgfr %r3,%r3 # unsigned long | ||
417 | jg sys_ftruncate # branch to system call | ||
418 | |||
419 | .globl sys32_fchmod_wrapper | ||
420 | sys32_fchmod_wrapper: | ||
421 | llgfr %r2,%r2 # unsigned int | ||
422 | llgfr %r3,%r3 # mode_t | ||
423 | jg sys_fchmod # branch to system call | ||
424 | |||
425 | .globl sys32_fchown16_wrapper | ||
426 | sys32_fchown16_wrapper: | ||
427 | llgfr %r2,%r2 # unsigned int | ||
428 | llgfr %r3,%r3 # compat_uid_t | ||
429 | llgfr %r4,%r4 # compat_uid_t | ||
430 | jg sys32_fchown16 # branch to system call | ||
431 | |||
432 | .globl sys32_getpriority_wrapper | ||
433 | sys32_getpriority_wrapper: | ||
434 | lgfr %r2,%r2 # int | ||
435 | lgfr %r3,%r3 # int | ||
436 | jg sys_getpriority # branch to system call | ||
437 | |||
438 | .globl sys32_setpriority_wrapper | ||
439 | sys32_setpriority_wrapper: | ||
440 | lgfr %r2,%r2 # int | ||
441 | lgfr %r3,%r3 # int | ||
442 | lgfr %r4,%r4 # int | ||
443 | jg sys_setpriority # branch to system call | ||
444 | |||
445 | .globl compat_sys_statfs_wrapper | ||
446 | compat_sys_statfs_wrapper: | ||
447 | llgtr %r2,%r2 # char * | ||
448 | llgtr %r3,%r3 # struct compat_statfs * | ||
449 | jg compat_sys_statfs # branch to system call | ||
450 | |||
451 | .globl compat_sys_fstatfs_wrapper | ||
452 | compat_sys_fstatfs_wrapper: | ||
453 | llgfr %r2,%r2 # unsigned int | ||
454 | llgtr %r3,%r3 # struct compat_statfs * | ||
455 | jg compat_sys_fstatfs # branch to system call | ||
456 | |||
457 | .globl compat_sys_socketcall_wrapper | ||
458 | compat_sys_socketcall_wrapper: | ||
459 | lgfr %r2,%r2 # int | ||
460 | llgtr %r3,%r3 # u32 * | ||
461 | jg compat_sys_socketcall # branch to system call | ||
462 | |||
463 | .globl sys32_syslog_wrapper | ||
464 | sys32_syslog_wrapper: | ||
465 | lgfr %r2,%r2 # int | ||
466 | llgtr %r3,%r3 # char * | ||
467 | lgfr %r4,%r4 # int | ||
468 | jg sys_syslog # branch to system call | ||
469 | |||
470 | .globl compat_sys_setitimer_wrapper | ||
471 | compat_sys_setitimer_wrapper: | ||
472 | lgfr %r2,%r2 # int | ||
473 | llgtr %r3,%r3 # struct itimerval_emu31 * | ||
474 | llgtr %r4,%r4 # struct itimerval_emu31 * | ||
475 | jg compat_sys_setitimer # branch to system call | ||
476 | |||
477 | .globl compat_sys_getitimer_wrapper | ||
478 | compat_sys_getitimer_wrapper: | ||
479 | lgfr %r2,%r2 # int | ||
480 | llgtr %r3,%r3 # struct itimerval_emu31 * | ||
481 | jg compat_sys_getitimer # branch to system call | ||
482 | |||
483 | .globl compat_sys_newstat_wrapper | ||
484 | compat_sys_newstat_wrapper: | ||
485 | llgtr %r2,%r2 # char * | ||
486 | llgtr %r3,%r3 # struct stat_emu31 * | ||
487 | jg compat_sys_newstat # branch to system call | ||
488 | |||
489 | .globl compat_sys_newlstat_wrapper | ||
490 | compat_sys_newlstat_wrapper: | ||
491 | llgtr %r2,%r2 # char * | ||
492 | llgtr %r3,%r3 # struct stat_emu31 * | ||
493 | jg compat_sys_newlstat # branch to system call | ||
494 | |||
495 | .globl compat_sys_newfstat_wrapper | ||
496 | compat_sys_newfstat_wrapper: | ||
497 | llgfr %r2,%r2 # unsigned int | ||
498 | llgtr %r3,%r3 # struct stat_emu31 * | ||
499 | jg compat_sys_newfstat # branch to system call | ||
500 | |||
501 | #sys32_vhangup_wrapper # void | ||
502 | |||
503 | .globl compat_sys_wait4_wrapper | ||
504 | compat_sys_wait4_wrapper: | ||
505 | lgfr %r2,%r2 # pid_t | ||
506 | llgtr %r3,%r3 # unsigned int * | ||
507 | lgfr %r4,%r4 # int | ||
508 | llgtr %r5,%r5 # struct rusage * | ||
509 | jg compat_sys_wait4 # branch to system call | ||
510 | |||
511 | .globl sys32_swapoff_wrapper | ||
512 | sys32_swapoff_wrapper: | ||
513 | llgtr %r2,%r2 # const char * | ||
514 | jg sys_swapoff # branch to system call | ||
515 | |||
516 | .globl sys32_sysinfo_wrapper | ||
517 | sys32_sysinfo_wrapper: | ||
518 | llgtr %r2,%r2 # struct sysinfo_emu31 * | ||
519 | jg sys32_sysinfo # branch to system call | ||
520 | |||
521 | .globl sys32_ipc_wrapper | ||
522 | sys32_ipc_wrapper: | ||
523 | llgfr %r2,%r2 # uint | ||
524 | lgfr %r3,%r3 # int | ||
525 | lgfr %r4,%r4 # int | ||
526 | lgfr %r5,%r5 # int | ||
527 | llgfr %r6,%r6 # u32 | ||
528 | jg sys32_ipc # branch to system call | ||
529 | |||
530 | .globl sys32_fsync_wrapper | ||
531 | sys32_fsync_wrapper: | ||
532 | llgfr %r2,%r2 # unsigned int | ||
533 | jg sys_fsync # branch to system call | ||
534 | |||
535 | #sys32_sigreturn_wrapper # done in sigreturn_glue | ||
536 | |||
537 | #sys32_clone_wrapper # done in clone_glue | ||
538 | |||
539 | .globl sys32_setdomainname_wrapper | ||
540 | sys32_setdomainname_wrapper: | ||
541 | llgtr %r2,%r2 # char * | ||
542 | lgfr %r3,%r3 # int | ||
543 | jg sys_setdomainname # branch to system call | ||
544 | |||
545 | .globl sys32_newuname_wrapper | ||
546 | sys32_newuname_wrapper: | ||
547 | llgtr %r2,%r2 # struct new_utsname * | ||
548 | jg s390x_newuname # branch to system call | ||
549 | |||
550 | .globl sys32_adjtimex_wrapper | ||
551 | sys32_adjtimex_wrapper: | ||
552 | llgtr %r2,%r2 # struct timex_emu31 * | ||
553 | jg sys32_adjtimex # branch to system call | ||
554 | |||
555 | .globl sys32_mprotect_wrapper | ||
556 | sys32_mprotect_wrapper: | ||
557 | llgtr %r2,%r2 # unsigned long (actually a pointer) | ||
558 | llgfr %r3,%r3 # size_t | ||
559 | llgfr %r4,%r4 # unsigned long | ||
560 | jg sys_mprotect # branch to system call | ||
561 | |||
562 | .globl compat_sys_sigprocmask_wrapper | ||
563 | compat_sys_sigprocmask_wrapper: | ||
564 | lgfr %r2,%r2 # int | ||
565 | llgtr %r3,%r3 # compat_old_sigset_t * | ||
566 | llgtr %r4,%r4 # compat_old_sigset_t * | ||
567 | jg compat_sys_sigprocmask # branch to system call | ||
568 | |||
569 | .globl sys32_init_module_wrapper | ||
570 | sys32_init_module_wrapper: | ||
571 | llgtr %r2,%r2 # void * | ||
572 | llgfr %r3,%r3 # unsigned long | ||
573 | llgtr %r4,%r4 # char * | ||
574 | jg sys32_init_module # branch to system call | ||
575 | |||
576 | .globl sys32_delete_module_wrapper | ||
577 | sys32_delete_module_wrapper: | ||
578 | llgtr %r2,%r2 # const char * | ||
579 | llgfr %r3,%r3 # unsigned int | ||
580 | jg sys32_delete_module # branch to system call | ||
581 | |||
582 | .globl sys32_quotactl_wrapper | ||
583 | sys32_quotactl_wrapper: | ||
584 | llgfr %r2,%r2 # unsigned int | ||
585 | llgtr %r3,%r3 # const char * | ||
586 | llgfr %r4,%r4 # qid_t | ||
587 | llgtr %r5,%r5 # caddr_t | ||
588 | jg sys_quotactl # branch to system call | ||
589 | |||
590 | .globl sys32_getpgid_wrapper | ||
591 | sys32_getpgid_wrapper: | ||
592 | lgfr %r2,%r2 # pid_t | ||
593 | jg sys_getpgid # branch to system call | ||
594 | |||
595 | .globl sys32_fchdir_wrapper | ||
596 | sys32_fchdir_wrapper: | ||
597 | llgfr %r2,%r2 # unsigned int | ||
598 | jg sys_fchdir # branch to system call | ||
599 | |||
600 | .globl sys32_bdflush_wrapper | ||
601 | sys32_bdflush_wrapper: | ||
602 | lgfr %r2,%r2 # int | ||
603 | lgfr %r3,%r3 # long | ||
604 | jg sys_bdflush # branch to system call | ||
605 | |||
606 | .globl sys32_sysfs_wrapper | ||
607 | sys32_sysfs_wrapper: | ||
608 | lgfr %r2,%r2 # int | ||
609 | llgfr %r3,%r3 # unsigned long | ||
610 | llgfr %r4,%r4 # unsigned long | ||
611 | jg sys_sysfs # branch to system call | ||
612 | |||
613 | .globl sys32_personality_wrapper | ||
614 | sys32_personality_wrapper: | ||
615 | llgfr %r2,%r2 # unsigned long | ||
616 | jg s390x_personality # branch to system call | ||
617 | |||
618 | .globl sys32_setfsuid16_wrapper | ||
619 | sys32_setfsuid16_wrapper: | ||
620 | llgfr %r2,%r2 # __kernel_old_uid_emu31_t | ||
621 | jg sys32_setfsuid16 # branch to system call | ||
622 | |||
623 | .globl sys32_setfsgid16_wrapper | ||
624 | sys32_setfsgid16_wrapper: | ||
625 | llgfr %r2,%r2 # __kernel_old_gid_emu31_t | ||
626 | jg sys32_setfsgid16 # branch to system call | ||
627 | |||
628 | .globl sys32_llseek_wrapper | ||
629 | sys32_llseek_wrapper: | ||
630 | llgfr %r2,%r2 # unsigned int | ||
631 | llgfr %r3,%r3 # unsigned long | ||
632 | llgfr %r4,%r4 # unsigned long | ||
633 | llgtr %r5,%r5 # loff_t * | ||
634 | llgfr %r6,%r6 # unsigned int | ||
635 | jg sys_llseek # branch to system call | ||
636 | |||
637 | .globl sys32_getdents_wrapper | ||
638 | sys32_getdents_wrapper: | ||
639 | llgfr %r2,%r2 # unsigned int | ||
640 | llgtr %r3,%r3 # void * | ||
641 | llgfr %r4,%r4 # unsigned int | ||
642 | jg compat_sys_getdents # branch to system call | ||
643 | |||
644 | .globl compat_sys_select_wrapper | ||
645 | compat_sys_select_wrapper: | ||
646 | lgfr %r2,%r2 # int | ||
647 | llgtr %r3,%r3 # compat_fd_set * | ||
648 | llgtr %r4,%r4 # compat_fd_set * | ||
649 | llgtr %r5,%r5 # compat_fd_set * | ||
650 | llgtr %r6,%r6 # struct compat_timeval * | ||
651 | jg compat_sys_select # branch to system call | ||
652 | |||
653 | .globl sys32_flock_wrapper | ||
654 | sys32_flock_wrapper: | ||
655 | llgfr %r2,%r2 # unsigned int | ||
656 | llgfr %r3,%r3 # unsigned int | ||
657 | jg sys_flock # branch to system call | ||
658 | |||
659 | .globl sys32_msync_wrapper | ||
660 | sys32_msync_wrapper: | ||
661 | llgfr %r2,%r2 # unsigned long | ||
662 | llgfr %r3,%r3 # size_t | ||
663 | lgfr %r4,%r4 # int | ||
664 | jg sys_msync # branch to system call | ||
665 | |||
666 | .globl compat_sys_readv_wrapper | ||
667 | compat_sys_readv_wrapper: | ||
668 | lgfr %r2,%r2 # int | ||
669 | llgtr %r3,%r3 # const struct compat_iovec * | ||
670 | llgfr %r4,%r4 # unsigned long | ||
671 | jg compat_sys_readv # branch to system call | ||
672 | |||
673 | .globl compat_sys_writev_wrapper | ||
674 | compat_sys_writev_wrapper: | ||
675 | lgfr %r2,%r2 # int | ||
676 | llgtr %r3,%r3 # const struct compat_iovec * | ||
677 | llgfr %r4,%r4 # unsigned long | ||
678 | jg compat_sys_writev # branch to system call | ||
679 | |||
680 | .globl sys32_getsid_wrapper | ||
681 | sys32_getsid_wrapper: | ||
682 | lgfr %r2,%r2 # pid_t | ||
683 | jg sys_getsid # branch to system call | ||
684 | |||
685 | .globl sys32_fdatasync_wrapper | ||
686 | sys32_fdatasync_wrapper: | ||
687 | llgfr %r2,%r2 # unsigned int | ||
688 | jg sys_fdatasync # branch to system call | ||
689 | |||
690 | #sys32_sysctl_wrapper # tbd | ||
691 | |||
692 | .globl sys32_mlock_wrapper | ||
693 | sys32_mlock_wrapper: | ||
694 | llgfr %r2,%r2 # unsigned long | ||
695 | llgfr %r3,%r3 # size_t | ||
696 | jg sys_mlock # branch to system call | ||
697 | |||
698 | .globl sys32_munlock_wrapper | ||
699 | sys32_munlock_wrapper: | ||
700 | llgfr %r2,%r2 # unsigned long | ||
701 | llgfr %r3,%r3 # size_t | ||
702 | jg sys_munlock # branch to system call | ||
703 | |||
704 | .globl sys32_mlockall_wrapper | ||
705 | sys32_mlockall_wrapper: | ||
706 | lgfr %r2,%r2 # int | ||
707 | jg sys_mlockall # branch to system call | ||
708 | |||
709 | #sys32_munlockall_wrapper # void | ||
710 | |||
711 | .globl sys32_sched_setparam_wrapper | ||
712 | sys32_sched_setparam_wrapper: | ||
713 | lgfr %r2,%r2 # pid_t | ||
714 | llgtr %r3,%r3 # struct sched_param * | ||
715 | jg sys_sched_setparam # branch to system call | ||
716 | |||
717 | .globl sys32_sched_getparam_wrapper | ||
718 | sys32_sched_getparam_wrapper: | ||
719 | lgfr %r2,%r2 # pid_t | ||
720 | llgtr %r3,%r3 # struct sched_param * | ||
721 | jg sys_sched_getparam # branch to system call | ||
722 | |||
723 | .globl sys32_sched_setscheduler_wrapper | ||
724 | sys32_sched_setscheduler_wrapper: | ||
725 | lgfr %r2,%r2 # pid_t | ||
726 | lgfr %r3,%r3 # int | ||
727 | llgtr %r4,%r4 # struct sched_param * | ||
728 | jg sys_sched_setscheduler # branch to system call | ||
729 | |||
730 | .globl sys32_sched_getscheduler_wrapper | ||
731 | sys32_sched_getscheduler_wrapper: | ||
732 | lgfr %r2,%r2 # pid_t | ||
733 | jg sys_sched_getscheduler # branch to system call | ||
734 | |||
735 | #sys32_sched_yield_wrapper # void | ||
736 | |||
737 | .globl sys32_sched_get_priority_max_wrapper | ||
738 | sys32_sched_get_priority_max_wrapper: | ||
739 | lgfr %r2,%r2 # int | ||
740 | jg sys_sched_get_priority_max # branch to system call | ||
741 | |||
742 | .globl sys32_sched_get_priority_min_wrapper | ||
743 | sys32_sched_get_priority_min_wrapper: | ||
744 | lgfr %r2,%r2 # int | ||
745 | jg sys_sched_get_priority_min # branch to system call | ||
746 | |||
747 | .globl sys32_sched_rr_get_interval_wrapper | ||
748 | sys32_sched_rr_get_interval_wrapper: | ||
749 | lgfr %r2,%r2 # pid_t | ||
750 | llgtr %r3,%r3 # struct compat_timespec * | ||
751 | jg sys32_sched_rr_get_interval # branch to system call | ||
752 | |||
753 | .globl compat_sys_nanosleep_wrapper | ||
754 | compat_sys_nanosleep_wrapper: | ||
755 | llgtr %r2,%r2 # struct compat_timespec * | ||
756 | llgtr %r3,%r3 # struct compat_timespec * | ||
757 | jg compat_sys_nanosleep # branch to system call | ||
758 | |||
759 | .globl sys32_mremap_wrapper | ||
760 | sys32_mremap_wrapper: | ||
761 | llgfr %r2,%r2 # unsigned long | ||
762 | llgfr %r3,%r3 # unsigned long | ||
763 | llgfr %r4,%r4 # unsigned long | ||
764 | llgfr %r5,%r5 # unsigned long | ||
765 | llgfr %r6,%r6 # unsigned long | ||
766 | jg sys_mremap # branch to system call | ||
767 | |||
768 | .globl sys32_setresuid16_wrapper | ||
769 | sys32_setresuid16_wrapper: | ||
770 | llgfr %r2,%r2 # __kernel_old_uid_emu31_t | ||
771 | llgfr %r3,%r3 # __kernel_old_uid_emu31_t | ||
772 | llgfr %r4,%r4 # __kernel_old_uid_emu31_t | ||
773 | jg sys32_setresuid16 # branch to system call | ||
774 | |||
775 | .globl sys32_getresuid16_wrapper | ||
776 | sys32_getresuid16_wrapper: | ||
777 | llgtr %r2,%r2 # __kernel_old_uid_emu31_t * | ||
778 | llgtr %r3,%r3 # __kernel_old_uid_emu31_t * | ||
779 | llgtr %r4,%r4 # __kernel_old_uid_emu31_t * | ||
780 | jg sys32_getresuid16 # branch to system call | ||
781 | |||
782 | .globl sys32_poll_wrapper | ||
783 | sys32_poll_wrapper: | ||
784 | llgtr %r2,%r2 # struct pollfd * | ||
785 | llgfr %r3,%r3 # unsigned int | ||
786 | lgfr %r4,%r4 # long | ||
787 | jg sys_poll # branch to system call | ||
788 | |||
789 | .globl compat_sys_nfsservctl_wrapper | ||
790 | compat_sys_nfsservctl_wrapper: | ||
791 | lgfr %r2,%r2 # int | ||
792 | llgtr %r3,%r3 # struct compat_nfsctl_arg* | ||
793 | llgtr %r4,%r4 # union compat_nfsctl_res* | ||
794 | jg compat_sys_nfsservctl # branch to system call | ||
795 | |||
796 | .globl sys32_setresgid16_wrapper | ||
797 | sys32_setresgid16_wrapper: | ||
798 | llgfr %r2,%r2 # __kernel_old_gid_emu31_t | ||
799 | llgfr %r3,%r3 # __kernel_old_gid_emu31_t | ||
800 | llgfr %r4,%r4 # __kernel_old_gid_emu31_t | ||
801 | jg sys32_setresgid16 # branch to system call | ||
802 | |||
803 | .globl sys32_getresgid16_wrapper | ||
804 | sys32_getresgid16_wrapper: | ||
805 | llgtr %r2,%r2 # __kernel_old_gid_emu31_t * | ||
806 | llgtr %r3,%r3 # __kernel_old_gid_emu31_t * | ||
807 | llgtr %r4,%r4 # __kernel_old_gid_emu31_t * | ||
808 | jg sys32_getresgid16 # branch to system call | ||
809 | |||
810 | .globl sys32_prctl_wrapper | ||
811 | sys32_prctl_wrapper: | ||
812 | lgfr %r2,%r2 # int | ||
813 | llgfr %r3,%r3 # unsigned long | ||
814 | llgfr %r4,%r4 # unsigned long | ||
815 | llgfr %r5,%r5 # unsigned long | ||
816 | llgfr %r6,%r6 # unsigned long | ||
817 | jg sys_prctl # branch to system call | ||
818 | |||
819 | #sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue | ||
820 | |||
821 | .globl sys32_rt_sigaction_wrapper | ||
822 | sys32_rt_sigaction_wrapper: | ||
823 | lgfr %r2,%r2 # int | ||
824 | llgtr %r3,%r3 # const struct sigaction_emu31 * | ||
825 | llgtr %r4,%r4 # const struct sigaction_emu31 * | ||
826 | llgfr %r5,%r5 # size_t | ||
827 | jg sys32_rt_sigaction # branch to system call | ||
828 | |||
829 | .globl sys32_rt_sigprocmask_wrapper | ||
830 | sys32_rt_sigprocmask_wrapper: | ||
831 | lgfr %r2,%r2 # int | ||
832 | llgtr %r3,%r3 # old_sigset_emu31 * | ||
833 | llgtr %r4,%r4 # old_sigset_emu31 * | ||
834 | llgfr %r5,%r5 # size_t | ||
835 | jg sys32_rt_sigprocmask # branch to system call | ||
836 | |||
837 | .globl sys32_rt_sigpending_wrapper | ||
838 | sys32_rt_sigpending_wrapper: | ||
839 | llgtr %r2,%r2 # sigset_emu31 * | ||
840 | llgfr %r3,%r3 # size_t | ||
841 | jg sys32_rt_sigpending # branch to system call | ||
842 | |||
843 | .globl compat_sys_rt_sigtimedwait_wrapper | ||
844 | compat_sys_rt_sigtimedwait_wrapper: | ||
845 | llgtr %r2,%r2 # const sigset_emu31_t * | ||
846 | llgtr %r3,%r3 # siginfo_emu31_t * | ||
847 | llgtr %r4,%r4 # const struct compat_timespec * | ||
848 | llgfr %r5,%r5 # size_t | ||
849 | jg compat_sys_rt_sigtimedwait # branch to system call | ||
850 | |||
851 | .globl sys32_rt_sigqueueinfo_wrapper | ||
852 | sys32_rt_sigqueueinfo_wrapper: | ||
853 | lgfr %r2,%r2 # int | ||
854 | lgfr %r3,%r3 # int | ||
855 | llgtr %r4,%r4 # siginfo_emu31_t * | ||
856 | jg sys32_rt_sigqueueinfo # branch to system call | ||
857 | |||
858 | #sys32_rt_sigsuspend_wrapper # done in rt_sigsuspend_glue | ||
859 | |||
860 | .globl sys32_pread64_wrapper | ||
861 | sys32_pread64_wrapper: | ||
862 | llgfr %r2,%r2 # unsigned int | ||
863 | llgtr %r3,%r3 # char * | ||
864 | llgfr %r4,%r4 # size_t | ||
865 | llgfr %r5,%r5 # u32 | ||
866 | llgfr %r6,%r6 # u32 | ||
867 | jg sys32_pread64 # branch to system call | ||
868 | |||
869 | .globl sys32_pwrite64_wrapper | ||
870 | sys32_pwrite64_wrapper: | ||
871 | llgfr %r2,%r2 # unsigned int | ||
872 | llgtr %r3,%r3 # const char * | ||
873 | llgfr %r4,%r4 # size_t | ||
874 | llgfr %r5,%r5 # u32 | ||
875 | llgfr %r6,%r6 # u32 | ||
876 | jg sys32_pwrite64 # branch to system call | ||
877 | |||
878 | .globl sys32_chown16_wrapper | ||
879 | sys32_chown16_wrapper: | ||
880 | llgtr %r2,%r2 # const char * | ||
881 | llgfr %r3,%r3 # __kernel_old_uid_emu31_t | ||
882 | llgfr %r4,%r4 # __kernel_old_gid_emu31_t | ||
883 | jg sys32_chown16 # branch to system call | ||
884 | |||
885 | .globl sys32_getcwd_wrapper | ||
886 | sys32_getcwd_wrapper: | ||
887 | llgtr %r2,%r2 # char * | ||
888 | llgfr %r3,%r3 # unsigned long | ||
889 | jg sys_getcwd # branch to system call | ||
890 | |||
891 | .globl sys32_capget_wrapper | ||
892 | sys32_capget_wrapper: | ||
893 | llgtr %r2,%r2 # cap_user_header_t | ||
894 | llgtr %r3,%r3 # cap_user_data_t | ||
895 | jg sys_capget # branch to system call | ||
896 | |||
897 | .globl sys32_capset_wrapper | ||
898 | sys32_capset_wrapper: | ||
899 | llgtr %r2,%r2 # cap_user_header_t | ||
900 | llgtr %r3,%r3 # const cap_user_data_t | ||
901 | jg sys_capset # branch to system call | ||
902 | |||
903 | .globl sys32_sigaltstack_wrapper | ||
904 | sys32_sigaltstack_wrapper: | ||
905 | llgtr %r2,%r2 # const stack_emu31_t * | ||
906 | llgtr %r3,%r3 # stack_emu31_t * | ||
907 | jg sys32_sigaltstack | ||
908 | |||
909 | .globl sys32_sendfile_wrapper | ||
910 | sys32_sendfile_wrapper: | ||
911 | lgfr %r2,%r2 # int | ||
912 | lgfr %r3,%r3 # int | ||
913 | llgtr %r4,%r4 # __kernel_off_emu31_t * | ||
914 | llgfr %r5,%r5 # size_t | ||
915 | jg sys32_sendfile # branch to system call | ||
916 | |||
917 | #sys32_vfork_wrapper # done in vfork_glue | ||
918 | |||
919 | .globl sys32_truncate64_wrapper | ||
920 | sys32_truncate64_wrapper: | ||
921 | llgtr %r2,%r2 # const char * | ||
922 | llgfr %r3,%r3 # unsigned long | ||
923 | llgfr %r4,%r4 # unsigned long | ||
924 | jg sys32_truncate64 # branch to system call | ||
925 | |||
926 | .globl sys32_ftruncate64_wrapper | ||
927 | sys32_ftruncate64_wrapper: | ||
928 | llgfr %r2,%r2 # unsigned int | ||
929 | llgfr %r3,%r3 # unsigned long | ||
930 | llgfr %r4,%r4 # unsigned long | ||
931 | jg sys32_ftruncate64 # branch to system call | ||
932 | |||
933 | .globl sys32_lchown_wrapper | ||
934 | sys32_lchown_wrapper: | ||
935 | llgtr %r2,%r2 # const char * | ||
936 | llgfr %r3,%r3 # uid_t | ||
937 | llgfr %r4,%r4 # gid_t | ||
938 | jg sys_lchown # branch to system call | ||
939 | |||
940 | #sys32_getuid_wrapper # void | ||
941 | #sys32_getgid_wrapper # void | ||
942 | #sys32_geteuid_wrapper # void | ||
943 | #sys32_getegid_wrapper # void | ||
944 | |||
945 | .globl sys32_setreuid_wrapper | ||
946 | sys32_setreuid_wrapper: | ||
947 | llgfr %r2,%r2 # uid_t | ||
948 | llgfr %r3,%r3 # uid_t | ||
949 | jg sys_setreuid # branch to system call | ||
950 | |||
951 | .globl sys32_setregid_wrapper | ||
952 | sys32_setregid_wrapper: | ||
953 | llgfr %r2,%r2 # gid_t | ||
954 | llgfr %r3,%r3 # gid_t | ||
955 | jg sys_setregid # branch to system call | ||
956 | |||
957 | .globl sys32_getgroups_wrapper | ||
958 | sys32_getgroups_wrapper: | ||
959 | lgfr %r2,%r2 # int | ||
960 | llgtr %r3,%r3 # gid_t * | ||
961 | jg sys_getgroups # branch to system call | ||
962 | |||
963 | .globl sys32_setgroups_wrapper | ||
964 | sys32_setgroups_wrapper: | ||
965 | lgfr %r2,%r2 # int | ||
966 | llgtr %r3,%r3 # gid_t * | ||
967 | jg sys_setgroups # branch to system call | ||
968 | |||
969 | .globl sys32_fchown_wrapper | ||
970 | sys32_fchown_wrapper: | ||
971 | llgfr %r2,%r2 # unsigned int | ||
972 | llgfr %r3,%r3 # uid_t | ||
973 | llgfr %r4,%r4 # gid_t | ||
974 | jg sys_fchown # branch to system call | ||
975 | |||
976 | .globl sys32_setresuid_wrapper | ||
977 | sys32_setresuid_wrapper: | ||
978 | llgfr %r2,%r2 # uid_t | ||
979 | llgfr %r3,%r3 # uid_t | ||
980 | llgfr %r4,%r4 # uid_t | ||
981 | jg sys_setresuid # branch to system call | ||
982 | |||
983 | .globl sys32_getresuid_wrapper | ||
984 | sys32_getresuid_wrapper: | ||
985 | llgtr %r2,%r2 # uid_t * | ||
986 | llgtr %r3,%r3 # uid_t * | ||
987 | llgtr %r4,%r4 # uid_t * | ||
988 | jg sys_getresuid # branch to system call | ||
989 | |||
990 | .globl sys32_setresgid_wrapper | ||
991 | sys32_setresgid_wrapper: | ||
992 | llgfr %r2,%r2 # gid_t | ||
993 | llgfr %r3,%r3 # gid_t | ||
994 | llgfr %r4,%r4 # gid_t | ||
995 | jg sys_setresgid # branch to system call | ||
996 | |||
997 | .globl sys32_getresgid_wrapper | ||
998 | sys32_getresgid_wrapper: | ||
999 | llgtr %r2,%r2 # gid_t * | ||
1000 | llgtr %r3,%r3 # gid_t * | ||
1001 | llgtr %r4,%r4 # gid_t * | ||
1002 | jg sys_getresgid # branch to system call | ||
1003 | |||
1004 | .globl sys32_chown_wrapper | ||
1005 | sys32_chown_wrapper: | ||
1006 | llgtr %r2,%r2 # const char * | ||
1007 | llgfr %r3,%r3 # uid_t | ||
1008 | llgfr %r4,%r4 # gid_t | ||
1009 | jg sys_chown # branch to system call | ||
1010 | |||
1011 | .globl sys32_setuid_wrapper | ||
1012 | sys32_setuid_wrapper: | ||
1013 | llgfr %r2,%r2 # uid_t | ||
1014 | jg sys_setuid # branch to system call | ||
1015 | |||
1016 | .globl sys32_setgid_wrapper | ||
1017 | sys32_setgid_wrapper: | ||
1018 | llgfr %r2,%r2 # gid_t | ||
1019 | jg sys_setgid # branch to system call | ||
1020 | |||
1021 | .globl sys32_setfsuid_wrapper | ||
1022 | sys32_setfsuid_wrapper: | ||
1023 | llgfr %r2,%r2 # uid_t | ||
1024 | jg sys_setfsuid # branch to system call | ||
1025 | |||
1026 | .globl sys32_setfsgid_wrapper | ||
1027 | sys32_setfsgid_wrapper: | ||
1028 | llgfr %r2,%r2 # gid_t | ||
1029 | jg sys_setfsgid # branch to system call | ||
1030 | |||
1031 | .globl sys32_pivot_root_wrapper | ||
1032 | sys32_pivot_root_wrapper: | ||
1033 | llgtr %r2,%r2 # const char * | ||
1034 | llgtr %r3,%r3 # const char * | ||
1035 | jg sys_pivot_root # branch to system call | ||
1036 | |||
1037 | .globl sys32_mincore_wrapper | ||
1038 | sys32_mincore_wrapper: | ||
1039 | llgfr %r2,%r2 # unsigned long | ||
1040 | llgfr %r3,%r3 # size_t | ||
1041 | llgtr %r4,%r4 # unsigned char * | ||
1042 | jg sys_mincore # branch to system call | ||
1043 | |||
1044 | .globl sys32_madvise_wrapper | ||
1045 | sys32_madvise_wrapper: | ||
1046 | llgfr %r2,%r2 # unsigned long | ||
1047 | llgfr %r3,%r3 # size_t | ||
1048 | lgfr %r4,%r4 # int | ||
1049 | jg sys_madvise # branch to system call | ||
1050 | |||
1051 | .globl sys32_getdents64_wrapper | ||
1052 | sys32_getdents64_wrapper: | ||
1053 | llgfr %r2,%r2 # unsigned int | ||
1054 | llgtr %r3,%r3 # void * | ||
1055 | llgfr %r4,%r4 # unsigned int | ||
1056 | jg sys_getdents64 # branch to system call | ||
1057 | |||
1058 | .globl compat_sys_fcntl64_wrapper | ||
1059 | compat_sys_fcntl64_wrapper: | ||
1060 | llgfr %r2,%r2 # unsigned int | ||
1061 | llgfr %r3,%r3 # unsigned int | ||
1062 | llgfr %r4,%r4 # unsigned long | ||
1063 | jg compat_sys_fcntl64 # branch to system call | ||
1064 | |||
1065 | .globl sys32_stat64_wrapper | ||
1066 | sys32_stat64_wrapper: | ||
1067 | llgtr %r2,%r2 # char * | ||
1068 | llgtr %r3,%r3 # struct stat64 * | ||
1069 | jg sys32_stat64 # branch to system call | ||
1070 | |||
1071 | .globl sys32_lstat64_wrapper | ||
1072 | sys32_lstat64_wrapper: | ||
1073 | llgtr %r2,%r2 # char * | ||
1074 | llgtr %r3,%r3 # struct stat64 * | ||
1075 | jg sys32_lstat64 # branch to system call | ||
1076 | |||
1077 | .globl sys32_stime_wrapper | ||
1078 | sys32_stime_wrapper: | ||
1079 | llgtr %r2,%r2 # long * | ||
1080 | jg compat_sys_stime # branch to system call | ||
1081 | |||
1082 | .globl sys32_sysctl_wrapper | ||
1083 | sys32_sysctl_wrapper: | ||
1084 | llgtr %r2,%r2 # struct __sysctl_args32 * | ||
1085 | jg sys32_sysctl | ||
1086 | |||
1087 | .globl sys32_fstat64_wrapper | ||
1088 | sys32_fstat64_wrapper: | ||
1089 | llgfr %r2,%r2 # unsigned long | ||
1090 | llgtr %r3,%r3 # struct stat64 * | ||
1091 | jg sys32_fstat64 # branch to system call | ||
1092 | |||
1093 | .globl compat_sys_futex_wrapper | ||
1094 | compat_sys_futex_wrapper: | ||
1095 | llgtr %r2,%r2 # u32 * | ||
1096 | lgfr %r3,%r3 # int | ||
1097 | lgfr %r4,%r4 # int | ||
1098 | llgtr %r5,%r5 # struct compat_timespec * | ||
1099 | llgtr %r6,%r6 # u32 * | ||
1100 | lgf %r0,164(%r15) # int (sixth argument, taken from the 31 bit stack slot) | ||
1101 | stg %r0,160(%r15) # store it widened in the 64 bit argument slot | ||
1102 | jg compat_sys_futex # branch to system call | ||
1103 | |||
1104 | .globl sys32_setxattr_wrapper | ||
1105 | sys32_setxattr_wrapper: | ||
1106 | llgtr %r2,%r2 # char * | ||
1107 | llgtr %r3,%r3 # char * | ||
1108 | llgtr %r4,%r4 # void * | ||
1109 | llgfr %r5,%r5 # size_t | ||
1110 | lgfr %r6,%r6 # int | ||
1111 | jg sys_setxattr | ||
1112 | |||
1113 | .globl sys32_lsetxattr_wrapper | ||
1114 | sys32_lsetxattr_wrapper: | ||
1115 | llgtr %r2,%r2 # char * | ||
1116 | llgtr %r3,%r3 # char * | ||
1117 | llgtr %r4,%r4 # void * | ||
1118 | llgfr %r5,%r5 # size_t | ||
1119 | lgfr %r6,%r6 # int | ||
1120 | jg sys_lsetxattr | ||
1121 | |||
1122 | .globl sys32_fsetxattr_wrapper | ||
1123 | sys32_fsetxattr_wrapper: | ||
1124 | lgfr %r2,%r2 # int | ||
1125 | llgtr %r3,%r3 # char * | ||
1126 | llgtr %r4,%r4 # void * | ||
1127 | llgfr %r5,%r5 # size_t | ||
1128 | lgfr %r6,%r6 # int | ||
1129 | jg sys_fsetxattr | ||
1130 | |||
1131 | .globl sys32_getxattr_wrapper | ||
1132 | sys32_getxattr_wrapper: | ||
1133 | llgtr %r2,%r2 # char * | ||
1134 | llgtr %r3,%r3 # char * | ||
1135 | llgtr %r4,%r4 # void * | ||
1136 | llgfr %r5,%r5 # size_t | ||
1137 | jg sys_getxattr | ||
1138 | |||
1139 | .globl sys32_lgetxattr_wrapper | ||
1140 | sys32_lgetxattr_wrapper: | ||
1141 | llgtr %r2,%r2 # char * | ||
1142 | llgtr %r3,%r3 # char * | ||
1143 | llgtr %r4,%r4 # void * | ||
1144 | llgfr %r5,%r5 # size_t | ||
1145 | jg sys_lgetxattr | ||
1146 | |||
1147 | .globl sys32_fgetxattr_wrapper | ||
1148 | sys32_fgetxattr_wrapper: | ||
1149 | lgfr %r2,%r2 # int | ||
1150 | llgtr %r3,%r3 # char * | ||
1151 | llgtr %r4,%r4 # void * | ||
1152 | llgfr %r5,%r5 # size_t | ||
1153 | jg sys_fgetxattr | ||
1154 | |||
1155 | .globl sys32_listxattr_wrapper | ||
1156 | sys32_listxattr_wrapper: | ||
1157 | llgtr %r2,%r2 # char * | ||
1158 | llgtr %r3,%r3 # char * | ||
1159 | llgfr %r4,%r4 # size_t | ||
1160 | jg sys_listxattr | ||
1161 | |||
1162 | .globl sys32_llistxattr_wrapper | ||
1163 | sys32_llistxattr_wrapper: | ||
1164 | llgtr %r2,%r2 # char * | ||
1165 | llgtr %r3,%r3 # char * | ||
1166 | llgfr %r4,%r4 # size_t | ||
1167 | jg sys_llistxattr | ||
1168 | |||
1169 | .globl sys32_flistxattr_wrapper | ||
1170 | sys32_flistxattr_wrapper: | ||
1171 | lgfr %r2,%r2 # int | ||
1172 | llgtr %r3,%r3 # char * | ||
1173 | llgfr %r4,%r4 # size_t | ||
1174 | jg sys_flistxattr | ||
1175 | |||
1176 | .globl sys32_removexattr_wrapper | ||
1177 | sys32_removexattr_wrapper: | ||
1178 | llgtr %r2,%r2 # char * | ||
1179 | llgtr %r3,%r3 # char * | ||
1180 | jg sys_removexattr | ||
1181 | |||
1182 | .globl sys32_lremovexattr_wrapper | ||
1183 | sys32_lremovexattr_wrapper: | ||
1184 | llgtr %r2,%r2 # char * | ||
1185 | llgtr %r3,%r3 # char * | ||
1186 | jg sys_lremovexattr | ||
1187 | |||
1188 | .globl sys32_fremovexattr_wrapper | ||
1189 | sys32_fremovexattr_wrapper: | ||
1190 | lgfr %r2,%r2 # int | ||
1191 | llgtr %r3,%r3 # char * | ||
1192 | jg sys_fremovexattr | ||
1193 | |||
1194 | .globl sys32_sched_setaffinity_wrapper | ||
1195 | sys32_sched_setaffinity_wrapper: | ||
1196 | lgfr %r2,%r2 # int | ||
1197 | llgfr %r3,%r3 # unsigned int | ||
1198 | llgtr %r4,%r4 # unsigned long * | ||
1199 | jg compat_sys_sched_setaffinity | ||
1200 | |||
1201 | .globl sys32_sched_getaffinity_wrapper | ||
1202 | sys32_sched_getaffinity_wrapper: | ||
1203 | lgfr %r2,%r2 # int | ||
1204 | llgfr %r3,%r3 # unsigned int | ||
1205 | llgtr %r4,%r4 # unsigned long * | ||
1206 | jg compat_sys_sched_getaffinity | ||
1207 | |||
1208 | .globl sys32_exit_group_wrapper | ||
1209 | sys32_exit_group_wrapper: | ||
1210 | lgfr %r2,%r2 # int | ||
1211 | jg sys_exit_group # branch to system call | ||
1212 | |||
1213 | .globl sys32_set_tid_address_wrapper | ||
1214 | sys32_set_tid_address_wrapper: | ||
1215 | llgtr %r2,%r2 # int * | ||
1216 | jg sys_set_tid_address # branch to system call | ||
1217 | |||
1218 | .globl sys_epoll_create_wrapper | ||
1219 | sys_epoll_create_wrapper: | ||
1220 | lgfr %r2,%r2 # int | ||
1221 | jg sys_epoll_create # branch to system call | ||
1222 | |||
1223 | .globl sys_epoll_ctl_wrapper | ||
1224 | sys_epoll_ctl_wrapper: | ||
1225 | lgfr %r2,%r2 # int | ||
1226 | lgfr %r3,%r3 # int | ||
1227 | lgfr %r4,%r4 # int | ||
1228 | llgtr %r5,%r5 # struct epoll_event * | ||
1229 | jg sys_epoll_ctl # branch to system call | ||
1230 | |||
1231 | .globl sys_epoll_wait_wrapper | ||
1232 | sys_epoll_wait_wrapper: | ||
1233 | lgfr %r2,%r2 # int | ||
1234 | llgtr %r3,%r3 # struct epoll_event * | ||
1235 | lgfr %r4,%r4 # int | ||
1236 | lgfr %r5,%r5 # int | ||
1237 | jg sys_epoll_wait # branch to system call | ||
1238 | |||
1239 | .globl sys32_lookup_dcookie_wrapper | ||
1240 | sys32_lookup_dcookie_wrapper: | ||
1241 | sllg %r2,%r2,32 # get high word of 64bit dcookie | ||
1242 | or %r2,%r3 # get low word of 64bit dcookie | ||
1243 | llgtr %r3,%r4 # char * | ||
1244 | llgfr %r4,%r5 # size_t | ||
1245 | jg sys_lookup_dcookie | ||
1246 | |||
1247 | .globl sys32_fadvise64_wrapper | ||
1248 | sys32_fadvise64_wrapper: | ||
1249 | lgfr %r2,%r2 # int | ||
1250 | sllg %r3,%r3,32 # get high word of 64bit loff_t | ||
1251 | or %r3,%r4 # get low word of 64bit loff_t | ||
1252 | llgfr %r4,%r5 # size_t (unsigned long) | ||
1253 | lgfr %r5,%r6 # int | ||
1254 | jg sys_fadvise64 | ||
1255 | |||
1256 | .globl sys32_fadvise64_64_wrapper | ||
1257 | sys32_fadvise64_64_wrapper: | ||
1258 | llgtr %r2,%r2 # struct fadvise64_64_args * | ||
1259 | jg s390_fadvise64_64 | ||
1260 | |||
1261 | .globl sys32_clock_settime_wrapper | ||
1262 | sys32_clock_settime_wrapper: | ||
1263 | lgfr %r2,%r2 # clockid_t (int) | ||
1264 | llgtr %r3,%r3 # struct compat_timespec * | ||
1265 | jg compat_sys_clock_settime | ||
1266 | |||
1267 | .globl sys32_clock_gettime_wrapper | ||
1268 | sys32_clock_gettime_wrapper: | ||
1269 | lgfr %r2,%r2 # clockid_t (int) | ||
1270 | llgtr %r3,%r3 # struct compat_timespec * | ||
1271 | jg compat_sys_clock_gettime | ||
1272 | |||
1273 | .globl sys32_clock_getres_wrapper | ||
1274 | sys32_clock_getres_wrapper: | ||
1275 | lgfr %r2,%r2 # clockid_t (int) | ||
1276 | llgtr %r3,%r3 # struct compat_timespec * | ||
1277 | jg compat_sys_clock_getres | ||
1278 | |||
1279 | .globl sys32_clock_nanosleep_wrapper | ||
1280 | sys32_clock_nanosleep_wrapper: | ||
1281 | lgfr %r2,%r2 # clockid_t (int) | ||
1282 | lgfr %r3,%r3 # int | ||
1283 | llgtr %r4,%r4 # struct compat_timespec * | ||
1284 | llgtr %r5,%r5 # struct compat_timespec * | ||
1285 | jg compat_sys_clock_nanosleep | ||
1286 | |||
1287 | .globl sys32_timer_create_wrapper | ||
1288 | sys32_timer_create_wrapper: | ||
1289 | lgfr %r2,%r2 # timer_t (int) | ||
1290 | llgtr %r3,%r3 # struct compat_sigevent * | ||
1291 | llgtr %r4,%r4 # timer_t * | ||
1292 | jg sys32_timer_create | ||
1293 | |||
1294 | .globl sys32_timer_settime_wrapper | ||
1295 | sys32_timer_settime_wrapper: | ||
1296 | lgfr %r2,%r2 # timer_t (int) | ||
1297 | lgfr %r3,%r3 # int | ||
1298 | llgtr %r4,%r4 # struct compat_itimerspec * | ||
1299 | llgtr %r5,%r5 # struct compat_itimerspec * | ||
1300 | jg compat_sys_timer_settime | ||
1301 | |||
1302 | .globl sys32_timer_gettime_wrapper | ||
1303 | sys32_timer_gettime_wrapper: | ||
1304 | lgfr %r2,%r2 # timer_t (int) | ||
1305 | llgtr %r3,%r3 # struct compat_itimerspec * | ||
1306 | jg compat_sys_timer_gettime | ||
1307 | |||
1308 | .globl sys32_timer_getoverrun_wrapper | ||
1309 | sys32_timer_getoverrun_wrapper: | ||
1310 | lgfr %r2,%r2 # timer_t (int) | ||
1311 | jg sys_timer_getoverrun | ||
1312 | |||
1313 | .globl sys32_timer_delete_wrapper | ||
1314 | sys32_timer_delete_wrapper: | ||
1315 | lgfr %r2,%r2 # timer_t (int) | ||
1316 | jg sys_timer_delete | ||
1317 | |||
1318 | .globl sys32_io_setup_wrapper | ||
1319 | sys32_io_setup_wrapper: | ||
1320 | llgfr %r2,%r2 # unsigned int | ||
1321 | llgtr %r3,%r3 # u32 * | ||
1322 | jg compat_sys_io_setup | ||
1323 | |||
1324 | .globl sys32_io_destroy_wrapper | ||
1325 | sys32_io_destroy_wrapper: | ||
1326 | llgfr %r2,%r2 # (aio_context_t) u32 | ||
1327 | jg sys_io_destroy | ||
1328 | |||
1329 | .globl sys32_io_getevents_wrapper | ||
1330 | sys32_io_getevents_wrapper: | ||
1331 | llgfr %r2,%r2 # (aio_context_t) u32 | ||
1332 | lgfr %r3,%r3 # long | ||
1333 | lgfr %r4,%r4 # long | ||
1334 | llgtr %r5,%r5 # struct io_event * | ||
1335 | llgtr %r6,%r6 # struct compat_timespec * | ||
1336 | jg compat_sys_io_getevents | ||
1337 | |||
1338 | .globl sys32_io_submit_wrapper | ||
1339 | sys32_io_submit_wrapper: | ||
1340 | llgfr %r2,%r2 # (aio_context_t) u32 | ||
1341 | lgfr %r3,%r3 # long | ||
1342 | llgtr %r4,%r4 # struct iocb ** | ||
1343 | jg compat_sys_io_submit | ||
1344 | |||
1345 | .globl sys32_io_cancel_wrapper | ||
1346 | sys32_io_cancel_wrapper: | ||
1347 | llgfr %r2,%r2 # (aio_context_t) u32 | ||
1348 | llgtr %r3,%r3 # struct iocb * | ||
1349 | llgtr %r4,%r4 # struct io_event * | ||
1350 | jg sys_io_cancel | ||
1351 | |||
1352 | .globl compat_sys_statfs64_wrapper | ||
1353 | compat_sys_statfs64_wrapper: | ||
1354 | llgtr %r2,%r2 # const char * | ||
1355 | llgfr %r3,%r3 # compat_size_t | ||
1356 | llgtr %r4,%r4 # struct compat_statfs64 * | ||
1357 | jg compat_sys_statfs64 | ||
1358 | |||
1359 | .globl compat_sys_fstatfs64_wrapper | ||
1360 | compat_sys_fstatfs64_wrapper: | ||
1361 | llgfr %r2,%r2 # unsigned int fd | ||
1362 | llgfr %r3,%r3 # compat_size_t | ||
1363 | llgtr %r4,%r4 # struct compat_statfs64 * | ||
1364 | jg compat_sys_fstatfs64 | ||
1365 | |||
1366 | .globl compat_sys_mq_open_wrapper | ||
1367 | compat_sys_mq_open_wrapper: | ||
1368 | llgtr %r2,%r2 # const char * | ||
1369 | lgfr %r3,%r3 # int | ||
1370 | llgfr %r4,%r4 # mode_t | ||
1371 | llgtr %r5,%r5 # struct compat_mq_attr * | ||
1372 | jg compat_sys_mq_open | ||
1373 | |||
1374 | .globl sys32_mq_unlink_wrapper | ||
1375 | sys32_mq_unlink_wrapper: | ||
1376 | llgtr %r2,%r2 # const char * | ||
1377 | jg sys_mq_unlink | ||
1378 | |||
1379 | .globl compat_sys_mq_timedsend_wrapper | ||
1380 | compat_sys_mq_timedsend_wrapper: | ||
1381 | lgfr %r2,%r2 # mqd_t | ||
1382 | llgtr %r3,%r3 # const char * | ||
1383 | llgfr %r4,%r4 # size_t | ||
1384 | llgfr %r5,%r5 # unsigned int | ||
1385 | llgtr %r6,%r6 # const struct compat_timespec * | ||
1386 | jg compat_sys_mq_timedsend | ||
1387 | |||
1388 | .globl compat_sys_mq_timedreceive_wrapper | ||
1389 | compat_sys_mq_timedreceive_wrapper: | ||
1390 | lgfr %r2,%r2 # mqd_t | ||
1391 | llgtr %r3,%r3 # char * | ||
1392 | llgfr %r4,%r4 # size_t | ||
1393 | llgtr %r5,%r5 # unsigned int * | ||
1394 | llgtr %r6,%r6 # const struct compat_timespec * | ||
1395 | jg compat_sys_mq_timedreceive | ||
1396 | |||
1397 | .globl compat_sys_mq_notify_wrapper | ||
1398 | compat_sys_mq_notify_wrapper: | ||
1399 | lgfr %r2,%r2 # mqd_t | ||
1400 | llgtr %r3,%r3 # struct compat_sigevent * | ||
1401 | jg compat_sys_mq_notify | ||
1402 | |||
1403 | .globl compat_sys_mq_getsetattr_wrapper | ||
1404 | compat_sys_mq_getsetattr_wrapper: | ||
1405 | lgfr %r2,%r2 # mqd_t | ||
1406 | llgtr %r3,%r3 # struct compat_mq_attr * | ||
1407 | llgtr %r4,%r4 # struct compat_mq_attr * | ||
1408 | jg compat_sys_mq_getsetattr | ||
1409 | |||
1410 | .globl compat_sys_add_key_wrapper | ||
1411 | compat_sys_add_key_wrapper: | ||
1412 | llgtr %r2,%r2 # const char * | ||
1413 | llgtr %r3,%r3 # const char * | ||
1414 | llgtr %r4,%r4 # const void * | ||
1415 | llgfr %r5,%r5 # size_t | ||
1416 | llgfr %r6,%r6 # (key_serial_t) u32 | ||
1417 | jg sys_add_key | ||
1418 | |||
1419 | .globl compat_sys_request_key_wrapper | ||
1420 | compat_sys_request_key_wrapper: | ||
1421 | llgtr %r2,%r2 # const char * | ||
1422 | llgtr %r3,%r3 # const char * | ||
1423 | llgtr %r4,%r4 # const void * | ||
1424 | llgfr %r5,%r5 # (key_serial_t) u32 | ||
1425 | jg sys_request_key | ||
1426 | |||
1427 | .globl sys32_remap_file_pages_wrapper | ||
1428 | sys32_remap_file_pages_wrapper: | ||
1429 | llgfr %r2,%r2 # unsigned long | ||
1430 | llgfr %r3,%r3 # unsigned long | ||
1431 | llgfr %r4,%r4 # unsigned long | ||
1432 | llgfr %r5,%r5 # unsigned long | ||
1433 | llgfr %r6,%r6 # unsigned long | ||
1434 | jg sys_remap_file_pages | ||
1435 | |||
1436 | .globl compat_sys_waitid_wrapper | ||
1437 | compat_sys_waitid_wrapper: | ||
1438 | lgfr %r2,%r2 # int | ||
1439 | lgfr %r3,%r3 # pid_t | ||
1440 | llgtr %r4,%r4 # siginfo_emu31_t * | ||
1441 | lgfr %r5,%r5 # int | ||
1442 | llgtr %r6,%r6 # struct rusage_emu31 * | ||
1443 | jg compat_sys_waitid | ||
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c new file mode 100644 index 000000000000..44df8dc07c59 --- /dev/null +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/cpcmd.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Christian Borntraeger (cborntra@de.ibm.com), | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/stddef.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <asm/ebcdic.h> | ||
17 | #include <asm/cpcmd.h> | ||
18 | #include <asm/system.h> | ||
19 | |||
20 | static DEFINE_SPINLOCK(cpcmd_lock); | ||
21 | static char cpcmd_buf[240]; | ||
22 | |||
23 | /* | ||
24 | * the caller of __cpcmd has to ensure that the response buffer is below 2 GB | ||
25 | */ | ||
26 | void __cpcmd(char *cmd, char *response, int rlen) | ||
27 | { | ||
28 | const int mask = 0x40000000L; | ||
29 | unsigned long flags; | ||
30 | int cmdlen; | ||
31 | |||
32 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
33 | cmdlen = strlen(cmd); | ||
34 | BUG_ON(cmdlen > 240); | ||
35 | strcpy(cpcmd_buf, cmd); | ||
36 | ASCEBC(cpcmd_buf, cmdlen); | ||
37 | |||
38 | if (response != NULL && rlen > 0) { | ||
39 | memset(response, 0, rlen); | ||
40 | #ifndef CONFIG_ARCH_S390X | ||
41 | asm volatile ("LRA 2,0(%0)\n\t" | ||
42 | "LR 4,%1\n\t" | ||
43 | "O 4,%4\n\t" | ||
44 | "LRA 3,0(%2)\n\t" | ||
45 | "LR 5,%3\n\t" | ||
46 | ".long 0x83240008 # Diagnose X'08'\n\t" | ||
47 | : /* no output */ | ||
48 | : "a" (cpcmd_buf), "d" (cmdlen), | ||
49 | "a" (response), "d" (rlen), "m" (mask) | ||
50 | : "cc", "2", "3", "4", "5" ); | ||
51 | #else /* CONFIG_ARCH_S390X */ | ||
52 | asm volatile (" lrag 2,0(%0)\n" | ||
53 | " lgr 4,%1\n" | ||
54 | " o 4,%4\n" | ||
55 | " lrag 3,0(%2)\n" | ||
56 | " lgr 5,%3\n" | ||
57 | " sam31\n" | ||
58 | " .long 0x83240008 # Diagnose X'08'\n" | ||
59 | " sam64" | ||
60 | : /* no output */ | ||
61 | : "a" (cpcmd_buf), "d" (cmdlen), | ||
62 | "a" (response), "d" (rlen), "m" (mask) | ||
63 | : "cc", "2", "3", "4", "5" ); | ||
64 | #endif /* CONFIG_ARCH_S390X */ | ||
65 | EBCASC(response, rlen); | ||
66 | } else { | ||
67 | #ifndef CONFIG_ARCH_S390X | ||
68 | asm volatile ("LRA 2,0(%0)\n\t" | ||
69 | "LR 3,%1\n\t" | ||
70 | ".long 0x83230008 # Diagnose X'08'\n\t" | ||
71 | : /* no output */ | ||
72 | : "a" (cpcmd_buf), "d" (cmdlen) | ||
73 | : "2", "3" ); | ||
74 | #else /* CONFIG_ARCH_S390X */ | ||
75 | asm volatile (" lrag 2,0(%0)\n" | ||
76 | " lgr 3,%1\n" | ||
77 | " sam31\n" | ||
78 | " .long 0x83230008 # Diagnose X'08'\n" | ||
79 | " sam64" | ||
80 | : /* no output */ | ||
81 | : "a" (cpcmd_buf), "d" (cmdlen) | ||
82 | : "2", "3" ); | ||
83 | #endif /* CONFIG_ARCH_S390X */ | ||
84 | } | ||
85 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
86 | } | ||
87 | |||
88 | EXPORT_SYMBOL(__cpcmd); | ||
89 | |||
90 | #ifdef CONFIG_ARCH_S390X | ||
91 | void cpcmd(char *cmd, char *response, int rlen) | ||
92 | { | ||
93 | char *lowbuf; | ||
94 | if ((rlen == 0) || (response == NULL) | ||
95 | || !((unsigned long)response >> 31)) | ||
96 | __cpcmd(cmd, response, rlen); | ||
97 | else { | ||
98 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | ||
99 | if (!lowbuf) { | ||
100 | printk(KERN_WARNING | ||
101 | "cpcmd: could not allocate response buffer\n"); | ||
102 | return; | ||
103 | } | ||
104 | __cpcmd(cmd, lowbuf, rlen); | ||
105 | memcpy(response, lowbuf, rlen); | ||
106 | kfree(lowbuf); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | EXPORT_SYMBOL(cpcmd); | ||
111 | #endif /* CONFIG_ARCH_S390X */ | ||
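A minimal caller sketch (illustration only, not part of this file), assuming the 64-bit cpcmd() defined above, which bounces the response through a GFP_DMA buffer when needed; "QUERY USERID" and the function name are example values.

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <asm/cpcmd.h>

	static void example_cp_query(void)
	{
		char response[128];

		memset(response, 0, sizeof(response));
		/* leave one byte spare so the converted reply stays NUL-terminated */
		cpcmd("QUERY USERID", response, sizeof(response) - 1);
		printk(KERN_INFO "CP answered: %s\n", response);
	}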
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c new file mode 100644 index 000000000000..91f8ce5543d3 --- /dev/null +++ b/arch/s390/kernel/debug.c | |||
@@ -0,0 +1,1286 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/debug.c | ||
3 | * S/390 debug facility | ||
4 | * | ||
5 | * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, | ||
6 | * IBM Corporation | ||
7 | * Author(s): Michael Holzheu (holzheu@de.ibm.com), | ||
8 | * Holger Smolinski (Holger.Smolinski@de.ibm.com) | ||
9 | * | ||
10 | * Bugreports to: <Linux390@de.ibm.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/stddef.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/semaphore.h> | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | |||
26 | #include <asm/debug.h> | ||
27 | |||
28 | #define DEBUG_PROLOG_ENTRY -1 | ||
29 | |||
30 | /* typedefs */ | ||
31 | |||
32 | typedef struct file_private_info { | ||
33 | loff_t offset; /* offset of last read in file */ | ||
34 | int act_area; /* number of last formatted area */ | ||
35 | int act_entry; /* last formatted entry (offset */ | ||
36 | /* relative to beginning of last */ | ||
37 | /* formatted area) */ | ||
38 | size_t act_entry_offset; /* offset up to which the last */ | ||
39 | /* formatted entry was copied to */ | ||
40 | /* userland in the last read */ | ||
41 | char temp_buf[2048]; /* buffer for output */ | ||
42 | debug_info_t *debug_info_org; /* original debug information */ | ||
43 | debug_info_t *debug_info_snap; /* snapshot of debug information */ | ||
44 | struct debug_view *view; /* used view of debug info */ | ||
45 | } file_private_info_t; | ||
46 | |||
47 | typedef struct | ||
48 | { | ||
49 | char *string; | ||
50 | /* | ||
51 | * This assumes that all args are converted into longs. | ||
52 | * On s390 this is the case for all parameter types | ||
53 | * except floats and, on 31 bit, long long. | ||
54 | * | ||
55 | */ | ||
56 | long args[0]; | ||
57 | } debug_sprintf_entry_t; | ||
58 | |||
59 | |||
60 | extern void tod_to_timeval(uint64_t todval, struct timeval *xtime); | ||
61 | |||
62 | /* internal function prototypes */ | ||
63 | |||
64 | static int debug_init(void); | ||
65 | static ssize_t debug_output(struct file *file, char __user *user_buf, | ||
66 | size_t user_len, loff_t * offset); | ||
67 | static ssize_t debug_input(struct file *file, const char __user *user_buf, | ||
68 | size_t user_len, loff_t * offset); | ||
69 | static int debug_open(struct inode *inode, struct file *file); | ||
70 | static int debug_close(struct inode *inode, struct file *file); | ||
71 | static debug_info_t* debug_info_create(char *name, int page_order, int nr_areas, int buf_size); | ||
72 | static void debug_info_get(debug_info_t *); | ||
73 | static void debug_info_put(debug_info_t *); | ||
74 | static int debug_prolog_level_fn(debug_info_t * id, | ||
75 | struct debug_view *view, char *out_buf); | ||
76 | static int debug_input_level_fn(debug_info_t * id, struct debug_view *view, | ||
77 | struct file *file, const char __user *user_buf, | ||
78 | size_t user_buf_size, loff_t * offset); | ||
79 | static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view, | ||
80 | struct file *file, const char __user *user_buf, | ||
81 | size_t user_buf_size, loff_t * offset); | ||
82 | static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view, | ||
83 | char *out_buf, const char *in_buf); | ||
84 | static int debug_raw_format_fn(debug_info_t * id, | ||
85 | struct debug_view *view, char *out_buf, | ||
86 | const char *in_buf); | ||
87 | static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view, | ||
88 | int area, debug_entry_t * entry, char *out_buf); | ||
89 | |||
90 | static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view, | ||
91 | char *out_buf, debug_sprintf_entry_t *curr_event); | ||
92 | |||
93 | /* globals */ | ||
94 | |||
95 | struct debug_view debug_raw_view = { | ||
96 | "raw", | ||
97 | NULL, | ||
98 | &debug_raw_header_fn, | ||
99 | &debug_raw_format_fn, | ||
100 | NULL, | ||
101 | NULL | ||
102 | }; | ||
103 | |||
104 | struct debug_view debug_hex_ascii_view = { | ||
105 | "hex_ascii", | ||
106 | NULL, | ||
107 | &debug_dflt_header_fn, | ||
108 | &debug_hex_ascii_format_fn, | ||
109 | NULL, | ||
110 | NULL | ||
111 | }; | ||
112 | |||
113 | struct debug_view debug_level_view = { | ||
114 | "level", | ||
115 | &debug_prolog_level_fn, | ||
116 | NULL, | ||
117 | NULL, | ||
118 | &debug_input_level_fn, | ||
119 | NULL | ||
120 | }; | ||
121 | |||
122 | struct debug_view debug_flush_view = { | ||
123 | "flush", | ||
124 | NULL, | ||
125 | NULL, | ||
126 | NULL, | ||
127 | &debug_input_flush_fn, | ||
128 | NULL | ||
129 | }; | ||
130 | |||
131 | struct debug_view debug_sprintf_view = { | ||
132 | "sprintf", | ||
133 | NULL, | ||
134 | &debug_dflt_header_fn, | ||
135 | (debug_format_proc_t*)&debug_sprintf_format_fn, | ||
136 | NULL, | ||
137 | NULL | ||
138 | }; | ||
139 | |||
140 | |||
141 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; | ||
142 | |||
143 | /* static globals */ | ||
144 | |||
145 | static debug_info_t *debug_area_first = NULL; | ||
146 | static debug_info_t *debug_area_last = NULL; | ||
147 | DECLARE_MUTEX(debug_lock); | ||
148 | |||
149 | static int initialized; | ||
150 | |||
151 | static struct file_operations debug_file_ops = { | ||
152 | .owner = THIS_MODULE, | ||
153 | .read = debug_output, | ||
154 | .write = debug_input, | ||
155 | .open = debug_open, | ||
156 | .release = debug_close, | ||
157 | }; | ||
158 | |||
159 | static struct proc_dir_entry *debug_proc_root_entry; | ||
160 | |||
161 | /* functions */ | ||
162 | |||
163 | /* | ||
164 | * debug_info_alloc | ||
165 | * - alloc new debug-info | ||
166 | */ | ||
167 | |||
168 | static debug_info_t* debug_info_alloc(char *name, int page_order, | ||
169 | int nr_areas, int buf_size) | ||
170 | { | ||
171 | debug_info_t* rc; | ||
172 | int i; | ||
173 | |||
174 | /* alloc everything */ | ||
175 | |||
176 | rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_ATOMIC); | ||
177 | if(!rc) | ||
178 | goto fail_malloc_rc; | ||
179 | rc->active_entry = (int*)kmalloc(nr_areas * sizeof(int), GFP_ATOMIC); | ||
180 | if(!rc->active_entry) | ||
181 | goto fail_malloc_active_entry; | ||
182 | memset(rc->active_entry, 0, nr_areas * sizeof(int)); | ||
183 | rc->areas = (debug_entry_t **) kmalloc(nr_areas * | ||
184 | sizeof(debug_entry_t *), | ||
185 | GFP_ATOMIC); | ||
186 | if (!rc->areas) | ||
187 | goto fail_malloc_areas; | ||
188 | for (i = 0; i < nr_areas; i++) { | ||
189 | rc->areas[i] = (debug_entry_t *) __get_free_pages(GFP_ATOMIC, | ||
190 | page_order); | ||
191 | if (!rc->areas[i]) { | ||
192 | for (i--; i >= 0; i--) { | ||
193 | free_pages((unsigned long) rc->areas[i], | ||
194 | page_order); | ||
195 | } | ||
196 | goto fail_malloc_areas2; | ||
197 | } else { | ||
198 | memset(rc->areas[i], 0, PAGE_SIZE << page_order); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* initialize members */ | ||
203 | |||
204 | spin_lock_init(&rc->lock); | ||
205 | rc->page_order = page_order; | ||
206 | rc->nr_areas = nr_areas; | ||
207 | rc->active_area = 0; | ||
208 | rc->level = DEBUG_DEFAULT_LEVEL; | ||
209 | rc->buf_size = buf_size; | ||
210 | rc->entry_size = sizeof(debug_entry_t) + buf_size; | ||
211 | strlcpy(rc->name, name, sizeof(rc->name)); | ||
212 | memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); | ||
213 | #ifdef CONFIG_PROC_FS | ||
214 | memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS * | ||
215 | sizeof(struct proc_dir_entry*)); | ||
216 | #endif /* CONFIG_PROC_FS */ | ||
217 | atomic_set(&(rc->ref_count), 0); | ||
218 | |||
219 | return rc; | ||
220 | |||
221 | fail_malloc_areas2: | ||
222 | kfree(rc->areas); | ||
223 | fail_malloc_areas: | ||
224 | kfree(rc->active_entry); | ||
225 | fail_malloc_active_entry: | ||
226 | kfree(rc); | ||
227 | fail_malloc_rc: | ||
228 | return NULL; | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * debug_info_free | ||
233 | * - free the memory used by a debug-info | ||
234 | */ | ||
235 | |||
236 | static void debug_info_free(debug_info_t* db_info){ | ||
237 | int i; | ||
238 | for (i = 0; i < db_info->nr_areas; i++) { | ||
239 | free_pages((unsigned long) db_info->areas[i], | ||
240 | db_info->page_order); | ||
241 | } | ||
242 | kfree(db_info->areas); | ||
243 | kfree(db_info->active_entry); | ||
244 | kfree(db_info); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * debug_info_create | ||
249 | * - create new debug-info | ||
250 | */ | ||
251 | |||
252 | static debug_info_t* debug_info_create(char *name, int page_order, | ||
253 | int nr_areas, int buf_size) | ||
254 | { | ||
255 | debug_info_t* rc; | ||
256 | |||
257 | rc = debug_info_alloc(name, page_order, nr_areas, buf_size); | ||
258 | if(!rc) | ||
259 | goto out; | ||
260 | |||
261 | |||
262 | /* create proc root directory */ | ||
263 | rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry); | ||
264 | |||
265 | /* append new element to linked list */ | ||
266 | if (debug_area_first == NULL) { | ||
267 | /* first element in list */ | ||
268 | debug_area_first = rc; | ||
269 | rc->prev = NULL; | ||
270 | } else { | ||
271 | /* append element to end of list */ | ||
272 | debug_area_last->next = rc; | ||
273 | rc->prev = debug_area_last; | ||
274 | } | ||
275 | debug_area_last = rc; | ||
276 | rc->next = NULL; | ||
277 | |||
278 | debug_info_get(rc); | ||
279 | out: | ||
280 | return rc; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * debug_info_copy | ||
285 | * - copy debug-info | ||
286 | */ | ||
287 | |||
288 | static debug_info_t* debug_info_copy(debug_info_t* in) | ||
289 | { | ||
290 | int i; | ||
291 | debug_info_t* rc; | ||
292 | rc = debug_info_alloc(in->name, in->page_order, | ||
293 | in->nr_areas, in->buf_size); | ||
294 | if(!rc) | ||
295 | goto out; | ||
296 | |||
297 | for(i = 0; i < in->nr_areas; i++){ | ||
298 | memcpy(rc->areas[i],in->areas[i], PAGE_SIZE << in->page_order); | ||
299 | } | ||
300 | out: | ||
301 | return rc; | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * debug_info_get | ||
306 | * - increments reference count for debug-info | ||
307 | */ | ||
308 | |||
309 | static void debug_info_get(debug_info_t * db_info) | ||
310 | { | ||
311 | if (db_info) | ||
312 | atomic_inc(&db_info->ref_count); | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * debug_info_put: | ||
317 | * - decreases reference count for debug-info and frees it if necessary | ||
318 | */ | ||
319 | |||
320 | static void debug_info_put(debug_info_t *db_info) | ||
321 | { | ||
322 | int i; | ||
323 | |||
324 | if (!db_info) | ||
325 | return; | ||
326 | if (atomic_dec_and_test(&db_info->ref_count)) { | ||
327 | #ifdef DEBUG | ||
328 | printk(KERN_INFO "debug: freeing debug area %p (%s)\n", | ||
329 | db_info, db_info->name); | ||
330 | #endif | ||
331 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | ||
332 | if (db_info->views[i] == NULL) | ||
333 | continue; | ||
334 | #ifdef CONFIG_PROC_FS | ||
335 | remove_proc_entry(db_info->proc_entries[i]->name, | ||
336 | db_info->proc_root_entry); | ||
337 | #endif | ||
338 | } | ||
339 | #ifdef CONFIG_PROC_FS | ||
340 | remove_proc_entry(db_info->proc_root_entry->name, | ||
341 | debug_proc_root_entry); | ||
342 | #endif | ||
343 | if(db_info == debug_area_first) | ||
344 | debug_area_first = db_info->next; | ||
345 | if(db_info == debug_area_last) | ||
346 | debug_area_last = db_info->prev; | ||
347 | if(db_info->prev) db_info->prev->next = db_info->next; | ||
348 | if(db_info->next) db_info->next->prev = db_info->prev; | ||
349 | debug_info_free(db_info); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * debug_format_entry: | ||
355 | * - format one debug entry and return size of formatted data | ||
356 | */ | ||
357 | |||
358 | static int debug_format_entry(file_private_info_t *p_info) | ||
359 | { | ||
360 | debug_info_t *id_org = p_info->debug_info_org; | ||
361 | debug_info_t *id_snap = p_info->debug_info_snap; | ||
362 | struct debug_view *view = p_info->view; | ||
363 | debug_entry_t *act_entry; | ||
364 | size_t len = 0; | ||
365 | if(p_info->act_entry == DEBUG_PROLOG_ENTRY){ | ||
366 | /* print prolog */ | ||
367 | if (view->prolog_proc) | ||
368 | len += view->prolog_proc(id_org, view,p_info->temp_buf); | ||
369 | goto out; | ||
370 | } | ||
371 | |||
372 | act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area] + | ||
373 | p_info->act_entry); | ||
374 | |||
375 | if (act_entry->id.stck == 0LL) | ||
376 | goto out; /* empty entry */ | ||
377 | if (view->header_proc) | ||
378 | len += view->header_proc(id_org, view, p_info->act_area, | ||
379 | act_entry, p_info->temp_buf + len); | ||
380 | if (view->format_proc) | ||
381 | len += view->format_proc(id_org, view, p_info->temp_buf + len, | ||
382 | DEBUG_DATA(act_entry)); | ||
383 | out: | ||
384 | return len; | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * debug_next_entry: | ||
389 | * - goto next entry in p_info | ||
390 | */ | ||
391 | |||
392 | extern inline int debug_next_entry(file_private_info_t *p_info) | ||
393 | { | ||
394 | debug_info_t *id = p_info->debug_info_snap; | ||
395 | if(p_info->act_entry == DEBUG_PROLOG_ENTRY){ | ||
396 | p_info->act_entry = 0; | ||
397 | goto out; | ||
398 | } | ||
399 | if ((p_info->act_entry += id->entry_size) | ||
400 | > ((PAGE_SIZE << (id->page_order)) | ||
401 | - id->entry_size)){ | ||
402 | |||
403 | /* next area */ | ||
404 | p_info->act_entry = 0; | ||
405 | p_info->act_area++; | ||
406 | if(p_info->act_area >= id->nr_areas) | ||
407 | return 1; | ||
408 | } | ||
409 | out: | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * debug_output: | ||
415 | * - called for user read() | ||
416 | * - copies formatted debug entries to the user buffer | ||
417 | */ | ||
418 | |||
419 | static ssize_t debug_output(struct file *file, /* file descriptor */ | ||
420 | char __user *user_buf, /* user buffer */ | ||
421 | size_t len, /* length of buffer */ | ||
422 | loff_t *offset) /* offset in the file */ | ||
423 | { | ||
424 | size_t count = 0; | ||
425 | size_t entry_offset, size = 0; | ||
426 | file_private_info_t *p_info; | ||
427 | |||
428 | p_info = ((file_private_info_t *) file->private_data); | ||
429 | if (*offset != p_info->offset) | ||
430 | return -EPIPE; | ||
431 | if(p_info->act_area >= p_info->debug_info_snap->nr_areas) | ||
432 | return 0; | ||
433 | |||
434 | entry_offset = p_info->act_entry_offset; | ||
435 | |||
436 | while(count < len){ | ||
437 | size = debug_format_entry(p_info); | ||
438 | size = min((len - count), (size - entry_offset)); | ||
439 | |||
440 | if(size){ | ||
441 | if (copy_to_user(user_buf + count, | ||
442 | p_info->temp_buf + entry_offset, size)) | ||
443 | return -EFAULT; | ||
444 | } | ||
445 | count += size; | ||
446 | entry_offset = 0; | ||
447 | if(count != len) | ||
448 | if(debug_next_entry(p_info)) | ||
449 | goto out; | ||
450 | } | ||
451 | out: | ||
452 | p_info->offset = *offset + count; | ||
453 | p_info->act_entry_offset = size; | ||
454 | *offset = p_info->offset; | ||
455 | return count; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * debug_input: | ||
460 | * - called for user write() | ||
461 | * - calls input function of view | ||
462 | */ | ||
463 | |||
464 | static ssize_t debug_input(struct file *file, | ||
465 | const char __user *user_buf, size_t length, | ||
466 | loff_t *offset) | ||
467 | { | ||
468 | int rc = 0; | ||
469 | file_private_info_t *p_info; | ||
470 | |||
471 | down(&debug_lock); | ||
472 | p_info = ((file_private_info_t *) file->private_data); | ||
473 | if (p_info->view->input_proc) | ||
474 | rc = p_info->view->input_proc(p_info->debug_info_org, | ||
475 | p_info->view, file, user_buf, | ||
476 | length, offset); | ||
477 | else | ||
478 | rc = -EPERM; | ||
479 | up(&debug_lock); | ||
480 | return rc; /* number of input characters */ | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * debug_open: | ||
485 | * - called for user open() | ||
486 | * - creates a snapshot of the debug area and stores it in the | ||
487 | * private_data area of the file handle | ||
488 | */ | ||
489 | |||
490 | static int debug_open(struct inode *inode, struct file *file) | ||
491 | { | ||
492 | int i = 0, rc = 0; | ||
493 | file_private_info_t *p_info; | ||
494 | debug_info_t *debug_info, *debug_info_snapshot; | ||
495 | |||
496 | #ifdef DEBUG | ||
497 | printk("debug_open\n"); | ||
498 | #endif | ||
499 | down(&debug_lock); | ||
500 | |||
501 | /* find debug log and view */ | ||
502 | |||
503 | debug_info = debug_area_first; | ||
504 | while(debug_info != NULL){ | ||
505 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | ||
506 | if (debug_info->views[i] == NULL) | ||
507 | continue; | ||
508 | else if (debug_info->proc_entries[i] == | ||
509 | PDE(file->f_dentry->d_inode)) { | ||
510 | goto found; /* found view ! */ | ||
511 | } | ||
512 | } | ||
513 | debug_info = debug_info->next; | ||
514 | } | ||
515 | /* no entry found */ | ||
516 | rc = -EINVAL; | ||
517 | goto out; | ||
518 | |||
519 | found: | ||
520 | |||
521 | /* make a snapshot of the current debug areas to get a consistent view */ | ||
522 | |||
523 | debug_info_snapshot = debug_info_copy(debug_info); | ||
524 | |||
525 | if(!debug_info_snapshot){ | ||
526 | #ifdef DEBUG | ||
527 | printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n"); | ||
528 | #endif | ||
529 | rc = -ENOMEM; | ||
530 | goto out; | ||
531 | } | ||
532 | |||
533 | if ((file->private_data = | ||
534 | kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) { | ||
535 | #ifdef DEBUG | ||
536 | printk(KERN_ERR "debug_open: kmalloc failed\n"); | ||
537 | #endif | ||
538 | debug_info_free(debug_info_snapshot); | ||
539 | rc = -ENOMEM; | ||
540 | goto out; | ||
541 | } | ||
542 | p_info = (file_private_info_t *) file->private_data; | ||
543 | p_info->offset = 0; | ||
544 | p_info->debug_info_snap = debug_info_snapshot; | ||
545 | p_info->debug_info_org = debug_info; | ||
546 | p_info->view = debug_info->views[i]; | ||
547 | p_info->act_area = 0; | ||
548 | p_info->act_entry = DEBUG_PROLOG_ENTRY; | ||
549 | p_info->act_entry_offset = 0; | ||
550 | |||
551 | debug_info_get(debug_info); | ||
552 | |||
553 | out: | ||
554 | up(&debug_lock); | ||
555 | return rc; | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * debug_close: | ||
560 | * - called for user close() | ||
561 | * - deletes private_data area of the file handle | ||
562 | */ | ||
563 | |||
564 | static int debug_close(struct inode *inode, struct file *file) | ||
565 | { | ||
566 | file_private_info_t *p_info; | ||
567 | #ifdef DEBUG | ||
568 | printk("debug_close\n"); | ||
569 | #endif | ||
570 | p_info = (file_private_info_t *) file->private_data; | ||
571 | debug_info_free(p_info->debug_info_snap); | ||
572 | debug_info_put(p_info->debug_info_org); | ||
573 | kfree(file->private_data); | ||
574 | return 0; /* success */ | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * debug_register: | ||
579 | * - creates and initializes debug area for the caller | ||
580 | * - returns handle for debug area | ||
581 | */ | ||
582 | |||
583 | debug_info_t *debug_register | ||
584 | (char *name, int page_order, int nr_areas, int buf_size) | ||
585 | { | ||
586 | debug_info_t *rc = NULL; | ||
587 | |||
588 | if (!initialized) | ||
589 | BUG(); | ||
590 | down(&debug_lock); | ||
591 | |||
592 | /* create new debug_info */ | ||
593 | |||
594 | rc = debug_info_create(name, page_order, nr_areas, buf_size); | ||
595 | if(!rc) | ||
596 | goto out; | ||
597 | debug_register_view(rc, &debug_level_view); | ||
598 | debug_register_view(rc, &debug_flush_view); | ||
599 | #ifdef DEBUG | ||
600 | printk(KERN_INFO | ||
601 | "debug: reserved %d areas of %d pages for debugging %s\n", | ||
602 | nr_areas, 1 << page_order, rc->name); | ||
603 | #endif | ||
604 | out: | ||
605 | if (rc == NULL){ | ||
606 | printk(KERN_ERR "debug: debug_register failed for %s\n",name); | ||
607 | } | ||
608 | up(&debug_lock); | ||
609 | return rc; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * debug_unregister: | ||
614 | * - give back debug area | ||
615 | */ | ||
616 | |||
617 | void debug_unregister(debug_info_t * id) | ||
618 | { | ||
619 | if (!id) | ||
620 | goto out; | ||
621 | down(&debug_lock); | ||
622 | #ifdef DEBUG | ||
623 | printk(KERN_INFO "debug: unregistering %s\n", id->name); | ||
624 | #endif | ||
625 | debug_info_put(id); | ||
626 | up(&debug_lock); | ||
627 | |||
628 | out: | ||
629 | return; | ||
630 | } | ||
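Registration sketch (illustration only, not part of this file): a typical user registers one debug area at module init time and gives it back at exit. The name "mydriver" and the sizing (two areas of one page, 16-byte entries) are arbitrary example values.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/errno.h>
	#include <asm/debug.h>

	static debug_info_t *my_dbf;	/* hypothetical handle */

	static int __init mydriver_init(void)
	{
		/* name, page_order (0 = one page per area), nr_areas, buf_size */
		my_dbf = debug_register("mydriver", 0, 2, 16);
		if (!my_dbf)
			return -ENOMEM;
		debug_register_view(my_dbf, &debug_hex_ascii_view);
		debug_set_level(my_dbf, 3);	/* record events up to level 3 */
		return 0;
	}

	static void __exit mydriver_exit(void)
	{
		debug_unregister(my_dbf);
	}

	module_init(mydriver_init);
	module_exit(mydriver_exit);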
631 | |||
632 | /* | ||
633 | * debug_set_level: | ||
634 | * - set actual debug level | ||
635 | */ | ||
636 | |||
637 | void debug_set_level(debug_info_t* id, int new_level) | ||
638 | { | ||
639 | unsigned long flags; | ||
640 | if(!id) | ||
641 | return; | ||
642 | spin_lock_irqsave(&id->lock,flags); | ||
643 | if(new_level == DEBUG_OFF_LEVEL){ | ||
644 | id->level = DEBUG_OFF_LEVEL; | ||
645 | printk(KERN_INFO "debug: %s: switched off\n",id->name); | ||
646 | } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { | ||
647 | printk(KERN_INFO | ||
648 | "debug: %s: level %i is out of range (%i - %i)\n", | ||
649 | id->name, new_level, 0, DEBUG_MAX_LEVEL); | ||
650 | } else { | ||
651 | id->level = new_level; | ||
652 | #ifdef DEBUG | ||
653 | printk(KERN_INFO | ||
654 | "debug: %s: new level %i\n",id->name,id->level); | ||
655 | #endif | ||
656 | } | ||
657 | spin_unlock_irqrestore(&id->lock,flags); | ||
658 | } | ||
659 | |||
660 | |||
661 | /* | ||
662 | * proceed_active_entry: | ||
663 | * - set active entry to next in the ring buffer | ||
664 | */ | ||
665 | |||
666 | extern inline void proceed_active_entry(debug_info_t * id) | ||
667 | { | ||
668 | if ((id->active_entry[id->active_area] += id->entry_size) | ||
669 | > ((PAGE_SIZE << (id->page_order)) - id->entry_size)) | ||
670 | id->active_entry[id->active_area] = 0; | ||
671 | } | ||
672 | |||
673 | /* | ||
674 | * proceed_active_area: | ||
675 | * - set active area to next in the ring buffer | ||
676 | */ | ||
677 | |||
678 | extern inline void proceed_active_area(debug_info_t * id) | ||
679 | { | ||
680 | id->active_area++; | ||
681 | id->active_area = id->active_area % id->nr_areas; | ||
682 | } | ||
683 | |||
684 | /* | ||
685 | * get_active_entry: | ||
686 | */ | ||
687 | |||
688 | extern inline debug_entry_t *get_active_entry(debug_info_t * id) | ||
689 | { | ||
690 | return (debug_entry_t *) ((char *) id->areas[id->active_area] + | ||
691 | id->active_entry[id->active_area]); | ||
692 | } | ||
693 | |||
694 | /* | ||
695 | * debug_finish_entry: | ||
696 | * - set timestamp, caller address, cpu number etc. | ||
697 | */ | ||
698 | |||
699 | extern inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active, | ||
700 | int level, int exception) | ||
701 | { | ||
702 | STCK(active->id.stck); | ||
703 | active->id.fields.cpuid = smp_processor_id(); | ||
704 | active->caller = __builtin_return_address(0); | ||
705 | active->id.fields.exception = exception; | ||
706 | active->id.fields.level = level; | ||
707 | proceed_active_entry(id); | ||
708 | if(exception) | ||
709 | proceed_active_area(id); | ||
710 | } | ||
711 | |||
712 | static int debug_stoppable=1; | ||
713 | static int debug_active=1; | ||
714 | |||
715 | #define CTL_S390DBF 5677 | ||
716 | #define CTL_S390DBF_STOPPABLE 5678 | ||
717 | #define CTL_S390DBF_ACTIVE 5679 | ||
718 | |||
719 | /* | ||
720 | * proc handler for the debug_active sysctl: | ||
721 | * always allow read; allow write only if debug_stoppable is set or | ||
722 | * if debug_active is already off | ||
723 | */ | ||
724 | static int s390dbf_procactive(ctl_table *table, int write, struct file *filp, | ||
725 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
726 | { | ||
727 | if (!write || debug_stoppable || !debug_active) | ||
728 | return proc_dointvec(table, write, filp, buffer, lenp, ppos); | ||
729 | else | ||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | |||
734 | static struct ctl_table s390dbf_table[] = { | ||
735 | { | ||
736 | .ctl_name = CTL_S390DBF_STOPPABLE, | ||
737 | .procname = "debug_stoppable", | ||
738 | .data = &debug_stoppable, | ||
739 | .maxlen = sizeof(int), | ||
740 | .mode = S_IRUGO | S_IWUSR, | ||
741 | .proc_handler = &proc_dointvec, | ||
742 | .strategy = &sysctl_intvec, | ||
743 | }, | ||
744 | { | ||
745 | .ctl_name = CTL_S390DBF_ACTIVE, | ||
746 | .procname = "debug_active", | ||
747 | .data = &debug_active, | ||
748 | .maxlen = sizeof(int), | ||
749 | .mode = S_IRUGO | S_IWUSR, | ||
750 | .proc_handler = &s390dbf_procactive, | ||
751 | .strategy = &sysctl_intvec, | ||
752 | }, | ||
753 | { .ctl_name = 0 } | ||
754 | }; | ||
755 | |||
756 | static struct ctl_table s390dbf_dir_table[] = { | ||
757 | { | ||
758 | .ctl_name = CTL_S390DBF, | ||
759 | .procname = "s390dbf", | ||
760 | .maxlen = 0, | ||
761 | .mode = S_IRUGO | S_IXUGO, | ||
762 | .child = s390dbf_table, | ||
763 | }, | ||
764 | { .ctl_name = 0 } | ||
765 | }; | ||
766 | |||
767 | struct ctl_table_header *s390dbf_sysctl_header; | ||
768 | |||
769 | void debug_stop_all(void) | ||
770 | { | ||
771 | if (debug_stoppable) | ||
772 | debug_active = 0; | ||
773 | } | ||
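Usage sketch (illustration only): error paths can freeze all trace buffers so that a subsequent dump still contains the events leading up to the failure; whether tracing actually stops is controlled by the debug_stoppable sysctl above. The function name is hypothetical.

	#include <asm/debug.h>

	static void handle_fatal_condition(void)
	{
		/* freeze all s390dbf areas for post-mortem analysis */
		debug_stop_all();
		/* ... report the error ... */
	}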
774 | |||
775 | |||
776 | /* | ||
777 | * debug_event_common: | ||
778 | * - write debug entry with given size | ||
779 | */ | ||
780 | |||
781 | debug_entry_t *debug_event_common(debug_info_t * id, int level, const void *buf, | ||
782 | int len) | ||
783 | { | ||
784 | unsigned long flags; | ||
785 | debug_entry_t *active; | ||
786 | |||
787 | if (!debug_active) | ||
788 | return NULL; | ||
789 | spin_lock_irqsave(&id->lock, flags); | ||
790 | active = get_active_entry(id); | ||
791 | memset(DEBUG_DATA(active), 0, id->buf_size); | ||
792 | memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); | ||
793 | debug_finish_entry(id, active, level, 0); | ||
794 | spin_unlock_irqrestore(&id->lock, flags); | ||
795 | |||
796 | return active; | ||
797 | } | ||
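Raw-event sketch (illustration only): a caller hands an arbitrary buffer to debug_event_common(), and at most buf_size bytes of it are copied into the current ring-buffer entry. "my_dbf" is a hypothetical handle obtained from debug_register().

	#include <asm/debug.h>

	extern debug_info_t *my_dbf;	/* hypothetical handle from debug_register() */

	static void trace_raw_blob(const void *data, int len)
	{
		/* level 1 event; only min(len, buf_size) bytes are stored */
		debug_event_common(my_dbf, 1, data, len);
	}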
798 | |||
799 | /* | ||
800 | * debug_exception_common: | ||
801 | * - write debug entry with given size and switch to next debug area | ||
802 | */ | ||
803 | |||
804 | debug_entry_t *debug_exception_common(debug_info_t * id, int level, | ||
805 | const void *buf, int len) | ||
806 | { | ||
807 | unsigned long flags; | ||
808 | debug_entry_t *active; | ||
809 | |||
810 | if (!debug_active) | ||
811 | return NULL; | ||
812 | spin_lock_irqsave(&id->lock, flags); | ||
813 | active = get_active_entry(id); | ||
814 | memset(DEBUG_DATA(active), 0, id->buf_size); | ||
815 | memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); | ||
816 | debug_finish_entry(id, active, level, 1); | ||
817 | spin_unlock_irqrestore(&id->lock, flags); | ||
818 | |||
819 | return active; | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * counts arguments in format string for sprintf view | ||
824 | */ | ||
825 | |||
826 | extern inline int debug_count_numargs(char *string) | ||
827 | { | ||
828 | int numargs=0; | ||
829 | |||
830 | while(*string) { | ||
831 | if(*string++=='%') | ||
832 | numargs++; | ||
833 | } | ||
834 | return(numargs); | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * debug_sprintf_event: | ||
839 | */ | ||
840 | |||
841 | debug_entry_t *debug_sprintf_event(debug_info_t* id, | ||
842 | int level,char *string,...) | ||
843 | { | ||
844 | va_list ap; | ||
845 | int numargs,idx; | ||
846 | unsigned long flags; | ||
847 | debug_sprintf_entry_t *curr_event; | ||
848 | debug_entry_t *active; | ||
849 | |||
850 | if((!id) || (level > id->level)) | ||
851 | return NULL; | ||
852 | if (!debug_active) | ||
853 | return NULL; | ||
854 | numargs=debug_count_numargs(string); | ||
855 | |||
856 | spin_lock_irqsave(&id->lock, flags); | ||
857 | active = get_active_entry(id); | ||
858 | curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active); | ||
859 | va_start(ap,string); | ||
860 | curr_event->string=string; | ||
861 | for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++) | ||
862 | curr_event->args[idx]=va_arg(ap,long); | ||
863 | va_end(ap); | ||
864 | debug_finish_entry(id, active, level, 0); | ||
865 | spin_unlock_irqrestore(&id->lock, flags); | ||
866 | |||
867 | return active; | ||
868 | } | ||
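Sprintf-event sketch (illustration only): with debug_sprintf_view registered on an area whose buf_size holds the format-string pointer plus a few longs, callers log events as a format string and long-sized arguments. "my_dbf", "reqno" and "rc" are hypothetical names.

	#include <asm/debug.h>

	extern debug_info_t *my_dbf;	/* hypothetical handle from debug_register() */

	static void trace_request_done(int reqno, int rc)
	{
		/* level 2 event; the format pointer and the args land in the entry */
		debug_sprintf_event(my_dbf, 2, "request %d done, rc=%d\n", reqno, rc);
	}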
869 | |||
870 | /* | ||
871 | * debug_sprintf_exception: | ||
872 | */ | ||
873 | |||
874 | debug_entry_t *debug_sprintf_exception(debug_info_t* id, | ||
875 | int level,char *string,...) | ||
876 | { | ||
877 | va_list ap; | ||
878 | int numargs,idx; | ||
879 | unsigned long flags; | ||
880 | debug_sprintf_entry_t *curr_event; | ||
881 | debug_entry_t *active; | ||
882 | |||
883 | if((!id) || (level > id->level)) | ||
884 | return NULL; | ||
885 | if (!debug_active) | ||
886 | return NULL; | ||
887 | |||
888 | numargs=debug_count_numargs(string); | ||
889 | |||
890 | spin_lock_irqsave(&id->lock, flags); | ||
891 | active = get_active_entry(id); | ||
892 | curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active); | ||
893 | va_start(ap,string); | ||
894 | curr_event->string=string; | ||
895 | for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++) | ||
896 | curr_event->args[idx]=va_arg(ap,long); | ||
897 | va_end(ap); | ||
898 | debug_finish_entry(id, active, level, 1); | ||
899 | spin_unlock_irqrestore(&id->lock, flags); | ||
900 | |||
901 | return active; | ||
902 | } | ||
903 | |||
904 | /* | ||
905 | * debug_init: | ||
906 | * - is called exactly once to initialize the debug feature | ||
907 | */ | ||
908 | |||
909 | static int __init debug_init(void) | ||
910 | { | ||
911 | int rc = 0; | ||
912 | |||
913 | s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table, 1); | ||
914 | down(&debug_lock); | ||
915 | #ifdef CONFIG_PROC_FS | ||
916 | debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL); | ||
917 | #endif /* CONFIG_PROC_FS */ | ||
918 | printk(KERN_INFO "debug: Initialization complete\n"); | ||
919 | initialized = 1; | ||
920 | up(&debug_lock); | ||
921 | |||
922 | return rc; | ||
923 | } | ||
924 | |||
925 | /* | ||
926 | * debug_register_view: | ||
927 | */ | ||
928 | |||
929 | int debug_register_view(debug_info_t * id, struct debug_view *view) | ||
930 | { | ||
931 | int rc = 0; | ||
932 | int i; | ||
933 | unsigned long flags; | ||
934 | mode_t mode = S_IFREG; | ||
935 | struct proc_dir_entry *pde; | ||
936 | |||
937 | if (!id) | ||
938 | goto out; | ||
939 | if (view->prolog_proc || view->format_proc || view->header_proc) | ||
940 | mode |= S_IRUSR; | ||
941 | if (view->input_proc) | ||
942 | mode |= S_IWUSR; | ||
943 | pde = create_proc_entry(view->name, mode, id->proc_root_entry); | ||
944 | if (!pde){ | ||
945 | printk(KERN_WARNING "debug: create_proc_entry() failed! Cannot register view %s/%s\n", id->name,view->name); | ||
946 | rc = -1; | ||
947 | goto out; | ||
948 | } | ||
949 | |||
950 | spin_lock_irqsave(&id->lock, flags); | ||
951 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | ||
952 | if (id->views[i] == NULL) | ||
953 | break; | ||
954 | } | ||
955 | if (i == DEBUG_MAX_VIEWS) { | ||
956 | printk(KERN_WARNING "debug: cannot register view %s/%s\n", | ||
957 | id->name,view->name); | ||
958 | printk(KERN_WARNING | ||
959 | "debug: maximum number of views reached (%i)!\n", i); | ||
960 | remove_proc_entry(pde->name, id->proc_root_entry); | ||
961 | rc = -1; | ||
962 | } | ||
963 | else { | ||
964 | id->views[i] = view; | ||
965 | pde->proc_fops = &debug_file_ops; | ||
966 | id->proc_entries[i] = pde; | ||
967 | } | ||
968 | spin_unlock_irqrestore(&id->lock, flags); | ||
969 | out: | ||
970 | return rc; | ||
971 | } | ||
972 | |||
973 | /* | ||
974 | * debug_unregister_view: | ||
975 | */ | ||
976 | |||
977 | int debug_unregister_view(debug_info_t * id, struct debug_view *view) | ||
978 | { | ||
979 | int rc = 0; | ||
980 | int i; | ||
981 | unsigned long flags; | ||
982 | |||
983 | if (!id) | ||
984 | goto out; | ||
985 | spin_lock_irqsave(&id->lock, flags); | ||
986 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | ||
987 | if (id->views[i] == view) | ||
988 | break; | ||
989 | } | ||
990 | if (i == DEBUG_MAX_VIEWS) | ||
991 | rc = -1; | ||
992 | else { | ||
993 | #ifdef CONFIG_PROC_FS | ||
994 | remove_proc_entry(id->proc_entries[i]->name, | ||
995 | id->proc_root_entry); | ||
996 | #endif | ||
997 | id->views[i] = NULL; | ||
998 | rc = 0; | ||
999 | } | ||
1000 | spin_unlock_irqrestore(&id->lock, flags); | ||
1001 | out: | ||
1002 | return rc; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * functions for debug-views | ||
1007 | *********************************** | ||
1008 | */ | ||
1009 | |||
1010 | /* | ||
1011 | * prints out actual debug level | ||
1012 | */ | ||
1013 | |||
1014 | static int debug_prolog_level_fn(debug_info_t * id, | ||
1015 | struct debug_view *view, char *out_buf) | ||
1016 | { | ||
1017 | int rc = 0; | ||
1018 | |||
1019 | if(id->level == -1) rc = sprintf(out_buf,"-\n"); | ||
1020 | else rc = sprintf(out_buf, "%i\n", id->level); | ||
1021 | return rc; | ||
1022 | } | ||
1023 | |||
1024 | /* | ||
1025 | * reads new debug level | ||
1026 | */ | ||
1027 | |||
1028 | static int debug_input_level_fn(debug_info_t * id, struct debug_view *view, | ||
1029 | struct file *file, const char __user *user_buf, | ||
1030 | size_t in_buf_size, loff_t * offset) | ||
1031 | { | ||
1032 | char input_buf[1]; | ||
1033 | int rc = in_buf_size; | ||
1034 | |||
1035 | if (*offset != 0) | ||
1036 | goto out; | ||
1037 | if (copy_from_user(input_buf, user_buf, 1)){ | ||
1038 | rc = -EFAULT; | ||
1039 | goto out; | ||
1040 | } | ||
1041 | if (isdigit(input_buf[0])) { | ||
1042 | int new_level = ((int) input_buf[0] - (int) '0'); | ||
1043 | debug_set_level(id, new_level); | ||
1044 | } else if(input_buf[0] == '-') { | ||
1045 | debug_set_level(id, DEBUG_OFF_LEVEL); | ||
1046 | } else { | ||
1047 | printk(KERN_INFO "debug: level `%c` is not valid\n", | ||
1048 | input_buf[0]); | ||
1049 | } | ||
1050 | out: | ||
1051 | *offset += in_buf_size; | ||
1052 | return rc; /* number of input characters */ | ||
1053 | } | ||
1054 | |||
1055 | |||
1056 | /* | ||
1057 | * flushes debug areas | ||
1058 | */ | ||
1059 | |||
1060 | void debug_flush(debug_info_t* id, int area) | ||
1061 | { | ||
1062 | unsigned long flags; | ||
1063 | int i; | ||
1064 | |||
1065 | if(!id) | ||
1066 | return; | ||
1067 | spin_lock_irqsave(&id->lock,flags); | ||
1068 | if(area == DEBUG_FLUSH_ALL){ | ||
1069 | id->active_area = 0; | ||
1070 | memset(id->active_entry, 0, id->nr_areas * sizeof(int)); | ||
1071 | for (i = 0; i < id->nr_areas; i++) | ||
1072 | memset(id->areas[i], 0, PAGE_SIZE << id->page_order); | ||
1073 | printk(KERN_INFO "debug: %s: all areas flushed\n",id->name); | ||
1074 | } else if(area >= 0 && area < id->nr_areas) { | ||
1075 | id->active_entry[area] = 0; | ||
1076 | memset(id->areas[area], 0, PAGE_SIZE << id->page_order); | ||
1077 | printk(KERN_INFO | ||
1078 | "debug: %s: area %i has been flushed\n", | ||
1079 | id->name, area); | ||
1080 | } else { | ||
1081 | printk(KERN_INFO | ||
1082 | "debug: %s: area %i cannot be flushed (range: %i - %i)\n", | ||
1083 | id->name, area, 0, id->nr_areas-1); | ||
1084 | } | ||
1085 | spin_unlock_irqrestore(&id->lock,flags); | ||
1086 | } | ||
1087 | |||
1088 | /* | ||
1089 | * view function: flushes debug areas | ||
1090 | */ | ||
1091 | |||
1092 | static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view, | ||
1093 | struct file *file, const char __user *user_buf, | ||
1094 | size_t in_buf_size, loff_t * offset) | ||
1095 | { | ||
1096 | char input_buf[1]; | ||
1097 | int rc = in_buf_size; | ||
1098 | |||
1099 | if (*offset != 0) | ||
1100 | goto out; | ||
1101 | if (copy_from_user(input_buf, user_buf, 1)){ | ||
1102 | rc = -EFAULT; | ||
1103 | goto out; | ||
1104 | } | ||
1105 | if(input_buf[0] == '-') { | ||
1106 | debug_flush(id, DEBUG_FLUSH_ALL); | ||
1107 | goto out; | ||
1108 | } | ||
1109 | if (isdigit(input_buf[0])) { | ||
1110 | int area = ((int) input_buf[0] - (int) '0'); | ||
1111 | debug_flush(id, area); | ||
1112 | goto out; | ||
1113 | } | ||
1114 | |||
1115 | printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]); | ||
1116 | |||
1117 | out: | ||
1118 | *offset += in_buf_size; | ||
1119 | return rc; /* number of input characters */ | ||
1120 | } | ||
1121 | |||
1122 | /* | ||
1123 | * prints debug header in raw format | ||
1124 | */ | ||
1125 | |||
1126 | int debug_raw_header_fn(debug_info_t * id, struct debug_view *view, | ||
1127 | int area, debug_entry_t * entry, char *out_buf) | ||
1128 | { | ||
1129 | int rc; | ||
1130 | |||
1131 | rc = sizeof(debug_entry_t); | ||
1132 | memcpy(out_buf,entry,sizeof(debug_entry_t)); | ||
1133 | return rc; | ||
1134 | } | ||
1135 | |||
1136 | /* | ||
1137 | * prints debug data in raw format | ||
1138 | */ | ||
1139 | |||
1140 | static int debug_raw_format_fn(debug_info_t * id, struct debug_view *view, | ||
1141 | char *out_buf, const char *in_buf) | ||
1142 | { | ||
1143 | int rc; | ||
1144 | |||
1145 | rc = id->buf_size; | ||
1146 | memcpy(out_buf, in_buf, id->buf_size); | ||
1147 | return rc; | ||
1148 | } | ||
1149 | |||
1150 | /* | ||
1151 | * prints debug data in hex/ascii format | ||
1152 | */ | ||
1153 | |||
1154 | static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view, | ||
1155 | char *out_buf, const char *in_buf) | ||
1156 | { | ||
1157 | int i, rc = 0; | ||
1158 | |||
1159 | for (i = 0; i < id->buf_size; i++) { | ||
1160 | rc += sprintf(out_buf + rc, "%02x ", | ||
1161 | ((unsigned char *) in_buf)[i]); | ||
1162 | } | ||
1163 | rc += sprintf(out_buf + rc, "| "); | ||
1164 | for (i = 0; i < id->buf_size; i++) { | ||
1165 | unsigned char c = in_buf[i]; | ||
1166 | if (!isprint(c)) | ||
1167 | rc += sprintf(out_buf + rc, "."); | ||
1168 | else | ||
1169 | rc += sprintf(out_buf + rc, "%c", c); | ||
1170 | } | ||
1171 | rc += sprintf(out_buf + rc, "\n"); | ||
1172 | return rc; | ||
1173 | } | ||
1174 | |||
1175 | /* | ||
1176 | * prints header for debug entry | ||
1177 | */ | ||
1178 | |||
1179 | int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, | ||
1180 | int area, debug_entry_t * entry, char *out_buf) | ||
1181 | { | ||
1182 | struct timeval time_val; | ||
1183 | unsigned long long time; | ||
1184 | char *except_str; | ||
1185 | unsigned long caller; | ||
1186 | int rc = 0; | ||
1187 | unsigned int level; | ||
1188 | |||
1189 | level = entry->id.fields.level; | ||
1190 | time = entry->id.stck; | ||
1191 | /* adjust the TOD clock value to the Unix epoch (1970) */ | ||
1192 | time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); | ||
1193 | tod_to_timeval(time, &time_val); | ||
1194 | |||
1195 | if (entry->id.fields.exception) | ||
1196 | except_str = "*"; | ||
1197 | else | ||
1198 | except_str = "-"; | ||
1199 | caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN; | ||
1200 | rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p ", | ||
1201 | area, time_val.tv_sec, time_val.tv_usec, level, | ||
1202 | except_str, entry->id.fields.cpuid, (void *) caller); | ||
1203 | return rc; | ||
1204 | } | ||
1205 | |||
1206 | /* | ||
1207 | * prints debug data sprintf-formatted: | ||
1208 | * debug_sprintf_event/exception calls must be used together with this view | ||
1209 | */ | ||
1210 | |||
1211 | #define DEBUG_SPRINTF_MAX_ARGS 10 | ||
1212 | |||
1213 | int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view, | ||
1214 | char *out_buf, debug_sprintf_entry_t *curr_event) | ||
1215 | { | ||
1216 | int num_longs, num_used_args = 0,i, rc = 0; | ||
1217 | int index[DEBUG_SPRINTF_MAX_ARGS]; | ||
1218 | |||
1219 | /* number of longs that fit into one entry */ | ||
1220 | num_longs = id->buf_size / sizeof(long); | ||
1221 | |||
1222 | if(num_longs < 1) | ||
1223 | goto out; /* bufsize of entry too small */ | ||
1224 | if(num_longs == 1) { | ||
1225 | /* no args, we use only the string */ | ||
1226 | strcpy(out_buf, curr_event->string); | ||
1227 | rc = strlen(curr_event->string); | ||
1228 | goto out; | ||
1229 | } | ||
1230 | |||
1231 | /* number of arguments used for sprintf (without the format string) */ | ||
1232 | num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1)); | ||
1233 | |||
1234 | memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int)); | ||
1235 | |||
1236 | for(i = 0; i < num_used_args; i++) | ||
1237 | index[i] = i; | ||
1238 | |||
1239 | rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]], | ||
1240 | curr_event->args[index[1]], curr_event->args[index[2]], | ||
1241 | curr_event->args[index[3]], curr_event->args[index[4]], | ||
1242 | curr_event->args[index[5]], curr_event->args[index[6]], | ||
1243 | curr_event->args[index[7]], curr_event->args[index[8]], | ||
1244 | curr_event->args[index[9]]); | ||
1245 | |||
1246 | out: | ||
1247 | |||
1248 | return rc; | ||
1249 | } | ||
1250 | |||
1251 | /* | ||
1252 | * clean up module | ||
1253 | */ | ||
1254 | void __exit debug_exit(void) | ||
1255 | { | ||
1256 | #ifdef DEBUG | ||
1257 | printk("debug_cleanup_module: \n"); | ||
1258 | #endif | ||
1259 | #ifdef CONFIG_PROC_FS | ||
1260 | remove_proc_entry(debug_proc_root_entry->name, NULL); | ||
1261 | #endif /* CONFIG_PROC_FS */ | ||
1262 | unregister_sysctl_table(s390dbf_sysctl_header); | ||
1263 | return; | ||
1264 | } | ||
1265 | |||
1266 | /* | ||
1267 | * module definitions | ||
1268 | */ | ||
1269 | core_initcall(debug_init); | ||
1270 | module_exit(debug_exit); | ||
1271 | MODULE_LICENSE("GPL"); | ||
1272 | |||
1273 | EXPORT_SYMBOL(debug_register); | ||
1274 | EXPORT_SYMBOL(debug_unregister); | ||
1275 | EXPORT_SYMBOL(debug_set_level); | ||
1276 | EXPORT_SYMBOL(debug_stop_all); | ||
1277 | EXPORT_SYMBOL(debug_register_view); | ||
1278 | EXPORT_SYMBOL(debug_unregister_view); | ||
1279 | EXPORT_SYMBOL(debug_event_common); | ||
1280 | EXPORT_SYMBOL(debug_exception_common); | ||
1281 | EXPORT_SYMBOL(debug_hex_ascii_view); | ||
1282 | EXPORT_SYMBOL(debug_raw_view); | ||
1283 | EXPORT_SYMBOL(debug_dflt_header_fn); | ||
1284 | EXPORT_SYMBOL(debug_sprintf_view); | ||
1285 | EXPORT_SYMBOL(debug_sprintf_exception); | ||
1286 | EXPORT_SYMBOL(debug_sprintf_event); | ||
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c new file mode 100644 index 000000000000..bb0f973137f0 --- /dev/null +++ b/arch/s390/kernel/ebcdic.c | |||
@@ -0,0 +1,400 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/ebcdic.c | ||
3 | * EBCDIC -> ASCII, ASCII -> EBCDIC, | ||
4 | * upper to lower case (EBCDIC) conversion tables. | ||
5 | * | ||
6 | * S390 version | ||
7 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
8 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
9 | * Martin Peschke <peschke@fh-brandenburg.de> | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <asm/types.h> | ||
14 | |||
15 | /* | ||
16 | * ASCII (IBM PC 437) -> EBCDIC 037 | ||
17 | */ | ||
18 | __u8 _ascebc[256] = | ||
19 | { | ||
20 | /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ | ||
21 | 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, | ||
22 | /*08 BS HT LF VT FF CR SO SI */ | ||
23 | /* ->NL */ | ||
24 | 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
25 | /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ | ||
26 | 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, | ||
27 | /*18 CAN EM SUB ESC FS GS RS US */ | ||
28 | /* ->IGS ->IRS ->IUS */ | ||
29 | 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, | ||
30 | /*20 SP ! " # $ % & ' */ | ||
31 | 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, | ||
32 | /*28 ( ) * + , - . / */ | ||
33 | 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, | ||
34 | /*30 0 1 2 3 4 5 6 7 */ | ||
35 | 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, | ||
36 | /*38 8 9 : ; < = > ? */ | ||
37 | 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, | ||
38 | /*40 @ A B C D E F G */ | ||
39 | 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, | ||
40 | /*48 H I J K L M N O */ | ||
41 | 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, | ||
42 | /*50 P Q R S T U V W */ | ||
43 | 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, | ||
44 | /*58 X Y Z [ \ ] ^ _ */ | ||
45 | 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, | ||
46 | /*60 ` a b c d e f g */ | ||
47 | 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, | ||
48 | /*68 h i j k l m n o */ | ||
49 | 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, | ||
50 | /*70 p q r s t u v w */ | ||
51 | 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, | ||
52 | /*78 x y z { | } ~ DL */ | ||
53 | 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, | ||
54 | /*80*/ | ||
55 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
56 | /*88*/ | ||
57 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
58 | /*90*/ | ||
59 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
60 | /*98*/ | ||
61 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
62 | /*A0*/ | ||
63 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
64 | /*A8*/ | ||
65 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
66 | /*B0*/ | ||
67 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
68 | /*B8*/ | ||
69 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
70 | /*C0*/ | ||
71 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
72 | /*C8*/ | ||
73 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
74 | /*D0*/ | ||
75 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
76 | /*D8*/ | ||
77 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
78 | /*E0 sz */ | ||
79 | 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
80 | /*E8*/ | ||
81 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
82 | /*F0*/ | ||
83 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
84 | /*F8*/ | ||
85 | 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF | ||
86 | }; | ||
87 | |||
88 | /* | ||
89 | * EBCDIC 037 -> ASCII (IBM PC 437) | ||
90 | */ | ||
91 | __u8 _ebcasc[256] = | ||
92 | { | ||
93 | /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ | ||
94 | 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, | ||
95 | /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ | ||
96 | 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
97 | /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC | ||
98 | -ENP ->LF */ | ||
99 | 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, | ||
100 | /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB | ||
101 | -IUS */ | ||
102 | 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||
103 | /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC | ||
104 | -INP */ | ||
105 | 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, | ||
106 | /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL | ||
107 | -SW */ | ||
108 | 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, | ||
109 | /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ | ||
110 | 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, | ||
111 | /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ | ||
112 | 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, | ||
113 | /* 0x40 SP RSP ä ---- */ | ||
114 | 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, | ||
115 | /* 0x48 . < ( + | */ | ||
116 | 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C, | ||
117 | /* 0x50 & ---- */ | ||
118 | 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, | ||
119 | /* 0x58 ß ! $ * ) ; */ | ||
120 | 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA, | ||
121 | /* 0x60 - / ---- Ä ---- ---- ---- */ | ||
122 | 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, | ||
123 | /* 0x68 ---- , % _ > ? */ | ||
124 | 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, | ||
125 | /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ | ||
126 | 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||
127 | /* 0x78 * ` : # @ ' = " */ | ||
128 | 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, | ||
129 | /* 0x80 * a b c d e f g */ | ||
130 | 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, | ||
131 | /* 0x88 h i ---- ---- ---- */ | ||
132 | 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, | ||
133 | /* 0x90 ° j k l m n o p */ | ||
134 | 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, | ||
135 | /* 0x98 q r ---- ---- */ | ||
136 | 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, | ||
137 | /* 0xA0 ~ s t u v w x */ | ||
138 | 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, | ||
139 | /* 0xA8 y z ---- ---- ---- ---- */ | ||
140 | 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, | ||
141 | /* 0xB0 ^ ---- § ---- */ | ||
142 | 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, | ||
143 | /* 0xB8 ---- [ ] ---- ---- ---- ---- */ | ||
144 | 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07, | ||
145 | /* 0xC0 { A B C D E F G */ | ||
146 | 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, | ||
147 | /* 0xC8 H I ---- ö ---- */ | ||
148 | 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, | ||
149 | /* 0xD0 } J K L M N O P */ | ||
150 | 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, | ||
151 | /* 0xD8 Q R ---- ü */ | ||
152 | 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, | ||
153 | /* 0xE0 \ S T U V W X */ | ||
154 | 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, | ||
155 | /* 0xE8 Y Z ---- Ö ---- ---- ---- */ | ||
156 | 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, | ||
157 | /* 0xF0 0 1 2 3 4 5 6 7 */ | ||
158 | 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, | ||
159 | /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ | ||
160 | 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 | ||
161 | }; | ||
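Lookup sketch (illustration only): the 256-byte tables are normally used through the ASCEBC()/EBCASC() helpers from <asm/ebcdic.h>, as in cpcmd.c above; a direct in-place conversion is simply a byte-wise table lookup.

	#include <asm/types.h>

	extern __u8 _ascebc[256];	/* ASCII -> EBCDIC 037, defined above */

	static void ascii_to_ebcdic(unsigned char *buf, int len)
	{
		int i;

		for (i = 0; i < len; i++)
			buf[i] = _ascebc[buf[i]];	/* translate each byte in place */
	}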
162 | |||
163 | |||
164 | /* | ||
165 | * ASCII (IBM PC 437) -> EBCDIC 500 | ||
166 | */ | ||
167 | __u8 _ascebc_500[256] = | ||
168 | { | ||
169 | /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ | ||
170 | 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, | ||
171 | /*08 BS HT LF VT FF CR SO SI */ | ||
172 | /* ->NL */ | ||
173 | 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
174 | /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ | ||
175 | 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, | ||
176 | /*18 CAN EM SUB ESC FS GS RS US */ | ||
177 | /* ->IGS ->IRS ->IUS */ | ||
178 | 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, | ||
179 | /*20 SP ! " # $ % & ' */ | ||
180 | 0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, | ||
181 | /*28 ( ) * + , - . / */ | ||
182 | 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, | ||
183 | /*30 0 1 2 3 4 5 6 7 */ | ||
184 | 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, | ||
185 | /*38 8 9 : ; < = > ? */ | ||
186 | 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, | ||
187 | /*40 @ A B C D E F G */ | ||
188 | 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, | ||
189 | /*48 H I J K L M N O */ | ||
190 | 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, | ||
191 | /*50 P Q R S T U V W */ | ||
192 | 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, | ||
193 | /*58 X Y Z [ \ ] ^ _ */ | ||
194 | 0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D, | ||
195 | /*60 ` a b c d e f g */ | ||
196 | 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, | ||
197 | /*68 h i j k l m n o */ | ||
198 | 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, | ||
199 | /*70 p q r s t u v w */ | ||
200 | 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, | ||
201 | /*78 x y z { | } ~ DL */ | ||
202 | 0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07, | ||
203 | /*80*/ | ||
204 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
205 | /*88*/ | ||
206 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
207 | /*90*/ | ||
208 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
209 | /*98*/ | ||
210 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
211 | /*A0*/ | ||
212 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
213 | /*A8*/ | ||
214 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
215 | /*B0*/ | ||
216 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
217 | /*B8*/ | ||
218 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
219 | /*C0*/ | ||
220 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
221 | /*C8*/ | ||
222 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
223 | /*D0*/ | ||
224 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
225 | /*D8*/ | ||
226 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
227 | /*E0 sz */ | ||
228 | 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
229 | /*E8*/ | ||
230 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
231 | /*F0*/ | ||
232 | 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, | ||
233 | /*F8*/ | ||
234 | 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF | ||
235 | }; | ||
236 | |||
237 | /* | ||
238 | * EBCDIC 500 -> ASCII (IBM PC 437) | ||
239 | */ | ||
240 | __u8 _ebcasc_500[256] = | ||
241 | { | ||
242 | /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ | ||
243 | 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, | ||
244 | /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ | ||
245 | 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
246 | /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC | ||
247 | -ENP ->LF */ | ||
248 | 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, | ||
249 | /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB | ||
250 | -IUS */ | ||
251 | 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||
252 | /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC | ||
253 | -INP */ | ||
254 | 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, | ||
255 | /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL | ||
256 | -SW */ | ||
257 | 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, | ||
258 | /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ | ||
259 | 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, | ||
260 | /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ | ||
261 | 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, | ||
262 | /* 0x40 SP RSP ä ---- */ | ||
263 | 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, | ||
264 | /* 0x48 [ . < ( + ! */ | ||
265 | 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21, | ||
266 | /* 0x50 & ---- */ | ||
267 | 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, | ||
268 | /* 0x58 ß ] $ * ) ; ^ */ | ||
269 | 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E, | ||
270 | /* 0x60 - / ---- Ä ---- ---- ---- */ | ||
271 | 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, | ||
272 | /* 0x68 ---- , % _ > ? */ | ||
273 | 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, | ||
274 | /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ | ||
275 | 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||
276 | /* 0x78 * ` : # @ ' = " */ | ||
277 | 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, | ||
278 | /* 0x80 * a b c d e f g */ | ||
279 | 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, | ||
280 | /* 0x88 h i ---- ---- ---- */ | ||
281 | 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, | ||
282 | /* 0x90 ° j k l m n o p */ | ||
283 | 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, | ||
284 | /* 0x98 q r ---- ---- */ | ||
285 | 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, | ||
286 | /* 0xA0 ~ s t u v w x */ | ||
287 | 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, | ||
288 | /* 0xA8 y z ---- ---- ---- ---- */ | ||
289 | 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, | ||
290 | /* 0xB0 ---- § ---- */ | ||
291 | 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, | ||
292 | /* 0xB8 ---- | ---- ---- ---- ---- */ | ||
293 | 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07, | ||
294 | /* 0xC0 { A B C D E F G */ | ||
295 | 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, | ||
296 | /* 0xC8 H I ---- ö ---- */ | ||
297 | 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, | ||
298 | /* 0xD0 } J K L M N O P */ | ||
299 | 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, | ||
300 | /* 0xD8 Q R ---- ü */ | ||
301 | 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, | ||
302 | /* 0xE0 \ S T U V W X */ | ||
303 | 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, | ||
304 | /* 0xE8 Y Z ---- Ö ---- ---- ---- */ | ||
305 | 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, | ||
306 | /* 0xF0 0 1 2 3 4 5 6 7 */ | ||
307 | 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, | ||
308 | /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ | ||
309 | 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 | ||
310 | }; | ||
311 | |||
312 | |||
313 | /* | ||
314 | * EBCDIC 037/500 conversion table: | ||
315 | * from upper to lower case | ||
316 | */ | ||
317 | __u8 _ebc_tolower[256] = | ||
318 | { | ||
319 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, | ||
320 | 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
321 | 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, | ||
322 | 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, | ||
323 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | ||
324 | 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, | ||
325 | 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, | ||
326 | 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, | ||
327 | 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, | ||
328 | 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, | ||
329 | 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, | ||
330 | 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, | ||
331 | 0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, | ||
332 | 0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, | ||
333 | 0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, | ||
334 | 0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, | ||
335 | 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, | ||
336 | 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, | ||
337 | 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, | ||
338 | 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F, | ||
339 | 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, | ||
340 | 0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF, | ||
341 | 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, | ||
342 | 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, | ||
343 | 0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, | ||
344 | 0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, | ||
345 | 0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, | ||
346 | 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF, | ||
347 | 0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, | ||
348 | 0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, | ||
349 | 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, | ||
350 | 0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF | ||
351 | }; | ||
352 | |||
353 | |||
354 | /* | ||
355 | * EBCDIC 037/500 conversion table: | ||
356 | * from lower to upper case | ||
357 | */ | ||
358 | __u8 _ebc_toupper[256] = | ||
359 | { | ||
360 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, | ||
361 | 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, | ||
362 | 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, | ||
363 | 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, | ||
364 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | ||
365 | 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, | ||
366 | 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, | ||
367 | 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, | ||
368 | 0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, | ||
369 | 0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, | ||
370 | 0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, | ||
371 | 0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, | ||
372 | 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, | ||
373 | 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, | ||
374 | 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, | ||
375 | 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, | ||
376 | 0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, | ||
377 | 0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F, | ||
378 | 0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, | ||
379 | 0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F, | ||
380 | 0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, | ||
381 | 0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, | ||
382 | 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, | ||
383 | 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, | ||
384 | 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, | ||
385 | 0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, | ||
386 | 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, | ||
387 | 0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, | ||
388 | 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, | ||
389 | 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, | ||
390 | 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, | ||
391 | 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF | ||
392 | }; | ||
393 | |||
394 | EXPORT_SYMBOL(_ascebc_500); | ||
395 | EXPORT_SYMBOL(_ebcasc_500); | ||
396 | EXPORT_SYMBOL(_ascebc); | ||
397 | EXPORT_SYMBOL(_ebcasc); | ||
398 | EXPORT_SYMBOL(_ebc_tolower); | ||
399 | EXPORT_SYMBOL(_ebc_toupper); | ||
400 | |||
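For readers new to these tables: each one is a plain 256-byte translation map, so converting a buffer between ASCII and EBCDIC, or between EBCDIC cases, is one table lookup per byte (codes with no printable ASCII equivalent map to 0x07, which is why 0x07 dominates the _ebcasc rows above). A minimal C sketch of that usage pattern, not part of the patch; names ending in _sketch are illustrative:

#include <stddef.h>

typedef unsigned char u8;

/* hypothetical helper: apply a 256-byte table such as _ebc_toupper in place */
static void codepage_convert_sketch(const u8 table[256], u8 *buf, size_t len)
{
        while (len--) {
                *buf = table[*buf];
                buf++;
        }
}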
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S new file mode 100644 index 000000000000..c0e09b33febe --- /dev/null +++ b/arch/s390/kernel/entry.S | |||
@@ -0,0 +1,868 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/entry.S | ||
3 | * S390 low-level entry points. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Hartmut Penner (hp@de.ibm.com), | ||
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
10 | */ | ||
11 | |||
12 | #include <linux/sys.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <linux/config.h> | ||
15 | #include <asm/cache.h> | ||
16 | #include <asm/lowcore.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/offsets.h> | ||
21 | #include <asm/unistd.h> | ||
22 | #include <asm/page.h> | ||
23 | |||
24 | /* | ||
25 | * Stack layout for the system_call stack entry. | ||
26 | * The first few entries are identical to the user_regs_struct. | ||
27 | */ | ||
28 | SP_PTREGS = STACK_FRAME_OVERHEAD | ||
29 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | ||
30 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | ||
31 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | ||
32 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4 | ||
33 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | ||
34 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12 | ||
35 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | ||
36 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20 | ||
37 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | ||
38 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28 | ||
39 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | ||
40 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36 | ||
41 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | ||
42 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44 | ||
43 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | ||
44 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | ||
45 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | ||
46 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | ||
47 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | ||
48 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | ||
49 | SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP | ||
50 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | ||
51 | |||
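The SP_* symbols are plain byte offsets: SAVE_ALL builds a pt_regs image STACK_FRAME_OVERHEAD bytes above %r15, and on 31-bit each GPR slot is 4 bytes wide (hence the +4 steps above). A hedged C sketch of how such an offset is consumed; the constants here are illustrative placeholders, not the real asm/offsets.h values:

#include <stdint.h>

#define STACK_FRAME_OVERHEAD_SK 96      /* assumed 31-bit frame overhead      */
#define PT_GPRS_SK              8       /* assumed __PT_GPRS offset           */

/* SP_Rn(%r15) addresses the n-th saved 4-byte GPR in the register image
 * that SAVE_ALL stores just above the stack frame overhead.               */
static uint32_t read_saved_gpr_sketch(const uint8_t *ksp, unsigned int n)
{
        uint32_t val;

        __builtin_memcpy(&val, ksp + STACK_FRAME_OVERHEAD_SK + PT_GPRS_SK + 4 * n,
                         sizeof(val));   /* memcpy avoids alignment assumptions */
        return val;
}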
52 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | ||
53 | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) | ||
54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED) | ||
55 | |||
56 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | ||
57 | STACK_SIZE = 1 << STACK_SHIFT | ||
58 | |||
59 | #define BASED(name) name-system_call(%r13) | ||
60 | |||
61 | /* | ||
62 | * Register usage in interrupt handlers: | ||
63 | * R9 - pointer to current task structure | ||
64 | * R13 - pointer to literal pool | ||
65 | * R14 - return register for function calls | ||
66 | * R15 - kernel stack pointer | ||
67 | */ | ||
68 | |||
69 | .macro STORE_TIMER lc_offset | ||
70 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
71 | stpt \lc_offset | ||
72 | #endif | ||
73 | .endm | ||
74 | |||
75 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
76 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | ||
77 | lm %r10,%r11,\lc_from | ||
78 | sl %r10,\lc_to | ||
79 | sl %r11,\lc_to+4 | ||
80 | bc 3,BASED(0f) | ||
81 | sl %r10,BASED(.Lc_1) | ||
82 | 0: al %r10,\lc_sum | ||
83 | al %r11,\lc_sum+4 | ||
84 | bc 12,BASED(1f) | ||
85 | al %r10,BASED(.Lc_1) | ||
86 | 1: stm %r10,%r11,\lc_sum | ||
87 | .endm | ||
88 | #endif | ||
89 | |||
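On 31-bit the lowcore CPU-timer fields are 64-bit values, so UPDATE_VTIME has to work in two 32-bit halves with explicit borrow/carry fix-ups on the high word (the bc/sl/al pairs above). What it computes is simply sum += from - to. A hedged C sketch with the double-word split made explicit; struct and function names are illustrative:

#include <stdint.h>

/* a 64-bit lowcore timer value as the 31-bit macro sees it: two 32-bit words */
struct dw_sketch {
        uint32_t hi;
        uint32_t lo;
};

/* equivalent of UPDATE_VTIME from,to,sum  =>  *sum += *from - *to */
static void update_vtime_sketch(const struct dw_sketch *from,
                                const struct dw_sketch *to,
                                struct dw_sketch *sum)
{
        uint64_t f = ((uint64_t)from->hi << 32) | from->lo;
        uint64_t t = ((uint64_t)to->hi << 32) | to->lo;
        uint64_t s = ((uint64_t)sum->hi << 32) | sum->lo;

        s += f - t;                     /* the assembly does this with sl/al plus
                                           borrow/carry fix-up on the high word  */
        sum->hi = (uint32_t)(s >> 32);
        sum->lo = (uint32_t)s;
}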
90 | .macro SAVE_ALL_BASE savearea | ||
91 | stm %r12,%r15,\savearea | ||
92 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | ||
93 | .endm | ||
94 | |||
95 | .macro SAVE_ALL psworg,savearea,sync | ||
96 | la %r12,\psworg | ||
97 | .if \sync | ||
98 | tm \psworg+1,0x01 # test problem state bit | ||
99 | bz BASED(2f) # skip stack setup save | ||
100 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
101 | .else | ||
102 | tm \psworg+1,0x01 # test problem state bit | ||
103 | bnz BASED(1f) # from user -> load async stack | ||
104 | clc \psworg+4(4),BASED(.Lcritical_end) | ||
105 | bhe BASED(0f) | ||
106 | clc \psworg+4(4),BASED(.Lcritical_start) | ||
107 | bl BASED(0f) | ||
108 | l %r14,BASED(.Lcleanup_critical) | ||
109 | basr %r14,%r14 | ||
110 | tm 0(%r12),0x01 # retest problem state after cleanup | ||
111 | bnz BASED(1f) | ||
112 | 0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? | ||
113 | slr %r14,%r15 | ||
114 | sra %r14,STACK_SHIFT | ||
115 | be BASED(2f) | ||
116 | 1: l %r15,__LC_ASYNC_STACK | ||
117 | .endif | ||
118 | #ifdef CONFIG_CHECK_STACK | ||
119 | b BASED(3f) | ||
120 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | ||
121 | bz BASED(stack_overflow) | ||
122 | 3: | ||
123 | #endif | ||
124 | 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw | ||
125 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | ||
126 | la %r12,\psworg | ||
127 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | ||
128 | icm %r12,12,__LC_SVC_ILC | ||
129 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | ||
130 | st %r12,SP_ILC(%r15) | ||
131 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack | ||
132 | la %r12,0 | ||
133 | st %r12,__SF_BACKCHAIN(%r15) # clear back chain | ||
134 | .endm | ||
135 | |||
136 | .macro RESTORE_ALL sync | ||
137 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore | ||
138 | .if !\sync | ||
139 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit | ||
140 | .endif | ||
141 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | ||
142 | STORE_TIMER __LC_EXIT_TIMER | ||
143 | lpsw __LC_RETURN_PSW # back to caller | ||
144 | .endm | ||
145 | |||
146 | /* | ||
147 | * Scheduler resume function, called by switch_to | ||
148 | * gpr2 = (task_struct *) prev | ||
149 | * gpr3 = (task_struct *) next | ||
150 | * Returns: | ||
151 | * gpr2 = prev | ||
152 | */ | ||
153 | .globl __switch_to | ||
154 | __switch_to: | ||
155 | basr %r1,0 | ||
156 | __switch_to_base: | ||
157 | tm __THREAD_per(%r3),0xe8 # new process is using per ? | ||
158 | bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine | ||
159 | stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff | ||
160 | clc __THREAD_per(12,%r3),__SF_EMPTY(%r15) | ||
161 | be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's | ||
162 | lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't | ||
163 | __switch_to_noper: | ||
164 | stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task | ||
165 | st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp | ||
166 | l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp | ||
167 | lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task | ||
168 | st %r3,__LC_CURRENT # __LC_CURRENT = current task struct | ||
169 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | ||
170 | l %r3,__THREAD_info(%r3) # load thread_info from task struct | ||
171 | st %r3,__LC_THREAD_INFO | ||
172 | ahi %r3,STACK_SIZE | ||
173 | st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack | ||
174 | br %r14 | ||
175 | |||
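In C terms __switch_to saves the outgoing task's kernel stack pointer, switches to the incoming task's stack, and re-points the per-CPU lowcore at the new task. A hedged sketch of that bookkeeping; the register save/restore and PER control-register reload can only be done in assembly, and all type names here are illustrative stand-ins:

struct thread_sketch  { unsigned long ksp; };         /* kernel stack pointer  */
struct task_sketch    { struct thread_sketch thread; void *thread_info; };
struct lowcore_sketch {
        struct task_sketch *current_task;             /* __LC_CURRENT          */
        void *thread_info;                            /* __LC_THREAD_INFO      */
        unsigned long kernel_stack;                   /* __LC_KERNEL_STACK     */
};

#define STACK_SIZE_SK (2UL * 4096)   /* PAGE_SIZE << THREAD_ORDER, value illustrative */

static struct lowcore_sketch lowcore_sk;

static void switch_to_sketch(struct task_sketch *prev, struct task_sketch *next,
                             unsigned long current_sp)
{
        prev->thread.ksp = current_sp;                /* st %r15,__THREAD_ksp(%r2) */
        /* ... %r15 is then reloaded from next->thread.ksp, %r6-%r15 restored ... */
        lowcore_sk.current_task = next;               /* st %r3,__LC_CURRENT       */
        lowcore_sk.thread_info  = next->thread_info;  /* st %r3,__LC_THREAD_INFO   */
        lowcore_sk.kernel_stack =                     /* new __LC_KERNEL_STACK     */
                (unsigned long)next->thread_info + STACK_SIZE_SK;
}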
176 | __critical_start: | ||
177 | /* | ||
178 | * SVC interrupt handler routine. System calls are synchronous events and | ||
179 | * are executed with interrupts enabled. | ||
180 | */ | ||
181 | |||
182 | .globl system_call | ||
183 | system_call: | ||
184 | STORE_TIMER __LC_SYNC_ENTER_TIMER | ||
185 | sysc_saveall: | ||
186 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
187 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
188 | lh %r7,0x8a # get svc number from lowcore | ||
189 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
190 | sysc_vtime: | ||
191 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
192 | bz BASED(sysc_do_svc) | ||
193 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
194 | sysc_stime: | ||
195 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
196 | sysc_update: | ||
197 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
198 | #endif | ||
199 | sysc_do_svc: | ||
200 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
201 | sla %r7,2 # *4 and test for svc 0 | ||
202 | bnz BASED(sysc_nr_ok) # svc number > 0 | ||
203 | # svc 0: system call number in %r1 | ||
204 | cl %r1,BASED(.Lnr_syscalls) | ||
205 | bnl BASED(sysc_nr_ok) | ||
206 | lr %r7,%r1 # copy svc number to %r7 | ||
207 | sla %r7,2 # *4 | ||
208 | sysc_nr_ok: | ||
209 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | ||
210 | sysc_do_restart: | ||
211 | tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | ||
212 | l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr. | ||
213 | bnz BASED(sysc_tracesys) | ||
214 | basr %r14,%r8 # call sys_xxxx | ||
215 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) | ||
216 | # ATTENTION: check sys_execve_glue before | ||
217 | # changing anything here !! | ||
218 | |||
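The dispatch above turns the SVC interruption code into a table index: the number is shifted left by 2 because each sys_call_table entry is a 4-byte .long (see the SYSCALL macro at the end of the file), and svc 0 takes the real number from %r1. As C, the dispatch amounts to the following sketch; the _sk names and the tiny table size are stand-ins, not part of the patch:

typedef long (*syscall_fn_sk)(unsigned long, unsigned long, unsigned long,
                              unsigned long, unsigned long);

static long sys_ni_syscall_sk(unsigned long a1, unsigned long a2, unsigned long a3,
                              unsigned long a4, unsigned long a5)
{
        (void)a1; (void)a2; (void)a3; (void)a4; (void)a5;
        return -38;                             /* -ENOSYS */
}

#define NR_SYSCALLS_SK 16                       /* tiny stand-in table size      */
static syscall_fn_sk sys_call_table_sk[NR_SYSCALLS_SK] = {
        sys_ni_syscall_sk                       /* in a real build every slot is
                                                   filled from syscalls.S        */
};

static long do_svc_sketch(unsigned long svc_nr, unsigned long r1,
                          unsigned long a1, unsigned long a2, unsigned long a3,
                          unsigned long a4, unsigned long a5)
{
        /* svc 0: the real number was passed in %r1 (cl %r1,.Lnr_syscalls) */
        if (svc_nr == 0 && r1 < NR_SYSCALLS_SK)
                svc_nr = r1;
        /* the assembly keeps svc_nr*4 in %r7 because entries are 4-byte
         * .longs; in C the scaling is implicit in the array index        */
        if (svc_nr >= NR_SYSCALLS_SK || !sys_call_table_sk[svc_nr])
                return -38;
        return sys_call_table_sk[svc_nr](a1, a2, a3, a4, a5);
}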
219 | sysc_return: | ||
220 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
221 | bno BASED(sysc_leave) | ||
222 | tm __TI_flags+3(%r9),_TIF_WORK_SVC | ||
223 | bnz BASED(sysc_work) # there is work to do (signals etc.) | ||
224 | sysc_leave: | ||
225 | RESTORE_ALL 1 | ||
226 | |||
227 | # | ||
228 | # recheck if there is more work to do | ||
229 | # | ||
230 | sysc_work_loop: | ||
231 | tm __TI_flags+3(%r9),_TIF_WORK_SVC | ||
232 | bz BASED(sysc_leave) # there is no work to do | ||
233 | # | ||
234 | # One of the work bits is on. Find out which one. | ||
235 | # | ||
236 | sysc_work: | ||
237 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | ||
238 | bo BASED(sysc_reschedule) | ||
239 | tm __TI_flags+3(%r9),_TIF_SIGPENDING | ||
240 | bo BASED(sysc_sigpending) | ||
241 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC | ||
242 | bo BASED(sysc_restart) | ||
243 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | ||
244 | bo BASED(sysc_singlestep) | ||
245 | b BASED(sysc_leave) | ||
246 | |||
247 | # | ||
248 | # _TIF_NEED_RESCHED is set, call schedule | ||
249 | # | ||
250 | sysc_reschedule: | ||
251 | l %r1,BASED(.Lschedule) | ||
252 | la %r14,BASED(sysc_work_loop) | ||
253 | br %r1 # call scheduler | ||
254 | |||
255 | # | ||
256 | # _TIF_SIGPENDING is set, call do_signal | ||
257 | # | ||
258 | sysc_sigpending: | ||
259 | ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | ||
260 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
261 | sr %r3,%r3 # clear *oldset | ||
262 | l %r1,BASED(.Ldo_signal) | ||
263 | basr %r14,%r1 # call do_signal | ||
264 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC | ||
265 | bo BASED(sysc_restart) | ||
266 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | ||
267 | bo BASED(sysc_singlestep) | ||
268 | b BASED(sysc_leave) # out of here, do NOT recheck | ||
269 | |||
270 | # | ||
271 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
272 | # | ||
273 | sysc_restart: | ||
274 | ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
275 | l %r7,SP_R2(%r15) # load new svc number | ||
276 | sla %r7,2 | ||
277 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
278 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
279 | b BASED(sysc_do_restart) # restart svc | ||
280 | |||
281 | # | ||
282 | # _TIF_SINGLE_STEP is set, call do_single_step | ||
283 | # | ||
284 | sysc_singlestep: | ||
285 | ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | ||
286 | mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check | ||
287 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
288 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | ||
289 | la %r14,BASED(sysc_return) # load adr. of system return | ||
290 | br %r1 # branch to do_single_step | ||
291 | |||
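sysc_work/sysc_work_loop implement the usual "handle pending work before returning to user space" pattern: test the thread_info flags in priority order, handle one item, and only the reschedule path loops back for a recheck. Roughly, in C; the bit positions and helper stubs are illustrative, the flag names are the real TIF bits:

#define TIF_SIGPENDING_SK   (1UL << 1)          /* bit positions illustrative */
#define TIF_NEED_RESCHED_SK (1UL << 2)
#define TIF_RESTART_SVC_SK  (1UL << 3)
#define TIF_SINGLE_STEP_SK  (1UL << 4)

static void schedule_sk(void)       { /* stand-in for schedule()       */ }
static void do_signal_sk(void)      { /* stand-in for do_signal()      */ }
static void restart_svc_sk(void)    { /* re-drive the interrupted svc  */ }
static void do_single_step_sk(void) { /* stand-in for do_single_step() */ }

static void sysc_work_sketch(unsigned long *ti_flags)
{
        /* sysc_reschedule returns to sysc_work_loop, so rescheduling is
         * retried while the flag remains set ...                        */
        while (*ti_flags & TIF_NEED_RESCHED_SK)
                schedule_sk();
        /* ... whereas the other paths leave without rechecking
         * ("out of here, do NOT recheck")                               */
        if (*ti_flags & TIF_SIGPENDING_SK)
                do_signal_sk();
        else if (*ti_flags & TIF_RESTART_SVC_SK)
                restart_svc_sk();
        else if (*ti_flags & TIF_SINGLE_STEP_SK)
                do_single_step_sk();
}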
292 | __critical_end: | ||
293 | |||
294 | # | ||
295 | # call trace before and after sys_call | ||
296 | # | ||
297 | sysc_tracesys: | ||
298 | l %r1,BASED(.Ltrace) | ||
299 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
300 | la %r3,0 | ||
301 | srl %r7,2 | ||
302 | st %r7,SP_R2(%r15) | ||
303 | basr %r14,%r1 | ||
304 | clc SP_R2(4,%r15),BASED(.Lnr_syscalls) | ||
305 | bnl BASED(sysc_tracenogo) | ||
306 | l %r7,SP_R2(%r15) # strace might have changed the | ||
307 | sll %r7,2 # system call | ||
308 | l %r8,sys_call_table-system_call(%r7,%r13) | ||
309 | sysc_tracego: | ||
310 | lm %r3,%r6,SP_R3(%r15) | ||
311 | l %r2,SP_ORIG_R2(%r15) | ||
312 | basr %r14,%r8 # call sys_xxx | ||
313 | st %r2,SP_R2(%r15) # store return value | ||
314 | sysc_tracenogo: | ||
315 | tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | ||
316 | bz BASED(sysc_return) | ||
317 | l %r1,BASED(.Ltrace) | ||
318 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
319 | la %r3,1 | ||
320 | la %r14,BASED(sysc_return) | ||
321 | br %r1 | ||
322 | |||
323 | # | ||
324 | # a new process exits the kernel with ret_from_fork | ||
325 | # | ||
326 | .globl ret_from_fork | ||
327 | ret_from_fork: | ||
328 | l %r13,__LC_SVC_NEW_PSW+4 | ||
329 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
330 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | ||
331 | bo BASED(0f) | ||
332 | st %r15,SP_R15(%r15) # store stack pointer for new kthread | ||
333 | 0: l %r1,BASED(.Lschedtail) | ||
334 | basr %r14,%r1 | ||
335 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
336 | b BASED(sysc_return) | ||
337 | |||
338 | # | ||
339 | # clone, fork, vfork, exec and sigreturn need glue, | ||
340 | # because they all expect pt_regs as a parameter, | ||
341 | # but are called with different parameters. | ||
342 | # return-address is set up above | ||
343 | # | ||
344 | sys_clone_glue: | ||
345 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
346 | l %r1,BASED(.Lclone) | ||
347 | br %r1 # branch to sys_clone | ||
348 | |||
349 | sys_fork_glue: | ||
350 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
351 | l %r1,BASED(.Lfork) | ||
352 | br %r1 # branch to sys_fork | ||
353 | |||
354 | sys_vfork_glue: | ||
355 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
356 | l %r1,BASED(.Lvfork) | ||
357 | br %r1 # branch to sys_vfork | ||
358 | |||
359 | sys_execve_glue: | ||
360 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
361 | l %r1,BASED(.Lexecve) | ||
362 | lr %r12,%r14 # save return address | ||
363 | basr %r14,%r1 # call sys_execve | ||
364 | ltr %r2,%r2 # check if execve failed | ||
365 | bnz 0(%r12) # it did fail -> store result in gpr2 | ||
366 | b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 | ||
367 | # in system_call/sysc_tracesys | ||
368 | |||
369 | sys_sigreturn_glue: | ||
370 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
371 | l %r1,BASED(.Lsigreturn) | ||
372 | br %r1 # branch to sys_sigreturn | ||
373 | |||
374 | sys_rt_sigreturn_glue: | ||
375 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
376 | l %r1,BASED(.Lrt_sigreturn) | ||
377 | br %r1 # branch to sys_rt_sigreturn | ||
378 | |||
379 | # | ||
380 | # sigsuspend and rt_sigsuspend need pt_regs as an additional | ||
381 | # parameter and they have to skip the store of %r2 into the | ||
382 | # user register %r2 because the return value was set in | ||
383 | # sigsuspend and rt_sigsuspend already and must not be overwritten! | ||
384 | # | ||
385 | |||
386 | sys_sigsuspend_glue: | ||
387 | lr %r5,%r4 # move mask back | ||
388 | lr %r4,%r3 # move history1 parameter | ||
389 | lr %r3,%r2 # move history0 parameter | ||
390 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
391 | l %r1,BASED(.Lsigsuspend) | ||
392 | la %r14,4(%r14) # skip store of return value | ||
393 | br %r1 # branch to sys_sigsuspend | ||
394 | |||
395 | sys_rt_sigsuspend_glue: | ||
396 | lr %r4,%r3 # move sigsetsize parameter | ||
397 | lr %r3,%r2 # move unewset parameter | ||
398 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
399 | l %r1,BASED(.Lrt_sigsuspend) | ||
400 | la %r14,4(%r14) # skip store of return value | ||
401 | br %r1 # branch to sys_rt_sigsuspend | ||
402 | |||
403 | sys_sigaltstack_glue: | ||
404 | la %r4,SP_PTREGS(%r15) # load pt_regs as parameter | ||
405 | l %r1,BASED(.Lsigaltstack) | ||
406 | br %r1 # branch to sys_sigaltstack | ||
407 | |||
408 | |||
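The glue stubs exist because the C implementations of clone, fork, vfork, execve and sigreturn on s390 want the saved register area rather than the raw svc argument registers, so each stub just points %r2 (the first C argument) at the pt_regs image on the kernel stack and tail-calls the real function. Conceptually, as a hedged sketch with simplified stand-in types and prototypes:

struct pt_regs_sk { unsigned long gprs[16]; };        /* minimal stand-in        */

static long sys_clone_sk(struct pt_regs_sk *regs)     /* stand-in for sys_clone  */
{
        (void)regs;
        return 0;
}

static long sys_clone_glue_sketch(struct pt_regs_sk *saved_regs)
{
        /* la %r2,SP_PTREGS(%r15): the first C argument becomes the saved
         * register area rather than the caller's first svc argument      */
        return sys_clone_sk(saved_regs);
}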
409 | /* | ||
410 | * Program check handler routine | ||
411 | */ | ||
412 | |||
413 | .globl pgm_check_handler | ||
414 | pgm_check_handler: | ||
415 | /* | ||
416 | * First we need to check for a special case: | ||
417 | * Single stepping an instruction that disables the PER event mask will | ||
418 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | ||
419 | * For a single stepped SVC the program check handler gets control after | ||
420 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | ||
421 | * then handle the PER event. Therefore we update the SVC old PSW to point | ||
422 | * to the pgm_check_handler and branch to the SVC handler after we checked | ||
423 | * if we have to load the kernel stack register. | ||
424 | * For every other possible cause of a PER event without the PER mask set | ||
425 | * we just ignore the PER event (FIXME: is there anything we have to do | ||
426 | * for LPSW?). | ||
427 | */ | ||
428 | STORE_TIMER __LC_SYNC_ENTER_TIMER | ||
429 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
430 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | ||
431 | bnz BASED(pgm_per) # got per exception -> special case | ||
432 | SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 | ||
433 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
434 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
435 | bz BASED(pgm_no_vtime) | ||
436 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
437 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
438 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
439 | pgm_no_vtime: | ||
440 | #endif | ||
441 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
442 | l %r3,__LC_PGM_ILC # load program interruption code | ||
443 | la %r8,0x7f | ||
444 | nr %r8,%r3 | ||
445 | pgm_do_call: | ||
446 | l %r7,BASED(.Ljump_table) | ||
447 | sll %r8,2 | ||
448 | l %r7,0(%r8,%r7) # load address of handler routine | ||
449 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
450 | la %r14,BASED(sysc_return) | ||
451 | br %r7 # branch to interrupt-handler | ||
452 | |||
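Outside the PER special case the program check handler is a table dispatch: the low 7 bits of the program interruption code select an entry in pgm_check_table and the handler is called with the address of the register save area. In C this is roughly the following sketch; the handler signature and stand-in names are assumptions for illustration only:

struct pt_regs_pgm_sk { unsigned long gprs[16]; };    /* minimal stand-in */

typedef void (*pgm_handler_sk)(struct pt_regs_pgm_sk *regs, long pgm_int_code);

static void default_trap_sk(struct pt_regs_pgm_sk *regs, long pgm_int_code)
{
        (void)regs; (void)pgm_int_code;               /* stand-in handler */
}

static pgm_handler_sk pgm_check_table_sk[128] = { default_trap_sk };

static void pgm_do_call_sketch(struct pt_regs_pgm_sk *regs, long pgm_int_code)
{
        /* la %r8,0x7f ; nr %r8,%r3: keep only the 7-bit exception index   */
        unsigned int index = (unsigned int)pgm_int_code & 0x7f;
        pgm_handler_sk handler = pgm_check_table_sk[index];

        /* the 31-bit code scales by 4 (sll %r8,2), the 64-bit code by 8
         * (sll %r8,3), because the table holds 4- resp. 8-byte pointers   */
        if (handler)
                handler(regs, pgm_int_code);
}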
453 | # | ||
454 | # handle per exception | ||
455 | # | ||
456 | pgm_per: | ||
457 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on | ||
458 | bnz BASED(pgm_per_std) # ok, normal per event from user space | ||
459 | # ok it's one of the special cases, now we need to find out which one | ||
460 | clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW | ||
461 | be BASED(pgm_svcper) | ||
462 | # no interesting special case, ignore PER event | ||
463 | lm %r12,%r15,__LC_SAVE_AREA | ||
464 | lpsw 0x28 | ||
465 | |||
466 | # | ||
467 | # Normal per exception | ||
468 | # | ||
469 | pgm_per_std: | ||
470 | SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 | ||
471 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
472 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
473 | bz BASED(pgm_no_vtime2) | ||
474 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
475 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
476 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
477 | pgm_no_vtime2: | ||
478 | #endif | ||
479 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
480 | l %r1,__TI_task(%r9) | ||
481 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | ||
482 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS | ||
483 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | ||
484 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
485 | l %r3,__LC_PGM_ILC # load program interruption code | ||
486 | la %r8,0x7f | ||
487 | nr %r8,%r3 # clear per-event-bit and ilc | ||
488 | be BASED(sysc_return) # only per or per+check ? | ||
489 | b BASED(pgm_do_call) | ||
490 | |||
491 | # | ||
492 | # it was a single stepped SVC that is causing all the trouble | ||
493 | # | ||
494 | pgm_svcper: | ||
495 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
496 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
497 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
498 | bz BASED(pgm_no_vtime3) | ||
499 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
500 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
501 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
502 | pgm_no_vtime3: | ||
503 | #endif | ||
504 | lh %r7,0x8a # get svc number from lowcore | ||
505 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
506 | l %r1,__TI_task(%r9) | ||
507 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | ||
508 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS | ||
509 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | ||
510 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
511 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
512 | b BASED(sysc_do_svc) | ||
513 | |||
514 | /* | ||
515 | * IO interrupt handler routine | ||
516 | */ | ||
517 | |||
518 | .globl io_int_handler | ||
519 | io_int_handler: | ||
520 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
521 | stck __LC_INT_CLOCK | ||
522 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | ||
523 | SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0 | ||
524 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
525 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
526 | bz BASED(io_no_vtime) | ||
527 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
528 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
529 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
530 | io_no_vtime: | ||
531 | #endif | ||
532 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
533 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | ||
534 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
535 | basr %r14,%r1 # branch to standard irq handler | ||
536 | |||
537 | io_return: | ||
538 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
539 | #ifdef CONFIG_PREEMPT | ||
540 | bno BASED(io_preempt) # no -> check for preemptive scheduling | ||
541 | #else | ||
542 | bno BASED(io_leave) # no-> skip resched & signal | ||
543 | #endif | ||
544 | tm __TI_flags+3(%r9),_TIF_WORK_INT | ||
545 | bnz BASED(io_work) # there is work to do (signals etc.) | ||
546 | io_leave: | ||
547 | RESTORE_ALL 0 | ||
548 | |||
549 | #ifdef CONFIG_PREEMPT | ||
550 | io_preempt: | ||
551 | icm %r0,15,__TI_precount(%r9) | ||
552 | bnz BASED(io_leave) | ||
553 | l %r1,SP_R15(%r15) | ||
554 | s %r1,BASED(.Lc_spsize) | ||
555 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | ||
556 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | ||
557 | lr %r15,%r1 | ||
558 | io_resume_loop: | ||
559 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | ||
560 | bno BASED(io_leave) | ||
561 | mvc __TI_precount(4,%r9),BASED(.Lc_pactive) | ||
562 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
563 | l %r1,BASED(.Lschedule) | ||
564 | basr %r14,%r1 # call schedule | ||
565 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
566 | xc __TI_precount(4,%r9),__TI_precount(%r9) | ||
567 | b BASED(io_resume_loop) | ||
568 | #endif | ||
569 | |||
570 | # | ||
571 | # switch to kernel stack, then check the TIF bits | ||
572 | # | ||
573 | io_work: | ||
574 | l %r1,__LC_KERNEL_STACK | ||
575 | s %r1,BASED(.Lc_spsize) | ||
576 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | ||
577 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain | ||
578 | lr %r15,%r1 | ||
579 | # | ||
580 | # One of the work bits is on. Find out which one. | ||
581 | # Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED | ||
582 | # | ||
583 | io_work_loop: | ||
584 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | ||
585 | bo BASED(io_reschedule) | ||
586 | tm __TI_flags+3(%r9),_TIF_SIGPENDING | ||
587 | bo BASED(io_sigpending) | ||
588 | b BASED(io_leave) | ||
589 | |||
590 | # | ||
591 | # _TIF_NEED_RESCHED is set, call schedule | ||
592 | # | ||
593 | io_reschedule: | ||
594 | l %r1,BASED(.Lschedule) | ||
595 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
596 | basr %r14,%r1 # call scheduler | ||
597 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
598 | tm __TI_flags+3(%r9),_TIF_WORK_INT | ||
599 | bz BASED(io_leave) # there is no work to do | ||
600 | b BASED(io_work_loop) | ||
601 | |||
602 | # | ||
603 | # _TIF_SIGPENDING is set, call do_signal | ||
604 | # | ||
605 | io_sigpending: | ||
606 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
607 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
608 | sr %r3,%r3 # clear *oldset | ||
609 | l %r1,BASED(.Ldo_signal) | ||
610 | basr %r14,%r1 # call do_signal | ||
611 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
612 | b BASED(io_leave) # out of here, do NOT recheck | ||
613 | |||
614 | /* | ||
615 | * External interrupt handler routine | ||
616 | */ | ||
617 | |||
618 | .globl ext_int_handler | ||
619 | ext_int_handler: | ||
620 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
621 | stck __LC_INT_CLOCK | ||
622 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | ||
623 | SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0 | ||
624 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
625 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
626 | bz BASED(ext_no_vtime) | ||
627 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
628 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
629 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
630 | ext_no_vtime: | ||
631 | #endif | ||
632 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
633 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
634 | lh %r3,__LC_EXT_INT_CODE # get interruption code | ||
635 | l %r1,BASED(.Ldo_extint) | ||
636 | basr %r14,%r1 | ||
637 | b BASED(io_return) | ||
638 | |||
639 | /* | ||
640 | * Machine check handler routines | ||
641 | */ | ||
642 | |||
643 | .globl mcck_int_handler | ||
644 | mcck_int_handler: | ||
645 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
646 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | ||
647 | SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0 | ||
648 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
649 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
650 | bz BASED(mcck_no_vtime) | ||
651 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
652 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
653 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
654 | mcck_no_vtime: | ||
655 | #endif | ||
656 | l %r1,BASED(.Ls390_mcck) | ||
657 | basr %r14,%r1 # call machine check handler | ||
658 | mcck_return: | ||
659 | RESTORE_ALL 0 | ||
660 | |||
661 | #ifdef CONFIG_SMP | ||
662 | /* | ||
663 | * Restart interruption handler, kick starter for additional CPUs | ||
664 | */ | ||
665 | .globl restart_int_handler | ||
666 | restart_int_handler: | ||
667 | l %r15,__LC_SAVE_AREA+60 # load ksp | ||
668 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs | ||
669 | lam %a0,%a15,__LC_AREGS_SAVE_AREA | ||
670 | lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone | ||
671 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | ||
672 | basr %r14,0 | ||
673 | l %r14,restart_addr-.(%r14) | ||
674 | br %r14 # branch to start_secondary | ||
675 | restart_addr: | ||
676 | .long start_secondary | ||
677 | #else | ||
678 | /* | ||
679 | * If we do not run with SMP enabled, let the new CPU crash ... | ||
680 | */ | ||
681 | .globl restart_int_handler | ||
682 | restart_int_handler: | ||
683 | basr %r1,0 | ||
684 | restart_base: | ||
685 | lpsw restart_crash-restart_base(%r1) | ||
686 | .align 8 | ||
687 | restart_crash: | ||
688 | .long 0x000a0000,0x00000000 | ||
689 | restart_go: | ||
690 | #endif | ||
691 | |||
692 | #ifdef CONFIG_CHECK_STACK | ||
693 | /* | ||
694 | * The synchronous or the asynchronous stack overflowed. We are dead. | ||
695 | * No need to properly save the registers, we are going to panic anyway. | ||
696 | * Setup a pt_regs so that show_trace can provide a good call trace. | ||
697 | */ | ||
698 | stack_overflow: | ||
699 | l %r15,__LC_PANIC_STACK # change to panic stack | ||
700 | sl %r15,BASED(.Lc_spsize) | ||
701 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | ||
702 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | ||
703 | la %r1,__LC_SAVE_AREA | ||
704 | ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? | ||
705 | be BASED(0f) | ||
706 | ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? | ||
707 | be BASED(0f) | ||
708 | la %r1,__LC_SAVE_AREA+16 | ||
709 | 0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack | ||
710 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain | ||
711 | l %r1,BASED(1f) # branch to kernel_stack_overflow | ||
712 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
713 | br %r1 | ||
714 | 1: .long kernel_stack_overflow | ||
715 | #endif | ||
716 | |||
717 | cleanup_table_system_call: | ||
718 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 | ||
719 | cleanup_table_sysc_return: | ||
720 | .long sysc_return + 0x80000000, sysc_leave + 0x80000000 | ||
721 | cleanup_table_sysc_leave: | ||
722 | .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000 | ||
723 | cleanup_table_sysc_work_loop: | ||
724 | .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000 | ||
725 | |||
726 | cleanup_critical: | ||
727 | clc 4(4,%r12),BASED(cleanup_table_system_call) | ||
728 | bl BASED(0f) | ||
729 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) | ||
730 | bl BASED(cleanup_system_call) | ||
731 | 0: | ||
732 | clc 4(4,%r12),BASED(cleanup_table_sysc_return) | ||
733 | bl BASED(0f) | ||
734 | clc 4(4,%r12),BASED(cleanup_table_sysc_return+4) | ||
735 | bl BASED(cleanup_sysc_return) | ||
736 | 0: | ||
737 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave) | ||
738 | bl BASED(0f) | ||
739 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) | ||
740 | bl BASED(cleanup_sysc_leave) | ||
741 | 0: | ||
742 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) | ||
743 | bl BASED(0f) | ||
744 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) | ||
745 | bl BASED(cleanup_sysc_leave) | ||
746 | 0: | ||
747 | br %r14 | ||
748 | |||
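cleanup_critical is invoked from SAVE_ALL when an asynchronous interrupt lands inside the __critical_start/__critical_end region: it compares the interrupted PSW address against the per-fragment ranges in the cleanup_table_* pairs and runs the matching fix-up so the interrupt continues on a consistent state. The range check itself amounts to the sketch below; the addresses are illustrative and the fix-up bodies, which are the assembly-only part, are reduced to stubs:

struct cleanup_range_sk {
        unsigned long start, end;       /* [start, end) of one critical fragment */
        void (*fixup)(void);
};

static void cleanup_system_call_sk(void) { /* redo the SAVE_ALL bookkeeping  */ }
static void cleanup_sysc_return_sk(void) { /* resume at sysc_return          */ }
static void cleanup_sysc_leave_sk(void)  { /* finish or back out RESTORE_ALL */ }

/* the real pairs are the cleanup_table_* entries above:
 * {system_call,sysc_do_svc}, {sysc_return,sysc_leave},
 * {sysc_leave,sysc_work_loop}, {sysc_work_loop,sysc_reschedule}            */
static const struct cleanup_range_sk cleanup_table_sk[] = {
        { 0x100, 0x140, cleanup_system_call_sk },
        { 0x140, 0x160, cleanup_sysc_return_sk },
        { 0x160, 0x180, cleanup_sysc_leave_sk  },
        { 0x180, 0x1a0, cleanup_sysc_leave_sk  },
};

static void cleanup_critical_sketch(unsigned long psw_addr)
{
        unsigned int i;

        for (i = 0; i < sizeof(cleanup_table_sk) / sizeof(cleanup_table_sk[0]); i++)
                if (psw_addr >= cleanup_table_sk[i].start &&
                    psw_addr <  cleanup_table_sk[i].end) {
                        cleanup_table_sk[i].fixup();
                        return;
                }
}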
749 | cleanup_system_call: | ||
750 | mvc __LC_RETURN_PSW(8),0(%r12) | ||
751 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
752 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | ||
753 | bh BASED(0f) | ||
754 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
755 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | ||
756 | bhe BASED(cleanup_vtime) | ||
757 | #endif | ||
758 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | ||
759 | bh BASED(0f) | ||
760 | mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16 | ||
761 | 0: st %r13,__LC_SAVE_AREA+20 | ||
762 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
763 | st %r15,__LC_SAVE_AREA+28 | ||
764 | lh %r7,0x8a | ||
765 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
766 | cleanup_vtime: | ||
767 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | ||
768 | bhe BASED(cleanup_stime) | ||
769 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
770 | bz BASED(cleanup_novtime) | ||
771 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
772 | cleanup_stime: | ||
773 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) | ||
774 | bh BASED(cleanup_update) | ||
775 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
776 | cleanup_update: | ||
777 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
778 | cleanup_novtime: | ||
779 | #endif | ||
780 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | ||
781 | la %r12,__LC_RETURN_PSW | ||
782 | br %r14 | ||
783 | cleanup_system_call_insn: | ||
784 | .long sysc_saveall + 0x80000000 | ||
785 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
786 | .long system_call + 0x80000000 | ||
787 | .long sysc_vtime + 0x80000000 | ||
788 | .long sysc_stime + 0x80000000 | ||
789 | .long sysc_update + 0x80000000 | ||
790 | #endif | ||
791 | |||
792 | cleanup_sysc_return: | ||
793 | mvc __LC_RETURN_PSW(4),0(%r12) | ||
794 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) | ||
795 | la %r12,__LC_RETURN_PSW | ||
796 | br %r14 | ||
797 | |||
798 | cleanup_sysc_leave: | ||
799 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) | ||
800 | be BASED(0f) | ||
801 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
802 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
803 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) | ||
804 | be BASED(0f) | ||
805 | #endif | ||
806 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | ||
807 | mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) | ||
808 | lm %r0,%r11,SP_R0(%r15) | ||
809 | l %r15,SP_R15(%r15) | ||
810 | 0: la %r12,__LC_RETURN_PSW | ||
811 | br %r14 | ||
812 | cleanup_sysc_leave_insn: | ||
813 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
814 | .long sysc_leave + 14 + 0x80000000 | ||
815 | #endif | ||
816 | .long sysc_leave + 10 + 0x80000000 | ||
817 | |||
818 | /* | ||
819 | * Integer constants | ||
820 | */ | ||
821 | .align 4 | ||
822 | .Lc_spsize: .long SP_SIZE | ||
823 | .Lc_overhead: .long STACK_FRAME_OVERHEAD | ||
824 | .Lc_pactive: .long PREEMPT_ACTIVE | ||
825 | .Lnr_syscalls: .long NR_syscalls | ||
826 | .L0x018: .short 0x018 | ||
827 | .L0x020: .short 0x020 | ||
828 | .L0x028: .short 0x028 | ||
829 | .L0x030: .short 0x030 | ||
830 | .L0x038: .short 0x038 | ||
831 | .Lc_1: .long 1 | ||
832 | |||
833 | /* | ||
834 | * Symbol constants | ||
835 | */ | ||
836 | .Ls390_mcck: .long s390_do_machine_check | ||
837 | .Ldo_IRQ: .long do_IRQ | ||
838 | .Ldo_extint: .long do_extint | ||
839 | .Ldo_signal: .long do_signal | ||
840 | .Lhandle_per: .long do_single_step | ||
841 | .Ljump_table: .long pgm_check_table | ||
842 | .Lschedule: .long schedule | ||
843 | .Lclone: .long sys_clone | ||
844 | .Lexecve: .long sys_execve | ||
845 | .Lfork: .long sys_fork | ||
846 | .Lrt_sigreturn:.long sys_rt_sigreturn | ||
847 | .Lrt_sigsuspend: | ||
848 | .long sys_rt_sigsuspend | ||
849 | .Lsigreturn: .long sys_sigreturn | ||
850 | .Lsigsuspend: .long sys_sigsuspend | ||
851 | .Lsigaltstack: .long sys_sigaltstack | ||
852 | .Ltrace: .long syscall_trace | ||
853 | .Lvfork: .long sys_vfork | ||
854 | .Lschedtail: .long schedule_tail | ||
855 | |||
856 | .Lcritical_start: | ||
857 | .long __critical_start + 0x80000000 | ||
858 | .Lcritical_end: | ||
859 | .long __critical_end + 0x80000000 | ||
860 | .Lcleanup_critical: | ||
861 | .long cleanup_critical | ||
862 | |||
863 | #define SYSCALL(esa,esame,emu) .long esa | ||
864 | .globl sys_call_table | ||
865 | sys_call_table: | ||
866 | #include "syscalls.S" | ||
867 | #undef SYSCALL | ||
868 | |||
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S new file mode 100644 index 000000000000..51527ab8c8f9 --- /dev/null +++ b/arch/s390/kernel/entry64.S | |||
@@ -0,0 +1,881 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/entry64.S | ||
3 | * S390 low-level entry points. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Hartmut Penner (hp@de.ibm.com), | ||
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
10 | */ | ||
11 | |||
12 | #include <linux/sys.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <linux/config.h> | ||
15 | #include <asm/cache.h> | ||
16 | #include <asm/lowcore.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/offsets.h> | ||
21 | #include <asm/unistd.h> | ||
22 | #include <asm/page.h> | ||
23 | |||
24 | /* | ||
25 | * Stack layout for the system_call stack entry. | ||
26 | * The first few entries are identical to the user_regs_struct. | ||
27 | */ | ||
28 | SP_PTREGS = STACK_FRAME_OVERHEAD | ||
29 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | ||
30 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | ||
31 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | ||
32 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | ||
33 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | ||
34 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | ||
35 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | ||
36 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | ||
37 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | ||
38 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | ||
39 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64 | ||
40 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72 | ||
41 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80 | ||
42 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88 | ||
43 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96 | ||
44 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | ||
45 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | ||
46 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | ||
47 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | ||
48 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | ||
49 | SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP | ||
50 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | ||
51 | |||
52 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | ||
53 | STACK_SIZE = 1 << STACK_SHIFT | ||
54 | |||
55 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | ||
56 | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) | ||
57 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED) | ||
58 | |||
59 | #define BASED(name) name-system_call(%r13) | ||
60 | |||
61 | .macro STORE_TIMER lc_offset | ||
62 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
63 | stpt \lc_offset | ||
64 | #endif | ||
65 | .endm | ||
66 | |||
67 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
68 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | ||
69 | lg %r10,\lc_from | ||
70 | slg %r10,\lc_to | ||
71 | alg %r10,\lc_sum | ||
72 | stg %r10,\lc_sum | ||
73 | .endm | ||
74 | #endif | ||
75 | |||
76 | /* | ||
77 | * Register usage in interrupt handlers: | ||
78 | * R9 - pointer to current task structure | ||
79 | * R13 - pointer to literal pool | ||
80 | * R14 - return register for function calls | ||
81 | * R15 - kernel stack pointer | ||
82 | */ | ||
83 | |||
84 | .macro SAVE_ALL_BASE savearea | ||
85 | stmg %r12,%r15,\savearea | ||
86 | larl %r13,system_call | ||
87 | .endm | ||
88 | |||
89 | .macro SAVE_ALL psworg,savearea,sync | ||
90 | la %r12,\psworg | ||
91 | .if \sync | ||
92 | tm \psworg+1,0x01 # test problem state bit | ||
93 | jz 2f # skip stack setup save | ||
94 | lg %r15,__LC_KERNEL_STACK # problem state -> load ksp | ||
95 | .else | ||
96 | tm \psworg+1,0x01 # test problem state bit | ||
97 | jnz 1f # from user -> load async stack | ||
98 | clc \psworg+8(8),BASED(.Lcritical_end) | ||
99 | jhe 0f | ||
100 | clc \psworg+8(8),BASED(.Lcritical_start) | ||
101 | jl 0f | ||
102 | brasl %r14,cleanup_critical | ||
103 | tm 0(%r12),0x01 # retest problem state after cleanup | ||
104 | jnz 1f | ||
105 | 0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? | ||
106 | slgr %r14,%r15 | ||
107 | srag %r14,%r14,STACK_SHIFT | ||
108 | jz 2f | ||
109 | 1: lg %r15,__LC_ASYNC_STACK # load async stack | ||
110 | .endif | ||
111 | #ifdef CONFIG_CHECK_STACK | ||
112 | j 3f | ||
113 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | ||
114 | jz stack_overflow | ||
115 | 3: | ||
116 | #endif | ||
117 | 2: aghi %r15,-SP_SIZE # make room for registers & psw | ||
118 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | ||
119 | la %r12,\psworg | ||
120 | stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | ||
121 | icm %r12,12,__LC_SVC_ILC | ||
122 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | ||
123 | st %r12,SP_ILC(%r15) | ||
124 | mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack | ||
125 | la %r12,0 | ||
126 | stg %r12,__SF_BACKCHAIN(%r15) | ||
127 | .endm | ||
128 | |||
129 | .macro RESTORE_ALL sync | ||
130 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore | ||
131 | .if !\sync | ||
132 | ni __LC_RETURN_PSW+1,0xfd # clear wait state bit | ||
133 | .endif | ||
134 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | ||
135 | STORE_TIMER __LC_EXIT_TIMER | ||
136 | lpswe __LC_RETURN_PSW # back to caller | ||
137 | .endm | ||
138 | |||
139 | /* | ||
140 | * Scheduler resume function, called by switch_to | ||
141 | * gpr2 = (task_struct *) prev | ||
142 | * gpr3 = (task_struct *) next | ||
143 | * Returns: | ||
144 | * gpr2 = prev | ||
145 | */ | ||
146 | .globl __switch_to | ||
147 | __switch_to: | ||
148 | tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? | ||
149 | jz __switch_to_noper # if not we're fine | ||
150 | stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff | ||
151 | clc __THREAD_per(24,%r3),__SF_EMPTY(%r15) | ||
152 | je __switch_to_noper # we got away without bashing TLB's | ||
153 | lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't | ||
154 | __switch_to_noper: | ||
155 | stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task | ||
156 | stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp | ||
157 | lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp | ||
158 | lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task | ||
159 | stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct | ||
160 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | ||
161 | lg %r3,__THREAD_info(%r3) # load thread_info from task struct | ||
162 | stg %r3,__LC_THREAD_INFO | ||
163 | aghi %r3,STACK_SIZE | ||
164 | stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack | ||
165 | br %r14 | ||
166 | |||
167 | __critical_start: | ||
168 | /* | ||
169 | * SVC interrupt handler routine. System calls are synchronous events and | ||
170 | * are executed with interrupts enabled. | ||
171 | */ | ||
172 | |||
173 | .globl system_call | ||
174 | system_call: | ||
175 | STORE_TIMER __LC_SYNC_ENTER_TIMER | ||
176 | sysc_saveall: | ||
177 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
178 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
179 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | ||
180 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
181 | sysc_vtime: | ||
182 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
183 | jz sysc_do_svc | ||
184 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
185 | sysc_stime: | ||
186 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
187 | sysc_update: | ||
188 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
189 | #endif | ||
190 | sysc_do_svc: | ||
191 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
192 | slag %r7,%r7,2 # *4 and test for svc 0 | ||
193 | jnz sysc_nr_ok | ||
194 | # svc 0: system call number in %r1 | ||
195 | cl %r1,BASED(.Lnr_syscalls) | ||
196 | jnl sysc_nr_ok | ||
197 | lgfr %r7,%r1 # clear high word in r1 | ||
198 | slag %r7,%r7,2 # *4 | ||
199 | sysc_nr_ok: | ||
200 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | ||
201 | sysc_do_restart: | ||
202 | larl %r10,sys_call_table | ||
203 | #ifdef CONFIG_S390_SUPPORT | ||
204 | tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ? | ||
205 | jo sysc_noemu | ||
206 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | ||
207 | sysc_noemu: | ||
208 | #endif | ||
209 | tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | ||
210 | lgf %r8,0(%r7,%r10) # load address of system call routine | ||
211 | jnz sysc_tracesys | ||
212 | basr %r14,%r8 # call sys_xxxx | ||
213 | stg %r2,SP_R2(%r15) # store return value (change R2 on stack) | ||
214 | # ATTENTION: check sys_execve_glue before | ||
215 | # changing anything here !! | ||
216 | |||
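With CONFIG_S390_SUPPORT the 64-bit kernel keeps two dispatch tables and picks one per call from the addressing-mode bit of the saved PSW: 31-bit (compat) callers go through sys_call_table_emu, native callers through sys_call_table. The selection is just the following sketch; table contents and the predicate name are illustrative stand-ins:

typedef long (*sc_fn_sk)(void);                  /* simplified entry type  */

static long nosys_sk(void) { return -38; }       /* -ENOSYS stand-in       */

static sc_fn_sk sys_call_table_sk64[1]   = { nosys_sk };  /* native 64-bit */
static sc_fn_sk sys_call_table_emu_sk[1] = { nosys_sk };  /* 31-bit compat */

static sc_fn_sk *pick_table_sketch(int psw_ea_bit_set)
{
        /* tm SP_PSW+3(%r15),0x01 tests the saved PSW's addressing-mode bit;
         * if it is clear the caller is a 31-bit (compat) task              */
        return psw_ea_bit_set ? sys_call_table_sk64 : sys_call_table_emu_sk;
}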
217 | sysc_return: | ||
218 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
219 | jno sysc_leave | ||
220 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | ||
221 | jnz sysc_work # there is work to do (signals etc.) | ||
222 | sysc_leave: | ||
223 | RESTORE_ALL 1 | ||
224 | |||
225 | # | ||
226 | # recheck if there is more work to do | ||
227 | # | ||
228 | sysc_work_loop: | ||
229 | tm __TI_flags+7(%r9),_TIF_WORK_SVC | ||
230 | jz sysc_leave # there is no work to do | ||
231 | # | ||
232 | # One of the work bits is on. Find out which one. | ||
233 | # | ||
234 | sysc_work: | ||
235 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | ||
236 | jo sysc_reschedule | ||
237 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | ||
238 | jo sysc_sigpending | ||
239 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC | ||
240 | jo sysc_restart | ||
241 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP | ||
242 | jo sysc_singlestep | ||
243 | j sysc_leave | ||
244 | |||
245 | # | ||
246 | # _TIF_NEED_RESCHED is set, call schedule | ||
247 | # | ||
248 | sysc_reschedule: | ||
249 | larl %r14,sysc_work_loop | ||
250 | jg schedule # return point is sysc_work_loop | ||
251 | |||
252 | # | ||
253 | # _TIF_SIGPENDING is set, call do_signal | ||
254 | # | ||
255 | sysc_sigpending: | ||
256 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | ||
257 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
258 | sgr %r3,%r3 # clear *oldset | ||
259 | brasl %r14,do_signal # call do_signal | ||
260 | tm __TI_flags+7(%r9),_TIF_RESTART_SVC | ||
261 | jo sysc_restart | ||
262 | tm __TI_flags+7(%r9),_TIF_SINGLE_STEP | ||
263 | jo sysc_singlestep | ||
264 | j sysc_leave # out of here, do NOT recheck | ||
265 | |||
266 | # | ||
267 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
268 | # | ||
269 | sysc_restart: | ||
270 | ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
271 | lg %r7,SP_R2(%r15) # load new svc number | ||
272 | slag %r7,%r7,2 # *4 | ||
273 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
274 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
275 | j sysc_do_restart # restart svc | ||
276 | |||
277 | # | ||
278 | # _TIF_SINGLE_STEP is set, call do_single_step | ||
279 | # | ||
280 | sysc_singlestep: | ||
281 | ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | ||
282 | lhi %r0,__LC_PGM_OLD_PSW | ||
283 | sth %r0,SP_TRAP(%r15) # set trap indication to pgm check | ||
284 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
285 | larl %r14,sysc_return # load adr. of system return | ||
286 | jg do_single_step # branch to do_single_step | ||
287 | |||
288 | |||
289 | __critical_end: | ||
290 | |||
291 | # | ||
292 | # call syscall_trace before and after system call | ||
293 | # special linkage: %r12 contains the return address for trace_svc | ||
294 | # | ||
295 | sysc_tracesys: | ||
296 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
297 | la %r3,0 | ||
298 | srl %r7,2 | ||
299 | stg %r7,SP_R2(%r15) | ||
300 | brasl %r14,syscall_trace | ||
301 | lghi %r0,NR_syscalls | ||
302 | clg %r0,SP_R2(%r15) | ||
303 | jnh sysc_tracenogo | ||
304 | lg %r7,SP_R2(%r15) # strace might have changed the | ||
305 | sll %r7,2 # system call | ||
306 | lgf %r8,0(%r7,%r10) | ||
307 | sysc_tracego: | ||
308 | lmg %r3,%r6,SP_R3(%r15) | ||
309 | lg %r2,SP_ORIG_R2(%r15) | ||
310 | basr %r14,%r8 # call sys_xxx | ||
311 | stg %r2,SP_R2(%r15) # store return value | ||
312 | sysc_tracenogo: | ||
313 | tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | ||
314 | jz sysc_return | ||
315 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
316 | la %r3,1 | ||
317 | larl %r14,sysc_return # return point is sysc_return | ||
318 | jg syscall_trace | ||
319 | |||
320 | # | ||
321 | # a new process exits the kernel with ret_from_fork | ||
322 | # | ||
323 | .globl ret_from_fork | ||
324 | ret_from_fork: | ||
325 | lg %r13,__LC_SVC_NEW_PSW+8 | ||
326 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
327 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | ||
328 | jo 0f | ||
329 | stg %r15,SP_R15(%r15) # store stack pointer for new kthread | ||
330 | 0: brasl %r14,schedule_tail | ||
331 | stosm 24(%r15),0x03 # reenable interrupts | ||
332 | j sysc_return | ||
333 | |||
334 | # | ||
335 | # clone, fork, vfork, exec and sigreturn need glue, | ||
336 | # because they all expect pt_regs as a parameter, | ||
337 | # but are called with different parameters. | ||
338 | # return-address is set up above | ||
339 | # | ||
340 | sys_clone_glue: | ||
341 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
342 | jg sys_clone # branch to sys_clone | ||
343 | |||
344 | #ifdef CONFIG_S390_SUPPORT | ||
345 | sys32_clone_glue: | ||
346 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
347 | jg sys32_clone # branch to sys32_clone | ||
348 | #endif | ||
349 | |||
350 | sys_fork_glue: | ||
351 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
352 | jg sys_fork # branch to sys_fork | ||
353 | |||
354 | sys_vfork_glue: | ||
355 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
356 | jg sys_vfork # branch to sys_vfork | ||
357 | |||
358 | sys_execve_glue: | ||
359 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
360 | lgr %r12,%r14 # save return address | ||
361 | brasl %r14,sys_execve # call sys_execve | ||
362 | ltgr %r2,%r2 # check if execve failed | ||
363 | bnz 0(%r12) # it did fail -> store result in gpr2 | ||
364 | b 6(%r12) # SKIP STG 2,SP_R2(15) in | ||
365 | # system_call/sysc_tracesys | ||
366 | #ifdef CONFIG_S390_SUPPORT | ||
367 | sys32_execve_glue: | ||
368 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
369 | lgr %r12,%r14 # save return address | ||
370 | brasl %r14,sys32_execve # call sys32_execve | ||
371 | ltgr %r2,%r2 # check if execve failed | ||
372 | bnz 0(%r12) # it did fail -> store result in gpr2 | ||
373 | b 6(%r12) # SKIP STG 2,SP_R2(15) in | ||
374 | # system_call/sysc_tracesys | ||
375 | #endif | ||
376 | |||
377 | sys_sigreturn_glue: | ||
378 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
379 | jg sys_sigreturn # branch to sys_sigreturn | ||
380 | |||
381 | #ifdef CONFIG_S390_SUPPORT | ||
382 | sys32_sigreturn_glue: | ||
383 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
384 | jg sys32_sigreturn # branch to sys32_sigreturn | ||
385 | #endif | ||
386 | |||
387 | sys_rt_sigreturn_glue: | ||
388 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
389 | jg sys_rt_sigreturn # branch to sys_rt_sigreturn | ||
390 | |||
391 | #ifdef CONFIG_S390_SUPPORT | ||
392 | sys32_rt_sigreturn_glue: | ||
393 | la %r2,SP_PTREGS(%r15) # load pt_regs as parameter | ||
394 | jg sys32_rt_sigreturn # branch to sys32_rt_sigreturn | ||
395 | #endif | ||
396 | |||
397 | # | ||
398 | # sigsuspend and rt_sigsuspend need pt_regs as an additional | ||
399 | # parameter and they have to skip the store of %r2 into the | ||
400 | # user register %r2 because the return value was set in | ||
401 | # sigsuspend and rt_sigsuspend already and must not be overwritten! | ||
402 | # | ||
403 | |||
404 | sys_sigsuspend_glue: | ||
405 | lgr %r5,%r4 # move mask back | ||
406 | lgr %r4,%r3 # move history1 parameter | ||
407 | lgr %r3,%r2 # move history0 parameter | ||
408 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
409 | la %r14,6(%r14) # skip store of return value | ||
410 | jg sys_sigsuspend # branch to sys_sigsuspend | ||
411 | |||
412 | #ifdef CONFIG_S390_SUPPORT | ||
413 | sys32_sigsuspend_glue: | ||
414 | llgfr %r4,%r4 # unsigned long | ||
415 | lgr %r5,%r4 # move mask back | ||
416 | lgfr %r3,%r3 # int | ||
417 | lgr %r4,%r3 # move history1 parameter | ||
418 | lgfr %r2,%r2 # int | ||
419 | lgr %r3,%r2 # move history0 parameter | ||
420 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
421 | la %r14,6(%r14) # skip store of return value | ||
422 | jg sys32_sigsuspend # branch to sys32_sigsuspend | ||
423 | #endif | ||
424 | |||
425 | sys_rt_sigsuspend_glue: | ||
426 | lgr %r4,%r3 # move sigsetsize parameter | ||
427 | lgr %r3,%r2 # move unewset parameter | ||
428 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
429 | la %r14,6(%r14) # skip store of return value | ||
430 | jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend | ||
431 | |||
432 | #ifdef CONFIG_S390_SUPPORT | ||
433 | sys32_rt_sigsuspend_glue: | ||
434 | llgfr %r3,%r3 # size_t | ||
435 | lgr %r4,%r3 # move sigsetsize parameter | ||
436 | llgtr %r2,%r2 # sigset_emu31_t * | ||
437 | lgr %r3,%r2 # move unewset parameter | ||
438 | la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter | ||
439 | la %r14,6(%r14) # skip store of return value | ||
440 | jg sys32_rt_sigsuspend # branch to sys32_rt_sigsuspend | ||
441 | #endif | ||
442 | |||
443 | sys_sigaltstack_glue: | ||
444 | la %r4,SP_PTREGS(%r15) # load pt_regs as parameter | ||
445 | jg sys_sigaltstack # branch to sys_sigaltstack | ||
446 | |||
447 | #ifdef CONFIG_S390_SUPPORT | ||
448 | sys32_sigaltstack_glue: | ||
449 | la %r4,SP_PTREGS(%r15) # load pt_regs as parameter | ||
450 | jg sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper | ||
451 | #endif | ||
452 | |||
453 | /* | ||
454 | * Program check handler routine | ||
455 | */ | ||
456 | |||
457 | .globl pgm_check_handler | ||
458 | pgm_check_handler: | ||
459 | /* | ||
460 | * First we need to check for a special case: | ||
461 | * Single stepping an instruction that disables the PER event mask will | ||
462 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | ||
463 | * For a single stepped SVC the program check handler gets control after | ||
464 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | ||
465 | * then handle the PER event. Therefore we update the SVC old PSW to point | ||
466 | * to the pgm_check_handler and branch to the SVC handler after we checked | ||
467 | * if we have to load the kernel stack register. | ||
468 | * For every other possible cause for PER event without the PER mask set | ||
469 | * we just ignore the PER event (FIXME: is there anything we have to do | ||
470 | * for LPSW?). | ||
471 | */ | ||
472 | STORE_TIMER __LC_SYNC_ENTER_TIMER | ||
473 | SAVE_ALL_BASE __LC_SAVE_AREA | ||
474 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | ||
475 | jnz pgm_per # got per exception -> special case | ||
476 | SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 | ||
477 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
478 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
479 | jz pgm_no_vtime | ||
480 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
481 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
482 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
483 | pgm_no_vtime: | ||
484 | #endif | ||
485 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
486 | lgf %r3,__LC_PGM_ILC # load program interruption code | ||
487 | lghi %r8,0x7f | ||
488 | ngr %r8,%r3 | ||
489 | pgm_do_call: | ||
490 | sll %r8,3 | ||
491 | larl %r1,pgm_check_table | ||
492 | lg %r1,0(%r8,%r1) # load address of handler routine | ||
493 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
494 | larl %r14,sysc_return | ||
495 | br %r1 # branch to interrupt-handler | ||
496 | |||
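pgm_do_call reduces the program interruption code to its low seven bits, scales that into the 8-byte entries of pgm_check_table, and branches to the selected handler with %r14 pointing at sysc_return. Roughly, in C (the element type of the table is my assumption):

/* Sketch of the dispatch above; "pgm_handler" is an assumed C view of the
 * 8-byte pgm_check_table entries. */
struct fake_pt_regs { long gprs[16]; };
typedef void (*pgm_handler)(struct fake_pt_regs *regs);

static void pgm_do_call_sketch(pgm_handler *table,       /* pgm_check_table */
                               struct fake_pt_regs *regs, unsigned int pgm_ilc)
{
        unsigned int idx = pgm_ilc & 0x7f;  /* lghi %r8,0x7f ; ngr %r8,%r3 */

        table[idx](regs);                   /* sll %r8,3 ; lg %r1,... ; br %r1 */
        /* the assembly "returns" through %r14 = sysc_return instead */
}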
497 | # | ||
498 | # handle per exception | ||
499 | # | ||
500 | pgm_per: | ||
501 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on | ||
502 | jnz pgm_per_std # ok, normal per event from user space | ||
503 | # ok, it's one of the special cases, now we need to find out which one | ||
504 | clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW | ||
505 | je pgm_svcper | ||
506 | # no interesting special case, ignore PER event | ||
507 | lmg %r12,%r15,__LC_SAVE_AREA | ||
508 | lpswe __LC_PGM_OLD_PSW | ||
509 | |||
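The three-way decision made by pgm_per above can be read as: if the old PSW still had the PER mask on, this is an ordinary PER event; if the old PSW equals the SVC new PSW, a single-stepped SVC is in flight and must be executed first; otherwise the event is dropped by restoring the registers and reloading the old PSW. A hedged C rendering:

/* Control-flow sketch of pgm_per; names are descriptive placeholders. */
enum per_case { PER_NORMAL, PER_SINGLE_STEPPED_SVC, PER_IGNORE };

static enum per_case classify_per(int old_psw_per_mask_set,
                                  int old_psw_equals_svc_new_psw)
{
        if (old_psw_per_mask_set)
                return PER_NORMAL;              /* pgm_per_std */
        if (old_psw_equals_svc_new_psw)
                return PER_SINGLE_STEPPED_SVC;  /* pgm_svcper: run the SVC first */
        return PER_IGNORE;                      /* lmg ... ; lpswe __LC_PGM_OLD_PSW */
}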
510 | # | ||
511 | # Normal per exception | ||
512 | # | ||
513 | pgm_per_std: | ||
514 | SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 | ||
515 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
516 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
517 | jz pgm_no_vtime2 | ||
518 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
519 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
520 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
521 | pgm_no_vtime2: | ||
522 | #endif | ||
523 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
524 | lg %r1,__TI_task(%r9) | ||
525 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | ||
526 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS | ||
527 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | ||
528 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
529 | lgf %r3,__LC_PGM_ILC # load program interruption code | ||
530 | lghi %r8,0x7f | ||
531 | ngr %r8,%r3 # clear per-event-bit and ilc | ||
532 | je sysc_return | ||
533 | j pgm_do_call | ||
534 | |||
535 | # | ||
536 | # it was a single stepped SVC that is causing all the trouble | ||
537 | # | ||
538 | pgm_svcper: | ||
539 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
540 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
541 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
542 | jz pgm_no_vtime3 | ||
543 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
544 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
545 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
546 | pgm_no_vtime3: | ||
547 | #endif | ||
548 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | ||
549 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
550 | lg %r1,__TI_task(%r9) | ||
551 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | ||
552 | mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS | ||
553 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | ||
554 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
555 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
556 | j sysc_do_svc | ||
557 | |||
558 | /* | ||
559 | * IO interrupt handler routine | ||
560 | */ | ||
561 | .globl io_int_handler | ||
562 | io_int_handler: | ||
563 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
564 | stck __LC_INT_CLOCK | ||
565 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | ||
566 | SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0 | ||
567 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
568 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
569 | jz io_no_vtime | ||
570 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
571 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
572 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
573 | io_no_vtime: | ||
574 | #endif | ||
575 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
576 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
577 | brasl %r14,do_IRQ # call standard irq handler | ||
578 | |||
579 | io_return: | ||
580 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
581 | #ifdef CONFIG_PREEMPT | ||
582 | jno io_preempt # no -> check for preemptive scheduling | ||
583 | #else | ||
584 | jno io_leave # no-> skip resched & signal | ||
585 | #endif | ||
586 | tm __TI_flags+7(%r9),_TIF_WORK_INT | ||
587 | jnz io_work # there is work to do (signals etc.) | ||
588 | io_leave: | ||
589 | RESTORE_ALL 0 | ||
590 | |||
591 | #ifdef CONFIG_PREEMPT | ||
592 | io_preempt: | ||
593 | icm %r0,15,__TI_precount(%r9) | ||
594 | jnz io_leave | ||
595 | # switch to kernel stack | ||
596 | lg %r1,SP_R15(%r15) | ||
597 | aghi %r1,-SP_SIZE | ||
598 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | ||
599 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | ||
600 | lgr %r15,%r1 | ||
601 | io_resume_loop: | ||
602 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | ||
603 | jno io_leave | ||
604 | larl %r1,.Lc_pactive | ||
605 | mvc __TI_precount(4,%r9),0(%r1) | ||
606 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
607 | brasl %r14,schedule # call schedule | ||
608 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
609 | xc __TI_precount(4,%r9),__TI_precount(%r9) | ||
610 | j io_resume_loop | ||
611 | #endif | ||
612 | |||
613 | # | ||
614 | # switch to kernel stack, then check TIF bits | ||
615 | # | ||
616 | io_work: | ||
617 | lg %r1,__LC_KERNEL_STACK | ||
618 | aghi %r1,-SP_SIZE | ||
619 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | ||
620 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain | ||
621 | lgr %r15,%r1 | ||
622 | # | ||
623 | # One of the work bits is on. Find out which one. | ||
624 | # Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED | ||
625 | # | ||
626 | io_work_loop: | ||
627 | tm __TI_flags+7(%r9),_TIF_NEED_RESCHED | ||
628 | jo io_reschedule | ||
629 | tm __TI_flags+7(%r9),_TIF_SIGPENDING | ||
630 | jo io_sigpending | ||
631 | j io_leave | ||
632 | |||
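Putting io_return, io_work and io_work_loop together with the io_reschedule and io_sigpending branches below: when returning to user space the handler loops over the work bits, rescheduling as long as _TIF_NEED_RESCHED is set and, once _TIF_SIGPENDING is found, delivering signals and leaving through sysc_leave without rechecking. A C sketch with placeholder flag values and helpers:

/* Control-flow sketch only; the flag values and helper functions are
 * placeholders, not the real kernel definitions. */
#define SK_TIF_NEED_RESCHED 0x01
#define SK_TIF_SIGPENDING   0x02
#define SK_TIF_WORK_INT     (SK_TIF_NEED_RESCHED | SK_TIF_SIGPENDING)

static unsigned int reschedule_and_reload_flags(void) { return 0; } /* stub */
static void deliver_pending_signals(void)             { }           /* stub */

static void io_return_sketch(int returning_to_user, unsigned int tif_flags)
{
        if (!returning_to_user)
                return;                 /* io_leave (io_preempt with CONFIG_PREEMPT) */
        /* io_work: switch to the kernel stack, then loop over the work bits */
        while (tif_flags & SK_TIF_WORK_INT) {
                if (tif_flags & SK_TIF_NEED_RESCHED) {
                        /* io_reschedule: irqs on, schedule(), irqs off, recheck */
                        tif_flags = reschedule_and_reload_flags();
                        continue;
                }
                /* io_sigpending: irqs on, do_signal(regs, NULL), then leave
                 * through sysc_leave without rechecking the flags */
                deliver_pending_signals();
                return;
        }
}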
633 | # | ||
634 | # _TIF_NEED_RESCHED is set, call schedule | ||
635 | # | ||
636 | io_reschedule: | ||
637 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
638 | brasl %r14,schedule # call scheduler | ||
639 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
640 | tm __TI_flags+7(%r9),_TIF_WORK_INT | ||
641 | jz io_leave # there is no work to do | ||
642 | j io_work_loop | ||
643 | |||
644 | # | ||
645 | # _TIF_SIGPENDING is set, call do_signal | ||
646 | # | ||
647 | io_sigpending: | ||
648 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
649 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
650 | slgr %r3,%r3 # clear *oldset | ||
651 | brasl %r14,do_signal # call do_signal | ||
652 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | ||
653 | j sysc_leave # out of here, do NOT recheck | ||
654 | |||
655 | /* | ||
656 | * External interrupt handler routine | ||
657 | */ | ||
658 | .globl ext_int_handler | ||
659 | ext_int_handler: | ||
660 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
661 | stck __LC_INT_CLOCK | ||
662 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | ||
663 | SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0 | ||
664 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
665 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
666 | jz ext_no_vtime | ||
667 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
668 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
669 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
670 | ext_no_vtime: | ||
671 | #endif | ||
672 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | ||
673 | la %r2,SP_PTREGS(%r15) # address of register-save area | ||
674 | llgh %r3,__LC_EXT_INT_CODE # get interruption code | ||
675 | brasl %r14,do_extint | ||
676 | j io_return | ||
677 | |||
678 | /* | ||
679 | * Machine check handler routines | ||
680 | */ | ||
681 | .globl mcck_int_handler | ||
682 | mcck_int_handler: | ||
683 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | ||
684 | SAVE_ALL_BASE __LC_SAVE_AREA+64 | ||
685 | SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0 | ||
686 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
687 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
688 | jz mcck_no_vtime | ||
689 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | ||
690 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
691 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
692 | mcck_no_vtime: | ||
693 | #endif | ||
694 | brasl %r14,s390_do_machine_check | ||
695 | mcck_return: | ||
696 | RESTORE_ALL 0 | ||
697 | |||
698 | #ifdef CONFIG_SMP | ||
699 | /* | ||
700 | * Restart interruption handler, kick starter for additional CPUs | ||
701 | */ | ||
702 | .globl restart_int_handler | ||
703 | restart_int_handler: | ||
704 | lg %r15,__LC_SAVE_AREA+120 # load ksp | ||
705 | lghi %r10,__LC_CREGS_SAVE_AREA | ||
706 | lctlg %c0,%c15,0(%r10) # get new ctl regs | ||
707 | lghi %r10,__LC_AREGS_SAVE_AREA | ||
708 | lam %a0,%a15,0(%r10) | ||
709 | lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone | ||
710 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | ||
711 | jg start_secondary | ||
712 | #else | ||
713 | /* | ||
714 | * If we do not run with SMP enabled, let the new CPU crash ... | ||
715 | */ | ||
716 | .globl restart_int_handler | ||
717 | restart_int_handler: | ||
718 | basr %r1,0 | ||
719 | restart_base: | ||
720 | lpswe restart_crash-restart_base(%r1) | ||
721 | .align 8 | ||
722 | restart_crash: | ||
723 | .long 0x000a0000,0x00000000,0x00000000,0x00000000 | ||
724 | restart_go: | ||
725 | #endif | ||
726 | |||
727 | #ifdef CONFIG_CHECK_STACK | ||
728 | /* | ||
729 | * The synchronous or the asynchronous stack overflowed. We are dead. | ||
730 | * No need to properly save the registers, we are going to panic anyway. | ||
731 | * Setup a pt_regs so that show_trace can provide a good call trace. | ||
732 | */ | ||
733 | stack_overflow: | ||
734 | lg %r15,__LC_PANIC_STACK # change to panic stack | ||
735 | aghi %r15,-SP_SIZE # make room for a pt_regs frame | ||
736 | mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack | ||
737 | stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | ||
738 | la %r1,__LC_SAVE_AREA | ||
739 | chi %r12,__LC_SVC_OLD_PSW | ||
740 | je 0f | ||
741 | chi %r12,__LC_PGM_OLD_PSW | ||
742 | je 0f | ||
743 | la %r1,__LC_SAVE_AREA+16 | ||
744 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack | ||
745 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain | ||
746 | la %r2,SP_PTREGS(%r15) # load pt_regs | ||
747 | jg kernel_stack_overflow | ||
748 | #endif | ||
749 | |||
750 | cleanup_table_system_call: | ||
751 | .quad system_call, sysc_do_svc | ||
752 | cleanup_table_sysc_return: | ||
753 | .quad sysc_return, sysc_leave | ||
754 | cleanup_table_sysc_leave: | ||
755 | .quad sysc_leave, sysc_work_loop | ||
756 | cleanup_table_sysc_work_loop: | ||
757 | .quad sysc_work_loop, sysc_reschedule | ||
758 | |||
759 | cleanup_critical: | ||
760 | clc 8(8,%r12),BASED(cleanup_table_system_call) | ||
761 | jl 0f | ||
762 | clc 8(8,%r12),BASED(cleanup_table_system_call+8) | ||
763 | jl cleanup_system_call | ||
764 | 0: | ||
765 | clc 8(8,%r12),BASED(cleanup_table_sysc_return) | ||
766 | jl 0f | ||
767 | clc 8(8,%r12),BASED(cleanup_table_sysc_return+8) | ||
768 | jl cleanup_sysc_return | ||
769 | 0: | ||
770 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave) | ||
771 | jl 0f | ||
772 | clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) | ||
773 | jl cleanup_sysc_leave | ||
774 | 0: | ||
775 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) | ||
776 | jl 0f | ||
777 | clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) | ||
778 | jl cleanup_sysc_leave | ||
779 | 0: | ||
780 | br %r14 | ||
781 | |||
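cleanup_critical checks whether the interrupted PSW address lies inside one of the (start, end) windows listed above and, if so, runs the matching cleanup routine to bring the half-finished entry/exit state back to a well-defined point. As a hedged C sketch:

/* Sketch of the cleanup_critical range test; illustrative types only. */
struct cleanup_range {
        unsigned long start;        /* e.g. system_call */
        unsigned long end;          /* e.g. sysc_do_svc */
        void (*fixup)(void);        /* e.g. cleanup_system_call */
};

static void cleanup_critical_sketch(unsigned long psw_addr,
                                    const struct cleanup_range *tbl, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                /* the two "jl" branches per entry: start <= psw_addr < end */
                if (psw_addr >= tbl[i].start && psw_addr < tbl[i].end) {
                        tbl[i].fixup();
                        return;
                }
        }
        /* not inside a critical window: nothing to repair (br %r14) */
}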
782 | cleanup_system_call: | ||
783 | mvc __LC_RETURN_PSW(16),0(%r12) | ||
784 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
785 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | ||
786 | jh 0f | ||
787 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
788 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | ||
789 | jhe cleanup_vtime | ||
790 | #endif | ||
791 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | ||
792 | jh 0f | ||
793 | mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32 | ||
794 | 0: stg %r13,__LC_SAVE_AREA+40 | ||
795 | SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 | ||
796 | stg %r15,__LC_SAVE_AREA+56 | ||
797 | llgh %r7,__LC_SVC_INT_CODE | ||
798 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
799 | cleanup_vtime: | ||
800 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | ||
801 | jhe cleanup_stime | ||
802 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | ||
803 | jz cleanup_novtime | ||
804 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | ||
805 | cleanup_stime: | ||
806 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32) | ||
807 | jh cleanup_update | ||
808 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | ||
809 | cleanup_update: | ||
810 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
811 | cleanup_novtime: | ||
812 | #endif | ||
813 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | ||
814 | la %r12,__LC_RETURN_PSW | ||
815 | br %r14 | ||
816 | cleanup_system_call_insn: | ||
817 | .quad sysc_saveall | ||
818 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
819 | .quad system_call | ||
820 | .quad sysc_vtime | ||
821 | .quad sysc_stime | ||
822 | .quad sysc_update | ||
823 | #endif | ||
824 | |||
825 | cleanup_sysc_return: | ||
826 | mvc __LC_RETURN_PSW(8),0(%r12) | ||
827 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) | ||
828 | la %r12,__LC_RETURN_PSW | ||
829 | br %r14 | ||
830 | |||
831 | cleanup_sysc_leave: | ||
832 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) | ||
833 | je 0f | ||
834 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
835 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
836 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) | ||
837 | je 0f | ||
838 | #endif | ||
839 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | ||
840 | mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) | ||
841 | lmg %r0,%r11,SP_R0(%r15) | ||
842 | lg %r15,SP_R15(%r15) | ||
843 | 0: la %r12,__LC_RETURN_PSW | ||
844 | br %r14 | ||
845 | cleanup_sysc_leave_insn: | ||
846 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
847 | .quad sysc_leave + 16 | ||
848 | #endif | ||
849 | .quad sysc_leave + 12 | ||
850 | |||
851 | /* | ||
852 | * Integer constants | ||
853 | */ | ||
854 | .align 4 | ||
855 | .Lconst: | ||
856 | .Lc_pactive: .long PREEMPT_ACTIVE | ||
857 | .Lnr_syscalls: .long NR_syscalls | ||
858 | .L0x0130: .short 0x130 | ||
859 | .L0x0140: .short 0x140 | ||
860 | .L0x0150: .short 0x150 | ||
861 | .L0x0160: .short 0x160 | ||
862 | .L0x0170: .short 0x170 | ||
863 | .Lcritical_start: | ||
864 | .quad __critical_start | ||
865 | .Lcritical_end: | ||
866 | .quad __critical_end | ||
867 | |||
868 | #define SYSCALL(esa,esame,emu) .long esame | ||
869 | .globl sys_call_table | ||
870 | sys_call_table: | ||
871 | #include "syscalls.S" | ||
872 | #undef SYSCALL | ||
873 | |||
874 | #ifdef CONFIG_S390_SUPPORT | ||
875 | |||
876 | #define SYSCALL(esa,esame,emu) .long emu | ||
877 | .globl sys_call_table_emu | ||
878 | sys_call_table_emu: | ||
879 | #include "syscalls.S" | ||
880 | #undef SYSCALL | ||
881 | #endif | ||
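The table generation above is the classic include-twice trick: syscalls.S lists every system call once as SYSCALL(esa,esame,emu), and redefining the macro before each #include selects which column ends up in which table (the esame entries for sys_call_table, the emu wrappers for sys_call_table_emu under CONFIG_S390_SUPPORT). The same pattern in C, reduced to two columns and with entirely made-up entries:

/* Illustration of the include-twice pattern; the entries are hypothetical,
 * not taken from arch/s390/kernel/syscalls.S. */
#define SYSCALL_LIST \
        SYSCALL(native_read,  emu_read)  \
        SYSCALL(native_write, emu_write)

static long native_read(void)  { return 0; }
static long native_write(void) { return 0; }
static long emu_read(void)     { return 0; }
static long emu_write(void)    { return 0; }

#define SYSCALL(native, emu) native,
static long (*const call_table[])(void) = { SYSCALL_LIST };     /* sys_call_table */
#undef SYSCALL

#define SYSCALL(native, emu) emu,
static long (*const call_table_emu[])(void) = { SYSCALL_LIST }; /* sys_call_table_emu */
#undef SYSCALL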
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S new file mode 100644 index 000000000000..b804c55bd919 --- /dev/null +++ b/arch/s390/kernel/head.S | |||
@@ -0,0 +1,772 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/head.S | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Rob van der Heij (rvdhei@iae.nl) | ||
9 | * | ||
10 | * There are 5 different IPL methods | ||
11 | * 1) load the image directly into ram at address 0 and do a PSW restart | ||
12 | * 2) linload will load the image from address 0x10000 to memory 0x10000 | ||
13 | * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) | ||
14 | * 3) generate the tape ipl header, store the generated image on a tape | ||
15 | * and ipl from it | ||
16 | * In case of SL tape you need to IPL 5 times to get past VOL1 etc | ||
17 | * 4) generate the vm reader ipl header, move the generated image to the | ||
18 | * VM reader (use option NOH!) and do an ipl from reader (VM only) | ||
19 | * 5) direct call of start by the SALIPL loader | ||
20 | * We use the cpuid to distinguish between VM and native ipl | ||
21 | * params for kernel are pushed to 0x10400 (see setup.h) | ||
22 | |||
23 | Changes: | ||
24 | Oct 25 2000 <rvdheij@iae.nl> | ||
25 | added code to skip HDR and EOF to allow SL tape IPL (5 retries) | ||
26 | changed first CCW from rewind to backspace block | ||
27 | |||
28 | */ | ||
29 | |||
30 | #include <linux/config.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/lowcore.h> | ||
33 | #include <asm/offsets.h> | ||
34 | #include <asm/thread_info.h> | ||
35 | #include <asm/page.h> | ||
36 | |||
37 | #ifndef CONFIG_IPL | ||
38 | .org 0 | ||
39 | .long 0x00080000,0x80000000+startup # Just a restart PSW | ||
40 | #else | ||
41 | #ifdef CONFIG_IPL_TAPE | ||
42 | #define IPL_BS 1024 | ||
43 | .org 0 | ||
44 | .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded | ||
45 | .long 0x27000000,0x60000001 # by ipl to addresses 0-23. | ||
46 | .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). | ||
47 | .long 0x00000000,0x00000000 # external old psw | ||
48 | .long 0x00000000,0x00000000 # svc old psw | ||
49 | .long 0x00000000,0x00000000 # program check old psw | ||
50 | .long 0x00000000,0x00000000 # machine check old psw | ||
51 | .long 0x00000000,0x00000000 # io old psw | ||
52 | .long 0x00000000,0x00000000 | ||
53 | .long 0x00000000,0x00000000 | ||
54 | .long 0x00000000,0x00000000 | ||
55 | .long 0x000a0000,0x00000058 # external new psw | ||
56 | .long 0x000a0000,0x00000060 # svc new psw | ||
57 | .long 0x000a0000,0x00000068 # program check new psw | ||
58 | .long 0x000a0000,0x00000070 # machine check new psw | ||
59 | .long 0x00080000,0x80000000+.Lioint # io new psw | ||
60 | |||
61 | .org 0x100 | ||
62 | # | ||
63 | # subroutine for loading from tape | ||
64 | # Parameters: | ||
65 | # R1 = device number | ||
66 | # R2 = load address | ||
67 | .Lloader: | ||
68 | st %r14,.Lldret | ||
69 | la %r3,.Lorbread # r3 = address of orb | ||
70 | la %r5,.Lirb # r5 = address of irb | ||
71 | st %r2,.Lccwread+4 # initialize CCW data addresses | ||
72 | lctl %c6,%c6,.Lcr6 | ||
73 | slr %r2,%r2 | ||
74 | .Lldlp: | ||
75 | la %r6,3 # 3 retries | ||
76 | .Lssch: | ||
77 | ssch 0(%r3) # load chunk of IPL_BS bytes | ||
78 | bnz .Llderr | ||
79 | .Lw4end: | ||
80 | bas %r14,.Lwait4io | ||
81 | tm 8(%r5),0x82 # do we have a problem ? | ||
82 | bnz .Lrecov | ||
83 | slr %r7,%r7 | ||
84 | icm %r7,3,10(%r5) # get residual count | ||
85 | lcr %r7,%r7 | ||
86 | la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read | ||
87 | ar %r2,%r7 # add to total size | ||
88 | tm 8(%r5),0x01 # found a tape mark ? | ||
89 | bnz .Ldone | ||
90 | l %r0,.Lccwread+4 # update CCW data addresses | ||
91 | ar %r0,%r7 | ||
92 | st %r0,.Lccwread+4 | ||
93 | b .Lldlp | ||
94 | .Ldone: | ||
95 | l %r14,.Lldret | ||
96 | br %r14 # r2 contains the total size | ||
97 | .Lrecov: | ||
98 | bas %r14,.Lsense # do the sensing | ||
99 | bct %r6,.Lssch # dec. retry count & branch | ||
100 | b .Llderr | ||
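The byte counting in the load loop above works from the residual count in the IRB: each CCW asks for IPL_BS bytes, so IPL_BS minus the residual is what was actually transferred, and that amount also advances the CCW data address for the next chunk. In C:

/* Sketch of the per-chunk bookkeeping in .Lldlp; IPL_BS as defined above
 * for the tape case. */
#define IPL_BS 1024

struct load_state {
        unsigned int total;      /* running total, kept in %r2 */
        unsigned int data_addr;  /* CCW data address, .Lccwread+4 */
};

static void account_chunk(struct load_state *s, unsigned int residual)
{
        unsigned int got = IPL_BS - residual; /* lcr %r7,%r7 ; la %r7,IPL_BS(%r7) */

        s->total += got;                      /* ar %r2,%r7 */
        s->data_addr += got;                  /* l/ar/st on .Lccwread+4 */
}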
101 | # | ||
102 | # Sense subroutine | ||
103 | # | ||
104 | .Lsense: | ||
105 | st %r14,.Lsnsret | ||
106 | la %r7,.Lorbsense | ||
107 | ssch 0(%r7) # start sense command | ||
108 | bnz .Llderr | ||
109 | bas %r14,.Lwait4io | ||
110 | l %r14,.Lsnsret | ||
111 | tm 8(%r5),0x82 # do we have a problem ? | ||
112 | bnz .Llderr | ||
113 | br %r14 | ||
114 | # | ||
115 | # Wait for interrupt subroutine | ||
116 | # | ||
117 | .Lwait4io: | ||
118 | lpsw .Lwaitpsw | ||
119 | .Lioint: | ||
120 | c %r1,0xb8 # compare subchannel number | ||
121 | bne .Lwait4io | ||
122 | tsch 0(%r5) | ||
123 | slr %r0,%r0 | ||
124 | tm 8(%r5),0x82 # do we have a problem ? | ||
125 | bnz .Lwtexit | ||
126 | tm 8(%r5),0x04 # got device end ? | ||
127 | bz .Lwait4io | ||
128 | .Lwtexit: | ||
129 | br %r14 | ||
130 | .Llderr: | ||
131 | lpsw .Lcrash | ||
132 | |||
133 | .align 8 | ||
134 | .Lorbread: | ||
135 | .long 0x00000000,0x0080ff00,.Lccwread | ||
136 | .align 8 | ||
137 | .Lorbsense: | ||
138 | .long 0x00000000,0x0080ff00,.Lccwsense | ||
139 | .align 8 | ||
140 | .Lccwread: | ||
141 | .long 0x02200000+IPL_BS,0x00000000 | ||
142 | .Lccwsense: | ||
143 | .long 0x04200001,0x00000000 | ||
144 | .Lwaitpsw: | ||
145 | .long 0x020a0000,0x80000000+.Lioint | ||
146 | |||
147 | .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
148 | .Lcr6: .long 0xff000000 | ||
149 | .align 8 | ||
150 | .Lcrash:.long 0x000a0000,0x00000000 | ||
151 | .Lldret:.long 0 | ||
152 | .Lsnsret: .long 0 | ||
153 | #endif /* CONFIG_IPL_TAPE */ | ||
154 | |||
155 | #ifdef CONFIG_IPL_VM | ||
156 | #define IPL_BS 0x730 | ||
157 | .org 0 | ||
158 | .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded | ||
159 | .long 0x02000018,0x60000050 # by ipl to addresses 0-23. | ||
160 | .long 0x02000068,0x60000050 # (a PSW and two CCWs). | ||
161 | .fill 80-24,1,0x40 # bytes 24-79 are discarded !! | ||
162 | .long 0x020000f0,0x60000050 # The next 160 bytes are loaded | ||
163 | .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 | ||
164 | .long 0x02000190,0x60000050 # They form the continuation | ||
165 | .long 0x020001e0,0x60000050 # of the CCW program started | ||
166 | .long 0x02000230,0x60000050 # by ipl and load the range | ||
167 | .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image | ||
168 | .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 | ||
169 | .long 0x02000320,0x60000050 # in memory. At the end of | ||
170 | .long 0x02000370,0x60000050 # the channel program the PSW | ||
171 | .long 0x020003c0,0x60000050 # at location 0 is loaded. | ||
172 | .long 0x02000410,0x60000050 # Initial processing starts | ||
173 | .long 0x02000460,0x60000050 # at 0xf0 = iplstart. | ||
174 | .long 0x020004b0,0x60000050 | ||
175 | .long 0x02000500,0x60000050 | ||
176 | .long 0x02000550,0x60000050 | ||
177 | .long 0x020005a0,0x60000050 | ||
178 | .long 0x020005f0,0x60000050 | ||
179 | .long 0x02000640,0x60000050 | ||
180 | .long 0x02000690,0x60000050 | ||
181 | .long 0x020006e0,0x20000050 | ||
182 | |||
183 | .org 0xf0 | ||
184 | # | ||
185 | # subroutine for loading cards from the reader | ||
186 | # | ||
187 | .Lloader: | ||
188 | la %r3,.Lorb # r3 = address of orb | ||
189 | la %r5,.Lirb # r5 = address of irb | ||
190 | la %r6,.Lccws | ||
191 | la %r7,20 | ||
192 | .Linit: | ||
193 | st %r2,4(%r6) # initialize CCW data addresses | ||
194 | la %r2,0x50(%r2) | ||
195 | la %r6,8(%r6) | ||
196 | bct 7,.Linit | ||
197 | |||
198 | lctl %c6,%c6,.Lcr6 # set IO subclass mask | ||
199 | slr %r2,%r2 | ||
200 | .Lldlp: | ||
201 | ssch 0(%r3) # load chunk of 1600 bytes | ||
202 | bnz .Llderr | ||
203 | .Lwait4irq: | ||
204 | mvc __LC_IO_NEW_PSW(8),.Lnewpsw # set up IO interrupt psw | ||
205 | lpsw .Lwaitpsw | ||
206 | .Lioint: | ||
207 | c %r1,0xb8 # compare subchannel number | ||
208 | bne .Lwait4irq | ||
209 | tsch 0(%r5) | ||
210 | |||
211 | slr %r0,%r0 | ||
212 | ic %r0,8(%r5) # get device status | ||
213 | chi %r0,8 # channel end ? | ||
214 | be .Lcont | ||
215 | chi %r0,12 # channel end + device end ? | ||
216 | be .Lcont | ||
217 | |||
218 | l %r0,4(%r5) | ||
219 | s %r0,8(%r3) # r0/8 = number of ccws executed | ||
220 | mhi %r0,10 # *10 = number of bytes in ccws | ||
221 | lh %r3,10(%r5) # get residual count | ||
222 | sr %r0,%r3 # #ccws*80-residual=#bytes read | ||
223 | ar %r2,%r0 | ||
224 | |||
225 | br %r14 # r2 contains the total size | ||
226 | |||
227 | .Lcont: | ||
228 | ahi %r2,0x640 # add 0x640 to total size | ||
229 | la %r6,.Lccws | ||
230 | la %r7,20 | ||
231 | .Lincr: | ||
232 | l %r0,4(%r6) # update CCW data addresses | ||
233 | ahi %r0,0x640 | ||
234 | st %r0,4(%r6) | ||
235 | ahi %r6,8 | ||
236 | bct 7,.Lincr | ||
237 | |||
238 | b .Lldlp | ||
239 | .Llderr: | ||
240 | lpsw .Lcrash | ||
241 | |||
242 | .align 8 | ||
243 | .Lorb: .long 0x00000000,0x0080ff00,.Lccws | ||
244 | .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
245 | .Lcr6: .long 0xff000000 | ||
246 | .Lloadp:.long 0,0 | ||
247 | .align 8 | ||
248 | .Lcrash:.long 0x000a0000,0x00000000 | ||
249 | .Lnewpsw: | ||
250 | .long 0x00080000,0x80000000+.Lioint | ||
251 | .Lwaitpsw: | ||
252 | .long 0x020a0000,0x80000000+.Lioint | ||
253 | |||
254 | .align 8 | ||
255 | .Lccws: .rept 19 | ||
256 | .long 0x02600050,0x00000000 | ||
257 | .endr | ||
258 | .long 0x02200050,0x00000000 | ||
259 | #endif /* CONFIG_IPL_VM */ | ||
260 | |||
261 | iplstart: | ||
262 | lh %r1,0xb8 # test if subchannel number | ||
263 | bct %r1,.Lnoload # is valid | ||
264 | l %r1,0xb8 # load ipl subchannel number | ||
265 | la %r2,IPL_BS # load start address | ||
266 | bas %r14,.Lloader # load rest of ipl image | ||
267 | l %r12,.Lparm # pointer to parameter area | ||
268 | st %r1,IPL_DEVICE-PARMAREA(%r12) # store ipl device number | ||
269 | |||
270 | # | ||
271 | # load parameter file from ipl device | ||
272 | # | ||
273 | .Lagain1: | ||
274 | l %r2,INITRD_START-PARMAREA(%r12) # use ramdisk location as temp | ||
275 | bas %r14,.Lloader # load parameter file | ||
276 | ltr %r2,%r2 # got anything ? | ||
277 | bz .Lnopf | ||
278 | chi %r2,895 | ||
279 | bnh .Lnotrunc | ||
280 | la %r2,895 | ||
281 | .Lnotrunc: | ||
282 | l %r4,INITRD_START-PARMAREA(%r12) | ||
283 | clc 0(3,%r4),.L_hdr # if it is HDRx | ||
284 | bz .Lagain1 # skip dataset header | ||
285 | clc 0(3,%r4),.L_eof # if it is EOFx | ||
286 | bz .Lagain1 # skip dataset trailer | ||
287 | la %r5,0(%r4,%r2) | ||
288 | lr %r3,%r2 | ||
289 | .Lidebc: | ||
290 | tm 0(%r5),0x80 # high order bit set ? | ||
291 | bo .Ldocv # yes -> convert from EBCDIC | ||
292 | ahi %r5,-1 | ||
293 | bct %r3,.Lidebc | ||
294 | b .Lnocv | ||
295 | .Ldocv: | ||
296 | l %r3,.Lcvtab | ||
297 | tr 0(256,%r4),0(%r3) # convert parameters to ascii | ||
298 | tr 256(256,%r4),0(%r3) | ||
299 | tr 512(256,%r4),0(%r3) | ||
300 | tr 768(122,%r4),0(%r3) | ||
301 | .Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line | ||
302 | mvc 0(256,%r3),0(%r4) | ||
303 | mvc 256(256,%r3),256(%r4) | ||
304 | mvc 512(256,%r3),512(%r4) | ||
305 | mvc 768(122,%r3),768(%r4) | ||
306 | slr %r0,%r0 | ||
307 | b .Lcntlp | ||
308 | .Ldelspc: | ||
309 | ic %r0,0(%r2,%r3) | ||
310 | chi %r0,0x20 # is it a space ? | ||
311 | be .Lcntlp | ||
312 | ahi %r2,1 | ||
313 | b .Leolp | ||
314 | .Lcntlp: | ||
315 | brct %r2,.Ldelspc | ||
316 | .Leolp: | ||
317 | slr %r0,%r0 | ||
318 | stc %r0,0(%r2,%r3) # terminate buffer | ||
319 | .Lnopf: | ||
320 | |||
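The tr chain above (together with the mvc copies) converts the parameter text from EBCDIC to ASCII by running every byte through the 256-entry _ebcasc table; the loop that follows then trims trailing blanks and NUL-terminates the buffer. The translation step in C, with the table passed in rather than referenced directly:

/* C equivalent of the "tr" instructions above: one table lookup per byte.
 * The caller would supply the _ebcasc table; it is not defined here. */
static void ebcdic_to_ascii(unsigned char *buf, unsigned int len,
                            const unsigned char table[256])
{
        unsigned int i;

        for (i = 0; i < len; i++)
                buf[i] = table[buf[i]];
}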
321 | # | ||
322 | # load ramdisk from ipl device | ||
323 | # | ||
324 | .Lagain2: | ||
325 | l %r2,INITRD_START-PARMAREA(%r12) # load adr. of ramdisk | ||
326 | bas %r14,.Lloader # load ramdisk | ||
327 | st %r2,INITRD_SIZE-PARMAREA(%r12) # store size of ramdisk | ||
328 | ltr %r2,%r2 | ||
329 | bnz .Lrdcont | ||
330 | st %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found, null it | ||
331 | .Lrdcont: | ||
332 | l %r2,INITRD_START-PARMAREA(%r12) | ||
333 | |||
334 | clc 0(3,%r2),.L_hdr # skip HDRx and EOFx | ||
335 | bz .Lagain2 | ||
336 | clc 0(3,%r2),.L_eof | ||
337 | bz .Lagain2 | ||
338 | |||
339 | #ifdef CONFIG_IPL_VM | ||
340 | # | ||
341 | # reset files in VM reader | ||
342 | # | ||
343 | stidp __LC_CPUID # store cpuid | ||
344 | tm __LC_CPUID,0xff # running VM ? | ||
345 | bno .Lnoreset | ||
346 | la %r2,.Lreset | ||
347 | lhi %r3,26 | ||
348 | .long 0x83230008 | ||
349 | .Lnoreset: | ||
350 | #endif | ||
351 | |||
352 | # | ||
353 | # everything loaded, go for it | ||
354 | # | ||
355 | .Lnoload: | ||
356 | l %r1,.Lstartup | ||
357 | br %r1 | ||
358 | |||
359 | .Lparm: .long PARMAREA | ||
360 | .Lstartup: .long startup | ||
361 | .Lcvtab:.long _ebcasc # ebcdic to ascii table | ||
362 | .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 | ||
363 | .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 | ||
364 | .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" | ||
365 | .L_eof: .long 0xc5d6c600 /* C'EOF' */ | ||
366 | .L_hdr: .long 0xc8c4d900 /* C'HDR' */ | ||
367 | |||
368 | #endif /* CONFIG_IPL */ | ||
369 | |||
370 | # | ||
371 | # SALIPL loader support. Based on a patch by Rob van der Heij. | ||
372 | # This entry point is called directly from the SALIPL loader and | ||
373 | # doesn't need a builtin ipl record. | ||
374 | # | ||
375 | .org 0x800 | ||
376 | .globl start | ||
377 | start: | ||
378 | stm %r0,%r15,0x07b0 # store registers | ||
379 | basr %r12,%r0 | ||
380 | .base: | ||
381 | l %r11,.parm | ||
382 | l %r8,.cmd # pointer to command buffer | ||
383 | |||
384 | ltr %r9,%r9 # do we have SALIPL parameters? | ||
385 | bp .sk8x8 | ||
386 | |||
387 | mvc 0(64,%r8),0x00b0 # copy saved registers | ||
388 | xc 64(240-64,%r8),0(%r8) # remainder of buffer | ||
389 | tr 0(64,%r8),.lowcase | ||
390 | b .gotr | ||
391 | .sk8x8: | ||
392 | mvc 0(240,%r8),0(%r9) # copy iplparms into buffer | ||
393 | .gotr: | ||
394 | l %r10,.tbl # EBCDIC to ASCII table | ||
395 | tr 0(240,%r8),0(%r10) | ||
396 | stidp __LC_CPUID # Are we running under VM ? | ||
397 | cli __LC_CPUID,0xff | ||
398 | bnz .test | ||
399 | .long 0x83300060 # diag 3,0,x'0060' - storage size | ||
400 | b .done | ||
401 | .test: | ||
402 | mvc 0x68(8),.pgmnw # set up pgm check handler | ||
403 | l %r2,.fourmeg | ||
404 | lr %r3,%r2 | ||
405 | bctr %r3,%r0 # 4M-1 | ||
406 | .loop: iske %r0,%r3 | ||
407 | ar %r3,%r2 | ||
408 | .pgmx: | ||
409 | sr %r3,%r2 | ||
410 | la %r3,1(%r3) | ||
411 | .done: | ||
412 | l %r1,.memsize | ||
413 | st %r3,0(%r1) | ||
414 | slr %r0,%r0 | ||
415 | st %r0,INITRD_SIZE-PARMAREA(%r11) | ||
416 | st %r0,INITRD_START-PARMAREA(%r11) | ||
417 | j startup # continue with startup | ||
418 | .tbl: .long _ebcasc # translate table | ||
419 | .cmd: .long COMMAND_LINE # address of command line buffer | ||
420 | .parm: .long PARMAREA | ||
421 | .memsize: .long memory_size | ||
422 | .fourmeg: .long 0x00400000 # 4M | ||
423 | .pgmnw: .long 0x00080000,.pgmx | ||
424 | .lowcase: | ||
425 | .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 | ||
426 | .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f | ||
427 | .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 | ||
428 | .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f | ||
429 | .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 | ||
430 | .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f | ||
431 | .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 | ||
432 | .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f | ||
433 | .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 | ||
434 | .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f | ||
435 | .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 | ||
436 | .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f | ||
437 | .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 | ||
438 | .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f | ||
439 | .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 | ||
440 | .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f | ||
441 | |||
442 | .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 | ||
443 | .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f | ||
444 | .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 | ||
445 | .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f | ||
446 | .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 | ||
447 | .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf | ||
448 | .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 | ||
449 | .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf | ||
450 | .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg | ||
451 | .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi | ||
452 | .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop | ||
453 | .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr | ||
454 | .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx | ||
455 | .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz | ||
456 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 | ||
457 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff | ||
458 | |||
459 | # | ||
460 | # startup-code at 0x10000, running in real mode | ||
461 | # this is called either by the ipl loader or directly by PSW restart | ||
462 | # or linload or SALIPL | ||
463 | # | ||
464 | .org 0x10000 | ||
465 | startup:basr %r13,0 # get base | ||
466 | .LPG1: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | ||
467 | la %r12,_pstart-.LPG1(%r13) # pointer to parameter area | ||
468 | # move IPL device to lowcore | ||
469 | mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) | ||
470 | |||
471 | # | ||
472 | # clear bss memory | ||
473 | # | ||
474 | l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss | ||
475 | l %r3,.Lbss_end-.LPG1(%r13) # end of bss | ||
476 | sr %r3,%r2 # length of bss | ||
477 | sr %r4,%r4 # | ||
478 | sr %r5,%r5 # set src,length and pad to zero | ||
479 | sr %r0,%r0 # | ||
480 | mvcle %r2,%r4,0 # clear mem | ||
481 | jo .-4 # branch back, if not finished | ||
482 | |||
483 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
484 | .Lservicecall: | ||
485 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
486 | |||
487 | stctl %c0,%c0,.Lcr-.LPG1(%r13) # get cr0 | ||
488 | la %r1,0x200 # set bit 22 | ||
489 | o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
490 | st %r1,.Lcr-.LPG1(%r13) | ||
491 | lctl %c0,%c0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
492 | |||
493 | mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw | ||
494 | la %r1, .Lsclph-.LPG1(%r13) | ||
495 | a %r1,__LC_EXT_NEW_PSW+4 # set handler | ||
496 | st %r1,__LC_EXT_NEW_PSW+4 | ||
497 | |||
498 | la %r4,_pstart-.LPG1(%r13) # %r4 is our index for sccb stuff | ||
499 | la %r1, .Lsccb-PARMAREA(%r4) # our sccb | ||
500 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
501 | ipm %r1 | ||
502 | srl %r1,28 # get cc code | ||
503 | xr %r3, %r3 | ||
504 | chi %r1,3 | ||
505 | be .Lfchunk-.LPG1(%r13) # leave | ||
506 | chi %r1,2 | ||
507 | be .Lservicecall-.LPG1(%r13) | ||
508 | lpsw .Lwaitsclp-.LPG1(%r13) | ||
509 | .Lsclph: | ||
510 | lh %r1,.Lsccbr-PARMAREA(%r4) | ||
511 | chi %r1,0x10 # 0x0010 is the success code | ||
512 | je .Lprocsccb # let's process the sccb | ||
513 | chi %r1,0x1f0 | ||
514 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
515 | c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
516 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
517 | l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP | ||
518 | b .Lservicecall-.LPG1(%r13) | ||
519 | .Lprocsccb: | ||
520 | lh %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0 | ||
521 | chi %r1,0x00 | ||
522 | jne .Lscnd | ||
523 | l %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one | ||
524 | .Lscnd: | ||
525 | xr %r3,%r3 # same logic | ||
526 | ic %r3,.Lscpa1-PARMAREA(%r4) | ||
527 | chi %r3,0x00 | ||
528 | jne .Lcompmem | ||
529 | l %r3,.Lscpa2-PARMAREA(%r4) | ||
530 | .Lcompmem: | ||
531 | mr %r2,%r1 # mem in MB on 128-bit | ||
532 | l %r1,.Lonemb-.LPG1(%r13) | ||
533 | mr %r2,%r1 # mem size in bytes in %r3 | ||
534 | b .Lfchunk-.LPG1(%r13) | ||
535 | |||
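The arithmetic at the end of the service-call sequence combines two SCCB fields: a storage increment count (scpincr1, falling back to scpincr2 when it is zero) and an increment size (scpa1, falling back to scpa2), multiplies them to get the storage size in megabytes, and then scales by .Lonemb to get bytes. The field interpretation here is my reading of the code, so treat the naming as an assumption; the computation itself is:

/* Sketch of the memory-size arithmetic in .Lprocsccb/.Lcompmem above. */
static unsigned long long sccb_mem_bytes(unsigned int scpincr1,
                                         unsigned long long scpincr2,
                                         unsigned int scpa1, unsigned int scpa2)
{
        unsigned long long increments = scpincr1 ? scpincr1 : scpincr2;
        unsigned long long incr_mb    = scpa1 ? scpa1 : scpa2;

        return increments * incr_mb * 0x100000ULL;  /* mr ... ; mr with .Lonemb */
}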
536 | .Lpmask: | ||
537 | .byte 0 | ||
538 | .align 8 | ||
539 | .Lpcext:.long 0x00080000,0x80000000 | ||
540 | .Lcr: | ||
541 | .long 0x00 # place holder for cr0 | ||
542 | .Lwaitsclp: | ||
543 | .long 0x020A0000 | ||
544 | .long .Lsclph | ||
545 | .Lrcp: | ||
546 | .int 0x00120001 # Read SCP forced code | ||
547 | .Lrcp2: | ||
548 | .int 0x00020001 # Read SCP code | ||
549 | .Lonemb: | ||
550 | .int 0x100000 | ||
551 | .Lfchunk: | ||
552 | |||
553 | # | ||
554 | # find memory chunks. | ||
555 | # | ||
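The loop below probes storage in 128 KB steps with tprot and records a new entry in the memory_chunk array whenever the access code changes or the probe takes a program check; each entry is three 32-bit words (start, size, access type). A hedged C version of that bookkeeping, with the probe abstracted as a callback:

/* Sketch of the chunk detection below.  The 12-byte entry layout matches
 * the three stores (start, size, type); everything else is illustrative.
 * probe() stands in for tprot and returns the access code, or -1 once the
 * address is past the end of real storage (the program-check case). */
#define CHUNK_STEP (128u * 1024u)            /* la %r1,1 ; sll %r1,17 */

struct mem_chunk_sketch {
        unsigned int start;
        unsigned int size;
        unsigned int type;
};

static int build_chunks(struct mem_chunk_sketch *chunk, int max_chunks,
                        int (*probe)(unsigned int addr))
{
        unsigned int start = 0, end = 0;
        int type = 0, n = 0, cc;

        while (n < max_chunks) {
                cc = probe(end);
                if (cc == type) {            /* same access code: grow the chunk */
                        end += CHUNK_STEP;
                        continue;
                }
                if (end != start) {          /* close the current chunk */
                        chunk[n].start = start;
                        chunk[n].size = end - start;
                        chunk[n].type = (unsigned int)type;
                        n++;
                }
                if (cc < 0)                  /* past the end of storage: done */
                        break;
                start = end;                 /* begin a new chunk here */
                type = cc;
        }
        return n;
}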
556 | lr %r9,%r3 # end of mem | ||
557 | mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13) | ||
558 | la %r1,1 # test in increments of 128KB | ||
559 | sll %r1,17 | ||
560 | l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array | ||
561 | slr %r4,%r4 # set start of chunk to zero | ||
562 | slr %r5,%r5 # set end of chunk to zero | ||
563 | slr %r6,%r6 # set access code to zero | ||
564 | la %r10, MEMORY_CHUNKS # number of chunks | ||
565 | .Lloop: | ||
566 | tprot 0(%r5),0 # test protection of first byte | ||
567 | ipm %r7 | ||
568 | srl %r7,28 | ||
569 | clr %r6,%r7 # compare cc with last access code | ||
570 | be .Lsame-.LPG1(%r13) | ||
571 | b .Lchkmem-.LPG1(%r13) | ||
572 | .Lsame: | ||
573 | ar %r5,%r1 # add 128KB to end of chunk | ||
574 | bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop | ||
575 | .Lchkmem: # > 2GB or tprot got a program check | ||
576 | clr %r4,%r5 # chunk size > 0? | ||
577 | be .Lchkloop-.LPG1(%r13) | ||
578 | st %r4,0(%r3) # store start address of chunk | ||
579 | lr %r0,%r5 | ||
580 | slr %r0,%r4 | ||
581 | st %r0,4(%r3) # store size of chunk | ||
582 | st %r6,8(%r3) # store type of chunk | ||
583 | la %r3,12(%r3) | ||
584 | l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size | ||
585 | st %r5,0(%r4) # store last end to memory size | ||
586 | ahi %r10,-1 # update chunk number | ||
587 | .Lchkloop: | ||
588 | lr %r6,%r7 # set access code to last cc | ||
589 | # we got an exception or we're starting a new | ||
590 | # chunk, we must check if we should | ||
591 | # still try to find valid memory (if we detected | ||
592 | # the amount of available storage), and if we | ||
593 | # have chunks left | ||
594 | xr %r0,%r0 | ||
595 | clr %r0,%r9 # did we detect memory? | ||
596 | je .Ldonemem # if not, leave | ||
597 | chi %r10,0 # do we have chunks left? | ||
598 | je .Ldonemem | ||
599 | alr %r5,%r1 # add 128KB to end of chunk | ||
600 | lr %r4,%r5 # potential new chunk | ||
601 | clr %r5,%r9 # should we go on? | ||
602 | jl .Lloop | ||
603 | .Ldonemem: | ||
604 | l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags | ||
605 | # | ||
606 | # find out if we are running under VM | ||
607 | # | ||
608 | stidp __LC_CPUID # store cpuid | ||
609 | tm __LC_CPUID,0xff # running under VM ? | ||
610 | bno .Lnovm-.LPG1(%r13) | ||
611 | oi 3(%r12),1 # set VM flag | ||
612 | .Lnovm: | ||
613 | lh %r0,__LC_CPUID+4 # get cpu version | ||
614 | chi %r0,0x7490 # running on a P/390 ? | ||
615 | bne .Lnop390-.LPG1(%r13) | ||
616 | oi 3(%r12),4 # set P/390 flag | ||
617 | .Lnop390: | ||
618 | |||
619 | # | ||
620 | # find out if we have an IEEE fpu | ||
621 | # | ||
622 | mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) | ||
623 | efpc %r0,0 # test IEEE extract fpc instruction | ||
624 | oi 3(%r12),2 # set IEEE fpu flag | ||
625 | .Lchkfpu: | ||
626 | |||
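This probe, and the CSP, MVPG and IDTE probes that follow, all use the same trick: point the program-check new PSW at the .Lchk* continuation label, try the instruction, and only set the corresponding machine_flags bit if execution falls through without a fault. As a user-space analogy in C (SIGILL and setjmp stand in for the program-check PSW; this is not how the kernel itself does it):

/* User-space analogy of the probe-and-trap pattern used for the FPU, CSP,
 * MVPG and IDTE checks: install a handler, try the instruction, and treat
 * a trap as "feature absent". */
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_trap(int sig)
{
        (void)sig;
        siglongjmp(probe_env, 1);   /* plays the role of the .Lchk* PSW target */
}

static int has_feature(void (*try_insn)(void))
{
        void (*old)(int) = signal(SIGILL, probe_trap);
        int present = 0;

        if (sigsetjmp(probe_env, 1) == 0) {
                try_insn();         /* e.g. execute the instruction under test */
                present = 1;        /* no trap: set the machine_flags bit */
        }
        signal(SIGILL, old);
        return present;
}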
627 | # | ||
628 | # find out if we have the CSP instruction | ||
629 | # | ||
630 | mvc __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13) | ||
631 | la %r0,0 | ||
632 | lr %r1,%r0 | ||
633 | la %r2,4 | ||
634 | csp %r0,%r2 # Test CSP instruction | ||
635 | oi 3(%r12),8 # set CSP flag | ||
636 | .Lchkcsp: | ||
637 | |||
638 | # | ||
639 | # find out if we have the MVPG instruction | ||
640 | # | ||
641 | mvc __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13) | ||
642 | sr %r0,%r0 | ||
643 | la %r1,0 | ||
644 | la %r2,0 | ||
645 | mvpg %r1,%r2 # Test MVPG instruction | ||
646 | oi 3(%r12),16 # set MVPG flag | ||
647 | .Lchkmvpg: | ||
648 | |||
649 | # | ||
650 | # find out if we have the IDTE instruction | ||
651 | # | ||
652 | mvc __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13) | ||
653 | .long 0xb2b10000 # store facility list | ||
654 | tm 0xc8,0x08 # check bit for clearing-by-ASCE | ||
655 | bno .Lchkidte-.LPG1(%r13) | ||
656 | lhi %r1,2094 | ||
657 | lhi %r2,0 | ||
658 | .long 0xb98e2001 | ||
659 | oi 3(%r12),0x80 # set IDTE flag | ||
660 | .Lchkidte: | ||
661 | |||
662 | lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space, | ||
663 | # virtual and never return ... | ||
664 | .align 8 | ||
665 | .Lentry:.long 0x00080000,0x80000000 + _stext | ||
666 | .Lctl: .long 0x04b50002 # cr0: various things | ||
667 | .long 0 # cr1: primary space segment table | ||
668 | .long .Lduct # cr2: dispatchable unit control table | ||
669 | .long 0 # cr3: instruction authorization | ||
670 | .long 0 # cr4: instruction authorization | ||
671 | .long 0xffffffff # cr5: primary-aste origin | ||
672 | .long 0 # cr6: I/O interrupts | ||
673 | .long 0 # cr7: secondary space segment table | ||
674 | .long 0 # cr8: access registers translation | ||
675 | .long 0 # cr9: tracing off | ||
676 | .long 0 # cr10: tracing off | ||
677 | .long 0 # cr11: tracing off | ||
678 | .long 0 # cr12: tracing off | ||
679 | .long 0 # cr13: home space segment table | ||
680 | .long 0xc0000000 # cr14: machine check handling off | ||
681 | .long 0 # cr15: linkage stack operations | ||
682 | .Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem | ||
683 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu | ||
684 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp | ||
685 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg | ||
686 | .Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte | ||
687 | .Lmemsize:.long memory_size | ||
688 | .Lmchunk:.long memory_chunk | ||
689 | .Lmflags:.long machine_flags | ||
690 | .Lbss_bgn: .long __bss_start | ||
691 | .Lbss_end: .long _end | ||
692 | |||
693 | .org PARMAREA-64 | ||
694 | .Lduct: .long 0,0,0,0,0,0,0,0 | ||
695 | .long 0,0,0,0,0,0,0,0 | ||
696 | |||
697 | # | ||
698 | # params at 10400 (setup.h) | ||
699 | # | ||
700 | .org PARMAREA | ||
701 | .global _pstart | ||
702 | _pstart: | ||
703 | .long 0,0 # IPL_DEVICE | ||
704 | .long 0,RAMDISK_ORIGIN # INITRD_START | ||
705 | .long 0,RAMDISK_SIZE # INITRD_SIZE | ||
706 | |||
707 | .org COMMAND_LINE | ||
708 | .byte "root=/dev/ram0 ro" | ||
709 | .byte 0 | ||
710 | .org 0x11000 | ||
711 | .Lsccb: | ||
712 | .hword 0x1000 # length, one page | ||
713 | .byte 0x00,0x00,0x00 | ||
714 | .byte 0x80 # variable response bit set | ||
715 | .Lsccbr: | ||
716 | .hword 0x00 # response code | ||
717 | .Lscpincr1: | ||
718 | .hword 0x00 | ||
719 | .Lscpa1: | ||
720 | .byte 0x00 | ||
721 | .fill 89,1,0 | ||
722 | .Lscpa2: | ||
723 | .int 0x00 | ||
724 | .Lscpincr2: | ||
725 | .quad 0x00 | ||
726 | .fill 3984,1,0 | ||
727 | .org 0x12000 | ||
728 | .global _pend | ||
729 | _pend: | ||
730 | |||
731 | #ifdef CONFIG_SHARED_KERNEL | ||
732 | .org 0x100000 | ||
733 | #endif | ||
734 | |||
735 | # | ||
736 | # startup-code, running in virtual mode | ||
737 | # | ||
738 | .globl _stext | ||
739 | _stext: basr %r13,0 # get base | ||
740 | .LPG2: | ||
741 | # | ||
742 | # Setup stack | ||
743 | # | ||
744 | l %r15,.Linittu-.LPG2(%r13) | ||
745 | mvc __LC_CURRENT(4),__TI_task(%r15) | ||
746 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | ||
747 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | ||
748 | ahi %r15,-96 | ||
749 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
750 | |||
751 | # check control registers | ||
752 | stctl %c0,%c15,0(%r15) | ||
753 | oi 2(%r15),0x20 # enable sigp external interrupts | ||
754 | oi 0(%r15),0x10 # switch on low address protection | ||
755 | lctl %c0,%c15,0(%r15) | ||
756 | |||
757 | # | ||
758 | lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess | ||
759 | l %r14,.Lstart-.LPG2(%r13) | ||
760 | basr %r14,%r14 # call start_kernel | ||
761 | # | ||
762 | # We returned from start_kernel ?!? PANIC | ||
763 | # | ||
764 | basr %r13,0 | ||
765 | lpsw .Ldw-.(%r13) # load disabled wait psw | ||
766 | # | ||
767 | .align 8 | ||
768 | .Ldw: .long 0x000a0000,0x00000000 | ||
769 | .Linittu: .long init_thread_union | ||
770 | .Lstart: .long start_kernel | ||
771 | .Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
772 | |||
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S new file mode 100644 index 000000000000..8366793bc371 --- /dev/null +++ b/arch/s390/kernel/head64.S | |||
@@ -0,0 +1,769 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/head64.S | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Rob van der Heij (rvdhei@iae.nl) | ||
9 | * | ||
10 | * There are 5 different IPL methods | ||
11 | * 1) load the image directly into ram at address 0 and do a PSW restart | ||
12 | * 2) linload will load the image from address 0x10000 to memory 0x10000 | ||
13 | * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) | ||
14 | * 3) generate the tape ipl header, store the generated image on a tape | ||
15 | * and ipl from it | ||
16 | * In case of SL tape you need to IPL 5 times to get past VOL1 etc | ||
17 | * 4) generate the vm reader ipl header, move the generated image to the | ||
18 | * VM reader (use option NOH!) and do an ipl from reader (VM only) | ||
19 | * 5) direct call of start by the SALIPL loader | ||
20 | * We use the cpuid to distinguish between VM and native ipl | ||
21 | * params for kernel are pushed to 0x10400 (see setup.h) | ||
22 | |||
23 | Changes: | ||
24 | Oct 25 2000 <rvdheij@iae.nl> | ||
25 | added code to skip HDR and EOF to allow SL tape IPL (5 retries) | ||
26 | changed first CCW from rewind to backspace block | ||
27 | |||
28 | */ | ||
29 | |||
30 | #include <linux/config.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/lowcore.h> | ||
33 | #include <asm/offsets.h> | ||
34 | #include <asm/thread_info.h> | ||
35 | #include <asm/page.h> | ||
36 | |||
37 | #ifndef CONFIG_IPL | ||
38 | .org 0 | ||
39 | .long 0x00080000,0x80000000+startup # Just a restart PSW | ||
40 | #else | ||
41 | #ifdef CONFIG_IPL_TAPE | ||
42 | #define IPL_BS 1024 | ||
43 | .org 0 | ||
44 | .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded | ||
45 | .long 0x27000000,0x60000001 # by ipl to addresses 0-23. | ||
46 | .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). | ||
47 | .long 0x00000000,0x00000000 # external old psw | ||
48 | .long 0x00000000,0x00000000 # svc old psw | ||
49 | .long 0x00000000,0x00000000 # program check old psw | ||
50 | .long 0x00000000,0x00000000 # machine check old psw | ||
51 | .long 0x00000000,0x00000000 # io old psw | ||
52 | .long 0x00000000,0x00000000 | ||
53 | .long 0x00000000,0x00000000 | ||
54 | .long 0x00000000,0x00000000 | ||
55 | .long 0x000a0000,0x00000058 # external new psw | ||
56 | .long 0x000a0000,0x00000060 # svc new psw | ||
57 | .long 0x000a0000,0x00000068 # program check new psw | ||
58 | .long 0x000a0000,0x00000070 # machine check new psw | ||
59 | .long 0x00080000,0x80000000+.Lioint # io new psw | ||
60 | |||
61 | .org 0x100 | ||
62 | # | ||
63 | # subroutine for loading from tape | ||
64 | # Parameters: | ||
65 | # R1 = device number | ||
66 | # R2 = load address | ||
67 | .Lloader: | ||
68 | st %r14,.Lldret | ||
69 | la %r3,.Lorbread # r3 = address of orb | ||
70 | la %r5,.Lirb # r5 = address of irb | ||
71 | st %r2,.Lccwread+4 # initialize CCW data addresses | ||
72 | lctl %c6,%c6,.Lcr6 | ||
73 | slr %r2,%r2 | ||
74 | .Lldlp: | ||
75 | la %r6,3 # 3 retries | ||
76 | .Lssch: | ||
77 | ssch 0(%r3) # load chunk of IPL_BS bytes | ||
78 | bnz .Llderr | ||
79 | .Lw4end: | ||
80 | bas %r14,.Lwait4io | ||
81 | tm 8(%r5),0x82 # do we have a problem ? | ||
82 | bnz .Lrecov | ||
83 | slr %r7,%r7 | ||
84 | icm %r7,3,10(%r5) # get residual count | ||
85 | lcr %r7,%r7 | ||
86 | la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read | ||
87 | ar %r2,%r7 # add to total size | ||
88 | tm 8(%r5),0x01 # found a tape mark ? | ||
89 | bnz .Ldone | ||
90 | l %r0,.Lccwread+4 # update CCW data addresses | ||
91 | ar %r0,%r7 | ||
92 | st %r0,.Lccwread+4 | ||
93 | b .Lldlp | ||
94 | .Ldone: | ||
95 | l %r14,.Lldret | ||
96 | br %r14 # r2 contains the total size | ||
97 | .Lrecov: | ||
98 | bas %r14,.Lsense # do the sensing | ||
99 | bct %r6,.Lssch # dec. retry count & branch | ||
100 | b .Llderr | ||
101 | # | ||
102 | # Sense subroutine | ||
103 | # | ||
104 | .Lsense: | ||
105 | st %r14,.Lsnsret | ||
106 | la %r7,.Lorbsense | ||
107 | ssch 0(%r7) # start sense command | ||
108 | bnz .Llderr | ||
109 | bas %r14,.Lwait4io | ||
110 | l %r14,.Lsnsret | ||
111 | tm 8(%r5),0x82 # do we have a problem ? | ||
112 | bnz .Llderr | ||
113 | br %r14 | ||
114 | # | ||
115 | # Wait for interrupt subroutine | ||
116 | # | ||
117 | .Lwait4io: | ||
118 | lpsw .Lwaitpsw | ||
119 | .Lioint: | ||
120 | c %r1,0xb8 # compare subchannel number | ||
121 | bne .Lwait4io | ||
122 | tsch 0(%r5) | ||
123 | slr %r0,%r0 | ||
124 | tm 8(%r5),0x82 # do we have a problem ? | ||
125 | bnz .Lwtexit | ||
126 | tm 8(%r5),0x04 # got device end ? | ||
127 | bz .Lwait4io | ||
128 | .Lwtexit: | ||
129 | br %r14 | ||
130 | .Llderr: | ||
131 | lpsw .Lcrash | ||
132 | |||
133 | .align 8 | ||
134 | .Lorbread: | ||
135 | .long 0x00000000,0x0080ff00,.Lccwread | ||
136 | .align 8 | ||
137 | .Lorbsense: | ||
138 | .long 0x00000000,0x0080ff00,.Lccwsense | ||
139 | .align 8 | ||
140 | .Lccwread: | ||
141 | .long 0x02200000+IPL_BS,0x00000000 | ||
142 | .Lccwsense: | ||
143 | .long 0x04200001,0x00000000 | ||
144 | .Lwaitpsw: | ||
145 | .long 0x020a0000,0x80000000+.Lioint | ||
146 | |||
147 | .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
148 | .Lcr6: .long 0xff000000 | ||
149 | .align 8 | ||
150 | .Lcrash:.long 0x000a0000,0x00000000 | ||
151 | .Lldret:.long 0 | ||
152 | .Lsnsret: .long 0 | ||
153 | #endif /* CONFIG_IPL_TAPE */ | ||
154 | |||
155 | #ifdef CONFIG_IPL_VM | ||
156 | #define IPL_BS 0x730 | ||
157 | .org 0 | ||
158 | .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded | ||
159 | .long 0x02000018,0x60000050 # by ipl to addresses 0-23. | ||
160 | .long 0x02000068,0x60000050 # (a PSW and two CCWs). | ||
161 | .fill 80-24,1,0x40 # bytes 24-79 are discarded !! | ||
162 | .long 0x020000f0,0x60000050 # The next 160 bytes are loaded | ||
163 | .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 | ||
164 | .long 0x02000190,0x60000050 # They form the continuation | ||
165 | .long 0x020001e0,0x60000050 # of the CCW program started | ||
166 | .long 0x02000230,0x60000050 # by ipl and load the range | ||
167 | .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image | ||
168 | .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 | ||
169 | .long 0x02000320,0x60000050 # in memory. At the end of | ||
170 | .long 0x02000370,0x60000050 # the channel program the PSW | ||
171 | .long 0x020003c0,0x60000050 # at location 0 is loaded. | ||
172 | .long 0x02000410,0x60000050 # Initial processing starts | ||
173 | .long 0x02000460,0x60000050 # at 0xf0 = iplstart. | ||
174 | .long 0x020004b0,0x60000050 | ||
175 | .long 0x02000500,0x60000050 | ||
176 | .long 0x02000550,0x60000050 | ||
177 | .long 0x020005a0,0x60000050 | ||
178 | .long 0x020005f0,0x60000050 | ||
179 | .long 0x02000640,0x60000050 | ||
180 | .long 0x02000690,0x60000050 | ||
181 | .long 0x020006e0,0x20000050 | ||
182 | |||
183 | .org 0xf0 | ||
184 | # | ||
185 | # subroutine for loading cards from the reader | ||
186 | # | ||
187 | .Lloader: | ||
188 | la %r3,.Lorb # r3 = address of orb | ||
189 | la %r5,.Lirb # r5 = address of irb | ||
190 | la %r6,.Lccws | ||
191 | la %r7,20 | ||
192 | .Linit: | ||
193 | st %r2,4(%r6) # initialize CCW data addresses | ||
194 | la %r2,0x50(%r2) | ||
195 | la %r6,8(%r6) | ||
196 | bct 7,.Linit | ||
197 | |||
198 | lctl %c6,%c6,.Lcr6 # set IO subclass mask | ||
199 | slr %r2,%r2 | ||
200 | .Lldlp: | ||
201 | ssch 0(%r3) # load chunk of 1600 bytes | ||
202 | bnz .Llderr | ||
203 | .Lwait4irq: | ||
204 | mvc 0x78(8),.Lnewpsw # set up IO interrupt psw | ||
205 | lpsw .Lwaitpsw | ||
206 | .Lioint: | ||
207 | c %r1,0xb8 # compare subchannel number | ||
208 | bne .Lwait4irq | ||
209 | tsch 0(%r5) | ||
210 | |||
211 | slr %r0,%r0 | ||
212 | ic %r0,8(%r5) # get device status | ||
213 | chi %r0,8 # channel end ? | ||
214 | be .Lcont | ||
215 | chi %r0,12 # channel end + device end ? | ||
216 | be .Lcont | ||
217 | |||
218 | l %r0,4(%r5) | ||
219 | s %r0,8(%r3) # r0/8 = number of ccws executed | ||
220 | mhi %r0,10 # *10 = number of bytes in ccws | ||
221 | lh %r3,10(%r5) # get residual count | ||
222 | sr %r0,%r3 # #ccws*80-residual=#bytes read | ||
223 | ar %r2,%r0 | ||
224 | |||
225 | br %r14 # r2 contains the total size | ||
226 | |||
227 | .Lcont: | ||
228 | ahi %r2,0x640 # add 0x640 to total size | ||
229 | la %r6,.Lccws | ||
230 | la %r7,20 | ||
231 | .Lincr: | ||
232 | l %r0,4(%r6) # update CCW data addresses | ||
233 | ahi %r0,0x640 | ||
234 | st %r0,4(%r6) | ||
235 | ahi %r6,8 | ||
236 | bct 7,.Lincr | ||
237 | |||
238 | b .Lldlp | ||
239 | .Llderr: | ||
240 | lpsw .Lcrash | ||
241 | |||
242 | .align 8 | ||
243 | .Lorb: .long 0x00000000,0x0080ff00,.Lccws | ||
244 | .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
245 | .Lcr6: .long 0xff000000 | ||
246 | .Lloadp:.long 0,0 | ||
247 | .align 8 | ||
248 | .Lcrash:.long 0x000a0000,0x00000000 | ||
249 | .Lnewpsw: | ||
250 | .long 0x00080000,0x80000000+.Lioint | ||
251 | .Lwaitpsw: | ||
252 | .long 0x020a0000,0x80000000+.Lioint | ||
253 | |||
254 | .align 8 | ||
255 | .Lccws: .rept 19 | ||
256 | .long 0x02600050,0x00000000 | ||
257 | .endr | ||
258 | .long 0x02200050,0x00000000 | ||
259 | #endif /* CONFIG_IPL_VM */ | ||
260 | |||
261 | iplstart: | ||
262 | lh %r1,0xb8 # test if subchannel number | ||
263 | bct %r1,.Lnoload # is valid | ||
264 | l %r1,0xb8 # load ipl subchannel number | ||
265 | la %r2,IPL_BS # load start address | ||
266 | bas %r14,.Lloader # load rest of ipl image | ||
267 | larl %r12,_pstart # pointer to parameter area | ||
268 | st %r1,IPL_DEVICE+4-PARMAREA(%r12) # store ipl device number | ||
269 | |||
270 | # | ||
271 | # load parameter file from ipl device | ||
272 | # | ||
273 | .Lagain1: | ||
274 | l %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp | ||
275 | bas %r14,.Lloader # load parameter file | ||
276 | ltr %r2,%r2 # got anything ? | ||
277 | bz .Lnopf | ||
278 | chi %r2,895 | ||
279 | bnh .Lnotrunc | ||
280 | la %r2,895 | ||
281 | .Lnotrunc: | ||
282 | l %r4,INITRD_START+4-PARMAREA(%r12) | ||
283 | clc 0(3,%r4),.L_hdr # if it is HDRx | ||
284 | bz .Lagain1 # skip dataset header | ||
285 | clc 0(3,%r4),.L_eof # if it is EOFx | ||
286 | bz .Lagain1 # skip dataset trailer | ||
287 | la %r5,0(%r4,%r2) | ||
288 | lr %r3,%r2 | ||
289 | .Lidebc: | ||
290 | tm 0(%r5),0x80 # high order bit set ? | ||
291 | bo .Ldocv # yes -> convert from EBCDIC | ||
292 | ahi %r5,-1 | ||
293 | bct %r3,.Lidebc | ||
294 | b .Lnocv | ||
295 | .Ldocv: | ||
296 | l %r3,.Lcvtab | ||
297 | tr 0(256,%r4),0(%r3) # convert parameters to ascii | ||
298 | tr 256(256,%r4),0(%r3) | ||
299 | tr 512(256,%r4),0(%r3) | ||
300 | tr 768(122,%r4),0(%r3) | ||
301 | .Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line | ||
302 | mvc 0(256,%r3),0(%r4) | ||
303 | mvc 256(256,%r3),256(%r4) | ||
304 | mvc 512(256,%r3),512(%r4) | ||
305 | mvc 768(122,%r3),768(%r4) | ||
306 | slr %r0,%r0 | ||
307 | b .Lcntlp | ||
308 | .Ldelspc: | ||
309 | ic %r0,0(%r2,%r3) | ||
310 | chi %r0,0x20 # is it a space ? | ||
311 | be .Lcntlp | ||
312 | ahi %r2,1 | ||
313 | b .Leolp | ||
314 | .Lcntlp: | ||
315 | brct %r2,.Ldelspc | ||
316 | .Leolp: | ||
317 | slr %r0,%r0 | ||
318 | stc %r0,0(%r2,%r3) # terminate buffer | ||
319 | .Lnopf: | ||
320 | |||
321 | # | ||
322 | # load ramdisk from ipl device | ||
323 | # | ||
324 | .Lagain2: | ||
325 | l %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk | ||
326 | bas %r14,.Lloader # load ramdisk | ||
327 | st %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk | ||
328 | ltr %r2,%r2 | ||
329 | bnz .Lrdcont | ||
330 | st %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it | ||
331 | .Lrdcont: | ||
332 | l %r2,INITRD_START+4-PARMAREA(%r12) | ||
333 | clc 0(3,%r2),.L_hdr # skip HDRx and EOFx | ||
334 | bz .Lagain2 | ||
335 | clc 0(3,%r2),.L_eof | ||
336 | bz .Lagain2 | ||
337 | |||
338 | #ifdef CONFIG_IPL_VM | ||
339 | # | ||
340 | # reset files in VM reader | ||
341 | # | ||
342 | stidp __LC_CPUID # store cpuid | ||
343 | tm __LC_CPUID,0xff # running VM ? | ||
344 | bno .Lnoreset | ||
345 | la %r2,.Lreset | ||
346 | lhi %r3,26 | ||
347 | .long 0x83230008 | ||
348 | .Lnoreset: | ||
349 | #endif | ||
350 | |||
351 | # | ||
352 | # everything loaded, go for it | ||
353 | # | ||
354 | .Lnoload: | ||
355 | l %r1,.Lstartup | ||
356 | br %r1 | ||
357 | |||
358 | .Lstartup: .long startup | ||
359 | .Lcvtab:.long _ebcasc # ebcdic to ascii table | ||
360 | .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 | ||
361 | .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 | ||
362 | .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" | ||
363 | .L_eof: .long 0xc5d6c600 /* C'EOF' */ | ||
364 | .L_hdr: .long 0xc8c4d900 /* C'HDR' */ | ||
365 | #endif /* CONFIG_IPL */ | ||
366 | |||
367 | # | ||
368 | # SALIPL loader support. Based on a patch by Rob van der Heij. | ||
369 | # This entry point is called directly from the SALIPL loader and | ||
370 | # doesn't need a builtin ipl record. | ||
371 | # | ||
372 | .org 0x800 | ||
373 | .globl start | ||
374 | start: | ||
375 | stm %r0,%r15,0x07b0 # store registers | ||
376 | basr %r12,%r0 | ||
377 | .base: | ||
378 | l %r11,.parm | ||
379 | l %r8,.cmd # pointer to command buffer | ||
380 | |||
381 | ltr %r9,%r9 # do we have SALIPL parameters? | ||
382 | bp .sk8x8 | ||
383 | |||
384 | mvc 0(64,%r8),0x00b0 # copy saved registers | ||
385 | xc 64(240-64,%r8),0(%r8) # remainder of buffer | ||
386 | tr 0(64,%r8),.lowcase | ||
387 | b .gotr | ||
388 | .sk8x8: | ||
389 | mvc 0(240,%r8),0(%r9) # copy iplparms into buffer | ||
390 | .gotr: | ||
391 | l %r10,.tbl # EBCDIC to ASCII table | ||
392 | tr 0(240,%r8),0(%r10) | ||
393 | stidp __LC_CPUID # are we running under VM ? | ||
394 | cli __LC_CPUID,0xff | ||
395 | bnz .test | ||
396 | .long 0x83300060 # diag 3,0,x'0060' - storage size | ||
397 | b .done | ||
398 | .test: | ||
399 | mvc 0x68(8),.pgmnw # set up pgm check handler | ||
400 | l %r2,.fourmeg | ||
401 | lr %r3,%r2 | ||
402 | bctr %r3,%r0 # 4M-1 | ||
403 | .loop: iske %r0,%r3 | ||
404 | ar %r3,%r2 | ||
405 | .pgmx: | ||
406 | sr %r3,%r2 | ||
407 | la %r3,1(%r3) | ||
408 | .done: | ||
409 | l %r1,.memsize | ||
410 | st %r3,4(%r1) | ||
411 | slr %r0,%r0 | ||
412 | st %r0,INITRD_SIZE+4-PARMAREA(%r11) | ||
413 | st %r0,INITRD_START+4-PARMAREA(%r11) | ||
414 | j startup # continue with startup | ||
415 | .tbl: .long _ebcasc # translate table | ||
416 | .cmd: .long COMMAND_LINE # address of command line buffer | ||
417 | .parm: .long PARMAREA | ||
418 | .fourmeg: .long 0x00400000 # 4M | ||
419 | .pgmnw: .long 0x00080000,.pgmx | ||
420 | .memsize: .long memory_size | ||
421 | .lowcase: | ||
422 | .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 | ||
423 | .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f | ||
424 | .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 | ||
425 | .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f | ||
426 | .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 | ||
427 | .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f | ||
428 | .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 | ||
429 | .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f | ||
430 | .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 | ||
431 | .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f | ||
432 | .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 | ||
433 | .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f | ||
434 | .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 | ||
435 | .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f | ||
436 | .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 | ||
437 | .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f | ||
438 | |||
439 | .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 | ||
440 | .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f | ||
441 | .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 | ||
442 | .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f | ||
443 | .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 | ||
444 | .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf | ||
445 | .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 | ||
446 | .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf | ||
447 | .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg | ||
448 | .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi | ||
449 | .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop | ||
450 | .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr | ||
451 | .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx | ||
452 | .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz | ||
453 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 | ||
454 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff | ||
455 | |||
456 | # | ||
457 | # startup-code at 0x10000, running in real mode | ||
458 | # this is called either by the ipl loader or directly by PSW restart | ||
459 | # or linload or SALIPL | ||
460 | # | ||
461 | .org 0x10000 | ||
462 | startup:basr %r13,0 # get base | ||
463 | .LPG1: sll %r13,1 # remove high order bit | ||
464 | srl %r13,1 | ||
465 | lhi %r1,1 # mode 1 = esame | ||
466 | slr %r0,%r0 # set cpuid to zero | ||
467 | sigp %r1,%r0,0x12 # switch to esame mode | ||
468 | sam64 # switch to 64 bit mode | ||
469 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | ||
470 | larl %r12,_pstart # pointer to parameter area | ||
471 | # move IPL device to lowcore | ||
472 | mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) | ||
473 | |||
474 | # | ||
475 | # clear bss memory | ||
476 | # | ||
477 | larl %r2,__bss_start # start of bss segment | ||
478 | larl %r3,_end # end of bss segment | ||
479 | sgr %r3,%r2 # length of bss | ||
480 | sgr %r4,%r4 # | ||
481 | sgr %r5,%r5 # set src,length and pad to zero | ||
482 | mvcle %r2,%r4,0 # clear mem | ||
483 | jo .-4 # branch back if not finished | ||
484 | |||
485 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
486 | .Lservicecall: | ||
487 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
488 | |||
489 | stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0 | ||
490 | la %r1,0x200 # set bit 22 | ||
491 | og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
492 | stg %r1,.Lcr-.LPG1(%r13) | ||
493 | lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
494 | |||
495 | mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw | ||
496 | larl %r1,.Lsclph | ||
497 | stg %r1,__LC_EXT_NEW_PSW+8 # set handler | ||
498 | |||
499 | larl %r4,_pstart # %r4 is our index for sccb stuff | ||
500 | la %r1,.Lsccb-PARMAREA(%r4) # our sccb | ||
501 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
502 | ipm %r1 | ||
503 | srl %r1,28 # get cc code | ||
504 | xr %r3,%r3 | ||
505 | chi %r1,3 | ||
506 | be .Lfchunk-.LPG1(%r13) # leave | ||
507 | chi %r1,2 | ||
508 | be .Lservicecall-.LPG1(%r13) | ||
509 | lpsw .Lwaitsclp-.LPG1(%r13) | ||
510 | .Lsclph: | ||
511 | lh %r1,.Lsccbr-PARMAREA(%r4) | ||
512 | chi %r1,0x10 # 0x0010 is the success code | ||
513 | je .Lprocsccb # let's process the sccb | ||
514 | chi %r1,0x1f0 | ||
515 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
516 | c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
517 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
518 | l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP | ||
519 | b .Lservicecall-.LPG1(%r13) | ||
520 | .Lprocsccb: | ||
521 | lh %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0 | ||
522 | chi %r1,0x00 | ||
523 | jne .Lscnd | ||
524 | lg %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one | ||
525 | .Lscnd: | ||
526 | xr %r3,%r3 # same logic | ||
527 | ic %r3,.Lscpa1-PARMAREA(%r4) | ||
528 | chi %r3,0x00 | ||
529 | jne .Lcompmem | ||
530 | l %r3,.Lscpa2-PARMAREA(%r13) | ||
531 | .Lcompmem: | ||
532 | mlgr %r2,%r1 # mem in MB on 128-bit | ||
533 | l %r1,.Lonemb-.LPG1(%r13) | ||
534 | mlgr %r2,%r1 # mem size in bytes in %r3 | ||
535 | b .Lfchunk-.LPG1(%r13) | ||
536 | |||
537 | .Lpmask: | ||
538 | .byte 0 | ||
539 | .align 8 | ||
540 | .Lcr: | ||
541 | .quad 0x00 # place holder for cr0 | ||
542 | .Lwaitsclp: | ||
543 | .long 0x020A0000 | ||
544 | .quad .Lsclph | ||
545 | .Lrcp: | ||
546 | .int 0x00120001 # Read SCP forced code | ||
547 | .Lrcp2: | ||
548 | .int 0x00020001 # Read SCP code | ||
549 | .Lonemb: | ||
550 | .int 0x100000 | ||
551 | |||
552 | .Lfchunk: | ||
553 | # set program check new psw mask | ||
554 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) | ||
555 | |||
556 | # | ||
557 | # find memory chunks. | ||
558 | # | ||
559 | lgr %r9,%r3 # end of mem | ||
560 | larl %r1,.Lchkmem # set program check address | ||
561 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
562 | la %r1,1 # test in increments of 128KB | ||
563 | sllg %r1,%r1,17 | ||
564 | larl %r3,memory_chunk | ||
565 | slgr %r4,%r4 # set start of chunk to zero | ||
566 | slgr %r5,%r5 # set end of chunk to zero | ||
567 | slr %r6,%r6 # set access code to zero | ||
568 | la %r10,MEMORY_CHUNKS # number of chunks | ||
569 | .Lloop: | ||
570 | tprot 0(%r5),0 # test protection of first byte | ||
571 | ipm %r7 | ||
572 | srl %r7,28 | ||
573 | clr %r6,%r7 # compare cc with last access code | ||
574 | je .Lsame | ||
575 | j .Lchkmem | ||
576 | .Lsame: | ||
577 | algr %r5,%r1 # add 128KB to end of chunk | ||
578 | # no need to check here, | ||
579 | brc 12,.Lloop # this is the same chunk | ||
580 | .Lchkmem: # > 16EB or tprot got a program check | ||
581 | clgr %r4,%r5 # chunk size > 0? | ||
582 | je .Lchkloop | ||
583 | stg %r4,0(%r3) # store start address of chunk | ||
584 | lgr %r0,%r5 | ||
585 | slgr %r0,%r4 | ||
586 | stg %r0,8(%r3) # store size of chunk | ||
587 | st %r6,20(%r3) # store type of chunk | ||
588 | la %r3,24(%r3) | ||
589 | larl %r8,memory_size | ||
590 | stg %r5,0(%r8) # store memory size | ||
591 | ahi %r10,-1 # update chunk number | ||
592 | .Lchkloop: | ||
593 | lr %r6,%r7 # set access code to last cc | ||
594 | # We got an exception or we are starting a new | ||
595 | # chunk; we must check whether we should still | ||
596 | # try to find valid memory (i.e. whether we have | ||
597 | # detected the amount of available storage) and | ||
598 | # whether we have chunks left. | ||
599 | lghi %r4,1 | ||
600 | sllg %r4,%r4,31 | ||
601 | clgr %r5,%r4 | ||
602 | je .Lhsaskip | ||
603 | xr %r0, %r0 | ||
604 | clgr %r0, %r9 # did we detect memory? | ||
605 | je .Ldonemem # if not, leave | ||
606 | chi %r10, 0 # do we have chunks left? | ||
607 | je .Ldonemem | ||
608 | .Lhsaskip: | ||
609 | algr %r5,%r1 # add 128KB to end of chunk | ||
610 | lgr %r4,%r5 # potential new chunk | ||
611 | clgr %r5,%r9 # should we go on? | ||
612 | jl .Lloop | ||
613 | .Ldonemem: | ||
614 | |||
615 | larl %r12,machine_flags | ||
616 | # | ||
617 | # find out if we are running under VM | ||
618 | # | ||
619 | stidp __LC_CPUID # store cpuid | ||
620 | tm __LC_CPUID,0xff # running under VM ? | ||
621 | bno 0f-.LPG1(%r13) | ||
622 | oi 7(%r12),1 # set VM flag | ||
623 | 0: lh %r0,__LC_CPUID+4 # get cpu version | ||
624 | chi %r0,0x7490 # running on a P/390 ? | ||
625 | bne 1f-.LPG1(%r13) | ||
626 | oi 7(%r12),4 # set P/390 flag | ||
627 | 1: | ||
628 | |||
629 | # | ||
630 | # find out if we have the MVPG instruction | ||
631 | # | ||
632 | la %r1,0f-.LPG1(%r13) # set program check address | ||
633 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
634 | sgr %r0,%r0 | ||
635 | lghi %r1,0 | ||
636 | lghi %r2,0 | ||
637 | mvpg %r1,%r2 # test MVPG instruction | ||
638 | oi 7(%r12),16 # set MVPG flag | ||
639 | 0: | ||
640 | |||
641 | # | ||
642 | # find out if the diag 0x44 works in 64 bit mode | ||
643 | # | ||
644 | la %r1,0f-.LPG1(%r13) # set program check address | ||
645 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
646 | mvc __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13) | ||
647 | diag 0,0,0x44 # test diag 0x44 | ||
648 | oi 7(%r12),32 # set diag44 flag | ||
649 | mvc __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13) | ||
650 | 0: | ||
651 | |||
652 | # | ||
653 | # find out if we have the IDTE instruction | ||
654 | # | ||
655 | la %r1,0f-.LPG1(%r13) # set program check address | ||
656 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
657 | .long 0xb2b10000 # store facility list | ||
658 | tm 0xc8,0x08 # check bit for clearing-by-ASCE | ||
659 | bno 0f-.LPG1(%r13) | ||
660 | lhi %r1,2094 | ||
661 | lhi %r2,0 | ||
662 | .long 0xb98e2001 | ||
663 | oi 7(%r12),0x80 # set IDTE flag | ||
664 | 0: | ||
665 | |||
666 | lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, | ||
667 | # virtual and never return ... | ||
668 | .align 16 | ||
669 | .Lentry:.quad 0x0000000180000000,_stext | ||
670 | .Lctl: .quad 0x04b50002 # cr0: various things | ||
671 | .quad 0 # cr1: primary space segment table | ||
672 | .quad .Lduct # cr2: dispatchable unit control table | ||
673 | .quad 0 # cr3: instruction authorization | ||
674 | .quad 0 # cr4: instruction authorization | ||
675 | .quad 0xffffffffffffffff # cr5: primary-aste origin | ||
676 | .quad 0 # cr6: I/O interrupts | ||
677 | .quad 0 # cr7: secondary space segment table | ||
678 | .quad 0 # cr8: access registers translation | ||
679 | .quad 0 # cr9: tracing off | ||
680 | .quad 0 # cr10: tracing off | ||
681 | .quad 0 # cr11: tracing off | ||
682 | .quad 0 # cr12: tracing off | ||
683 | .quad 0 # cr13: home space segment table | ||
684 | .quad 0xc0000000 # cr14: machine check handling off | ||
685 | .quad 0 # cr15: linkage stack operations | ||
686 | .Lpcmsk:.quad 0x0000000180000000 | ||
687 | .L4malign:.quad 0xffffffffffc00000 | ||
688 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 | ||
689 | .Lnop: .long 0x07000700 | ||
690 | .Ldiag44:.long 0x83000044 | ||
691 | |||
692 | .org PARMAREA-64 | ||
693 | .Lduct: .long 0,0,0,0,0,0,0,0 | ||
694 | .long 0,0,0,0,0,0,0,0 | ||
695 | |||
696 | # | ||
697 | # params at 0x10400 (setup.h) | ||
698 | # | ||
699 | .org PARMAREA | ||
700 | .global _pstart | ||
701 | _pstart: | ||
702 | .quad 0 # IPL_DEVICE | ||
703 | .quad RAMDISK_ORIGIN # INITRD_START | ||
704 | .quad RAMDISK_SIZE # INITRD_SIZE | ||
705 | |||
706 | .org COMMAND_LINE | ||
707 | .byte "root=/dev/ram0 ro" | ||
708 | .byte 0 | ||
709 | .org 0x11000 | ||
710 | .Lsccb: | ||
711 | .hword 0x1000 # length, one page | ||
712 | .byte 0x00,0x00,0x00 | ||
713 | .byte 0x80 # variable response bit set | ||
714 | .Lsccbr: | ||
715 | .hword 0x00 # response code | ||
716 | .Lscpincr1: | ||
717 | .hword 0x00 | ||
718 | .Lscpa1: | ||
719 | .byte 0x00 | ||
720 | .fill 89,1,0 | ||
721 | .Lscpa2: | ||
722 | .int 0x00 | ||
723 | .Lscpincr2: | ||
724 | .quad 0x00 | ||
725 | .fill 3984,1,0 | ||
726 | .org 0x12000 | ||
727 | .global _pend | ||
728 | _pend: | ||
729 | |||
730 | #ifdef CONFIG_SHARED_KERNEL | ||
731 | .org 0x100000 | ||
732 | #endif | ||
733 | |||
734 | # | ||
735 | # startup-code, running in virtual mode | ||
736 | # | ||
737 | .globl _stext | ||
738 | _stext: basr %r13,0 # get base | ||
739 | .LPG2: | ||
740 | # | ||
741 | # Setup stack | ||
742 | # | ||
743 | larl %r15,init_thread_union | ||
744 | lg %r14,__TI_task(%r15) # cache current in lowcore | ||
745 | stg %r14,__LC_CURRENT | ||
746 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | ||
747 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack | ||
748 | aghi %r15,-160 | ||
749 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
750 | |||
751 | # check control registers | ||
752 | stctg %c0,%c15,0(%r15) | ||
753 | oi 6(%r15),0x20 # enable sigp external interrupts | ||
754 | oi 4(%r15),0x10 # switch on low address protection | ||
755 | lctlg %c0,%c15,0(%r15) | ||
756 | |||
757 | # | ||
758 | lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess | ||
759 | brasl %r14,start_kernel # go to C code | ||
760 | # | ||
761 | # We returned from start_kernel ?!? PANIK | ||
762 | # | ||
763 | basr %r13,0 | ||
764 | lpswe .Ldw-.(%r13) # load disabled wait psw | ||
765 | # | ||
766 | .align 8 | ||
767 | .Ldw: .quad 0x0002000180000000,0x0000000000000000 | ||
768 | .Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
769 | |||
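
The "find memory chunks" loop above probes storage in 128KB steps with tprot, starts a new memory_chunk entry whenever the access code changes or a program check hits unassigned storage, and stops once it runs past the size reported by Read SCP Info or out of chunk slots. A rough user-space model of that bookkeeping, with probe() standing in for tprot and MAX_CHUNKS a placeholder for MEMORY_CHUNKS (the constants here are assumptions, not taken from the kernel headers):

#include <stdio.h>

#define CHUNK_STEP  (128 * 1024UL)      /* probe increment, as in the assembly */
#define MAX_CHUNKS  16                  /* placeholder for MEMORY_CHUNKS */

struct chunk { unsigned long start, size; int type; };

/* toy replacement for tprot: returns an access code, or -1 for a program check */
static int probe(unsigned long addr, unsigned long mem_end)
{
        return addr < mem_end ? 0 : -1;
}

static int find_chunks(unsigned long mem_end, struct chunk *out)
{
        unsigned long start = 0, end = 0;
        int last = probe(0, mem_end), n = 0;

        while (n < MAX_CHUNKS) {
                int cc = probe(end, mem_end);

                if (cc == last && end < mem_end) {      /* same chunk, keep growing */
                        end += CHUNK_STEP;
                        continue;
                }
                if (end > start) {                      /* close the current chunk */
                        out[n].start = start;
                        out[n].size = end - start;
                        out[n].type = last;
                        n++;
                }
                if (cc < 0 || end >= mem_end)           /* past the end of storage */
                        break;
                last = cc;                              /* access code changed: new chunk */
                start = end;
                end += CHUNK_STEP;
        }
        return n;
}

int main(void)
{
        struct chunk c[MAX_CHUNKS];
        int i, n = find_chunks(256 * 1024 * 1024UL, c);

        for (i = 0; i < n; i++)
                printf("chunk %d: start=%#lx size=%#lx type=%d\n",
                       i, c[i].start, c[i].size, c[i].type);
        return 0;
}

In the real code the access code comes from the condition code after tprot and the program check path jumps straight to .Lchkmem; the model only mirrors how chunks are opened, closed and counted.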
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c new file mode 100644 index 000000000000..d73a74013e73 --- /dev/null +++ b/arch/s390/kernel/init_task.c | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/init_task.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * | ||
6 | * Derived from "arch/i386/kernel/init_task.c" | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/init_task.h> | ||
13 | #include <linux/mqueue.h> | ||
14 | |||
15 | #include <asm/uaccess.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | |||
18 | static struct fs_struct init_fs = INIT_FS; | ||
19 | static struct files_struct init_files = INIT_FILES; | ||
20 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
21 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
22 | struct mm_struct init_mm = INIT_MM(init_mm); | ||
23 | |||
24 | EXPORT_SYMBOL(init_mm); | ||
25 | |||
26 | /* | ||
27 | * Initial thread structure. | ||
28 | * | ||
29 | * We need to make sure that this is 8192-byte aligned due to the | ||
30 | * way process stacks are handled. This is done by having a special | ||
31 | * "init_task" linker map entry.. | ||
32 | */ | ||
33 | union thread_union init_thread_union | ||
34 | __attribute__((__section__(".data.init_task"))) = | ||
35 | { INIT_THREAD_INFO(init_task) }; | ||
36 | |||
37 | /* | ||
38 | * Initial task structure. | ||
39 | * | ||
40 | * All other task structs will be allocated on slabs in fork.c | ||
41 | */ | ||
42 | struct task_struct init_task = INIT_TASK(init_task); | ||
43 | |||
44 | EXPORT_SYMBOL(init_task); | ||
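
The alignment requirement in the comment above matters because the thread_info and the kernel stack share one THREAD_SIZE block; on many ports of this vintage the thread_info is recovered from any stack address by masking, while the s390 startup code instead caches current in lowcore (see _stext above). A toy model of the shared block and the mask trick, with the 8192-byte size taken from the comment and everything else purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL              /* assumed 8KB, matching the comment above */

struct task_struct { int pid; };        /* toy stand-ins for the real structures */
struct thread_info { struct task_struct *task; };

union thread_union {
        struct thread_info thread_info; /* sits at the bottom of the block */
        unsigned long stack[THREAD_SIZE / sizeof(unsigned long)];
};

/* recover the thread_info from any address inside the stack by masking;
 * this only works because the union is THREAD_SIZE aligned */
static struct thread_info *ti_from_sp(uintptr_t sp)
{
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
        static union thread_union u __attribute__((aligned(8192)));
        static struct task_struct init_task_model = { 0 };
        uintptr_t sp;

        u.thread_info.task = &init_task_model;
        sp = (uintptr_t)&u.stack[500];  /* some address inside the stack */
        printf("pid found via masking: %d\n", ti_from_sp(sp)->task->pid);
        return 0;
}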
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c new file mode 100644 index 000000000000..480b6a5fef3a --- /dev/null +++ b/arch/s390/kernel/irq.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/irq.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * | ||
8 | * This file contains interrupt related functions. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/kernel_stat.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include <linux/cpu.h> | ||
17 | |||
18 | /* | ||
19 | * show_interrupts is needed by /proc/interrupts. | ||
20 | */ | ||
21 | int show_interrupts(struct seq_file *p, void *v) | ||
22 | { | ||
23 | static const char *intrclass_names[] = { "EXT", "I/O", }; | ||
24 | int i = *(loff_t *) v, j; | ||
25 | |||
26 | if (i == 0) { | ||
27 | seq_puts(p, " "); | ||
28 | for_each_online_cpu(j) | ||
29 | seq_printf(p, "CPU%d ",j); | ||
30 | seq_putc(p, '\n'); | ||
31 | } | ||
32 | |||
33 | if (i < NR_IRQS) { | ||
34 | seq_printf(p, "%s: ", intrclass_names[i]); | ||
35 | #ifndef CONFIG_SMP | ||
36 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
37 | #else | ||
38 | for_each_online_cpu(j) | ||
39 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
40 | #endif | ||
41 | seq_putc(p, '\n'); | ||
42 | |||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * For compatibility only. S/390 specific setup of interrupts et al. is done | ||
50 | * much later in init_channel_subsystem(). | ||
51 | */ | ||
52 | void __init | ||
53 | init_IRQ(void) | ||
54 | { | ||
55 | /* nothing... */ | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Switch to the asynchronous interrupt stack for softirq execution. | ||
60 | */ | ||
61 | extern void __do_softirq(void); | ||
62 | |||
63 | asmlinkage void do_softirq(void) | ||
64 | { | ||
65 | unsigned long flags, old, new; | ||
66 | |||
67 | if (in_interrupt()) | ||
68 | return; | ||
69 | |||
70 | local_irq_save(flags); | ||
71 | |||
72 | account_system_vtime(current); | ||
73 | |||
74 | local_bh_disable(); | ||
75 | |||
76 | if (local_softirq_pending()) { | ||
77 | /* Get current stack pointer. */ | ||
78 | asm volatile("la %0,0(15)" : "=a" (old)); | ||
79 | /* Check against async. stack address range. */ | ||
80 | new = S390_lowcore.async_stack; | ||
81 | if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { | ||
82 | /* Need to switch to the async. stack. */ | ||
83 | new -= STACK_FRAME_OVERHEAD; | ||
84 | ((struct stack_frame *) new)->back_chain = old; | ||
85 | |||
86 | asm volatile(" la 15,0(%0)\n" | ||
87 | " basr 14,%2\n" | ||
88 | " la 15,0(%1)\n" | ||
89 | : : "a" (new), "a" (old), | ||
90 | "a" (__do_softirq) | ||
91 | : "0", "1", "2", "3", "4", "5", "14", | ||
92 | "cc", "memory" ); | ||
93 | } else | ||
94 | /* We are already on the async stack. */ | ||
95 | __do_softirq(); | ||
96 | } | ||
97 | |||
98 | account_system_vtime(current); | ||
99 | |||
100 | __local_bh_enable(); | ||
101 | |||
102 | local_irq_restore(flags); | ||
103 | } | ||
104 | |||
105 | EXPORT_SYMBOL(do_softirq); | ||
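
do_softirq() above decides whether it is already running on the asynchronous interrupt stack by checking whether the current stack pointer and S390_lowcore.async_stack fall in the same THREAD_SIZE-sized region; only when they differ does it build a frame with a back chain and branch onto the other stack. A small user-space model of just that range check, with PAGE_SHIFT and THREAD_ORDER values assumed:

#include <stdio.h>

#define PAGE_SHIFT   12
#define THREAD_ORDER 2          /* assumed; gives a 16KB stack region */

/* mirrors the check in do_softirq():
 * ((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0  ->  switch stacks */
static int need_stack_switch(unsigned long async_stack, unsigned long sp)
{
        return ((async_stack - sp) >> (PAGE_SHIFT + THREAD_ORDER)) != 0;
}

int main(void)
{
        unsigned long async_top = 0x200000UL;   /* pretend top of the async stack */

        /* far below the async stack: a switch is needed */
        printf("%d\n", need_stack_switch(async_top, 0x1f0000UL));
        /* just below the top: already on the async stack */
        printf("%d\n", need_stack_switch(async_top, async_top - 0x100UL));
        return 0;
}

Because the subtraction is unsigned, a stack pointer above the async stack top wraps to a large value and also forces the switch.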
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c new file mode 100644 index 000000000000..607d506689c8 --- /dev/null +++ b/arch/s390/kernel/module.c | |||
@@ -0,0 +1,405 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/module.c - Kernel module help for s390. | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH, | ||
6 | * IBM Corporation | ||
7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | ||
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
9 | * | ||
10 | * based on i386 version | ||
11 | * Copyright (C) 2001 Rusty Russell. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/vmalloc.h> | ||
30 | #include <linux/fs.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/kernel.h> | ||
33 | |||
34 | #if 0 | ||
35 | #define DEBUGP printk | ||
36 | #else | ||
37 | #define DEBUGP(fmt , ...) | ||
38 | #endif | ||
39 | |||
40 | #ifndef CONFIG_ARCH_S390X | ||
41 | #define PLT_ENTRY_SIZE 12 | ||
42 | #else /* CONFIG_ARCH_S390X */ | ||
43 | #define PLT_ENTRY_SIZE 20 | ||
44 | #endif /* CONFIG_ARCH_S390X */ | ||
45 | |||
46 | void *module_alloc(unsigned long size) | ||
47 | { | ||
48 | if (size == 0) | ||
49 | return NULL; | ||
50 | return vmalloc(size); | ||
51 | } | ||
52 | |||
53 | /* Free memory returned from module_alloc */ | ||
54 | void module_free(struct module *mod, void *module_region) | ||
55 | { | ||
56 | vfree(module_region); | ||
57 | /* FIXME: If module_region == mod->init_region, trim exception | ||
58 | table entries. */ | ||
59 | } | ||
60 | |||
61 | static inline void | ||
62 | check_rela(Elf_Rela *rela, struct module *me) | ||
63 | { | ||
64 | struct mod_arch_syminfo *info; | ||
65 | |||
66 | info = me->arch.syminfo + ELF_R_SYM (rela->r_info); | ||
67 | switch (ELF_R_TYPE (rela->r_info)) { | ||
68 | case R_390_GOT12: /* 12 bit GOT offset. */ | ||
69 | case R_390_GOT16: /* 16 bit GOT offset. */ | ||
70 | case R_390_GOT20: /* 20 bit GOT offset. */ | ||
71 | case R_390_GOT32: /* 32 bit GOT offset. */ | ||
72 | case R_390_GOT64: /* 64 bit GOT offset. */ | ||
73 | case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */ | ||
74 | case R_390_GOTPLT12: /* 12 bit offset to jump slot. */ | ||
75 | case R_390_GOTPLT16: /* 16 bit offset to jump slot. */ | ||
76 | case R_390_GOTPLT20: /* 20 bit offset to jump slot. */ | ||
77 | case R_390_GOTPLT32: /* 32 bit offset to jump slot. */ | ||
78 | case R_390_GOTPLT64: /* 64 bit offset to jump slot. */ | ||
79 | case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ | ||
80 | if (info->got_offset == -1UL) { | ||
81 | info->got_offset = me->arch.got_size; | ||
82 | me->arch.got_size += sizeof(void*); | ||
83 | } | ||
84 | break; | ||
85 | case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ | ||
86 | case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ | ||
87 | case R_390_PLT32: /* 32 bit PC relative PLT address. */ | ||
88 | case R_390_PLT64: /* 64 bit PC relative PLT address. */ | ||
89 | case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */ | ||
90 | case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ | ||
91 | case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */ | ||
92 | if (info->plt_offset == -1UL) { | ||
93 | info->plt_offset = me->arch.plt_size; | ||
94 | me->arch.plt_size += PLT_ENTRY_SIZE; | ||
95 | } | ||
96 | break; | ||
97 | case R_390_COPY: | ||
98 | case R_390_GLOB_DAT: | ||
99 | case R_390_JMP_SLOT: | ||
100 | case R_390_RELATIVE: | ||
101 | /* Only needed if we want to support loading of | ||
102 | modules linked with -shared. */ | ||
103 | break; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Account for GOT and PLT relocations. We can't add sections for | ||
109 | * got and plt but we can increase the core module size. | ||
110 | */ | ||
111 | int | ||
112 | module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, | ||
113 | char *secstrings, struct module *me) | ||
114 | { | ||
115 | Elf_Shdr *symtab; | ||
116 | Elf_Sym *symbols; | ||
117 | Elf_Rela *rela; | ||
118 | char *strings; | ||
119 | int nrela, i, j; | ||
120 | |||
121 | /* Find symbol table and string table. */ | ||
122 | symtab = 0; | ||
123 | for (i = 0; i < hdr->e_shnum; i++) | ||
124 | switch (sechdrs[i].sh_type) { | ||
125 | case SHT_SYMTAB: | ||
126 | symtab = sechdrs + i; | ||
127 | break; | ||
128 | } | ||
129 | if (!symtab) { | ||
130 | printk(KERN_ERR "module %s: no symbol table\n", me->name); | ||
131 | return -ENOEXEC; | ||
132 | } | ||
133 | |||
134 | /* Allocate one syminfo structure per symbol. */ | ||
135 | me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); | ||
136 | me->arch.syminfo = vmalloc(me->arch.nsyms * | ||
137 | sizeof(struct mod_arch_syminfo)); | ||
138 | if (!me->arch.syminfo) | ||
139 | return -ENOMEM; | ||
140 | symbols = (void *) hdr + symtab->sh_offset; | ||
141 | strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset; | ||
142 | for (i = 0; i < me->arch.nsyms; i++) { | ||
143 | if (symbols[i].st_shndx == SHN_UNDEF && | ||
144 | strcmp(strings + symbols[i].st_name, | ||
145 | "_GLOBAL_OFFSET_TABLE_") == 0) | ||
146 | /* "Define" it as absolute. */ | ||
147 | symbols[i].st_shndx = SHN_ABS; | ||
148 | me->arch.syminfo[i].got_offset = -1UL; | ||
149 | me->arch.syminfo[i].plt_offset = -1UL; | ||
150 | me->arch.syminfo[i].got_initialized = 0; | ||
151 | me->arch.syminfo[i].plt_initialized = 0; | ||
152 | } | ||
153 | |||
154 | /* Search for got/plt relocations. */ | ||
155 | me->arch.got_size = me->arch.plt_size = 0; | ||
156 | for (i = 0; i < hdr->e_shnum; i++) { | ||
157 | if (sechdrs[i].sh_type != SHT_RELA) | ||
158 | continue; | ||
159 | nrela = sechdrs[i].sh_size / sizeof(Elf_Rela); | ||
160 | rela = (void *) hdr + sechdrs[i].sh_offset; | ||
161 | for (j = 0; j < nrela; j++) | ||
162 | check_rela(rela + j, me); | ||
163 | } | ||
164 | |||
165 | /* Increase core size by size of got & plt and set start | ||
166 | offsets for got and plt. */ | ||
167 | me->core_size = ALIGN(me->core_size, 4); | ||
168 | me->arch.got_offset = me->core_size; | ||
169 | me->core_size += me->arch.got_size; | ||
170 | me->arch.plt_offset = me->core_size; | ||
171 | me->core_size += me->arch.plt_size; | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | int | ||
176 | apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, | ||
177 | unsigned int relsec, struct module *me) | ||
178 | { | ||
179 | printk(KERN_ERR "module %s: RELOCATION unsupported\n", | ||
180 | me->name); | ||
181 | return -ENOEXEC; | ||
182 | } | ||
183 | |||
184 | static inline int | ||
185 | apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | ||
186 | struct module *me) | ||
187 | { | ||
188 | struct mod_arch_syminfo *info; | ||
189 | Elf_Addr loc, val; | ||
190 | int r_type, r_sym; | ||
191 | |||
192 | /* This is where to make the change */ | ||
193 | loc = base + rela->r_offset; | ||
194 | /* This is the symbol it is referring to. Note that all | ||
195 | undefined symbols have been resolved. */ | ||
196 | r_sym = ELF_R_SYM(rela->r_info); | ||
197 | r_type = ELF_R_TYPE(rela->r_info); | ||
198 | info = me->arch.syminfo + r_sym; | ||
199 | val = symtab[r_sym].st_value; | ||
200 | |||
201 | switch (r_type) { | ||
202 | case R_390_8: /* Direct 8 bit. */ | ||
203 | case R_390_12: /* Direct 12 bit. */ | ||
204 | case R_390_16: /* Direct 16 bit. */ | ||
205 | case R_390_20: /* Direct 20 bit. */ | ||
206 | case R_390_32: /* Direct 32 bit. */ | ||
207 | case R_390_64: /* Direct 64 bit. */ | ||
208 | val += rela->r_addend; | ||
209 | if (r_type == R_390_8) | ||
210 | *(unsigned char *) loc = val; | ||
211 | else if (r_type == R_390_12) | ||
212 | *(unsigned short *) loc = (val & 0xfff) | | ||
213 | (*(unsigned short *) loc & 0xf000); | ||
214 | else if (r_type == R_390_16) | ||
215 | *(unsigned short *) loc = val; | ||
216 | else if (r_type == R_390_20) | ||
217 | *(unsigned int *) loc = | ||
218 | (*(unsigned int *) loc & 0xf00000ff) | | ||
219 | (val & 0xfff) << 16 | (val & 0xff000) >> 4; | ||
220 | else if (r_type == R_390_32) | ||
221 | *(unsigned int *) loc = val; | ||
222 | else if (r_type == R_390_64) | ||
223 | *(unsigned long *) loc = val; | ||
224 | break; | ||
225 | case R_390_PC16: /* PC relative 16 bit. */ | ||
226 | case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ | ||
227 | case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ | ||
228 | case R_390_PC32: /* PC relative 32 bit. */ | ||
229 | case R_390_PC64: /* PC relative 64 bit. */ | ||
230 | val += rela->r_addend - loc; | ||
231 | if (r_type == R_390_PC16) | ||
232 | *(unsigned short *) loc = val; | ||
233 | else if (r_type == R_390_PC16DBL) | ||
234 | *(unsigned short *) loc = val >> 1; | ||
235 | else if (r_type == R_390_PC32DBL) | ||
236 | *(unsigned int *) loc = val >> 1; | ||
237 | else if (r_type == R_390_PC32) | ||
238 | *(unsigned int *) loc = val; | ||
239 | else if (r_type == R_390_PC64) | ||
240 | *(unsigned long *) loc = val; | ||
241 | break; | ||
242 | case R_390_GOT12: /* 12 bit GOT offset. */ | ||
243 | case R_390_GOT16: /* 16 bit GOT offset. */ | ||
244 | case R_390_GOT20: /* 20 bit GOT offset. */ | ||
245 | case R_390_GOT32: /* 32 bit GOT offset. */ | ||
246 | case R_390_GOT64: /* 64 bit GOT offset. */ | ||
247 | case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */ | ||
248 | case R_390_GOTPLT12: /* 12 bit offset to jump slot. */ | ||
249 | case R_390_GOTPLT20: /* 20 bit offset to jump slot. */ | ||
250 | case R_390_GOTPLT16: /* 16 bit offset to jump slot. */ | ||
251 | case R_390_GOTPLT32: /* 32 bit offset to jump slot. */ | ||
252 | case R_390_GOTPLT64: /* 64 bit offset to jump slot. */ | ||
253 | case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ | ||
254 | if (info->got_initialized == 0) { | ||
255 | Elf_Addr *gotent; | ||
256 | |||
257 | gotent = me->module_core + me->arch.got_offset + | ||
258 | info->got_offset; | ||
259 | *gotent = val; | ||
260 | info->got_initialized = 1; | ||
261 | } | ||
262 | val = info->got_offset + rela->r_addend; | ||
263 | if (r_type == R_390_GOT12 || | ||
264 | r_type == R_390_GOTPLT12) | ||
265 | *(unsigned short *) loc = (val & 0xfff) | | ||
266 | (*(unsigned short *) loc & 0xf000); | ||
267 | else if (r_type == R_390_GOT16 || | ||
268 | r_type == R_390_GOTPLT16) | ||
269 | *(unsigned short *) loc = val; | ||
270 | else if (r_type == R_390_GOT20 || | ||
271 | r_type == R_390_GOTPLT20) | ||
272 | *(unsigned int *) loc = | ||
273 | (*(unsigned int *) loc & 0xf00000ff) | | ||
274 | (val & 0xfff) << 16 | (val & 0xff000) >> 4; | ||
275 | else if (r_type == R_390_GOT32 || | ||
276 | r_type == R_390_GOTPLT32) | ||
277 | *(unsigned int *) loc = val; | ||
278 | else if (r_type == R_390_GOTENT || | ||
279 | r_type == R_390_GOTPLTENT) | ||
280 | *(unsigned int *) loc = | ||
281 | (val + (Elf_Addr) me->module_core - loc) >> 1; | ||
282 | else if (r_type == R_390_GOT64 || | ||
283 | r_type == R_390_GOTPLT64) | ||
284 | *(unsigned long *) loc = val; | ||
285 | break; | ||
286 | case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ | ||
287 | case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ | ||
288 | case R_390_PLT32: /* 32 bit PC relative PLT address. */ | ||
289 | case R_390_PLT64: /* 64 bit PC relative PLT address. */ | ||
290 | case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */ | ||
291 | case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ | ||
292 | case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */ | ||
293 | if (info->plt_initialized == 0) { | ||
294 | unsigned int *ip; | ||
295 | ip = me->module_core + me->arch.plt_offset + | ||
296 | info->plt_offset; | ||
297 | #ifndef CONFIG_ARCH_S390X | ||
298 | ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ | ||
299 | ip[1] = 0x100607f1; | ||
300 | ip[2] = val; | ||
301 | #else /* CONFIG_ARCH_S390X */ | ||
302 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | ||
303 | ip[1] = 0x100a0004; | ||
304 | ip[2] = 0x07f10000; | ||
305 | ip[3] = (unsigned int) (val >> 32); | ||
306 | ip[4] = (unsigned int) val; | ||
307 | #endif /* CONFIG_ARCH_S390X */ | ||
308 | info->plt_initialized = 1; | ||
309 | } | ||
310 | if (r_type == R_390_PLTOFF16 || | ||
311 | r_type == R_390_PLTOFF32 | ||
312 | || r_type == R_390_PLTOFF64 | ||
313 | ) | ||
314 | val = me->arch.plt_offset - me->arch.got_offset + | ||
315 | info->plt_offset + rela->r_addend; | ||
316 | else | ||
317 | val = (Elf_Addr) me->module_core + | ||
318 | me->arch.plt_offset + info->plt_offset + | ||
319 | rela->r_addend - loc; | ||
320 | if (r_type == R_390_PLT16DBL) | ||
321 | *(unsigned short *) loc = val >> 1; | ||
322 | else if (r_type == R_390_PLTOFF16) | ||
323 | *(unsigned short *) loc = val; | ||
324 | else if (r_type == R_390_PLT32DBL) | ||
325 | *(unsigned int *) loc = val >> 1; | ||
326 | else if (r_type == R_390_PLT32 || | ||
327 | r_type == R_390_PLTOFF32) | ||
328 | *(unsigned int *) loc = val; | ||
329 | else if (r_type == R_390_PLT64 || | ||
330 | r_type == R_390_PLTOFF64) | ||
331 | *(unsigned long *) loc = val; | ||
332 | break; | ||
333 | case R_390_GOTOFF16: /* 16 bit offset to GOT. */ | ||
334 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ | ||
335 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ | ||
336 | val = val + rela->r_addend - | ||
337 | ((Elf_Addr) me->module_core + me->arch.got_offset); | ||
338 | if (r_type == R_390_GOTOFF16) | ||
339 | *(unsigned short *) loc = val; | ||
340 | else if (r_type == R_390_GOTOFF32) | ||
341 | *(unsigned int *) loc = val; | ||
342 | else if (r_type == R_390_GOTOFF64) | ||
343 | *(unsigned long *) loc = val; | ||
344 | break; | ||
345 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ | ||
346 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ | ||
347 | val = (Elf_Addr) me->module_core + me->arch.got_offset + | ||
348 | rela->r_addend - loc; | ||
349 | if (r_type == R_390_GOTPC) | ||
350 | *(unsigned int *) loc = val; | ||
351 | else if (r_type == R_390_GOTPCDBL) | ||
352 | *(unsigned int *) loc = val >> 1; | ||
353 | break; | ||
354 | case R_390_COPY: | ||
355 | case R_390_GLOB_DAT: /* Create GOT entry. */ | ||
356 | case R_390_JMP_SLOT: /* Create PLT entry. */ | ||
357 | case R_390_RELATIVE: /* Adjust by program base. */ | ||
358 | /* Only needed if we want to support loading of | ||
359 | modules linked with -shared. */ | ||
360 | break; | ||
361 | default: | ||
362 | printk(KERN_ERR "module %s: Unknown relocation: %u\n", | ||
363 | me->name, r_type); | ||
364 | return -ENOEXEC; | ||
365 | } | ||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | int | ||
370 | apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, | ||
371 | unsigned int symindex, unsigned int relsec, | ||
372 | struct module *me) | ||
373 | { | ||
374 | Elf_Addr base; | ||
375 | Elf_Sym *symtab; | ||
376 | Elf_Rela *rela; | ||
377 | unsigned long i, n; | ||
378 | int rc; | ||
379 | |||
380 | DEBUGP("Applying relocate section %u to %u\n", | ||
381 | relsec, sechdrs[relsec].sh_info); | ||
382 | base = sechdrs[sechdrs[relsec].sh_info].sh_addr; | ||
383 | symtab = (Elf_Sym *) sechdrs[symindex].sh_addr; | ||
384 | rela = (Elf_Rela *) sechdrs[relsec].sh_addr; | ||
385 | n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); | ||
386 | |||
387 | for (i = 0; i < n; i++, rela++) { | ||
388 | rc = apply_rela(rela, base, symtab, me); | ||
389 | if (rc) | ||
390 | return rc; | ||
391 | } | ||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | int module_finalize(const Elf_Ehdr *hdr, | ||
396 | const Elf_Shdr *sechdrs, | ||
397 | struct module *me) | ||
398 | { | ||
399 | vfree(me->arch.syminfo); | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | void module_arch_cleanup(struct module *mod) | ||
404 | { | ||
405 | } | ||
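
The *DBL relocations handled by apply_rela() above store PC-relative offsets scaled to halfwords, so the patched field is (symbol value + addend - patch location) >> 1; the GOTENT and PLT*DBL cases do the same halving after first redirecting the value through the module's synthetic GOT or PLT entry. A minimal user-space illustration of the plain R_390_PC32DBL arithmetic, with made-up addresses and byte order ignored:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* patch a 32-bit PC-relative, halfword-scaled field: the arithmetic used
 * by apply_rela() for R_390_PC32DBL (byte order is ignored here, the
 * point is the value that gets stored) */
static void patch_pc32dbl(unsigned char *image, uint64_t image_base,
                          uint64_t loc_off, uint64_t sym, int64_t addend)
{
        uint64_t loc = image_base + loc_off;    /* address being patched */
        int64_t val = (int64_t)(sym + addend - loc);
        uint32_t field = (uint32_t)(val >> 1);  /* stored in halfword units */

        memcpy(image + loc_off, &field, sizeof(field));
}

int main(void)
{
        unsigned char image[16] = { 0 };
        uint64_t base = 0x3ff80000000ULL;       /* pretend module load address */
        uint32_t field;

        /* a branch at base+0x4 targeting a symbol 0x1000 bytes further on */
        patch_pc32dbl(image, base, 0x4, base + 0x1004, 0);

        memcpy(&field, image + 0x4, sizeof(field));
        printf("stored field: %#x (= %#x bytes)\n", field, field << 1);
        return 0;
}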
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c new file mode 100644 index 000000000000..7aea25d6e300 --- /dev/null +++ b/arch/s390/kernel/process.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/process.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Hartmut Penner (hp@de.ibm.com), | ||
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
9 | * | ||
10 | * Derived from "arch/i386/kernel/process.c" | ||
11 | * Copyright (C) 1995, Linus Torvalds | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * This file handles the architecture-dependent parts of process handling.. | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/stddef.h> | ||
28 | #include <linux/unistd.h> | ||
29 | #include <linux/ptrace.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/user.h> | ||
33 | #include <linux/a.out.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/reboot.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/notifier.h> | ||
40 | |||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/system.h> | ||
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/irq.h> | ||
47 | #include <asm/timer.h> | ||
48 | |||
49 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | ||
50 | |||
51 | /* | ||
52 | * Return saved PC of a blocked thread. Used in kernel/sched. | ||
53 | * resume in entry.S does not create a new stack frame, it | ||
54 | * just stores the registers %r6-%r15 to the frame given by | ||
55 | * schedule. We want to return the address of the caller of | ||
56 | * schedule, so we have to walk the backchain one time to | ||
57 | * find the frame where schedule() stored its return address. | ||
58 | */ | ||
59 | unsigned long thread_saved_pc(struct task_struct *tsk) | ||
60 | { | ||
61 | struct stack_frame *sf; | ||
62 | |||
63 | sf = (struct stack_frame *) tsk->thread.ksp; | ||
64 | sf = (struct stack_frame *) sf->back_chain; | ||
65 | return sf->gprs[8]; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Need to know about CPUs going idle? | ||
70 | */ | ||
71 | static struct notifier_block *idle_chain; | ||
72 | |||
73 | int register_idle_notifier(struct notifier_block *nb) | ||
74 | { | ||
75 | return notifier_chain_register(&idle_chain, nb); | ||
76 | } | ||
77 | EXPORT_SYMBOL(register_idle_notifier); | ||
78 | |||
79 | int unregister_idle_notifier(struct notifier_block *nb) | ||
80 | { | ||
81 | return notifier_chain_unregister(&idle_chain, nb); | ||
82 | } | ||
83 | EXPORT_SYMBOL(unregister_idle_notifier); | ||
84 | |||
85 | void do_monitor_call(struct pt_regs *regs, long interruption_code) | ||
86 | { | ||
87 | /* disable monitor call class 0 */ | ||
88 | __ctl_clear_bit(8, 15); | ||
89 | |||
90 | notifier_call_chain(&idle_chain, CPU_NOT_IDLE, | ||
91 | (void *)(long) smp_processor_id()); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * The idle loop on a S390... | ||
96 | */ | ||
97 | void default_idle(void) | ||
98 | { | ||
99 | psw_t wait_psw; | ||
100 | unsigned long reg; | ||
101 | int cpu, rc; | ||
102 | |||
103 | local_irq_disable(); | ||
104 | if (need_resched()) { | ||
105 | local_irq_enable(); | ||
106 | schedule(); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | /* CPU is going idle. */ | ||
111 | cpu = smp_processor_id(); | ||
112 | rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu); | ||
113 | if (rc != NOTIFY_OK && rc != NOTIFY_DONE) | ||
114 | BUG(); | ||
115 | if (rc != NOTIFY_OK) { | ||
116 | local_irq_enable(); | ||
117 | return; | ||
118 | } | ||
119 | |||
120 | /* enable monitor call class 0 */ | ||
121 | __ctl_set_bit(8, 15); | ||
122 | |||
123 | #ifdef CONFIG_HOTPLUG_CPU | ||
124 | if (cpu_is_offline(smp_processor_id())) | ||
125 | cpu_die(); | ||
126 | #endif | ||
127 | |||
128 | /* | ||
129 | * Wait for external, I/O or machine check interrupt and | ||
130 | * switch off machine check bit after the wait has ended. | ||
131 | */ | ||
132 | wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT | | ||
133 | PSW_MASK_IO | PSW_MASK_EXT; | ||
134 | #ifndef CONFIG_ARCH_S390X | ||
135 | asm volatile ( | ||
136 | " basr %0,0\n" | ||
137 | "0: la %0,1f-0b(%0)\n" | ||
138 | " st %0,4(%1)\n" | ||
139 | " oi 4(%1),0x80\n" | ||
140 | " lpsw 0(%1)\n" | ||
141 | "1: la %0,2f-1b(%0)\n" | ||
142 | " st %0,4(%1)\n" | ||
143 | " oi 4(%1),0x80\n" | ||
144 | " ni 1(%1),0xf9\n" | ||
145 | " lpsw 0(%1)\n" | ||
146 | "2:" | ||
147 | : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" ); | ||
148 | #else /* CONFIG_ARCH_S390X */ | ||
149 | asm volatile ( | ||
150 | " larl %0,0f\n" | ||
151 | " stg %0,8(%1)\n" | ||
152 | " lpswe 0(%1)\n" | ||
153 | "0: larl %0,1f\n" | ||
154 | " stg %0,8(%1)\n" | ||
155 | " ni 1(%1),0xf9\n" | ||
156 | " lpswe 0(%1)\n" | ||
157 | "1:" | ||
158 | : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" ); | ||
159 | #endif /* CONFIG_ARCH_S390X */ | ||
160 | } | ||
161 | |||
162 | void cpu_idle(void) | ||
163 | { | ||
164 | for (;;) | ||
165 | default_idle(); | ||
166 | } | ||
167 | |||
168 | void show_regs(struct pt_regs *regs) | ||
169 | { | ||
170 | struct task_struct *tsk = current; | ||
171 | |||
172 | printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted()); | ||
173 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
174 | current->comm, current->pid, (void *) tsk, | ||
175 | (void *) tsk->thread.ksp); | ||
176 | |||
177 | show_registers(regs); | ||
178 | /* Show stack backtrace if pt_regs is from kernel mode */ | ||
179 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | ||
180 | show_trace(0,(unsigned long *) regs->gprs[15]); | ||
181 | } | ||
182 | |||
183 | extern void kernel_thread_starter(void); | ||
184 | |||
185 | __asm__(".align 4\n" | ||
186 | "kernel_thread_starter:\n" | ||
187 | " la 2,0(10)\n" | ||
188 | " basr 14,9\n" | ||
189 | " la 2,0\n" | ||
190 | " br 11\n"); | ||
191 | |||
192 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | ||
193 | { | ||
194 | struct pt_regs regs; | ||
195 | |||
196 | memset(®s, 0, sizeof(regs)); | ||
197 | regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | ||
198 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | ||
199 | regs.gprs[9] = (unsigned long) fn; | ||
200 | regs.gprs[10] = (unsigned long) arg; | ||
201 | regs.gprs[11] = (unsigned long) do_exit; | ||
202 | regs.orig_gpr2 = -1; | ||
203 | |||
204 | /* Ok, create the new process.. */ | ||
205 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, | ||
206 | 0, ®s, 0, NULL, NULL); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Free current thread data structures etc.. | ||
211 | */ | ||
212 | void exit_thread(void) | ||
213 | { | ||
214 | } | ||
215 | |||
216 | void flush_thread(void) | ||
217 | { | ||
218 | clear_used_math(); | ||
219 | clear_tsk_thread_flag(current, TIF_USEDFPU); | ||
220 | } | ||
221 | |||
222 | void release_thread(struct task_struct *dead_task) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, | ||
227 | unsigned long unused, | ||
228 | struct task_struct * p, struct pt_regs * regs) | ||
229 | { | ||
230 | struct fake_frame | ||
231 | { | ||
232 | struct stack_frame sf; | ||
233 | struct pt_regs childregs; | ||
234 | } *frame; | ||
235 | |||
236 | frame = ((struct fake_frame *) | ||
237 | (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; | ||
238 | p->thread.ksp = (unsigned long) frame; | ||
239 | /* Store access registers to kernel stack of new process. */ | ||
240 | frame->childregs = *regs; | ||
241 | frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ | ||
242 | frame->childregs.gprs[15] = new_stackp; | ||
243 | frame->sf.back_chain = 0; | ||
244 | |||
245 | /* new return point is ret_from_fork */ | ||
246 | frame->sf.gprs[8] = (unsigned long) ret_from_fork; | ||
247 | |||
248 | /* fake return stack for resume(), don't go back to schedule */ | ||
249 | frame->sf.gprs[9] = (unsigned long) frame; | ||
250 | |||
251 | /* Save access registers to new thread structure. */ | ||
252 | save_access_regs(&p->thread.acrs[0]); | ||
253 | |||
254 | #ifndef CONFIG_ARCH_S390X | ||
255 | /* | ||
256 | * save fprs to current->thread.fp_regs to merge them with | ||
257 | * the emulated registers and then copy the result to the child. | ||
258 | */ | ||
259 | save_fp_regs(¤t->thread.fp_regs); | ||
260 | memcpy(&p->thread.fp_regs, ¤t->thread.fp_regs, | ||
261 | sizeof(s390_fp_regs)); | ||
262 | p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE; | ||
263 | /* Set a new TLS ? */ | ||
264 | if (clone_flags & CLONE_SETTLS) | ||
265 | p->thread.acrs[0] = regs->gprs[6]; | ||
266 | #else /* CONFIG_ARCH_S390X */ | ||
267 | /* Save the fpu registers to new thread structure. */ | ||
268 | save_fp_regs(&p->thread.fp_regs); | ||
269 | p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE; | ||
270 | /* Set a new TLS ? */ | ||
271 | if (clone_flags & CLONE_SETTLS) { | ||
272 | if (test_thread_flag(TIF_31BIT)) { | ||
273 | p->thread.acrs[0] = (unsigned int) regs->gprs[6]; | ||
274 | } else { | ||
275 | p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32); | ||
276 | p->thread.acrs[1] = (unsigned int) regs->gprs[6]; | ||
277 | } | ||
278 | } | ||
279 | #endif /* CONFIG_ARCH_S390X */ | ||
280 | /* start new process with ar4 pointing to the correct address space */ | ||
281 | p->thread.mm_segment = get_fs(); | ||
282 | /* Don't copy debug registers */ | ||
283 | memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | asmlinkage long sys_fork(struct pt_regs regs) | ||
289 | { | ||
290 | return do_fork(SIGCHLD, regs.gprs[15], ®s, 0, NULL, NULL); | ||
291 | } | ||
292 | |||
293 | asmlinkage long sys_clone(struct pt_regs regs) | ||
294 | { | ||
295 | unsigned long clone_flags; | ||
296 | unsigned long newsp; | ||
297 | int __user *parent_tidptr, *child_tidptr; | ||
298 | |||
299 | clone_flags = regs.gprs[3]; | ||
300 | newsp = regs.orig_gpr2; | ||
301 | parent_tidptr = (int __user *) regs.gprs[4]; | ||
302 | child_tidptr = (int __user *) regs.gprs[5]; | ||
303 | if (!newsp) | ||
304 | newsp = regs.gprs[15]; | ||
305 | return do_fork(clone_flags, newsp, ®s, 0, | ||
306 | parent_tidptr, child_tidptr); | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * This is trivial, and on the face of it looks like it | ||
311 | * could equally well be done in user mode. | ||
312 | * | ||
313 | * Not so, for quite unobvious reasons - register pressure. | ||
314 | * In user mode vfork() cannot have a stack frame, and if | ||
315 | * done by calling the "clone()" system call directly, you | ||
316 | * do not have enough call-clobbered registers to hold all | ||
317 | * the information you need. | ||
318 | */ | ||
319 | asmlinkage long sys_vfork(struct pt_regs regs) | ||
320 | { | ||
321 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, | ||
322 | regs.gprs[15], ®s, 0, NULL, NULL); | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * sys_execve() executes a new program. | ||
327 | */ | ||
328 | asmlinkage long sys_execve(struct pt_regs regs) | ||
329 | { | ||
330 | int error; | ||
331 | char * filename; | ||
332 | |||
333 | filename = getname((char __user *) regs.orig_gpr2); | ||
334 | error = PTR_ERR(filename); | ||
335 | if (IS_ERR(filename)) | ||
336 | goto out; | ||
337 | error = do_execve(filename, (char __user * __user *) regs.gprs[3], | ||
338 | (char __user * __user *) regs.gprs[4], ®s); | ||
339 | if (error == 0) { | ||
340 | task_lock(current); | ||
341 | current->ptrace &= ~PT_DTRACE; | ||
342 | task_unlock(current); | ||
343 | current->thread.fp_regs.fpc = 0; | ||
344 | if (MACHINE_HAS_IEEE) | ||
345 | asm volatile("sfpc %0,%0" : : "d" (0)); | ||
346 | } | ||
347 | putname(filename); | ||
348 | out: | ||
349 | return error; | ||
350 | } | ||
351 | |||
352 | |||
353 | /* | ||
354 | * fill in the FPU structure for a core dump. | ||
355 | */ | ||
356 | int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) | ||
357 | { | ||
358 | #ifndef CONFIG_ARCH_S390X | ||
359 | /* | ||
360 | * save fprs to current->thread.fp_regs to merge them with | ||
361 | * the emulated registers and then copy the result to the dump. | ||
362 | */ | ||
363 | save_fp_regs(¤t->thread.fp_regs); | ||
364 | memcpy(fpregs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); | ||
365 | #else /* CONFIG_ARCH_S390X */ | ||
366 | save_fp_regs(fpregs); | ||
367 | #endif /* CONFIG_ARCH_S390X */ | ||
368 | return 1; | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * fill in the user structure for a core dump.. | ||
373 | */ | ||
374 | void dump_thread(struct pt_regs * regs, struct user * dump) | ||
375 | { | ||
376 | |||
377 | /* changed the size calculations - should hopefully work better. lbt */ | ||
378 | dump->magic = CMAGIC; | ||
379 | dump->start_code = 0; | ||
380 | dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1); | ||
381 | dump->u_tsize = current->mm->end_code >> PAGE_SHIFT; | ||
382 | dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
383 | dump->u_dsize -= dump->u_tsize; | ||
384 | dump->u_ssize = 0; | ||
385 | if (dump->start_stack < TASK_SIZE) | ||
386 | dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT; | ||
387 | memcpy(&dump->regs, regs, sizeof(s390_regs)); | ||
388 | dump_fpu (regs, &dump->regs.fp_regs); | ||
389 | dump->regs.per_info = current->thread.per_info; | ||
390 | } | ||
391 | |||
392 | unsigned long get_wchan(struct task_struct *p) | ||
393 | { | ||
394 | struct stack_frame *sf, *low, *high; | ||
395 | unsigned long return_address; | ||
396 | int count; | ||
397 | |||
398 | if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info) | ||
399 | return 0; | ||
400 | low = (struct stack_frame *) p->thread_info; | ||
401 | high = (struct stack_frame *) | ||
402 | ((unsigned long) p->thread_info + THREAD_SIZE) - 1; | ||
403 | sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); | ||
404 | if (sf <= low || sf > high) | ||
405 | return 0; | ||
406 | for (count = 0; count < 16; count++) { | ||
407 | sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN); | ||
408 | if (sf <= low || sf > high) | ||
409 | return 0; | ||
410 | return_address = sf->gprs[8] & PSW_ADDR_INSN; | ||
411 | if (!in_sched_functions(return_address)) | ||
412 | return return_address; | ||
413 | } | ||
414 | return 0; | ||
415 | } | ||
416 | |||
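
thread_saved_pc() and get_wchan() above both depend on the s390 stack backchain: every frame records its caller's frame address, and the caller's return address sits in the saved general registers of that frame. A toy walk over such a chain, with a simplified frame layout, fabricated addresses, and a stand-in for the in_sched_functions() filter:

#include <stdio.h>

/* toy stand-in for the s390 stack_frame layout used above */
struct stack_frame {
        unsigned long back_chain;       /* caller's frame, 0 terminates the chain */
        unsigned long gprs[10];         /* gprs[8] holds the saved return address */
};

/* walk at most max_depth frames and return the first return address the
 * caller-supplied filter accepts, 0 otherwise (roughly what get_wchan does
 * with in_sched_functions) */
static unsigned long walk_backchain(struct stack_frame *sf, int max_depth,
                                    int (*accept)(unsigned long))
{
        int i;

        for (i = 0; i < max_depth && sf; i++) {
                unsigned long ra = sf->gprs[8];

                if (accept(ra))
                        return ra;
                sf = (struct stack_frame *) sf->back_chain;
        }
        return 0;
}

/* pretend 0x1111 is an address inside the scheduler */
static int not_in_sched(unsigned long ra)
{
        return ra != 0x1111;
}

int main(void)
{
        struct stack_frame outer = { 0, { [8] = 0x3333 } };
        struct stack_frame mid   = { (unsigned long) &outer, { [8] = 0x1111 } };
        struct stack_frame inner = { (unsigned long) &mid,   { [8] = 0x1111 } };

        printf("wchan-like result: %#lx\n",
               walk_backchain(&inner, 16, not_in_sched));
        return 0;
}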
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c new file mode 100644 index 000000000000..7ba777eec1a8 --- /dev/null +++ b/arch/s390/kernel/profile.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/profile.c | ||
3 | * | ||
4 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
6 | * | ||
7 | */ | ||
8 | #include <linux/proc_fs.h> | ||
9 | #include <linux/profile.h> | ||
10 | |||
11 | static struct proc_dir_entry * root_irq_dir; | ||
12 | |||
13 | void init_irq_proc(void) | ||
14 | { | ||
15 | /* create /proc/irq */ | ||
16 | root_irq_dir = proc_mkdir("irq", 0); | ||
17 | |||
18 | /* create /proc/irq/prof_cpu_mask */ | ||
19 | create_prof_cpu_mask(root_irq_dir); | ||
20 | } | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c new file mode 100644 index 000000000000..647233c02fc8 --- /dev/null +++ b/arch/s390/kernel/ptrace.c | |||
@@ -0,0 +1,738 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/ptrace.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * Based on PowerPC version | ||
10 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
11 | * | ||
12 | * Derived from "arch/m68k/kernel/ptrace.c" | ||
13 | * Copyright (C) 1994 by Hamish Macdonald | ||
14 | * Taken from linux/kernel/ptrace.c and modified for M680x0. | ||
15 | * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds | ||
16 | * | ||
17 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
18 | * | ||
19 | * | ||
20 | * This file is subject to the terms and conditions of the GNU General | ||
21 | * Public License. See the file README.legal in the main directory of | ||
22 | * this archive for more details. | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/ptrace.h> | ||
32 | #include <linux/user.h> | ||
33 | #include <linux/security.h> | ||
34 | #include <linux/audit.h> | ||
35 | |||
36 | #include <asm/segment.h> | ||
37 | #include <asm/page.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/pgalloc.h> | ||
40 | #include <asm/system.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | |||
43 | #ifdef CONFIG_S390_SUPPORT | ||
44 | #include "compat_ptrace.h" | ||
45 | #endif | ||
46 | |||
47 | static void | ||
48 | FixPerRegisters(struct task_struct *task) | ||
49 | { | ||
50 | struct pt_regs *regs; | ||
51 | per_struct *per_info; | ||
52 | |||
53 | regs = __KSTK_PTREGS(task); | ||
54 | per_info = (per_struct *) &task->thread.per_info; | ||
55 | per_info->control_regs.bits.em_instruction_fetch = | ||
56 | per_info->single_step | per_info->instruction_fetch; | ||
57 | |||
58 | if (per_info->single_step) { | ||
59 | per_info->control_regs.bits.starting_addr = 0; | ||
60 | #ifdef CONFIG_S390_SUPPORT | ||
61 | if (test_thread_flag(TIF_31BIT)) | ||
62 | per_info->control_regs.bits.ending_addr = 0x7fffffffUL; | ||
63 | else | ||
64 | #endif | ||
65 | per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN; | ||
66 | } else { | ||
67 | per_info->control_regs.bits.starting_addr = | ||
68 | per_info->starting_addr; | ||
69 | per_info->control_regs.bits.ending_addr = | ||
70 | per_info->ending_addr; | ||
71 | } | ||
72 | /* | ||
73 | * if any of the control reg tracing bits are on | ||
74 | * we switch on per in the psw | ||
75 | */ | ||
76 | if (per_info->control_regs.words.cr[0] & PER_EM_MASK) | ||
77 | regs->psw.mask |= PSW_MASK_PER; | ||
78 | else | ||
79 | regs->psw.mask &= ~PSW_MASK_PER; | ||
80 | |||
81 | if (per_info->control_regs.bits.em_storage_alteration) | ||
82 | per_info->control_regs.bits.storage_alt_space_ctl = 1; | ||
83 | else | ||
84 | per_info->control_regs.bits.storage_alt_space_ctl = 0; | ||
85 | } | ||
86 | |||
87 | void | ||
88 | set_single_step(struct task_struct *task) | ||
89 | { | ||
90 | task->thread.per_info.single_step = 1; | ||
91 | FixPerRegisters(task); | ||
92 | } | ||
93 | |||
94 | void | ||
95 | clear_single_step(struct task_struct *task) | ||
96 | { | ||
97 | task->thread.per_info.single_step = 0; | ||
98 | FixPerRegisters(task); | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Called by kernel/ptrace.c when detaching.. | ||
103 | * | ||
104 | * Make sure single step bits etc are not set. | ||
105 | */ | ||
106 | void | ||
107 | ptrace_disable(struct task_struct *child) | ||
108 | { | ||
109 | /* make sure the single step bit is not set. */ | ||
110 | clear_single_step(child); | ||
111 | } | ||
112 | |||
113 | #ifndef CONFIG_ARCH_S390X | ||
114 | # define __ADDR_MASK 3 | ||
115 | #else | ||
116 | # define __ADDR_MASK 7 | ||
117 | #endif | ||
118 | |||
119 | /* | ||
120 | * Read the word at offset addr from the user area of a process. The | ||
121 | * trouble here is that the information is scattered over different | ||
122 | * locations. The process registers are found on the kernel stack, | ||
123 | * the floating point stuff and the trace settings are stored in | ||
124 | * the task structure. In addition the different structures in | ||
125 | * struct user contain pad bytes that should be read as zeroes. | ||
126 | * Lovely... | ||
127 | */ | ||
128 | static int | ||
129 | peek_user(struct task_struct *child, addr_t addr, addr_t data) | ||
130 | { | ||
131 | struct user *dummy = NULL; | ||
132 | addr_t offset, tmp; | ||
133 | |||
134 | /* | ||
135 | * Stupid gdb peeks/pokes the access registers in 64 bit with | ||
136 | * an alignment of 4. Programmers from hell... | ||
137 | */ | ||
138 | if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK) | ||
139 | return -EIO; | ||
140 | |||
141 | if (addr < (addr_t) &dummy->regs.acrs) { | ||
142 | /* | ||
143 | * psw and gprs are stored on the stack | ||
144 | */ | ||
145 | tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr); | ||
146 | if (addr == (addr_t) &dummy->regs.psw.mask) | ||
147 | /* Remove per bit from user psw. */ | ||
148 | tmp &= ~PSW_MASK_PER; | ||
149 | |||
150 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { | ||
151 | /* | ||
152 | * access registers are stored in the thread structure | ||
153 | */ | ||
154 | offset = addr - (addr_t) &dummy->regs.acrs; | ||
155 | tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); | ||
156 | |||
157 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | ||
158 | /* | ||
159 | * orig_gpr2 is stored on the kernel stack | ||
160 | */ | ||
161 | tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2; | ||
162 | |||
163 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | ||
164 | /* | ||
165 | * floating point regs. are stored in the thread structure | ||
166 | */ | ||
167 | offset = addr - (addr_t) &dummy->regs.fp_regs; | ||
168 | tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); | ||
169 | |||
170 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { | ||
171 | /* | ||
172 | * per_info is found in the thread structure | ||
173 | */ | ||
174 | offset = addr - (addr_t) &dummy->regs.per_info; | ||
175 | tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); | ||
176 | |||
177 | } else | ||
178 | tmp = 0; | ||
179 | |||
180 | return put_user(tmp, (addr_t __user *) data); | ||
181 | } | ||
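For reference, this is how the scattered layout looks from the tracer's side: a minimal user-space sketch that reads gpr2 of a stopped tracee with PTRACE_PEEKUSR, assuming glibc's <sys/user.h> mirrors the struct user layout walked above (error handling omitted).

#include <stddef.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>

/* Read general purpose register 2 from the tracee's user area. */
long peek_gpr2(pid_t pid)
{
	long off = offsetof(struct user, regs.gprs[2]);	/* word-aligned offset into the user area */

	return ptrace(PTRACE_PEEKUSER, pid, (void *) off, NULL);
}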
182 | |||
183 | /* | ||
184 | * Write a word to the user area of a process at location addr. This | ||
185 | * operation does have an additional problem compared to peek_user. | ||
186 | * Stores to the program status word and to the floating point | ||
187 | * control register need to be checked for validity. | ||
188 | */ | ||
189 | static int | ||
190 | poke_user(struct task_struct *child, addr_t addr, addr_t data) | ||
191 | { | ||
192 | struct user *dummy = NULL; | ||
193 | addr_t offset; | ||
194 | |||
195 | /* | ||
196 | * Stupid gdb peeks/pokes the access registers in 64 bit with | ||
197 | * an alignment of 4. Programmers from hell indeed... | ||
198 | */ | ||
199 | if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK) | ||
200 | return -EIO; | ||
201 | |||
202 | if (addr < (addr_t) &dummy->regs.acrs) { | ||
203 | /* | ||
204 | * psw and gprs are stored on the stack | ||
205 | */ | ||
206 | if (addr == (addr_t) &dummy->regs.psw.mask && | ||
207 | #ifdef CONFIG_S390_SUPPORT | ||
208 | data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && | ||
209 | #endif | ||
210 | data != PSW_MASK_MERGE(PSW_USER_BITS, data)) | ||
211 | /* Invalid psw mask. */ | ||
212 | return -EINVAL; | ||
213 | #ifndef CONFIG_ARCH_S390X | ||
214 | if (addr == (addr_t) &dummy->regs.psw.addr) | ||
215 | /* I'd like to reject addresses without the | ||
216 | high order bit but older gdb's rely on it */ | ||
217 | data |= PSW_ADDR_AMODE; | ||
218 | #endif | ||
219 | *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data; | ||
220 | |||
221 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { | ||
222 | /* | ||
223 | * access registers are stored in the thread structure | ||
224 | */ | ||
225 | offset = addr - (addr_t) &dummy->regs.acrs; | ||
226 | *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; | ||
227 | |||
228 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | ||
229 | /* | ||
230 | * orig_gpr2 is stored on the kernel stack | ||
231 | */ | ||
232 | __KSTK_PTREGS(child)->orig_gpr2 = data; | ||
233 | |||
234 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | ||
235 | /* | ||
236 | * floating point regs. are stored in the thread structure | ||
237 | */ | ||
238 | if (addr == (addr_t) &dummy->regs.fp_regs.fpc && | ||
239 | (data & ~FPC_VALID_MASK) != 0) | ||
240 | return -EINVAL; | ||
241 | offset = addr - (addr_t) &dummy->regs.fp_regs; | ||
242 | *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; | ||
243 | |||
244 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { | ||
245 | /* | ||
246 | * per_info is found in the thread structure | ||
247 | */ | ||
248 | offset = addr - (addr_t) &dummy->regs.per_info; | ||
249 | *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; | ||
250 | |||
251 | } | ||
252 | |||
253 | FixPerRegisters(child); | ||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | static int | ||
258 | do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | ||
259 | { | ||
260 | unsigned long tmp; | ||
261 | ptrace_area parea; | ||
262 | int copied, ret; | ||
263 | |||
264 | switch (request) { | ||
265 | case PTRACE_PEEKTEXT: | ||
266 | case PTRACE_PEEKDATA: | ||
267 | /* Remove high order bit from address (only for 31 bit). */ | ||
268 | addr &= PSW_ADDR_INSN; | ||
269 | /* read word at location addr. */ | ||
270 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | ||
271 | if (copied != sizeof(tmp)) | ||
272 | return -EIO; | ||
273 | return put_user(tmp, (unsigned long __user *) data); | ||
274 | |||
275 | case PTRACE_PEEKUSR: | ||
276 | /* read the word at location addr in the USER area. */ | ||
277 | return peek_user(child, addr, data); | ||
278 | |||
279 | case PTRACE_POKETEXT: | ||
280 | case PTRACE_POKEDATA: | ||
281 | /* Remove high order bit from address (only for 31 bit). */ | ||
282 | addr &= PSW_ADDR_INSN; | ||
283 | /* write the word at location addr. */ | ||
284 | copied = access_process_vm(child, addr, &data, sizeof(data),1); | ||
285 | if (copied != sizeof(data)) | ||
286 | return -EIO; | ||
287 | return 0; | ||
288 | |||
289 | case PTRACE_POKEUSR: | ||
290 | /* write the word at location addr in the USER area */ | ||
291 | return poke_user(child, addr, data); | ||
292 | |||
293 | case PTRACE_PEEKUSR_AREA: | ||
294 | case PTRACE_POKEUSR_AREA: | ||
295 | if (copy_from_user(&parea, (void __user *) addr, | ||
296 | sizeof(parea))) | ||
297 | return -EFAULT; | ||
298 | addr = parea.kernel_addr; | ||
299 | data = parea.process_addr; | ||
300 | copied = 0; | ||
301 | while (copied < parea.len) { | ||
302 | if (request == PTRACE_PEEKUSR_AREA) | ||
303 | ret = peek_user(child, addr, data); | ||
304 | else { | ||
305 | addr_t tmp; | ||
306 | if (get_user (tmp, (addr_t __user *) data)) | ||
307 | return -EFAULT; | ||
308 | ret = poke_user(child, addr, tmp); | ||
309 | } | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | addr += sizeof(unsigned long); | ||
313 | data += sizeof(unsigned long); | ||
314 | copied += sizeof(unsigned long); | ||
315 | } | ||
316 | return 0; | ||
317 | } | ||
318 | return ptrace_request(child, request, addr, data); | ||
319 | } | ||
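A hedged sketch of how user space drives the PTRACE_PEEKUSR_AREA path above: one call copies a whole stretch of the user area, described by a ptrace_area whose field names follow their use in the loop (the request constant and exact types come from <asm/ptrace.h>; header interplay and error handling are glossed over).

#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

/* Copy the first 'len' bytes of the tracee's user area into buf. */
int peek_user_area(pid_t pid, void *buf, unsigned long len)
{
	ptrace_area parea;

	parea.len = len;				/* number of bytes to transfer */
	parea.kernel_addr = 0;				/* offset within the user area */
	parea.process_addr = (unsigned long) buf;	/* tracer-side destination buffer */
	return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
}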
320 | |||
321 | #ifdef CONFIG_S390_SUPPORT | ||
322 | /* | ||
323 | * Now the fun part starts... a 31 bit program running in the | ||
324 | * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, | ||
325 | * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy | ||
326 | * to handle; the difference from the 64 bit versions of the requests | ||
327 | * is that the access is done in multiples of 4 bytes instead of | ||
328 | * 8 bytes (sizeof(unsigned long) on 31/64 bit). | ||
329 | * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA, | ||
330 | * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program | ||
331 | * is a 31 bit program too, the content of struct user can be | ||
332 | * emulated. A 31 bit program peeking into the struct user of | ||
333 | * a 64 bit program is a no-no. | ||
334 | */ | ||
335 | |||
336 | /* | ||
337 | * Same as peek_user but for a 31 bit program. | ||
338 | */ | ||
339 | static int | ||
340 | peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | ||
341 | { | ||
342 | struct user32 *dummy32 = NULL; | ||
343 | per_struct32 *dummy_per32 = NULL; | ||
344 | addr_t offset; | ||
345 | __u32 tmp; | ||
346 | |||
347 | if (!test_thread_flag(TIF_31BIT) || | ||
348 | (addr & 3) || addr > sizeof(struct user32) - 3) | ||
349 | return -EIO; | ||
350 | |||
351 | if (addr < (addr_t) &dummy32->regs.acrs) { | ||
352 | /* | ||
353 | * psw and gprs are stored on the stack | ||
354 | */ | ||
355 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | ||
356 | /* Fake a 31 bit psw mask. */ | ||
357 | tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32); | ||
358 | tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); | ||
359 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | ||
360 | /* Fake a 31 bit psw address. */ | ||
361 | tmp = (__u32) __KSTK_PTREGS(child)->psw.addr | | ||
362 | PSW32_ADDR_AMODE31; | ||
363 | } else { | ||
364 | /* gpr 0-15 */ | ||
365 | tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw + | ||
366 | addr*2 + 4); | ||
367 | } | ||
368 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | ||
369 | /* | ||
370 | * access registers are stored in the thread structure | ||
371 | */ | ||
372 | offset = addr - (addr_t) &dummy32->regs.acrs; | ||
373 | tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); | ||
374 | |||
375 | } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { | ||
376 | /* | ||
377 | * orig_gpr2 is stored on the kernel stack | ||
378 | */ | ||
379 | tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4); | ||
380 | |||
381 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | ||
382 | /* | ||
383 | * floating point regs. are stored in the thread structure | ||
384 | */ | ||
385 | offset = addr - (addr_t) &dummy32->regs.fp_regs; | ||
386 | tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); | ||
387 | |||
388 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { | ||
389 | /* | ||
390 | * per_info is found in the thread structure | ||
391 | */ | ||
392 | offset = addr - (addr_t) &dummy32->regs.per_info; | ||
393 | /* This is magic. See per_struct and per_struct32. */ | ||
394 | if ((offset >= (addr_t) &dummy_per32->control_regs && | ||
395 | offset < (addr_t) (&dummy_per32->control_regs + 1)) || | ||
396 | (offset >= (addr_t) &dummy_per32->starting_addr && | ||
397 | offset <= (addr_t) &dummy_per32->ending_addr) || | ||
398 | offset == (addr_t) &dummy_per32->lowcore.words.address) | ||
399 | offset = offset*2 + 4; | ||
400 | else | ||
401 | offset = offset*2; | ||
402 | tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset); | ||
403 | |||
404 | } else | ||
405 | tmp = 0; | ||
406 | |||
407 | return put_user(tmp, (__u32 __user *) data); | ||
408 | } | ||
409 | |||
410 | /* | ||
411 | * Same as poke_user but for a 31 bit program. | ||
412 | */ | ||
413 | static int | ||
414 | poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | ||
415 | { | ||
416 | struct user32 *dummy32 = NULL; | ||
417 | per_struct32 *dummy_per32 = NULL; | ||
418 | addr_t offset; | ||
419 | __u32 tmp; | ||
420 | |||
421 | if (!test_thread_flag(TIF_31BIT) || | ||
422 | (addr & 3) || addr > sizeof(struct user32) - 3) | ||
423 | return -EIO; | ||
424 | |||
425 | tmp = (__u32) data; | ||
426 | |||
427 | if (addr < (addr_t) &dummy32->regs.acrs) { | ||
428 | /* | ||
429 | * psw, gprs, acrs and orig_gpr2 are stored on the stack | ||
430 | */ | ||
431 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | ||
432 | /* Build a 64 bit psw mask from 31 bit mask. */ | ||
433 | if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) | ||
434 | /* Invalid psw mask. */ | ||
435 | return -EINVAL; | ||
436 | __KSTK_PTREGS(child)->psw.mask = | ||
437 | PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); | ||
438 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | ||
439 | /* Build a 64 bit psw address from 31 bit address. */ | ||
440 | __KSTK_PTREGS(child)->psw.addr = | ||
441 | (__u64) tmp & PSW32_ADDR_INSN; | ||
442 | } else { | ||
443 | /* gpr 0-15 */ | ||
444 | *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw | ||
445 | + addr*2 + 4) = tmp; | ||
446 | } | ||
447 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | ||
448 | /* | ||
449 | * access registers are stored in the thread structure | ||
450 | */ | ||
451 | offset = addr - (addr_t) &dummy32->regs.acrs; | ||
452 | *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; | ||
453 | |||
454 | } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { | ||
455 | /* | ||
456 | * orig_gpr2 is stored on the kernel stack | ||
457 | */ | ||
458 | *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp; | ||
459 | |||
460 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | ||
461 | /* | ||
462 | * floating point regs. are stored in the thread structure | ||
463 | */ | ||
464 | if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && | ||
465 | (tmp & ~FPC_VALID_MASK) != 0) | ||
466 | /* Invalid floating point control. */ | ||
467 | return -EINVAL; | ||
468 | offset = addr - (addr_t) &dummy32->regs.fp_regs; | ||
469 | *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; | ||
470 | |||
471 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { | ||
472 | /* | ||
473 | * per_info is found in the thread structure. | ||
474 | */ | ||
475 | offset = addr - (addr_t) &dummy32->regs.per_info; | ||
476 | /* | ||
477 | * This is magic. See per_struct and per_struct32. | ||
478 | * By coincidence the offsets in per_struct are exactly | ||
479 | * twice the offsets in per_struct32 for all fields. | ||
480 | * The 8 byte fields need special handling though, | ||
481 | * because the second half (bytes 4-7) is needed and | ||
482 | * not the first half. | ||
483 | */ | ||
484 | if ((offset >= (addr_t) &dummy_per32->control_regs && | ||
485 | offset < (addr_t) (&dummy_per32->control_regs + 1)) || | ||
486 | (offset >= (addr_t) &dummy_per32->starting_addr && | ||
487 | offset <= (addr_t) &dummy_per32->ending_addr) || | ||
488 | offset == (addr_t) &dummy_per32->lowcore.words.address) | ||
489 | offset = offset*2 + 4; | ||
490 | else | ||
491 | offset = offset*2; | ||
492 | *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp; | ||
493 | |||
494 | } | ||
495 | |||
496 | FixPerRegisters(child); | ||
497 | return 0; | ||
498 | } | ||
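The offset translation described in the comment above can be summed up in a tiny helper (illustration only, not part of the source): a per_struct32 offset maps to twice its value in per_struct, and for the 8-byte fields the second half is the one wanted, hence the extra 4.

/* Model of the per_struct32 -> per_struct offset mapping used above. */
static inline unsigned long per32_to_per_offset(unsigned long off32, int is_8byte_field)
{
	return is_8byte_field ? off32 * 2 + 4 : off32 * 2;
}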
499 | |||
500 | static int | ||
501 | do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | ||
502 | { | ||
503 | unsigned int tmp; /* 4 bytes !! */ | ||
504 | ptrace_area_emu31 parea; | ||
505 | int copied, ret; | ||
506 | |||
507 | switch (request) { | ||
508 | case PTRACE_PEEKTEXT: | ||
509 | case PTRACE_PEEKDATA: | ||
510 | /* read word at location addr. */ | ||
511 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | ||
512 | if (copied != sizeof(tmp)) | ||
513 | return -EIO; | ||
514 | return put_user(tmp, (unsigned int __user *) data); | ||
515 | |||
516 | case PTRACE_PEEKUSR: | ||
517 | /* read the word at location addr in the USER area. */ | ||
518 | return peek_user_emu31(child, addr, data); | ||
519 | |||
520 | case PTRACE_POKETEXT: | ||
521 | case PTRACE_POKEDATA: | ||
522 | /* write the word at location addr. */ | ||
523 | tmp = data; | ||
524 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1); | ||
525 | if (copied != sizeof(tmp)) | ||
526 | return -EIO; | ||
527 | return 0; | ||
528 | |||
529 | case PTRACE_POKEUSR: | ||
530 | /* write the word at location addr in the USER area */ | ||
531 | return poke_user_emu31(child, addr, data); | ||
532 | |||
533 | case PTRACE_PEEKUSR_AREA: | ||
534 | case PTRACE_POKEUSR_AREA: | ||
535 | if (copy_from_user(&parea, (void __user *) addr, | ||
536 | sizeof(parea))) | ||
537 | return -EFAULT; | ||
538 | addr = parea.kernel_addr; | ||
539 | data = parea.process_addr; | ||
540 | copied = 0; | ||
541 | while (copied < parea.len) { | ||
542 | if (request == PTRACE_PEEKUSR_AREA) | ||
543 | ret = peek_user_emu31(child, addr, data); | ||
544 | else { | ||
545 | __u32 tmp; | ||
546 | if (get_user (tmp, (__u32 __user *) data)) | ||
547 | return -EFAULT; | ||
548 | ret = poke_user_emu31(child, addr, tmp); | ||
549 | } | ||
550 | if (ret) | ||
551 | return ret; | ||
552 | addr += sizeof(unsigned int); | ||
553 | data += sizeof(unsigned int); | ||
554 | copied += sizeof(unsigned int); | ||
555 | } | ||
556 | return 0; | ||
557 | case PTRACE_GETEVENTMSG: | ||
558 | return put_user((__u32) child->ptrace_message, | ||
559 | (unsigned int __user *) data); | ||
560 | case PTRACE_GETSIGINFO: | ||
561 | if (child->last_siginfo == NULL) | ||
562 | return -EINVAL; | ||
563 | return copy_siginfo_to_user32((compat_siginfo_t __user *) data, | ||
564 | child->last_siginfo); | ||
565 | case PTRACE_SETSIGINFO: | ||
566 | if (child->last_siginfo == NULL) | ||
567 | return -EINVAL; | ||
568 | return copy_siginfo_from_user32(child->last_siginfo, | ||
569 | (compat_siginfo_t __user *) data); | ||
570 | } | ||
571 | return ptrace_request(child, request, addr, data); | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | #define PT32_IEEE_IP 0x13c | ||
576 | |||
577 | static int | ||
578 | do_ptrace(struct task_struct *child, long request, long addr, long data) | ||
579 | { | ||
580 | int ret; | ||
581 | |||
582 | if (request == PTRACE_ATTACH) | ||
583 | return ptrace_attach(child); | ||
584 | |||
585 | /* | ||
586 | * Special cases to get/store the ieee instructions pointer. | ||
587 | */ | ||
588 | if (child == current) { | ||
589 | if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP) | ||
590 | return peek_user(child, addr, data); | ||
591 | if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP) | ||
592 | return poke_user(child, addr, data); | ||
593 | #ifdef CONFIG_S390_SUPPORT | ||
594 | if (request == PTRACE_PEEKUSR && | ||
595 | addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT)) | ||
596 | return peek_user_emu31(child, addr, data); | ||
597 | if (request == PTRACE_POKEUSR && | ||
598 | addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT)) | ||
599 | return poke_user_emu31(child, addr, data); | ||
600 | #endif | ||
601 | } | ||
602 | |||
603 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | ||
604 | if (ret < 0) | ||
605 | return ret; | ||
606 | |||
607 | switch (request) { | ||
608 | case PTRACE_SYSCALL: | ||
609 | /* continue and stop at next (return from) syscall */ | ||
610 | case PTRACE_CONT: | ||
611 | /* restart after signal. */ | ||
612 | if ((unsigned long) data >= _NSIG) | ||
613 | return -EIO; | ||
614 | if (request == PTRACE_SYSCALL) | ||
615 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
616 | else | ||
617 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
618 | child->exit_code = data; | ||
619 | /* make sure the single step bit is not set. */ | ||
620 | clear_single_step(child); | ||
621 | wake_up_process(child); | ||
622 | return 0; | ||
623 | |||
624 | case PTRACE_KILL: | ||
625 | /* | ||
626 | * Make the child exit. The best we can do is send it a SIGKILL. | ||
627 | * Perhaps it should be put in the status that it wants to | ||
628 | * exit. | ||
629 | */ | ||
630 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | ||
631 | return 0; | ||
632 | child->exit_code = SIGKILL; | ||
633 | /* make sure the single step bit is not set. */ | ||
634 | clear_single_step(child); | ||
635 | wake_up_process(child); | ||
636 | return 0; | ||
637 | |||
638 | case PTRACE_SINGLESTEP: | ||
639 | /* set the trap flag. */ | ||
640 | if ((unsigned long) data >= _NSIG) | ||
641 | return -EIO; | ||
642 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
643 | child->exit_code = data; | ||
644 | if (data) | ||
645 | set_tsk_thread_flag(child, TIF_SINGLE_STEP); | ||
646 | else | ||
647 | set_single_step(child); | ||
648 | /* give it a chance to run. */ | ||
649 | wake_up_process(child); | ||
650 | return 0; | ||
651 | |||
652 | case PTRACE_DETACH: | ||
653 | /* detach a process that was attached. */ | ||
654 | return ptrace_detach(child, data); | ||
655 | |||
656 | |||
657 | /* Do requests that differ for 31/64 bit */ | ||
658 | default: | ||
659 | #ifdef CONFIG_S390_SUPPORT | ||
660 | if (test_thread_flag(TIF_31BIT)) | ||
661 | return do_ptrace_emu31(child, request, addr, data); | ||
662 | #endif | ||
663 | return do_ptrace_normal(child, request, addr, data); | ||
664 | } | ||
665 | /* Not reached. */ | ||
666 | return -EIO; | ||
667 | } | ||
668 | |||
669 | asmlinkage long | ||
670 | sys_ptrace(long request, long pid, long addr, long data) | ||
671 | { | ||
672 | struct task_struct *child; | ||
673 | int ret; | ||
674 | |||
675 | lock_kernel(); | ||
676 | |||
677 | if (request == PTRACE_TRACEME) { | ||
678 | /* are we already being traced? */ | ||
679 | ret = -EPERM; | ||
680 | if (current->ptrace & PT_PTRACED) | ||
681 | goto out; | ||
682 | ret = security_ptrace(current->parent, current); | ||
683 | if (ret) | ||
684 | goto out; | ||
685 | /* set the ptrace bit in the process flags. */ | ||
686 | current->ptrace |= PT_PTRACED; | ||
687 | goto out; | ||
688 | } | ||
689 | |||
690 | ret = -EPERM; | ||
691 | if (pid == 1) /* you may not mess with init */ | ||
692 | goto out; | ||
693 | |||
694 | ret = -ESRCH; | ||
695 | read_lock(&tasklist_lock); | ||
696 | child = find_task_by_pid(pid); | ||
697 | if (child) | ||
698 | get_task_struct(child); | ||
699 | read_unlock(&tasklist_lock); | ||
700 | if (!child) | ||
701 | goto out; | ||
702 | |||
703 | ret = do_ptrace(child, request, addr, data); | ||
704 | |||
705 | put_task_struct(child); | ||
706 | out: | ||
707 | unlock_kernel(); | ||
708 | return ret; | ||
709 | } | ||
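To see the request flow end to end, a minimal user-space tracer might look like the sketch below: attach, wait for the stop, peek one text word and detach, exercising the PTRACE_ATTACH, PTRACE_PEEKTEXT and PTRACE_DETACH paths dispatched by do_ptrace() above (return values should be checked in real code).

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Attach to pid, dump one word of its text segment, then detach. */
void dump_text_word(pid_t pid, unsigned long addr)
{
	long word;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return;
	waitpid(pid, NULL, 0);				/* wait for the tracee to stop */
	word = ptrace(PTRACE_PEEKTEXT, pid, (void *) addr, NULL);
	printf("%#lx: %#lx\n", addr, (unsigned long) word);
	ptrace(PTRACE_DETACH, pid, NULL, NULL);		/* let the tracee run again */
}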
710 | |||
711 | asmlinkage void | ||
712 | syscall_trace(struct pt_regs *regs, int entryexit) | ||
713 | { | ||
714 | if (unlikely(current->audit_context)) { | ||
715 | if (!entryexit) | ||
716 | audit_syscall_entry(current, regs->gprs[2], | ||
717 | regs->orig_gpr2, regs->gprs[3], | ||
718 | regs->gprs[4], regs->gprs[5]); | ||
719 | else | ||
720 | audit_syscall_exit(current, regs->gprs[2]); | ||
721 | } | ||
722 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
723 | return; | ||
724 | if (!(current->ptrace & PT_PTRACED)) | ||
725 | return; | ||
726 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | ||
727 | ? 0x80 : 0)); | ||
728 | |||
729 | /* | ||
730 | * this isn't the same as continuing with a signal, but it will do | ||
731 | * for normal use. strace only continues with a signal if the | ||
732 | * stopping signal is not SIGTRAP. -brl | ||
733 | */ | ||
734 | if (current->exit_code) { | ||
735 | send_sig(current->exit_code, current, 1); | ||
736 | current->exit_code = 0; | ||
737 | } | ||
738 | } | ||
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S new file mode 100644 index 000000000000..658e5ac484f9 --- /dev/null +++ b/arch/s390/kernel/reipl.S | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/reipl.S | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) | ||
7 | */ | ||
8 | |||
9 | #include <asm/lowcore.h> | ||
10 | |||
11 | .globl do_reipl | ||
12 | do_reipl: basr %r13,0 | ||
13 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) | ||
14 | .Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13) | ||
15 | stctl %c0,%c0,.Lctlsave-.Lpg0(%r13) | ||
16 | ni .Lctlsave-.Lpg0(%r13),0xef | ||
17 | lctl %c0,%c0,.Lctlsave-.Lpg0(%r13) | ||
18 | lr %r1,%r2 | ||
19 | mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) | ||
20 | stsch .Lschib-.Lpg0(%r13) | ||
21 | oi .Lschib+5-.Lpg0(%r13),0x84 | ||
22 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | ||
23 | msch .Lschib-.Lpg0(%r13) | ||
24 | lhi %r0,5 | ||
25 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | ||
26 | jz .L001 | ||
27 | brct %r0,.Lssch | ||
28 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
29 | .L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13) | ||
30 | .Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13) | ||
31 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | ||
32 | jnz .Ltpi | ||
33 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | ||
34 | jnz .Ltpi | ||
35 | tsch .Liplirb-.Lpg0(%r13) | ||
36 | tm .Liplirb+9-.Lpg0(%r13),0xbf | ||
37 | jz .L002 | ||
38 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
39 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | ||
40 | jz .L003 | ||
41 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
42 | .L003: spx .Lnull-.Lpg0(%r13) | ||
43 | st %r1,__LC_SUBCHANNEL_ID | ||
44 | lpsw 0 | ||
45 | sigp 0,0,0(6) | ||
46 | .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) | ||
47 | lpsw .Ldispsw-.Lpg0(%r13) | ||
48 | .align 8 | ||
49 | .Lall: .long 0xff000000 | ||
50 | .Lnull: .long 0x00000000 | ||
51 | .Lctlsave: .long 0x00000000 | ||
52 | .align 8 | ||
53 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 | ||
54 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs | ||
55 | .Lionew: .long 0x00080000,0x80000000+.Lcont | ||
56 | .Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi | ||
57 | .Ldispsw: .long 0x000a0000,0x00000000 | ||
58 | .Liplccws: .long 0x02000000,0x60000018 | ||
59 | .long 0x08000008,0x20000001 | ||
60 | .Liplorb: .long 0x0049504c,0x0040ff80 | ||
61 | .long 0x00000000+.Liplccws | ||
62 | .Lschib: .long 0x00000000,0x00000000 | ||
63 | .long 0x00000000,0x00000000 | ||
64 | .long 0x00000000,0x00000000 | ||
65 | .long 0x00000000,0x00000000 | ||
66 | .long 0x00000000,0x00000000 | ||
67 | .long 0x00000000,0x00000000 | ||
68 | .Liplirb: .long 0x00000000,0x00000000 | ||
69 | .long 0x00000000,0x00000000 | ||
70 | .long 0x00000000,0x00000000 | ||
71 | .long 0x00000000,0x00000000 | ||
72 | .long 0x00000000,0x00000000 | ||
73 | .long 0x00000000,0x00000000 | ||
74 | .long 0x00000000,0x00000000 | ||
75 | .long 0x00000000,0x00000000 | ||
76 | |||
77 | |||
78 | |||
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S new file mode 100644 index 000000000000..4d090d60f3ef --- /dev/null +++ b/arch/s390/kernel/reipl64.S | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/reipl.S | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) | ||
7 | Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | ||
8 | */ | ||
9 | |||
10 | #include <asm/lowcore.h> | ||
11 | .globl do_reipl | ||
12 | do_reipl: basr %r13,0 | ||
13 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) | ||
14 | .Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) | ||
15 | stctg %c0,%c0,.Lctlsave-.Lpg0(%r13) | ||
16 | ni .Lctlsave+4-.Lpg0(%r13),0xef | ||
17 | lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13) | ||
18 | lgr %r1,%r2 | ||
19 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) | ||
20 | stsch .Lschib-.Lpg0(%r13) | ||
21 | oi .Lschib+5-.Lpg0(%r13),0x84 | ||
22 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | ||
23 | msch .Lschib-.Lpg0(%r13) | ||
24 | lghi %r0,5 | ||
25 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | ||
26 | jz .L001 | ||
27 | brct %r0,.Lssch | ||
28 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
29 | .L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) | ||
30 | .Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) | ||
31 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | ||
32 | jnz .Ltpi | ||
33 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | ||
34 | jnz .Ltpi | ||
35 | tsch .Liplirb-.Lpg0(%r13) | ||
36 | tm .Liplirb+9-.Lpg0(%r13),0xbf | ||
37 | jz .L002 | ||
38 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
39 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | ||
40 | jz .L003 | ||
41 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
42 | .L003: spx .Lnull-.Lpg0(%r13) | ||
43 | st %r1,__LC_SUBCHANNEL_ID | ||
44 | lhi %r1,0 # mode 0 = esa | ||
45 | slr %r0,%r0 # set cpuid to zero | ||
46 | sigp %r1,%r0,0x12 # switch to esa mode | ||
47 | lpsw 0 | ||
48 | .Ldisab: sll %r14,1 | ||
49 | srl %r14,1 # need to kill hi bit to avoid specification exceptions. | ||
50 | st %r14,.Ldispsw+12-.Lpg0(%r13) | ||
51 | lpswe .Ldispsw-.Lpg0(%r13) | ||
52 | .align 8 | ||
53 | .Lall: .quad 0x00000000ff000000 | ||
54 | .Lctlsave: .quad 0x0000000000000000 | ||
55 | .Lnull: .long 0x0000000000000000 | ||
56 | .align 16 | ||
57 | /* | ||
58 | * These addresses have to be 31 bit, otherwise | ||
59 | * the sigp will throw a specification exception | ||
60 | * when switching to ESA mode, as bit 31 would be set | ||
61 | * in the ESA psw. | ||
62 | * Bit 31 of the addresses has to be 0 for the | ||
63 | * 31 bit lpswe instruction, a fact that appears to have | ||
64 | * been omitted from the POP. | ||
65 | */ | ||
66 | .Lnewpsw: .quad 0x0000000080000000 | ||
67 | .quad .Lpg1 | ||
68 | .Lpcnew: .quad 0x0000000080000000 | ||
69 | .quad .Lecs | ||
70 | .Lionew: .quad 0x0000000080000000 | ||
71 | .quad .Lcont | ||
72 | .Lwaitpsw: .quad 0x0202000080000000 | ||
73 | .quad .Ltpi | ||
74 | .Ldispsw: .quad 0x0002000080000000 | ||
75 | .quad 0x0000000000000000 | ||
76 | .Liplccws: .long 0x02000000,0x60000018 | ||
77 | .long 0x08000008,0x20000001 | ||
78 | .Liplorb: .long 0x0049504c,0x0040ff80 | ||
79 | .long 0x00000000+.Liplccws | ||
80 | .Lschib: .long 0x00000000,0x00000000 | ||
81 | .long 0x00000000,0x00000000 | ||
82 | .long 0x00000000,0x00000000 | ||
83 | .long 0x00000000,0x00000000 | ||
84 | .long 0x00000000,0x00000000 | ||
85 | .long 0x00000000,0x00000000 | ||
86 | .Liplirb: .long 0x00000000,0x00000000 | ||
87 | .long 0x00000000,0x00000000 | ||
88 | .long 0x00000000,0x00000000 | ||
89 | .long 0x00000000,0x00000000 | ||
90 | .long 0x00000000,0x00000000 | ||
91 | .long 0x00000000,0x00000000 | ||
92 | .long 0x00000000,0x00000000 | ||
93 | .long 0x00000000,0x00000000 | ||
94 | |||
95 | |||
96 | |||
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c new file mode 100644 index 000000000000..3bdd38ec71da --- /dev/null +++ b/arch/s390/kernel/s390_ext.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/s390_ext.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | |||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/s390_ext.h> | ||
19 | #include <asm/irq.h> | ||
20 | |||
21 | /* | ||
22 | * Simple hash strategy: index = code & 0xff; | ||
23 | * ext_int_hash[index] is the start of the list for all external interrupts | ||
24 | * that hash to this index. With the current set of external interrupts | ||
25 | * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 | ||
26 | * iucv and 0x2603 pfault) this is always the first element. | ||
27 | */ | ||
28 | ext_int_info_t *ext_int_hash[256] = { 0, }; | ||
29 | |||
30 | int register_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
31 | { | ||
32 | ext_int_info_t *p; | ||
33 | int index; | ||
34 | |||
35 | p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC); | ||
36 | if (p == NULL) | ||
37 | return -ENOMEM; | ||
38 | p->code = code; | ||
39 | p->handler = handler; | ||
40 | index = code & 0xff; | ||
41 | p->next = ext_int_hash[index]; | ||
42 | ext_int_hash[index] = p; | ||
43 | return 0; | ||
44 | } | ||
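A hedged sketch of a caller: kernel code registering a handler for an external interruption code (0x1004, the cpu timer, used here purely as an illustration). The handler prototype is assumed from the p->handler(regs, code) call in do_extint() further down; init wiring is abbreviated.

#include <linux/init.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <asm/s390_ext.h>

/* Hypothetical handler for the 0x1004 (cpu timer) external interruption. */
static void my_cpu_timer_handler(struct pt_regs *regs, __u16 code)
{
	/* acknowledge / process the interruption here */
}

static int __init my_ext_init(void)
{
	return register_external_interrupt(0x1004, my_cpu_timer_handler);
}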
45 | |||
46 | int register_early_external_interrupt(__u16 code, ext_int_handler_t handler, | ||
47 | ext_int_info_t *p) | ||
48 | { | ||
49 | int index; | ||
50 | |||
51 | if (p == NULL) | ||
52 | return -EINVAL; | ||
53 | p->code = code; | ||
54 | p->handler = handler; | ||
55 | index = code & 0xff; | ||
56 | p->next = ext_int_hash[index]; | ||
57 | ext_int_hash[index] = p; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
62 | { | ||
63 | ext_int_info_t *p, *q; | ||
64 | int index; | ||
65 | |||
66 | index = code & 0xff; | ||
67 | q = NULL; | ||
68 | p = ext_int_hash[index]; | ||
69 | while (p != NULL) { | ||
70 | if (p->code == code && p->handler == handler) | ||
71 | break; | ||
72 | q = p; | ||
73 | p = p->next; | ||
74 | } | ||
75 | if (p == NULL) | ||
76 | return -ENOENT; | ||
77 | if (q != NULL) | ||
78 | q->next = p->next; | ||
79 | else | ||
80 | ext_int_hash[index] = p->next; | ||
81 | kfree(p); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler, | ||
86 | ext_int_info_t *p) | ||
87 | { | ||
88 | ext_int_info_t *q; | ||
89 | int index; | ||
90 | |||
91 | if (p == NULL || p->code != code || p->handler != handler) | ||
92 | return -EINVAL; | ||
93 | index = code & 0xff; | ||
94 | q = ext_int_hash[index]; | ||
95 | if (p != q) { | ||
96 | while (q != NULL) { | ||
97 | if (q->next == p) | ||
98 | break; | ||
99 | q = q->next; | ||
100 | } | ||
101 | if (q == NULL) | ||
102 | return -ENOENT; | ||
103 | q->next = p->next; | ||
104 | } else | ||
105 | ext_int_hash[index] = p->next; | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | void do_extint(struct pt_regs *regs, unsigned short code) | ||
110 | { | ||
111 | ext_int_info_t *p; | ||
112 | int index; | ||
113 | |||
114 | irq_enter(); | ||
115 | asm volatile ("mc 0,0"); | ||
116 | if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) | ||
117 | /* | ||
118 | * Make sure that the external interrupt did not "overtake" | ||
119 | * the last HZ timer interrupt. | ||
120 | */ | ||
121 | account_ticks(regs); | ||
122 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | ||
123 | index = code & 0xff; | ||
124 | for (p = ext_int_hash[index]; p; p = p->next) { | ||
125 | if (likely(p->code == code)) { | ||
126 | if (likely(p->handler)) | ||
127 | p->handler(regs, code); | ||
128 | } | ||
129 | } | ||
130 | irq_exit(); | ||
131 | } | ||
132 | |||
133 | EXPORT_SYMBOL(register_external_interrupt); | ||
134 | EXPORT_SYMBOL(unregister_external_interrupt); | ||
135 | |||
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c new file mode 100644 index 000000000000..11fd6d556d8f --- /dev/null +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/s390_ksyms.c | ||
3 | * | ||
4 | * S390 version | ||
5 | */ | ||
6 | #include <linux/config.h> | ||
7 | #include <linux/highuid.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <linux/syscalls.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/ioctl32.h> | ||
14 | #include <asm/checksum.h> | ||
15 | #include <asm/cpcmd.h> | ||
16 | #include <asm/delay.h> | ||
17 | #include <asm/pgalloc.h> | ||
18 | #include <asm/setup.h> | ||
19 | #ifdef CONFIG_IP_MULTICAST | ||
20 | #include <net/arp.h> | ||
21 | #endif | ||
22 | |||
23 | /* | ||
24 | * memory management | ||
25 | */ | ||
26 | EXPORT_SYMBOL(_oi_bitmap); | ||
27 | EXPORT_SYMBOL(_ni_bitmap); | ||
28 | EXPORT_SYMBOL(_zb_findmap); | ||
29 | EXPORT_SYMBOL(_sb_findmap); | ||
30 | EXPORT_SYMBOL(__copy_from_user_asm); | ||
31 | EXPORT_SYMBOL(__copy_to_user_asm); | ||
32 | EXPORT_SYMBOL(__copy_in_user_asm); | ||
33 | EXPORT_SYMBOL(__clear_user_asm); | ||
34 | EXPORT_SYMBOL(__strncpy_from_user_asm); | ||
35 | EXPORT_SYMBOL(__strnlen_user_asm); | ||
36 | EXPORT_SYMBOL(diag10); | ||
37 | EXPORT_SYMBOL(default_storage_key); | ||
38 | |||
39 | /* | ||
40 | * semaphore ops | ||
41 | */ | ||
42 | EXPORT_SYMBOL(__up); | ||
43 | EXPORT_SYMBOL(__down); | ||
44 | EXPORT_SYMBOL(__down_interruptible); | ||
45 | |||
46 | /* | ||
47 | * binfmt_elf loader | ||
48 | */ | ||
49 | extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); | ||
50 | EXPORT_SYMBOL(dump_fpu); | ||
51 | EXPORT_SYMBOL(overflowuid); | ||
52 | EXPORT_SYMBOL(overflowgid); | ||
53 | EXPORT_SYMBOL(empty_zero_page); | ||
54 | |||
55 | /* | ||
56 | * misc. | ||
57 | */ | ||
58 | EXPORT_SYMBOL(machine_flags); | ||
59 | EXPORT_SYMBOL(__udelay); | ||
60 | EXPORT_SYMBOL(kernel_thread); | ||
61 | EXPORT_SYMBOL(csum_fold); | ||
62 | EXPORT_SYMBOL(console_mode); | ||
63 | EXPORT_SYMBOL(console_devno); | ||
64 | EXPORT_SYMBOL(console_irq); | ||
65 | EXPORT_SYMBOL(sys_wait4); | ||
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c new file mode 100644 index 000000000000..8dfb690c159f --- /dev/null +++ b/arch/s390/kernel/semaphore.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * linux/arch/s390/kernel/semaphore.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1998-2000 IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky | ||
7 | * | ||
8 | * Derived from "linux/arch/i386/kernel/semaphore.c" | ||
9 | * Copyright (C) 1999, Linus Torvalds | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/semaphore.h> | ||
17 | |||
18 | /* | ||
19 | * Atomically update sem->count. Equivalent to: | ||
20 | * old_val = sem->count.counter; | ||
21 | * new_val = ((old_val >= 0) ? old_val : 0) + incr; | ||
22 | * sem->count.counter = new_val; | ||
23 | * return old_val; | ||
24 | */ | ||
25 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
26 | { | ||
27 | int old_val, new_val; | ||
28 | |||
29 | __asm__ __volatile__(" l %0,0(%3)\n" | ||
30 | "0: ltr %1,%0\n" | ||
31 | " jhe 1f\n" | ||
32 | " lhi %1,0\n" | ||
33 | "1: ar %1,%4\n" | ||
34 | " cs %0,%1,0(%3)\n" | ||
35 | " jl 0b\n" | ||
36 | : "=&d" (old_val), "=&d" (new_val), | ||
37 | "=m" (sem->count) | ||
38 | : "a" (&sem->count), "d" (incr), "m" (sem->count) | ||
39 | : "cc" ); | ||
40 | return old_val; | ||
41 | } | ||
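The compare-and-swap loop in the inline assembly is equivalent to the following plain-C model (illustration only, written with a GCC builtin rather than the s390 cs instruction):

/* Same update rule as the cs loop above, expressed portably. */
static inline int sem_update_count_model(int *count, int incr)
{
	int old_val, new_val;

	do {
		old_val = *count;
		new_val = ((old_val >= 0) ? old_val : 0) + incr;
	} while (__sync_val_compare_and_swap(count, old_val, new_val) != old_val);

	return old_val;
}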
42 | |||
43 | /* | ||
44 | * The inline function up() incremented count but the result | ||
45 | * was <= 0. This indicates that some process is waiting on | ||
46 | * the semaphore. The semaphore is free and we'll wake the | ||
47 | * first sleeping process, so we set count to 1 unless some | ||
48 | * other cpu has called up in the meantime in which case | ||
49 | * we just increment count by 1. | ||
50 | */ | ||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | __sem_update_count(sem, 1); | ||
54 | wake_up(&sem->wait); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * The inline function down() decremented count and the result | ||
59 | * was < 0. The wait loop will atomically test and update the | ||
60 | * semaphore counter following the rules: | ||
61 | * count > 0: decrement count, wake up queue and exit. | ||
62 | * count <= 0: set count to -1, go to sleep. | ||
63 | */ | ||
64 | void __sched __down(struct semaphore * sem) | ||
65 | { | ||
66 | struct task_struct *tsk = current; | ||
67 | DECLARE_WAITQUEUE(wait, tsk); | ||
68 | |||
69 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
70 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
71 | while (__sem_update_count(sem, -1) <= 0) { | ||
72 | schedule(); | ||
73 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
74 | } | ||
75 | remove_wait_queue(&sem->wait, &wait); | ||
76 | __set_task_state(tsk, TASK_RUNNING); | ||
77 | wake_up(&sem->wait); | ||
78 | } | ||
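For context, this slow path is only reached from the generic down()/up() fast paths; a typical (historical) caller looks like the hedged sketch below, where my_sem and my_critical_section are made-up names.

#include <asm/semaphore.h>

static DECLARE_MUTEX(my_sem);		/* semaphore initialized to 1 */

static void my_critical_section(void)
{
	down(&my_sem);			/* sleeps in __down() on contention */
	/* ... serialized work ... */
	up(&my_sem);			/* __up() wakes a waiter if needed */
}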
79 | |||
80 | /* | ||
81 | * Same as __down() with an additional test for signals. | ||
82 | * If a signal is pending the count is updated as follows: | ||
83 | * count > 0: wake up queue and exit. | ||
84 | * count <= 0: set count to 0, wake up queue and exit. | ||
85 | */ | ||
86 | int __sched __down_interruptible(struct semaphore * sem) | ||
87 | { | ||
88 | int retval = 0; | ||
89 | struct task_struct *tsk = current; | ||
90 | DECLARE_WAITQUEUE(wait, tsk); | ||
91 | |||
92 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
93 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
94 | while (__sem_update_count(sem, -1) <= 0) { | ||
95 | if (signal_pending(current)) { | ||
96 | __sem_update_count(sem, 0); | ||
97 | retval = -EINTR; | ||
98 | break; | ||
99 | } | ||
100 | schedule(); | ||
101 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
102 | } | ||
103 | remove_wait_queue(&sem->wait, &wait); | ||
104 | __set_task_state(tsk, TASK_RUNNING); | ||
105 | wake_up(&sem->wait); | ||
106 | return retval; | ||
107 | } | ||
108 | |||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c new file mode 100644 index 000000000000..c879c40aa7a5 --- /dev/null +++ b/arch/s390/kernel/setup.c | |||
@@ -0,0 +1,632 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/setup.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * Derived from "arch/i386/kernel/setup.c" | ||
10 | * Copyright (C) 1995, Linus Torvalds | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * This file handles the architecture-dependent parts of initialization | ||
15 | */ | ||
16 | |||
17 | #include <linux/errno.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/unistd.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/user.h> | ||
27 | #include <linux/a.out.h> | ||
28 | #include <linux/tty.h> | ||
29 | #include <linux/ioport.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/config.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/initrd.h> | ||
34 | #include <linux/bootmem.h> | ||
35 | #include <linux/root_dev.h> | ||
36 | #include <linux/console.h> | ||
37 | #include <linux/seq_file.h> | ||
38 | #include <linux/kernel_stat.h> | ||
39 | |||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/smp.h> | ||
43 | #include <asm/mmu_context.h> | ||
44 | #include <asm/cpcmd.h> | ||
45 | #include <asm/lowcore.h> | ||
46 | #include <asm/irq.h> | ||
47 | |||
48 | /* | ||
49 | * Machine setup.. | ||
50 | */ | ||
51 | unsigned int console_mode = 0; | ||
52 | unsigned int console_devno = -1; | ||
53 | unsigned int console_irq = -1; | ||
54 | unsigned long memory_size = 0; | ||
55 | unsigned long machine_flags = 0; | ||
56 | unsigned int default_storage_key = 0; | ||
57 | struct { | ||
58 | unsigned long addr, size, type; | ||
59 | } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; | ||
60 | #define CHUNK_READ_WRITE 0 | ||
61 | #define CHUNK_READ_ONLY 1 | ||
62 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ | ||
63 | |||
64 | /* | ||
65 | * Setup options | ||
66 | */ | ||
67 | extern int _text,_etext, _edata, _end; | ||
68 | |||
69 | /* | ||
70 | * This is set up by the setup routine at boot time. | ||
71 | * For S390 we need to find out what we have to set up, | ||
72 | * using address 0x10400 ... | ||
73 | */ | ||
74 | |||
75 | #include <asm/setup.h> | ||
76 | |||
77 | static char command_line[COMMAND_LINE_SIZE] = { 0, }; | ||
78 | |||
79 | static struct resource code_resource = { | ||
80 | .name = "Kernel code", | ||
81 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | ||
82 | }; | ||
83 | |||
84 | static struct resource data_resource = { | ||
85 | .name = "Kernel data", | ||
86 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | ||
87 | }; | ||
88 | |||
89 | /* | ||
90 | * cpu_init() initializes state that is per-CPU. | ||
91 | */ | ||
92 | void __devinit cpu_init (void) | ||
93 | { | ||
94 | int addr = hard_smp_processor_id(); | ||
95 | |||
96 | /* | ||
97 | * Store processor id in lowcore (used e.g. in timer_interrupt) | ||
98 | */ | ||
99 | asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); | ||
100 | S390_lowcore.cpu_data.cpu_addr = addr; | ||
101 | |||
102 | /* | ||
103 | * Force FPU initialization: | ||
104 | */ | ||
105 | clear_thread_flag(TIF_USEDFPU); | ||
106 | clear_used_math(); | ||
107 | |||
108 | atomic_inc(&init_mm.mm_count); | ||
109 | current->active_mm = &init_mm; | ||
110 | if (current->mm) | ||
111 | BUG(); | ||
112 | enter_lazy_tlb(&init_mm, current); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * VM halt and poweroff setup routines | ||
117 | */ | ||
118 | char vmhalt_cmd[128] = ""; | ||
119 | char vmpoff_cmd[128] = ""; | ||
120 | |||
121 | static inline void strncpy_skip_quote(char *dst, char *src, int n) | ||
122 | { | ||
123 | int sx, dx; | ||
124 | |||
125 | dx = 0; | ||
126 | for (sx = 0; src[sx] != 0; sx++) { | ||
127 | if (src[sx] == '"') continue; | ||
128 | dst[dx++] = src[sx]; | ||
129 | if (dx >= n) break; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | static int __init vmhalt_setup(char *str) | ||
134 | { | ||
135 | strncpy_skip_quote(vmhalt_cmd, str, 127); | ||
136 | vmhalt_cmd[127] = 0; | ||
137 | return 1; | ||
138 | } | ||
139 | |||
140 | __setup("vmhalt=", vmhalt_setup); | ||
141 | |||
142 | static int __init vmpoff_setup(char *str) | ||
143 | { | ||
144 | strncpy_skip_quote(vmpoff_cmd, str, 127); | ||
145 | vmpoff_cmd[127] = 0; | ||
146 | return 1; | ||
147 | } | ||
148 | |||
149 | __setup("vmpoff=", vmpoff_setup); | ||
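Both options carry a CP command to be issued before the processor is stopped; booting under z/VM with, for example, vmpoff="LOGOFF" makes the power-off path further down hand LOGOFF to CP (the quotes are stripped by strncpy_skip_quote()).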
150 | |||
151 | /* | ||
152 | * condev= and conmode= setup parameter. | ||
153 | */ | ||
154 | |||
155 | static int __init condev_setup(char *str) | ||
156 | { | ||
157 | int vdev; | ||
158 | |||
159 | vdev = simple_strtoul(str, &str, 0); | ||
160 | if (vdev >= 0 && vdev < 65536) { | ||
161 | console_devno = vdev; | ||
162 | console_irq = -1; | ||
163 | } | ||
164 | return 1; | ||
165 | } | ||
166 | |||
167 | __setup("condev=", condev_setup); | ||
168 | |||
169 | static int __init conmode_setup(char *str) | ||
170 | { | ||
171 | #if defined(CONFIG_SCLP_CONSOLE) | ||
172 | if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) | ||
173 | SET_CONSOLE_SCLP; | ||
174 | #endif | ||
175 | #if defined(CONFIG_TN3215_CONSOLE) | ||
176 | if (strncmp(str, "3215", 5) == 0) | ||
177 | SET_CONSOLE_3215; | ||
178 | #endif | ||
179 | #if defined(CONFIG_TN3270_CONSOLE) | ||
180 | if (strncmp(str, "3270", 5) == 0) | ||
181 | SET_CONSOLE_3270; | ||
182 | #endif | ||
183 | return 1; | ||
184 | } | ||
185 | |||
186 | __setup("conmode=", conmode_setup); | ||
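For example, a parameter line containing condev=0x001f conmode=sclp (hypothetical device number) selects console device number 0x001f and the SCLP console driver, provided CONFIG_SCLP_CONSOLE is enabled.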
187 | |||
188 | static void __init conmode_default(void) | ||
189 | { | ||
190 | char query_buffer[1024]; | ||
191 | char *ptr; | ||
192 | |||
193 | if (MACHINE_IS_VM) { | ||
194 | __cpcmd("QUERY CONSOLE", query_buffer, 1024); | ||
195 | console_devno = simple_strtoul(query_buffer + 5, NULL, 16); | ||
196 | ptr = strstr(query_buffer, "SUBCHANNEL ="); | ||
197 | console_irq = simple_strtoul(ptr + 13, NULL, 16); | ||
198 | __cpcmd("QUERY TERM", query_buffer, 1024); | ||
199 | ptr = strstr(query_buffer, "CONMODE"); | ||
200 | /* | ||
201 | * Set the conmode to 3215 so that the device recognition | ||
202 | * will set the cu_type of the console to 3215. If the | ||
203 | * conmode is 3270 and we don't set it back then both | ||
204 | * 3215 and the 3270 driver will try to access the console | ||
205 | * device (3215 as console and 3270 as normal tty). | ||
206 | */ | ||
207 | __cpcmd("TERM CONMODE 3215", NULL, 0); | ||
208 | if (ptr == NULL) { | ||
209 | #if defined(CONFIG_SCLP_CONSOLE) | ||
210 | SET_CONSOLE_SCLP; | ||
211 | #endif | ||
212 | return; | ||
213 | } | ||
214 | if (strncmp(ptr + 8, "3270", 4) == 0) { | ||
215 | #if defined(CONFIG_TN3270_CONSOLE) | ||
216 | SET_CONSOLE_3270; | ||
217 | #elif defined(CONFIG_TN3215_CONSOLE) | ||
218 | SET_CONSOLE_3215; | ||
219 | #elif defined(CONFIG_SCLP_CONSOLE) | ||
220 | SET_CONSOLE_SCLP; | ||
221 | #endif | ||
222 | } else if (strncmp(ptr + 8, "3215", 4) == 0) { | ||
223 | #if defined(CONFIG_TN3215_CONSOLE) | ||
224 | SET_CONSOLE_3215; | ||
225 | #elif defined(CONFIG_TN3270_CONSOLE) | ||
226 | SET_CONSOLE_3270; | ||
227 | #elif defined(CONFIG_SCLP_CONSOLE) | ||
228 | SET_CONSOLE_SCLP; | ||
229 | #endif | ||
230 | } | ||
231 | } else if (MACHINE_IS_P390) { | ||
232 | #if defined(CONFIG_TN3215_CONSOLE) | ||
233 | SET_CONSOLE_3215; | ||
234 | #elif defined(CONFIG_TN3270_CONSOLE) | ||
235 | SET_CONSOLE_3270; | ||
236 | #endif | ||
237 | } else { | ||
238 | #if defined(CONFIG_SCLP_CONSOLE) | ||
239 | SET_CONSOLE_SCLP; | ||
240 | #endif | ||
241 | } | ||
242 | } | ||
243 | |||
244 | #ifdef CONFIG_SMP | ||
245 | extern void machine_restart_smp(char *); | ||
246 | extern void machine_halt_smp(void); | ||
247 | extern void machine_power_off_smp(void); | ||
248 | |||
249 | void (*_machine_restart)(char *command) = machine_restart_smp; | ||
250 | void (*_machine_halt)(void) = machine_halt_smp; | ||
251 | void (*_machine_power_off)(void) = machine_power_off_smp; | ||
252 | #else | ||
253 | /* | ||
254 | * Reboot, halt and power_off routines for non SMP. | ||
255 | */ | ||
256 | extern void reipl(unsigned long devno); | ||
257 | static void do_machine_restart_nonsmp(char * __unused) | ||
258 | { | ||
259 | if (MACHINE_IS_VM) | ||
260 | cpcmd ("IPL", NULL, 0); | ||
261 | else | ||
262 | reipl (0x10000 | S390_lowcore.ipl_device); | ||
263 | } | ||
264 | |||
265 | static void do_machine_halt_nonsmp(void) | ||
266 | { | ||
267 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | ||
268 | cpcmd(vmhalt_cmd, NULL, 0); | ||
269 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
270 | } | ||
271 | |||
272 | static void do_machine_power_off_nonsmp(void) | ||
273 | { | ||
274 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
275 | cpcmd(vmpoff_cmd, NULL, 0); | ||
276 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
277 | } | ||
278 | |||
279 | void (*_machine_restart)(char *command) = do_machine_restart_nonsmp; | ||
280 | void (*_machine_halt)(void) = do_machine_halt_nonsmp; | ||
281 | void (*_machine_power_off)(void) = do_machine_power_off_nonsmp; | ||
282 | #endif | ||
283 | |||
284 | /* | ||
285 | * Reboot, halt and power_off stubs. They just call _machine_restart, | ||
286 | * _machine_halt or _machine_power_off. | ||
287 | */ | ||
288 | |||
289 | void machine_restart(char *command) | ||
290 | { | ||
291 | console_unblank(); | ||
292 | _machine_restart(command); | ||
293 | } | ||
294 | |||
295 | EXPORT_SYMBOL(machine_restart); | ||
296 | |||
297 | void machine_halt(void) | ||
298 | { | ||
299 | console_unblank(); | ||
300 | _machine_halt(); | ||
301 | } | ||
302 | |||
303 | EXPORT_SYMBOL(machine_halt); | ||
304 | |||
305 | void machine_power_off(void) | ||
306 | { | ||
307 | console_unblank(); | ||
308 | _machine_power_off(); | ||
309 | } | ||
310 | |||
311 | EXPORT_SYMBOL(machine_power_off); | ||
312 | |||
313 | /* | ||
314 | * Setup function called from init/main.c just after the banner | ||
315 | * was printed. | ||
316 | */ | ||
317 | extern char _pstart, _pend, _stext; | ||
318 | |||
319 | void __init setup_arch(char **cmdline_p) | ||
320 | { | ||
321 | unsigned long bootmap_size; | ||
322 | unsigned long memory_start, memory_end; | ||
323 | char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; | ||
324 | unsigned long start_pfn, end_pfn; | ||
325 | static unsigned int smptrap=0; | ||
326 | unsigned long delay = 0; | ||
327 | struct _lowcore *lc; | ||
328 | int i; | ||
329 | |||
330 | if (smptrap) | ||
331 | return; | ||
332 | smptrap=1; | ||
333 | |||
334 | /* | ||
335 | * print what head.S has found out about the machine | ||
336 | */ | ||
337 | #ifndef CONFIG_ARCH_S390X | ||
338 | printk((MACHINE_IS_VM) ? | ||
339 | "We are running under VM (31 bit mode)\n" : | ||
340 | "We are running native (31 bit mode)\n"); | ||
341 | printk((MACHINE_HAS_IEEE) ? | ||
342 | "This machine has an IEEE fpu\n" : | ||
343 | "This machine has no IEEE fpu\n"); | ||
344 | #else /* CONFIG_ARCH_S390X */ | ||
345 | printk((MACHINE_IS_VM) ? | ||
346 | "We are running under VM (64 bit mode)\n" : | ||
347 | "We are running native (64 bit mode)\n"); | ||
348 | #endif /* CONFIG_ARCH_S390X */ | ||
349 | |||
350 | ROOT_DEV = Root_RAM0; | ||
351 | memory_start = (unsigned long) &_end; /* fix this if we use $CODELO etc. */ | ||
352 | #ifndef CONFIG_ARCH_S390X | ||
353 | memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ | ||
354 | /* | ||
355 | * We need some free virtual space to be able to do vmalloc. | ||
356 | * On a machine with 2GB memory we make sure that we have at | ||
357 | * least 128 MB free space for vmalloc. | ||
358 | */ | ||
359 | if (memory_end > 1920*1024*1024) | ||
360 | memory_end = 1920*1024*1024; | ||
361 | #else /* CONFIG_ARCH_S390X */ | ||
362 | memory_end = memory_size & ~0x200000UL; /* detected in head.s */ | ||
363 | #endif /* CONFIG_ARCH_S390X */ | ||
364 | init_mm.start_code = PAGE_OFFSET; | ||
365 | init_mm.end_code = (unsigned long) &_etext; | ||
366 | init_mm.end_data = (unsigned long) &_edata; | ||
367 | init_mm.brk = (unsigned long) &_end; | ||
368 | |||
369 | code_resource.start = (unsigned long) &_text; | ||
370 | code_resource.end = (unsigned long) &_etext - 1; | ||
371 | data_resource.start = (unsigned long) &_etext; | ||
372 | data_resource.end = (unsigned long) &_edata - 1; | ||
373 | |||
374 | /* Save unparsed command line copy for /proc/cmdline */ | ||
375 | memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); | ||
376 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; | ||
377 | |||
378 | for (;;) { | ||
379 | /* | ||
380 | * "mem=XXX[kKmM]" sets memsize | ||
381 | */ | ||
382 | if (c == ' ' && strncmp(from, "mem=", 4) == 0) { | ||
383 | memory_end = simple_strtoul(from+4, &from, 0); | ||
384 | if ( *from == 'K' || *from == 'k' ) { | ||
385 | memory_end = memory_end << 10; | ||
386 | from++; | ||
387 | } else if ( *from == 'M' || *from == 'm' ) { | ||
388 | memory_end = memory_end << 20; | ||
389 | from++; | ||
390 | } | ||
391 | } | ||
392 | /* | ||
393 | * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes | ||
394 | */ | ||
395 | if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) { | ||
396 | delay = simple_strtoul(from+9, &from, 0); | ||
397 | if (*from == 's' || *from == 'S') { | ||
398 | delay = delay*1000000; | ||
399 | from++; | ||
400 | } else if (*from == 'm' || *from == 'M') { | ||
401 | delay = delay*60*1000000; | ||
402 | from++; | ||
403 | } | ||
404 | /* now wait for the requested amount of time */ | ||
405 | udelay(delay); | ||
406 | } | ||
407 | cn = *(from++); | ||
408 | if (!cn) | ||
409 | break; | ||
410 | if (cn == '\n') | ||
411 | cn = ' '; /* replace newlines with space */ | ||
412 | if (cn == 0x0d) | ||
413 | cn = ' '; /* replace 0x0d with space */ | ||
414 | if (cn == ' ' && c == ' ') | ||
415 | continue; /* remove additional spaces */ | ||
416 | c = cn; | ||
417 | if (to - command_line >= COMMAND_LINE_SIZE) | ||
418 | break; | ||
419 | *(to++) = c; | ||
420 | } | ||
421 | if (c == ' ' && to > command_line) to--; | ||
422 | *to = '\0'; | ||
423 | *cmdline_p = command_line; | ||
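As parsed above, a command line such as mem=128M ipldelay=30s (values are illustrative) caps the usable memory at 128 MB and delays boot for roughly 30 seconds before setup continues.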
424 | |||
425 | /* | ||
426 | * partially used pages are not usable - thus | ||
427 | * we are rounding upwards: | ||
428 | */ | ||
429 | start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
430 | end_pfn = max_pfn = memory_end >> PAGE_SHIFT; | ||
431 | |||
432 | /* | ||
433 | * Initialize the boot-time allocator (with low memory only): | ||
434 | */ | ||
435 | bootmap_size = init_bootmem(start_pfn, end_pfn); | ||
436 | |||
437 | /* | ||
438 | * Register RAM areas with the bootmem allocator. | ||
439 | */ | ||
440 | for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { | ||
441 | unsigned long start_chunk, end_chunk; | ||
442 | |||
443 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | ||
444 | continue; | ||
445 | start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1); | ||
446 | start_chunk >>= PAGE_SHIFT; | ||
447 | end_chunk = (memory_chunk[i].addr + memory_chunk[i].size); | ||
448 | end_chunk >>= PAGE_SHIFT; | ||
449 | if (start_chunk < start_pfn) | ||
450 | start_chunk = start_pfn; | ||
451 | if (end_chunk > end_pfn) | ||
452 | end_chunk = end_pfn; | ||
453 | if (start_chunk < end_chunk) | ||
454 | free_bootmem(start_chunk << PAGE_SHIFT, | ||
455 | (end_chunk - start_chunk) << PAGE_SHIFT); | ||
456 | } | ||
457 | |||
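The bootmem registration above works in page frames: the first usable frame is the kernel end rounded up to a page boundary, and each read-write chunk is clipped to the [start_pfn, end_pfn) window before being handed to free_bootmem(). The following stand-alone sketch of that arithmetic assumes a 4 KB page size and invented addresses; it is an editorial illustration, not kernel code:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long kernel_end = 0x512345;	/* pretend address of _end */
	unsigned long memory_end = 128UL << 20;	/* pretend 128 MB of memory */

	/* Round the first free frame up, the last frame down. */
	unsigned long start_pfn = (kernel_end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long end_pfn   = memory_end >> PAGE_SHIFT;

	/* Clip an example memory chunk to the usable range. */
	unsigned long chunk_addr = 0x400000, chunk_size = 0x800000;
	unsigned long start_chunk = (chunk_addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long end_chunk   = (chunk_addr + chunk_size) >> PAGE_SHIFT;

	if (start_chunk < start_pfn)
		start_chunk = start_pfn;
	if (end_chunk > end_pfn)
		end_chunk = end_pfn;
	if (start_chunk < end_chunk)
		printf("free 0x%lx bytes at 0x%lx\n",
		       (end_chunk - start_chunk) << PAGE_SHIFT,
		       start_chunk << PAGE_SHIFT);
	return 0;
}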
458 | /* | ||
459 | * Reserve the bootmem bitmap itself as well. We do this in two | ||
460 | * steps (first step was init_bootmem()) because this catches | ||
461 | * the (very unlikely) case of us accidentally initializing the | ||
462 | * bootmem allocator with an invalid RAM area. | ||
463 | */ | ||
464 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); | ||
465 | |||
466 | #ifdef CONFIG_BLK_DEV_INITRD | ||
467 | if (INITRD_START) { | ||
468 | if (INITRD_START + INITRD_SIZE <= memory_end) { | ||
469 | reserve_bootmem(INITRD_START, INITRD_SIZE); | ||
470 | initrd_start = INITRD_START; | ||
471 | initrd_end = initrd_start + INITRD_SIZE; | ||
472 | } else { | ||
473 | printk("initrd extends beyond end of memory " | ||
474 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | ||
475 | initrd_start + INITRD_SIZE, memory_end); | ||
476 | initrd_start = initrd_end = 0; | ||
477 | } | ||
478 | } | ||
479 | #endif | ||
480 | |||
481 | for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { | ||
482 | struct resource *res; | ||
483 | |||
484 | res = alloc_bootmem_low(sizeof(struct resource)); | ||
485 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
486 | |||
487 | switch (memory_chunk[i].type) { | ||
488 | case CHUNK_READ_WRITE: | ||
489 | res->name = "System RAM"; | ||
490 | break; | ||
491 | case CHUNK_READ_ONLY: | ||
492 | res->name = "System ROM"; | ||
493 | res->flags |= IORESOURCE_READONLY; | ||
494 | break; | ||
495 | default: | ||
496 | res->name = "reserved"; | ||
497 | } | ||
498 | res->start = memory_chunk[i].addr; | ||
499 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | ||
500 | request_resource(&iomem_resource, res); | ||
501 | request_resource(res, &code_resource); | ||
502 | request_resource(res, &data_resource); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Setup lowcore for boot cpu | ||
507 | */ | ||
508 | #ifndef CONFIG_ARCH_S390X | ||
509 | lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); | ||
510 | memset(lc, 0, PAGE_SIZE); | ||
511 | #else /* CONFIG_ARCH_S390X */ | ||
512 | lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0); | ||
513 | memset(lc, 0, 2*PAGE_SIZE); | ||
514 | #endif /* CONFIG_ARCH_S390X */ | ||
515 | lc->restart_psw.mask = PSW_BASE_BITS; | ||
516 | lc->restart_psw.addr = | ||
517 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | ||
518 | lc->external_new_psw.mask = PSW_KERNEL_BITS; | ||
519 | lc->external_new_psw.addr = | ||
520 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | ||
521 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | ||
522 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | ||
523 | lc->program_new_psw.mask = PSW_KERNEL_BITS; | ||
524 | lc->program_new_psw.addr = | ||
525 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | ||
526 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; | ||
527 | lc->mcck_new_psw.addr = | ||
528 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | ||
529 | lc->io_new_psw.mask = PSW_KERNEL_BITS; | ||
530 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | ||
531 | lc->ipl_device = S390_lowcore.ipl_device; | ||
532 | lc->jiffy_timer = -1LL; | ||
533 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | ||
534 | lc->async_stack = (unsigned long) | ||
535 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | ||
536 | #ifdef CONFIG_CHECK_STACK | ||
537 | lc->panic_stack = (unsigned long) | ||
538 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; | ||
539 | #endif | ||
540 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | ||
541 | lc->thread_info = (unsigned long) &init_thread_union; | ||
542 | #ifdef CONFIG_ARCH_S390X | ||
543 | if (MACHINE_HAS_DIAG44) | ||
544 | lc->diag44_opcode = 0x83000044; | ||
545 | else | ||
546 | lc->diag44_opcode = 0x07000700; | ||
547 | #endif /* CONFIG_ARCH_S390X */ | ||
548 | set_prefix((u32)(unsigned long) lc); | ||
549 | cpu_init(); | ||
550 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; | ||
551 | |||
552 | /* | ||
553 | * Create kernel page tables and switch to virtual addressing. | ||
554 | */ | ||
555 | paging_init(); | ||
556 | |||
557 | /* Setup default console */ | ||
558 | conmode_default(); | ||
559 | } | ||
560 | |||
561 | void print_cpu_info(struct cpuinfo_S390 *cpuinfo) | ||
562 | { | ||
563 | printk("cpu %d " | ||
564 | #ifdef CONFIG_SMP | ||
565 | "phys_idx=%d " | ||
566 | #endif | ||
567 | "vers=%02X ident=%06X machine=%04X unused=%04X\n", | ||
568 | cpuinfo->cpu_nr, | ||
569 | #ifdef CONFIG_SMP | ||
570 | cpuinfo->cpu_addr, | ||
571 | #endif | ||
572 | cpuinfo->cpu_id.version, | ||
573 | cpuinfo->cpu_id.ident, | ||
574 | cpuinfo->cpu_id.machine, | ||
575 | cpuinfo->cpu_id.unused); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * show_cpuinfo - Get information on one CPU for use by procfs. | ||
580 | */ | ||
581 | |||
582 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
583 | { | ||
584 | struct cpuinfo_S390 *cpuinfo; | ||
585 | unsigned long n = (unsigned long) v - 1; | ||
586 | |||
587 | if (!n) { | ||
588 | seq_printf(m, "vendor_id : IBM/S390\n" | ||
589 | "# processors : %i\n" | ||
590 | "bogomips per cpu: %lu.%02lu\n", | ||
591 | num_online_cpus(), loops_per_jiffy/(500000/HZ), | ||
592 | (loops_per_jiffy/(5000/HZ))%100); | ||
593 | } | ||
594 | if (cpu_online(n)) { | ||
595 | #ifdef CONFIG_SMP | ||
596 | if (smp_processor_id() == n) | ||
597 | cpuinfo = &S390_lowcore.cpu_data; | ||
598 | else | ||
599 | cpuinfo = &lowcore_ptr[n]->cpu_data; | ||
600 | #else | ||
601 | cpuinfo = &S390_lowcore.cpu_data; | ||
602 | #endif | ||
603 | seq_printf(m, "processor %li: " | ||
604 | "version = %02X, " | ||
605 | "identification = %06X, " | ||
606 | "machine = %04X\n", | ||
607 | n, cpuinfo->cpu_id.version, | ||
608 | cpuinfo->cpu_id.ident, | ||
609 | cpuinfo->cpu_id.machine); | ||
610 | } | ||
611 | return 0; | ||
612 | } | ||
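show_cpuinfo() formats loops_per_jiffy as a two-decimal "bogomips" value: dividing by 500000/HZ yields the integer part, and dividing by 5000/HZ modulo 100 yields the fraction. A small stand-alone demonstration of the same arithmetic; the HZ and loops_per_jiffy values here are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100;			/* assumed tick rate */
	unsigned long loops_per_jiffy = 1247232;	/* made-up calibration result */

	/* Same expression as the seq_printf above; prints "249.44" here. */
	printf("bogomips per cpu: %lu.%02lu\n",
	       loops_per_jiffy / (500000 / hz),
	       (loops_per_jiffy / (5000 / hz)) % 100);
	return 0;
}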
613 | |||
614 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
615 | { | ||
616 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | ||
617 | } | ||
618 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
619 | { | ||
620 | ++*pos; | ||
621 | return c_start(m, pos); | ||
622 | } | ||
623 | static void c_stop(struct seq_file *m, void *v) | ||
624 | { | ||
625 | } | ||
626 | struct seq_operations cpuinfo_op = { | ||
627 | .start = c_start, | ||
628 | .next = c_next, | ||
629 | .stop = c_stop, | ||
630 | .show = show_cpuinfo, | ||
631 | }; | ||
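The iterator encodes the CPU index shifted by one: c_start() returns *pos + 1 cast to a pointer so that CPU 0 does not look like the NULL end-of-sequence marker, and show_cpuinfo() subtracts the 1 again. A minimal user-space simulation of that start/next/show walk (editorial sketch; the NR_CPUS value is arbitrary):

#include <stdio.h>

#define NR_CPUS 4

/* Encode position pos as (void *)(pos + 1); NULL terminates the walk. */
static void *c_start(long long *pos)
{
	return *pos < NR_CPUS ? (void *)((unsigned long)*pos + 1) : NULL;
}

static void *c_next(long long *pos)
{
	++*pos;
	return c_start(pos);
}

static void show(void *v)
{
	unsigned long n = (unsigned long)v - 1;	/* undo the +1 encoding */
	printf("processor %lu\n", n);
}

int main(void)
{
	long long pos = 0;

	for (void *v = c_start(&pos); v; v = c_next(&pos))
		show(v);
	return 0;
}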
632 | |||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c new file mode 100644 index 000000000000..610c1d03e975 --- /dev/null +++ b/arch/s390/kernel/signal.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/signal.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | ||
7 | * | ||
8 | * Based on Intel version | ||
9 | * | ||
10 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
11 | * | ||
12 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/tty.h> | ||
28 | #include <linux/personality.h> | ||
29 | #include <linux/binfmts.h> | ||
30 | #include <asm/ucontext.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/lowcore.h> | ||
33 | |||
34 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
35 | |||
36 | |||
37 | typedef struct | ||
38 | { | ||
39 | __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; | ||
40 | struct sigcontext sc; | ||
41 | _sigregs sregs; | ||
42 | int signo; | ||
43 | __u8 retcode[S390_SYSCALL_SIZE]; | ||
44 | } sigframe; | ||
45 | |||
46 | typedef struct | ||
47 | { | ||
48 | __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; | ||
49 | __u8 retcode[S390_SYSCALL_SIZE]; | ||
50 | struct siginfo info; | ||
51 | struct ucontext uc; | ||
52 | } rt_sigframe; | ||
53 | |||
54 | int do_signal(struct pt_regs *regs, sigset_t *oldset); | ||
55 | |||
56 | /* | ||
57 | * Atomically swap in the new signal mask, and wait for a signal. | ||
58 | */ | ||
59 | asmlinkage int | ||
60 | sys_sigsuspend(struct pt_regs * regs, int history0, int history1, | ||
61 | old_sigset_t mask) | ||
62 | { | ||
63 | sigset_t saveset; | ||
64 | |||
65 | mask &= _BLOCKABLE; | ||
66 | spin_lock_irq(¤t->sighand->siglock); | ||
67 | saveset = current->blocked; | ||
68 | siginitset(¤t->blocked, mask); | ||
69 | recalc_sigpending(); | ||
70 | spin_unlock_irq(¤t->sighand->siglock); | ||
71 | regs->gprs[2] = -EINTR; | ||
72 | |||
73 | while (1) { | ||
74 | set_current_state(TASK_INTERRUPTIBLE); | ||
75 | schedule(); | ||
76 | if (do_signal(regs, &saveset)) | ||
77 | return -EINTR; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | asmlinkage long | ||
82 | sys_rt_sigsuspend(struct pt_regs *regs, sigset_t __user *unewset, | ||
83 | size_t sigsetsize) | ||
84 | { | ||
85 | sigset_t saveset, newset; | ||
86 | |||
87 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
88 | if (sigsetsize != sizeof(sigset_t)) | ||
89 | return -EINVAL; | ||
90 | |||
91 | if (copy_from_user(&newset, unewset, sizeof(newset))) | ||
92 | return -EFAULT; | ||
93 | sigdelsetmask(&newset, ~_BLOCKABLE); | ||
94 | |||
95 | spin_lock_irq(¤t->sighand->siglock); | ||
96 | saveset = current->blocked; | ||
97 | current->blocked = newset; | ||
98 | recalc_sigpending(); | ||
99 | spin_unlock_irq(¤t->sighand->siglock); | ||
100 | regs->gprs[2] = -EINTR; | ||
101 | |||
102 | while (1) { | ||
103 | set_current_state(TASK_INTERRUPTIBLE); | ||
104 | schedule(); | ||
105 | if (do_signal(regs, &saveset)) | ||
106 | return -EINTR; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | asmlinkage long | ||
111 | sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
112 | struct old_sigaction __user *oact) | ||
113 | { | ||
114 | struct k_sigaction new_ka, old_ka; | ||
115 | int ret; | ||
116 | |||
117 | if (act) { | ||
118 | old_sigset_t mask; | ||
119 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
120 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
121 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | ||
122 | return -EFAULT; | ||
123 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
124 | __get_user(mask, &act->sa_mask); | ||
125 | siginitset(&new_ka.sa.sa_mask, mask); | ||
126 | } | ||
127 | |||
128 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
129 | |||
130 | if (!ret && oact) { | ||
131 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
132 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
133 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | ||
134 | return -EFAULT; | ||
135 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
136 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
137 | } | ||
138 | |||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | asmlinkage long | ||
143 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | ||
144 | struct pt_regs *regs) | ||
145 | { | ||
146 | return do_sigaltstack(uss, uoss, regs->gprs[15]); | ||
147 | } | ||
148 | |||
149 | |||
150 | |||
151 | /* Returns non-zero on fault. */ | ||
152 | static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | ||
153 | { | ||
154 | unsigned long old_mask = regs->psw.mask; | ||
155 | int err; | ||
156 | |||
157 | save_access_regs(current->thread.acrs); | ||
158 | |||
159 | /* Copy a 'clean' PSW mask to the user to avoid leaking | ||
160 | information about whether PER is currently on. */ | ||
161 | regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); | ||
162 | err = __copy_to_user(&sregs->regs.psw, ®s->psw, | ||
163 | sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); | ||
164 | regs->psw.mask = old_mask; | ||
165 | if (err != 0) | ||
166 | return err; | ||
167 | err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs, | ||
168 | sizeof(sregs->regs.acrs)); | ||
169 | if (err != 0) | ||
170 | return err; | ||
171 | /* | ||
172 | * We have to store the fp registers to current->thread.fp_regs | ||
173 | * to merge them with the emulated registers. | ||
174 | */ | ||
175 | save_fp_regs(¤t->thread.fp_regs); | ||
176 | return __copy_to_user(&sregs->fpregs, ¤t->thread.fp_regs, | ||
177 | sizeof(s390_fp_regs)); | ||
178 | } | ||
179 | |||
180 | /* Returns positive number on error */ | ||
181 | static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | ||
182 | { | ||
183 | unsigned long old_mask = regs->psw.mask; | ||
184 | int err; | ||
185 | |||
186 | /* Always make any pending restarted system call return -EINTR */ | ||
187 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
188 | |||
189 | err = __copy_from_user(®s->psw, &sregs->regs.psw, | ||
190 | sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); | ||
191 | regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask); | ||
192 | regs->psw.addr |= PSW_ADDR_AMODE; | ||
193 | if (err) | ||
194 | return err; | ||
195 | err = __copy_from_user(¤t->thread.acrs, &sregs->regs.acrs, | ||
196 | sizeof(sregs->regs.acrs)); | ||
197 | if (err) | ||
198 | return err; | ||
199 | restore_access_regs(current->thread.acrs); | ||
200 | |||
201 | err = __copy_from_user(¤t->thread.fp_regs, &sregs->fpregs, | ||
202 | sizeof(s390_fp_regs)); | ||
203 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; | ||
204 | if (err) | ||
205 | return err; | ||
206 | |||
207 | restore_fp_regs(¤t->thread.fp_regs); | ||
208 | regs->trap = -1; /* disable syscall checks */ | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | asmlinkage long sys_sigreturn(struct pt_regs *regs) | ||
213 | { | ||
214 | sigframe __user *frame = (sigframe __user *)regs->gprs[15]; | ||
215 | sigset_t set; | ||
216 | |||
217 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
218 | goto badframe; | ||
219 | if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) | ||
220 | goto badframe; | ||
221 | |||
222 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
223 | spin_lock_irq(¤t->sighand->siglock); | ||
224 | current->blocked = set; | ||
225 | recalc_sigpending(); | ||
226 | spin_unlock_irq(¤t->sighand->siglock); | ||
227 | |||
228 | if (restore_sigregs(regs, &frame->sregs)) | ||
229 | goto badframe; | ||
230 | |||
231 | return regs->gprs[2]; | ||
232 | |||
233 | badframe: | ||
234 | force_sig(SIGSEGV, current); | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | ||
239 | { | ||
240 | rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; | ||
241 | sigset_t set; | ||
242 | |||
243 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
244 | goto badframe; | ||
245 | if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) | ||
246 | goto badframe; | ||
247 | |||
248 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
249 | spin_lock_irq(¤t->sighand->siglock); | ||
250 | current->blocked = set; | ||
251 | recalc_sigpending(); | ||
252 | spin_unlock_irq(¤t->sighand->siglock); | ||
253 | |||
254 | if (restore_sigregs(regs, &frame->uc.uc_mcontext)) | ||
255 | goto badframe; | ||
256 | |||
257 | /* It is more difficult to avoid calling this function than to | ||
258 | call it and ignore errors. */ | ||
259 | do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]); | ||
260 | return regs->gprs[2]; | ||
261 | |||
262 | badframe: | ||
263 | force_sig(SIGSEGV, current); | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * Set up a signal frame. | ||
269 | */ | ||
270 | |||
271 | |||
272 | /* | ||
273 | * Determine which stack to use. | ||
274 | */ | ||
275 | static inline void __user * | ||
276 | get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | ||
277 | { | ||
278 | unsigned long sp; | ||
279 | |||
280 | /* Default to using normal stack */ | ||
281 | sp = regs->gprs[15]; | ||
282 | |||
283 | /* This is the X/Open sanctioned signal stack switching. */ | ||
284 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
285 | if (! sas_ss_flags(sp)) | ||
286 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
287 | } | ||
288 | |||
289 | /* This is the legacy signal stack switching. */ | ||
290 | else if (!user_mode(regs) && | ||
291 | !(ka->sa.sa_flags & SA_RESTORER) && | ||
292 | ka->sa.sa_restorer) { | ||
293 | sp = (unsigned long) ka->sa.sa_restorer; | ||
294 | } | ||
295 | |||
296 | return (void __user *)((sp - frame_size) & -8ul); | ||
297 | } | ||
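get_sigframe() carves the frame out of the chosen stack by subtracting the frame size and rounding the result down to an 8-byte boundary with "& -8ul". A short stand-alone demonstration of that alignment step, using invented addresses:

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x7ffff123;		/* pretend user stack pointer */
	unsigned long frame_size = 96 + 8 + 4;	/* some odd frame size */

	/* Subtract the frame, then clear the low three bits: -8ul is ...fff8. */
	unsigned long frame = (sp - frame_size) & -8ul;

	printf("sp=%#lx frame=%#lx (8-byte aligned: %s)\n",
	       sp, frame, (frame & 7) ? "no" : "yes");
	return 0;
}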
298 | |||
299 | static inline int map_signal(int sig) | ||
300 | { | ||
301 | if (current_thread_info()->exec_domain | ||
302 | && current_thread_info()->exec_domain->signal_invmap | ||
303 | && sig < 32) | ||
304 | return current_thread_info()->exec_domain->signal_invmap[sig]; | ||
305 | else | ||
306 | return sig; | ||
307 | } | ||
308 | |||
309 | static void setup_frame(int sig, struct k_sigaction *ka, | ||
310 | sigset_t *set, struct pt_regs * regs) | ||
311 | { | ||
312 | sigframe __user *frame; | ||
313 | |||
314 | frame = get_sigframe(ka, regs, sizeof(sigframe)); | ||
315 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) | ||
316 | goto give_sigsegv; | ||
317 | |||
318 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) | ||
319 | goto give_sigsegv; | ||
320 | |||
321 | if (save_sigregs(regs, &frame->sregs)) | ||
322 | goto give_sigsegv; | ||
323 | if (__put_user(&frame->sregs, &frame->sc.sregs)) | ||
324 | goto give_sigsegv; | ||
325 | |||
326 | /* Set up to return from userspace. If provided, use a stub | ||
327 | already in userspace. */ | ||
328 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
329 | regs->gprs[14] = (unsigned long) | ||
330 | ka->sa.sa_restorer | PSW_ADDR_AMODE; | ||
331 | } else { | ||
332 | regs->gprs[14] = (unsigned long) | ||
333 | frame->retcode | PSW_ADDR_AMODE; | ||
334 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | ||
335 | (u16 __user *)(frame->retcode))) | ||
336 | goto give_sigsegv; | ||
337 | } | ||
338 | |||
339 | /* Set up backchain. */ | ||
340 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) | ||
341 | goto give_sigsegv; | ||
342 | |||
343 | /* Set up registers for signal handler */ | ||
344 | regs->gprs[15] = (unsigned long) frame; | ||
345 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | ||
346 | |||
347 | regs->gprs[2] = map_signal(sig); | ||
348 | regs->gprs[3] = (unsigned long) &frame->sc; | ||
349 | |||
350 | /* We forgot to include these in the sigcontext. | ||
351 | To avoid breaking binary compatibility, they are passed as args. */ | ||
352 | regs->gprs[4] = current->thread.trap_no; | ||
353 | regs->gprs[5] = current->thread.prot_addr; | ||
354 | |||
355 | /* Place signal number on stack to allow backtrace from handler. */ | ||
356 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | ||
357 | goto give_sigsegv; | ||
358 | return; | ||
359 | |||
360 | give_sigsegv: | ||
361 | force_sigsegv(sig, current); | ||
362 | } | ||
363 | |||
364 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
365 | sigset_t *set, struct pt_regs * regs) | ||
366 | { | ||
367 | int err = 0; | ||
368 | rt_sigframe __user *frame; | ||
369 | |||
370 | frame = get_sigframe(ka, regs, sizeof(rt_sigframe)); | ||
371 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) | ||
372 | goto give_sigsegv; | ||
373 | |||
374 | if (copy_siginfo_to_user(&frame->info, info)) | ||
375 | goto give_sigsegv; | ||
376 | |||
377 | /* Create the ucontext. */ | ||
378 | err |= __put_user(0, &frame->uc.uc_flags); | ||
379 | err |= __put_user(0, &frame->uc.uc_link); | ||
380 | err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
381 | err |= __put_user(sas_ss_flags(regs->gprs[15]), | ||
382 | &frame->uc.uc_stack.ss_flags); | ||
383 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
384 | err |= save_sigregs(regs, &frame->uc.uc_mcontext); | ||
385 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
386 | if (err) | ||
387 | goto give_sigsegv; | ||
388 | |||
389 | /* Set up to return from userspace. If provided, use a stub | ||
390 | already in userspace. */ | ||
391 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
392 | regs->gprs[14] = (unsigned long) | ||
393 | ka->sa.sa_restorer | PSW_ADDR_AMODE; | ||
394 | } else { | ||
395 | regs->gprs[14] = (unsigned long) | ||
396 | frame->retcode | PSW_ADDR_AMODE; | ||
397 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | ||
398 | (u16 __user *)(frame->retcode)); | ||
399 | } | ||
400 | |||
401 | /* Set up backchain. */ | ||
402 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) | ||
403 | goto give_sigsegv; | ||
404 | |||
405 | /* Set up registers for signal handler */ | ||
406 | regs->gprs[15] = (unsigned long) frame; | ||
407 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | ||
408 | |||
409 | regs->gprs[2] = map_signal(sig); | ||
410 | regs->gprs[3] = (unsigned long) &frame->info; | ||
411 | regs->gprs[4] = (unsigned long) &frame->uc; | ||
412 | return; | ||
413 | |||
414 | give_sigsegv: | ||
415 | force_sigsegv(sig, current); | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * OK, we're invoking a handler | ||
420 | */ | ||
421 | |||
422 | static void | ||
423 | handle_signal(unsigned long sig, struct k_sigaction *ka, | ||
424 | siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) | ||
425 | { | ||
426 | /* Set up the stack frame */ | ||
427 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
428 | setup_rt_frame(sig, ka, info, oldset, regs); | ||
429 | else | ||
430 | setup_frame(sig, ka, oldset, regs); | ||
431 | |||
432 | if (!(ka->sa.sa_flags & SA_NODEFER)) { | ||
433 | spin_lock_irq(¤t->sighand->siglock); | ||
434 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
435 | sigaddset(¤t->blocked,sig); | ||
436 | recalc_sigpending(); | ||
437 | spin_unlock_irq(¤t->sighand->siglock); | ||
438 | } | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
443 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
444 | * mistake. | ||
445 | * | ||
446 | * Note that we go through the signals twice: once to check the signals that | ||
447 | * the kernel can handle, and then we build all the user-level signal handling | ||
448 | * stack-frames in one go after that. | ||
449 | */ | ||
450 | int do_signal(struct pt_regs *regs, sigset_t *oldset) | ||
451 | { | ||
452 | unsigned long retval = 0, continue_addr = 0, restart_addr = 0; | ||
453 | siginfo_t info; | ||
454 | int signr; | ||
455 | struct k_sigaction ka; | ||
456 | |||
457 | /* | ||
458 | * We want the common case to go fast, which | ||
459 | * is why we may in certain cases get here from | ||
460 | * kernel mode. Just return without doing anything | ||
461 | * if so. | ||
462 | */ | ||
463 | if (!user_mode(regs)) | ||
464 | return 1; | ||
465 | |||
466 | if (!oldset) | ||
467 | oldset = ¤t->blocked; | ||
468 | |||
469 | /* Are we from a system call? */ | ||
470 | if (regs->trap == __LC_SVC_OLD_PSW) { | ||
471 | continue_addr = regs->psw.addr; | ||
472 | restart_addr = continue_addr - regs->ilc; | ||
473 | retval = regs->gprs[2]; | ||
474 | |||
475 | /* Prepare for system call restart. We do this here so that a | ||
476 | debugger will see the already changed PSW. */ | ||
477 | if (retval == -ERESTARTNOHAND || | ||
478 | retval == -ERESTARTSYS || | ||
479 | retval == -ERESTARTNOINTR) { | ||
480 | regs->gprs[2] = regs->orig_gpr2; | ||
481 | regs->psw.addr = restart_addr; | ||
482 | } else if (retval == -ERESTART_RESTARTBLOCK) { | ||
483 | regs->gprs[2] = -EINTR; | ||
484 | } | ||
485 | } | ||
486 | |||
487 | /* Get signal to deliver. When running under ptrace, at this point | ||
488 | the debugger may change all our registers ... */ | ||
489 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
490 | |||
491 | /* Depending on the signal settings we may need to revert the | ||
492 | decision to restart the system call. */ | ||
493 | if (signr > 0 && regs->psw.addr == restart_addr) { | ||
494 | if (retval == -ERESTARTNOHAND | ||
495 | || (retval == -ERESTARTSYS | ||
496 | && !(current->sighand->action[signr-1].sa.sa_flags | ||
497 | & SA_RESTART))) { | ||
498 | regs->gprs[2] = -EINTR; | ||
499 | regs->psw.addr = continue_addr; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | if (signr > 0) { | ||
504 | /* Whee! Actually deliver the signal. */ | ||
505 | #ifdef CONFIG_S390_SUPPORT | ||
506 | if (test_thread_flag(TIF_31BIT)) { | ||
507 | extern void handle_signal32(unsigned long sig, | ||
508 | struct k_sigaction *ka, | ||
509 | siginfo_t *info, | ||
510 | sigset_t *oldset, | ||
511 | struct pt_regs *regs); | ||
512 | handle_signal32(signr, &ka, &info, oldset, regs); | ||
513 | return 1; | ||
514 | } | ||
515 | #endif | ||
516 | handle_signal(signr, &ka, &info, oldset, regs); | ||
517 | return 1; | ||
518 | } | ||
519 | |||
520 | /* Restart a different system call. */ | ||
521 | if (retval == -ERESTART_RESTARTBLOCK | ||
522 | && regs->psw.addr == continue_addr) { | ||
523 | regs->gprs[2] = __NR_restart_syscall; | ||
524 | set_thread_flag(TIF_RESTART_SVC); | ||
525 | } | ||
526 | return 0; | ||
527 | } | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c new file mode 100644 index 000000000000..fdfcf0488b49 --- /dev/null +++ b/arch/s390/kernel/smp.c | |||
@@ -0,0 +1,840 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/smp.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * Heiko Carstens (heiko.carstens@de.ibm.com) | ||
9 | * | ||
10 | * based on other smp stuff by | ||
11 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> | ||
12 | * (c) 1998 Ingo Molnar | ||
13 | * | ||
14 | * We work with logical cpu numbering everywhere we can. The only | ||
15 | * functions using the real cpu address (got from STAP) are the sigp | ||
16 | * functions. For all other functions we use the identity mapping. | ||
17 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is | ||
18 | * used e.g. to find the idle task belonging to a logical cpu. Every array | ||
19 | * in the kernel is sorted by the logical cpu number and not by the physical | ||
20 | * one which is causing all the confusion with __cpu_logical_map and | ||
21 | * cpu_number_map in other architectures. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | |||
27 | #include <linux/mm.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/kernel_stat.h> | ||
30 | #include <linux/smp_lock.h> | ||
31 | |||
32 | #include <linux/delay.h> | ||
33 | #include <linux/cache.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/cpu.h> | ||
36 | |||
37 | #include <asm/sigp.h> | ||
38 | #include <asm/pgalloc.h> | ||
39 | #include <asm/irq.h> | ||
40 | #include <asm/s390_ext.h> | ||
41 | #include <asm/cpcmd.h> | ||
42 | #include <asm/tlbflush.h> | ||
43 | |||
44 | /* prototypes */ | ||
45 | |||
46 | extern volatile int __cpu_logical_map[]; | ||
47 | |||
48 | /* | ||
49 | * An array with a pointer to the lowcore of every CPU. | ||
50 | */ | ||
51 | |||
52 | struct _lowcore *lowcore_ptr[NR_CPUS]; | ||
53 | |||
54 | cpumask_t cpu_online_map; | ||
55 | cpumask_t cpu_possible_map; | ||
56 | |||
57 | static struct task_struct *current_set[NR_CPUS]; | ||
58 | |||
59 | EXPORT_SYMBOL(cpu_online_map); | ||
60 | |||
61 | /* | ||
62 | * Reboot, halt and power_off routines for SMP. | ||
63 | */ | ||
64 | extern char vmhalt_cmd[]; | ||
65 | extern char vmpoff_cmd[]; | ||
66 | |||
67 | extern void reipl(unsigned long devno); | ||
68 | |||
69 | static void smp_ext_bitcall(int, ec_bit_sig); | ||
70 | static void smp_ext_bitcall_others(ec_bit_sig); | ||
71 | |||
72 | /* | ||
73 | * Structure and data for smp_call_function(). This is designed to minimise | ||
74 | * static memory requirements. It also looks cleaner. | ||
75 | */ | ||
76 | static DEFINE_SPINLOCK(call_lock); | ||
77 | |||
78 | struct call_data_struct { | ||
79 | void (*func) (void *info); | ||
80 | void *info; | ||
81 | atomic_t started; | ||
82 | atomic_t finished; | ||
83 | int wait; | ||
84 | }; | ||
85 | |||
86 | static struct call_data_struct * call_data; | ||
87 | |||
88 | /* | ||
89 | * 'Call function' interrupt callback | ||
90 | */ | ||
91 | static void do_call_function(void) | ||
92 | { | ||
93 | void (*func) (void *info) = call_data->func; | ||
94 | void *info = call_data->info; | ||
95 | int wait = call_data->wait; | ||
96 | |||
97 | atomic_inc(&call_data->started); | ||
98 | (*func)(info); | ||
99 | if (wait) | ||
100 | atomic_inc(&call_data->finished); | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * this function sends a 'generic call function' IPI to all other CPUs | ||
105 | * in the system. | ||
106 | */ | ||
107 | |||
108 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | ||
109 | int wait) | ||
110 | /* | ||
111 | * [SUMMARY] Run a function on all other CPUs. | ||
112 | * <func> The function to run. This must be fast and non-blocking. | ||
113 | * <info> An arbitrary pointer to pass to the function. | ||
114 | * <nonatomic> currently unused. | ||
115 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
116 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
117 | * remote CPUs are nearly ready to execute <<func>> or have already executed it. | ||
118 | * | ||
119 | * You must not call this function with disabled interrupts or from a | ||
120 | * hardware interrupt handler or from a bottom half handler. | ||
121 | */ | ||
122 | { | ||
123 | struct call_data_struct data; | ||
124 | int cpus = num_online_cpus()-1; | ||
125 | |||
126 | if (cpus <= 0) | ||
127 | return 0; | ||
128 | |||
129 | /* Can deadlock when called with interrupts disabled */ | ||
130 | WARN_ON(irqs_disabled()); | ||
131 | |||
132 | data.func = func; | ||
133 | data.info = info; | ||
134 | atomic_set(&data.started, 0); | ||
135 | data.wait = wait; | ||
136 | if (wait) | ||
137 | atomic_set(&data.finished, 0); | ||
138 | |||
139 | spin_lock(&call_lock); | ||
140 | call_data = &data; | ||
141 | /* Send a message to all other CPUs and wait for them to respond */ | ||
142 | smp_ext_bitcall_others(ec_call_function); | ||
143 | |||
144 | /* Wait for response */ | ||
145 | while (atomic_read(&data.started) != cpus) | ||
146 | cpu_relax(); | ||
147 | |||
148 | if (wait) | ||
149 | while (atomic_read(&data.finished) != cpus) | ||
150 | cpu_relax(); | ||
151 | spin_unlock(&call_lock); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
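smp_call_function() uses two atomic counters as a rendezvous: every target CPU bumps "started" before running the function and, if the caller asked to wait, bumps "finished" afterwards, while the initiating CPU spins until both counters reach the number of other online CPUs. The sketch below reproduces that handshake in user space with POSIX threads standing in for the external-call interrupt; it is an editorial illustration only, not the kernel path.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define OTHER_CPUS 3

struct call_data {
	void (*func)(void *);
	void *info;
	atomic_int started;
	atomic_int finished;
	int wait;
} call_data;

static void say_hello(void *info)
{
	printf("hello from %s\n", (char *)info);
}

/* Stand-in for the ec_call_function handler running on a target CPU. */
static void *target_cpu(void *unused)
{
	(void)unused;
	atomic_fetch_add(&call_data.started, 1);
	call_data.func(call_data.info);
	if (call_data.wait)
		atomic_fetch_add(&call_data.finished, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[OTHER_CPUS];

	call_data.func = say_hello;
	call_data.info = "a remote cpu";
	call_data.wait = 1;

	for (int i = 0; i < OTHER_CPUS; i++)
		pthread_create(&t[i], NULL, target_cpu, NULL);

	/* Initiator: spin until everyone has started, then finished. */
	while (atomic_load(&call_data.started) != OTHER_CPUS)
		;
	while (atomic_load(&call_data.finished) != OTHER_CPUS)
		;
	for (int i = 0; i < OTHER_CPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}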
155 | |||
156 | /* | ||
157 | * Call a function on one CPU | ||
158 | * cpu : the CPU the function should be executed on | ||
159 | * | ||
160 | * You must not call this function with disabled interrupts or from a | ||
161 | * hardware interrupt handler. You may call it from a bottom half. | ||
162 | * | ||
163 | * It is guaranteed that the called function runs on the specified CPU, | ||
164 | * preemption is disabled. | ||
165 | */ | ||
166 | int smp_call_function_on(void (*func) (void *info), void *info, | ||
167 | int nonatomic, int wait, int cpu) | ||
168 | { | ||
169 | struct call_data_struct data; | ||
170 | int curr_cpu; | ||
171 | |||
172 | if (!cpu_online(cpu)) | ||
173 | return -EINVAL; | ||
174 | |||
175 | /* disable preemption for local function call */ | ||
176 | curr_cpu = get_cpu(); | ||
177 | |||
178 | if (curr_cpu == cpu) { | ||
179 | /* direct call to function */ | ||
180 | func(info); | ||
181 | put_cpu(); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | data.func = func; | ||
186 | data.info = info; | ||
187 | atomic_set(&data.started, 0); | ||
188 | data.wait = wait; | ||
189 | if (wait) | ||
190 | atomic_set(&data.finished, 0); | ||
191 | |||
192 | spin_lock_bh(&call_lock); | ||
193 | call_data = &data; | ||
194 | smp_ext_bitcall(cpu, ec_call_function); | ||
195 | |||
196 | /* Wait for response */ | ||
197 | while (atomic_read(&data.started) != 1) | ||
198 | cpu_relax(); | ||
199 | |||
200 | if (wait) | ||
201 | while (atomic_read(&data.finished) != 1) | ||
202 | cpu_relax(); | ||
203 | |||
204 | spin_unlock_bh(&call_lock); | ||
205 | put_cpu(); | ||
206 | return 0; | ||
207 | } | ||
208 | EXPORT_SYMBOL(smp_call_function_on); | ||
209 | |||
210 | static inline void do_send_stop(void) | ||
211 | { | ||
212 | int cpu, rc; | ||
213 | |||
214 | /* stop all processors */ | ||
215 | for_each_online_cpu(cpu) { | ||
216 | if (cpu == smp_processor_id()) | ||
217 | continue; | ||
218 | do { | ||
219 | rc = signal_processor(cpu, sigp_stop); | ||
220 | } while (rc == sigp_busy); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | static inline void do_store_status(void) | ||
225 | { | ||
226 | int cpu, rc; | ||
227 | |||
228 | /* store status of all processors in their lowcores (real 0) */ | ||
229 | for_each_online_cpu(cpu) { | ||
230 | if (cpu == smp_processor_id()) | ||
231 | continue; | ||
232 | do { | ||
233 | rc = signal_processor_p( | ||
234 | (__u32)(unsigned long) lowcore_ptr[cpu], cpu, | ||
235 | sigp_store_status_at_address); | ||
236 | } while(rc == sigp_busy); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * this function sends a 'stop' sigp to all other CPUs in the system. | ||
242 | * it goes straight through. | ||
243 | */ | ||
244 | void smp_send_stop(void) | ||
245 | { | ||
246 | /* write magic number to zero page (absolute 0) */ | ||
247 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; | ||
248 | |||
249 | /* stop other processors. */ | ||
250 | do_send_stop(); | ||
251 | |||
252 | /* store status of other processors. */ | ||
253 | do_store_status(); | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Reboot, halt and power_off routines for SMP. | ||
258 | */ | ||
259 | |||
260 | static void do_machine_restart(void * __unused) | ||
261 | { | ||
262 | int cpu; | ||
263 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
264 | |||
265 | if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) | ||
266 | signal_processor(smp_processor_id(), sigp_stop); | ||
267 | |||
268 | /* Wait for all other cpus to enter stopped state */ | ||
269 | for_each_online_cpu(cpu) { | ||
270 | if (cpu == smp_processor_id()) | ||
271 | continue; | ||
272 | while(!smp_cpu_not_running(cpu)) | ||
273 | cpu_relax(); | ||
274 | } | ||
275 | |||
276 | /* Store status of other cpus. */ | ||
277 | do_store_status(); | ||
278 | |||
279 | /* | ||
280 | * Finally call reipl. Because we waited for all other | ||
281 | * cpus to enter this function we know that they do | ||
282 | * not hold any s390irq-locks (the cpus have been | ||
283 | * interrupted by an external interrupt and s390irq | ||
284 | * locks are always held disabled). | ||
285 | */ | ||
286 | if (MACHINE_IS_VM) | ||
287 | cpcmd ("IPL", NULL, 0); | ||
288 | else | ||
289 | reipl (0x10000 | S390_lowcore.ipl_device); | ||
290 | } | ||
291 | |||
292 | void machine_restart_smp(char * __unused) | ||
293 | { | ||
294 | on_each_cpu(do_machine_restart, NULL, 0, 0); | ||
295 | } | ||
296 | |||
297 | static void do_wait_for_stop(void) | ||
298 | { | ||
299 | unsigned long cr[16]; | ||
300 | |||
301 | __ctl_store(cr, 0, 15); | ||
302 | cr[0] &= ~0xffff; | ||
303 | cr[6] = 0; | ||
304 | __ctl_load(cr, 0, 15); | ||
305 | for (;;) | ||
306 | enabled_wait(); | ||
307 | } | ||
308 | |||
309 | static void do_machine_halt(void * __unused) | ||
310 | { | ||
311 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
312 | |||
313 | if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { | ||
314 | smp_send_stop(); | ||
315 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | ||
316 | cpcmd(vmhalt_cmd, NULL, 0); | ||
317 | signal_processor(smp_processor_id(), | ||
318 | sigp_stop_and_store_status); | ||
319 | } | ||
320 | do_wait_for_stop(); | ||
321 | } | ||
322 | |||
323 | void machine_halt_smp(void) | ||
324 | { | ||
325 | on_each_cpu(do_machine_halt, NULL, 0, 0); | ||
326 | } | ||
327 | |||
328 | static void do_machine_power_off(void * __unused) | ||
329 | { | ||
330 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
331 | |||
332 | if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { | ||
333 | smp_send_stop(); | ||
334 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
335 | cpcmd(vmpoff_cmd, NULL, 0); | ||
336 | signal_processor(smp_processor_id(), | ||
337 | sigp_stop_and_store_status); | ||
338 | } | ||
339 | do_wait_for_stop(); | ||
340 | } | ||
341 | |||
342 | void machine_power_off_smp(void) | ||
343 | { | ||
344 | on_each_cpu(do_machine_power_off, NULL, 0, 0); | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * This is the main routine where commands issued by other | ||
349 | * cpus are handled. | ||
350 | */ | ||
351 | |||
352 | void do_ext_call_interrupt(struct pt_regs *regs, __u16 code) | ||
353 | { | ||
354 | unsigned long bits; | ||
355 | |||
356 | /* | ||
357 | * handle bit signal external calls | ||
358 | * | ||
359 | * For the ec_schedule signal we have to do nothing. All the work | ||
360 | * is done automatically when we return from the interrupt. | ||
361 | */ | ||
362 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | ||
363 | |||
364 | if (test_bit(ec_call_function, &bits)) | ||
365 | do_call_function(); | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Send an external call sigp to another cpu and return without waiting | ||
370 | * for its completion. | ||
371 | */ | ||
372 | static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | ||
373 | { | ||
374 | /* | ||
375 | * Set signaling bit in lowcore of target cpu and kick it | ||
376 | */ | ||
377 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | ||
378 | while(signal_processor(cpu, sigp_external_call) == sigp_busy) | ||
379 | udelay(10); | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Send an external call sigp to every other cpu in the system and | ||
384 | * return without waiting for its completion. | ||
385 | */ | ||
386 | static void smp_ext_bitcall_others(ec_bit_sig sig) | ||
387 | { | ||
388 | int cpu; | ||
389 | |||
390 | for_each_online_cpu(cpu) { | ||
391 | if (cpu == smp_processor_id()) | ||
392 | continue; | ||
393 | /* | ||
394 | * Set signaling bit in lowcore of target cpu and kick it | ||
395 | */ | ||
396 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | ||
397 | while (signal_processor(cpu, sigp_external_call) == sigp_busy) | ||
398 | udelay(10); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | #ifndef CONFIG_ARCH_S390X | ||
403 | /* | ||
404 | * this function sends a 'purge tlb' signal to another CPU. | ||
405 | */ | ||
406 | void smp_ptlb_callback(void *info) | ||
407 | { | ||
408 | local_flush_tlb(); | ||
409 | } | ||
410 | |||
411 | void smp_ptlb_all(void) | ||
412 | { | ||
413 | on_each_cpu(smp_ptlb_callback, NULL, 0, 1); | ||
414 | } | ||
415 | EXPORT_SYMBOL(smp_ptlb_all); | ||
416 | #endif /* ! CONFIG_ARCH_S390X */ | ||
417 | |||
418 | /* | ||
419 | * this function sends a 'reschedule' IPI to another CPU. | ||
420 | * it goes straight through and wastes no time serializing | ||
421 | * anything. Worst case is that we lose a reschedule ... | ||
422 | */ | ||
423 | void smp_send_reschedule(int cpu) | ||
424 | { | ||
425 | smp_ext_bitcall(cpu, ec_schedule); | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * parameter area for the set/clear control bit callbacks | ||
430 | */ | ||
431 | typedef struct | ||
432 | { | ||
433 | __u16 start_ctl; | ||
434 | __u16 end_ctl; | ||
435 | unsigned long orvals[16]; | ||
436 | unsigned long andvals[16]; | ||
437 | } ec_creg_mask_parms; | ||
438 | |||
439 | /* | ||
440 | * callback for setting/clearing control bits | ||
441 | */ | ||
442 | void smp_ctl_bit_callback(void *info) { | ||
443 | ec_creg_mask_parms *pp; | ||
444 | unsigned long cregs[16]; | ||
445 | int i; | ||
446 | |||
447 | pp = (ec_creg_mask_parms *) info; | ||
448 | __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); | ||
449 | for (i = pp->start_ctl; i <= pp->end_ctl; i++) | ||
450 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; | ||
451 | __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * Set a bit in a control register of all cpus | ||
456 | */ | ||
457 | void smp_ctl_set_bit(int cr, int bit) { | ||
458 | ec_creg_mask_parms parms; | ||
459 | |||
460 | parms.start_ctl = cr; | ||
461 | parms.end_ctl = cr; | ||
462 | parms.orvals[cr] = 1 << bit; | ||
463 | parms.andvals[cr] = -1L; | ||
464 | preempt_disable(); | ||
465 | smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); | ||
466 | __ctl_set_bit(cr, bit); | ||
467 | preempt_enable(); | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | * Clear a bit in a control register of all cpus | ||
472 | */ | ||
473 | void smp_ctl_clear_bit(int cr, int bit) { | ||
474 | ec_creg_mask_parms parms; | ||
475 | |||
476 | parms.start_ctl = cr; | ||
477 | parms.end_ctl = cr; | ||
478 | parms.orvals[cr] = 0; | ||
479 | parms.andvals[cr] = ~(1L << bit); | ||
480 | preempt_disable(); | ||
481 | smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); | ||
482 | __ctl_clear_bit(cr, bit); | ||
483 | preempt_enable(); | ||
484 | } | ||
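Both helpers fill the same parameter block: orvals carries the bit to set, and andvals either keeps every bit (-1L) or clears the requested one (~(1L << bit)), so the callback's single expression "(creg & andval) | orval" covers both the set and the clear case. A small stand-alone demonstration of that masking, with a plain variable in place of a control register:

#include <stdio.h>

/* Apply the creg update rule used by smp_ctl_bit_callback(). */
static unsigned long apply(unsigned long creg, unsigned long orval,
			   unsigned long andval)
{
	return (creg & andval) | orval;
}

int main(void)
{
	unsigned long creg = 0x00000010;

	/* "set bit 6": orval = 1 << 6, andval = -1L keeps the existing bits */
	creg = apply(creg, 1UL << 6, -1L);
	printf("after set   : %#lx\n", creg);	/* 0x50 */

	/* "clear bit 4": orval = 0, andval = ~(1L << 4) */
	creg = apply(creg, 0, ~(1UL << 4));
	printf("after clear : %#lx\n", creg);	/* 0x40 */
	return 0;
}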
485 | |||
486 | /* | ||
487 | * Let's check how many CPUs we have. | ||
488 | */ | ||
489 | |||
490 | void | ||
491 | __init smp_check_cpus(unsigned int max_cpus) | ||
492 | { | ||
493 | int cpu, num_cpus; | ||
494 | __u16 boot_cpu_addr; | ||
495 | |||
496 | /* | ||
497 | * cpu 0 is the boot cpu. See smp_prepare_boot_cpu. | ||
498 | */ | ||
499 | |||
500 | boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; | ||
501 | current_thread_info()->cpu = 0; | ||
502 | num_cpus = 1; | ||
503 | for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) { | ||
504 | if ((__u16) cpu == boot_cpu_addr) | ||
505 | continue; | ||
506 | __cpu_logical_map[num_cpus] = (__u16) cpu; | ||
507 | if (signal_processor(num_cpus, sigp_sense) == | ||
508 | sigp_not_operational) | ||
509 | continue; | ||
510 | cpu_set(num_cpus, cpu_present_map); | ||
511 | num_cpus++; | ||
512 | } | ||
513 | |||
514 | for (cpu = 1; cpu < max_cpus; cpu++) | ||
515 | cpu_set(cpu, cpu_possible_map); | ||
516 | |||
517 | printk("Detected %d CPUs\n", (int) num_cpus); | ||
518 | printk("Boot cpu address %2X\n", boot_cpu_addr); | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * Activate a secondary processor. | ||
523 | */ | ||
524 | extern void init_cpu_timer(void); | ||
525 | extern void init_cpu_vtimer(void); | ||
526 | extern int pfault_init(void); | ||
527 | extern void pfault_fini(void); | ||
528 | |||
529 | int __devinit start_secondary(void *cpuvoid) | ||
530 | { | ||
531 | /* Setup the cpu */ | ||
532 | cpu_init(); | ||
533 | /* init per CPU timer */ | ||
534 | init_cpu_timer(); | ||
535 | #ifdef CONFIG_VIRT_TIMER | ||
536 | init_cpu_vtimer(); | ||
537 | #endif | ||
538 | #ifdef CONFIG_PFAULT | ||
539 | /* Enable pfault pseudo page faults on this cpu. */ | ||
540 | pfault_init(); | ||
541 | #endif | ||
542 | /* Mark this cpu as online */ | ||
543 | cpu_set(smp_processor_id(), cpu_online_map); | ||
544 | /* Switch on interrupts */ | ||
545 | local_irq_enable(); | ||
546 | /* Print info about this processor */ | ||
547 | print_cpu_info(&S390_lowcore.cpu_data); | ||
548 | /* cpu_idle will call schedule for us */ | ||
549 | cpu_idle(); | ||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static void __init smp_create_idle(unsigned int cpu) | ||
554 | { | ||
555 | struct task_struct *p; | ||
556 | |||
557 | /* | ||
558 | * don't care about the psw and regs settings since we'll never | ||
559 | * reschedule the forked task. | ||
560 | */ | ||
561 | p = fork_idle(cpu); | ||
562 | if (IS_ERR(p)) | ||
563 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
564 | current_set[cpu] = p; | ||
565 | } | ||
566 | |||
567 | /* Reserving and releasing of CPUs */ | ||
568 | |||
569 | static DEFINE_SPINLOCK(smp_reserve_lock); | ||
570 | static int smp_cpu_reserved[NR_CPUS]; | ||
571 | |||
572 | int | ||
573 | smp_get_cpu(cpumask_t cpu_mask) | ||
574 | { | ||
575 | unsigned long flags; | ||
576 | int cpu; | ||
577 | |||
578 | spin_lock_irqsave(&smp_reserve_lock, flags); | ||
579 | /* Try to find an already reserved cpu. */ | ||
580 | for_each_cpu_mask(cpu, cpu_mask) { | ||
581 | if (smp_cpu_reserved[cpu] != 0) { | ||
582 | smp_cpu_reserved[cpu]++; | ||
583 | /* Found one. */ | ||
584 | goto out; | ||
585 | } | ||
586 | } | ||
587 | /* Reserve a new cpu from cpu_mask. */ | ||
588 | for_each_cpu_mask(cpu, cpu_mask) { | ||
589 | if (cpu_online(cpu)) { | ||
590 | smp_cpu_reserved[cpu]++; | ||
591 | goto out; | ||
592 | } | ||
593 | } | ||
594 | cpu = -ENODEV; | ||
595 | out: | ||
596 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | ||
597 | return cpu; | ||
598 | } | ||
599 | |||
600 | void | ||
601 | smp_put_cpu(int cpu) | ||
602 | { | ||
603 | unsigned long flags; | ||
604 | |||
605 | spin_lock_irqsave(&smp_reserve_lock, flags); | ||
606 | smp_cpu_reserved[cpu]--; | ||
607 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | ||
608 | } | ||
609 | |||
610 | static inline int | ||
611 | cpu_stopped(int cpu) | ||
612 | { | ||
613 | __u32 status; | ||
614 | |||
615 | /* Check for stopped state */ | ||
616 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { | ||
617 | if (status & 0x40) | ||
618 | return 1; | ||
619 | } | ||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | /* Upping and downing of CPUs */ | ||
624 | |||
625 | int | ||
626 | __cpu_up(unsigned int cpu) | ||
627 | { | ||
628 | struct task_struct *idle; | ||
629 | struct _lowcore *cpu_lowcore; | ||
630 | struct stack_frame *sf; | ||
631 | sigp_ccode ccode; | ||
632 | int curr_cpu; | ||
633 | |||
634 | for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { | ||
635 | __cpu_logical_map[cpu] = (__u16) curr_cpu; | ||
636 | if (cpu_stopped(cpu)) | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | if (!cpu_stopped(cpu)) | ||
641 | return -ENODEV; | ||
642 | |||
643 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), | ||
644 | cpu, sigp_set_prefix); | ||
645 | if (ccode){ | ||
646 | printk("sigp_set_prefix failed for cpu %d " | ||
647 | "with condition code %d\n", | ||
648 | (int) cpu, (int) ccode); | ||
649 | return -EIO; | ||
650 | } | ||
651 | |||
652 | idle = current_set[cpu]; | ||
653 | cpu_lowcore = lowcore_ptr[cpu]; | ||
654 | cpu_lowcore->kernel_stack = (unsigned long) | ||
655 | idle->thread_info + (THREAD_SIZE); | ||
656 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack | ||
657 | - sizeof(struct pt_regs) | ||
658 | - sizeof(struct stack_frame)); | ||
659 | memset(sf, 0, sizeof(struct stack_frame)); | ||
660 | sf->gprs[9] = (unsigned long) sf; | ||
661 | cpu_lowcore->save_area[15] = (unsigned long) sf; | ||
662 | __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15); | ||
663 | __asm__ __volatile__("stam 0,15,0(%0)" | ||
664 | : : "a" (&cpu_lowcore->access_regs_save_area) | ||
665 | : "memory"); | ||
666 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; | ||
667 | cpu_lowcore->current_task = (unsigned long) idle; | ||
668 | cpu_lowcore->cpu_data.cpu_nr = cpu; | ||
669 | eieio(); | ||
670 | signal_processor(cpu,sigp_restart); | ||
671 | |||
672 | while (!cpu_online(cpu)) | ||
673 | cpu_relax(); | ||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | int | ||
678 | __cpu_disable(void) | ||
679 | { | ||
680 | unsigned long flags; | ||
681 | ec_creg_mask_parms cr_parms; | ||
682 | |||
683 | spin_lock_irqsave(&smp_reserve_lock, flags); | ||
684 | if (smp_cpu_reserved[smp_processor_id()] != 0) { | ||
685 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | ||
686 | return -EBUSY; | ||
687 | } | ||
688 | |||
689 | #ifdef CONFIG_PFAULT | ||
690 | /* Disable pfault pseudo page faults on this cpu. */ | ||
691 | pfault_fini(); | ||
692 | #endif | ||
693 | |||
694 | /* disable all external interrupts */ | ||
695 | |||
696 | cr_parms.start_ctl = 0; | ||
697 | cr_parms.end_ctl = 0; | ||
698 | cr_parms.orvals[0] = 0; | ||
699 | cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | | ||
700 | 1<<11 | 1<<10 | 1<< 6 | 1<< 4); | ||
701 | smp_ctl_bit_callback(&cr_parms); | ||
702 | |||
703 | /* disable all I/O interrupts */ | ||
704 | |||
705 | cr_parms.start_ctl = 6; | ||
706 | cr_parms.end_ctl = 6; | ||
707 | cr_parms.orvals[6] = 0; | ||
708 | cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | | ||
709 | 1<<27 | 1<<26 | 1<<25 | 1<<24); | ||
710 | smp_ctl_bit_callback(&cr_parms); | ||
711 | |||
712 | /* disable most machine checks */ | ||
713 | |||
714 | cr_parms.start_ctl = 14; | ||
715 | cr_parms.end_ctl = 14; | ||
716 | cr_parms.orvals[14] = 0; | ||
717 | cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); | ||
718 | smp_ctl_bit_callback(&cr_parms); | ||
719 | |||
720 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | ||
721 | return 0; | ||
722 | } | ||
723 | |||
724 | void | ||
725 | __cpu_die(unsigned int cpu) | ||
726 | { | ||
727 | /* Wait until target cpu is down */ | ||
728 | while (!smp_cpu_not_running(cpu)) | ||
729 | cpu_relax(); | ||
730 | printk("Processor %d spun down\n", cpu); | ||
731 | } | ||
732 | |||
733 | void | ||
734 | cpu_die(void) | ||
735 | { | ||
736 | idle_task_exit(); | ||
737 | signal_processor(smp_processor_id(), sigp_stop); | ||
738 | BUG(); | ||
739 | for(;;); | ||
740 | } | ||
741 | |||
742 | /* | ||
743 | * Cycle through the processors and setup structures. | ||
744 | */ | ||
745 | |||
746 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
747 | { | ||
748 | unsigned long stack; | ||
749 | unsigned int cpu; | ||
750 | int i; | ||
751 | |||
752 | /* request the 0x1202 external interrupt */ | ||
753 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | ||
754 | panic("Couldn't request external interrupt 0x1202"); | ||
755 | smp_check_cpus(max_cpus); | ||
756 | memset(lowcore_ptr,0,sizeof(lowcore_ptr)); | ||
757 | /* | ||
758 | * Initialize prefix pages and stacks for all possible cpus | ||
759 | */ | ||
760 | print_cpu_info(&S390_lowcore.cpu_data); | ||
761 | |||
762 | for(i = 0; i < NR_CPUS; i++) { | ||
763 | if (!cpu_possible(i)) | ||
764 | continue; | ||
765 | lowcore_ptr[i] = (struct _lowcore *) | ||
766 | __get_free_pages(GFP_KERNEL|GFP_DMA, | ||
767 | sizeof(void*) == 8 ? 1 : 0); | ||
768 | stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); | ||
769 | if (lowcore_ptr[i] == NULL || stack == 0ULL) | ||
770 | panic("smp_boot_cpus failed to allocate memory\n"); | ||
771 | |||
772 | *(lowcore_ptr[i]) = S390_lowcore; | ||
773 | lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); | ||
774 | #ifdef CONFIG_CHECK_STACK | ||
775 | stack = __get_free_pages(GFP_KERNEL,0); | ||
776 | if (stack == 0ULL) | ||
777 | panic("smp_boot_cpus failed to allocate memory\n"); | ||
778 | lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); | ||
779 | #endif | ||
780 | } | ||
781 | set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]); | ||
782 | |||
783 | for_each_cpu(cpu) | ||
784 | if (cpu != smp_processor_id()) | ||
785 | smp_create_idle(cpu); | ||
786 | } | ||
787 | |||
788 | void __devinit smp_prepare_boot_cpu(void) | ||
789 | { | ||
790 | BUG_ON(smp_processor_id() != 0); | ||
791 | |||
792 | cpu_set(0, cpu_online_map); | ||
793 | cpu_set(0, cpu_present_map); | ||
794 | cpu_set(0, cpu_possible_map); | ||
795 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | ||
796 | current_set[0] = current; | ||
797 | } | ||
798 | |||
799 | void smp_cpus_done(unsigned int max_cpus) | ||
800 | { | ||
801 | cpu_present_map = cpu_possible_map; | ||
802 | } | ||
803 | |||
804 | /* | ||
805 | * the frequency of the profiling timer can be changed | ||
806 | * by writing a multiplier value into /proc/profile. | ||
807 | * | ||
808 | * usually you want to run this on all CPUs ;) | ||
809 | */ | ||
810 | int setup_profiling_timer(unsigned int multiplier) | ||
811 | { | ||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | ||
816 | |||
817 | static int __init topology_init(void) | ||
818 | { | ||
819 | int cpu; | ||
820 | int ret; | ||
821 | |||
822 | for_each_cpu(cpu) { | ||
823 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); | ||
824 | if (ret) | ||
825 | printk(KERN_WARNING "topology_init: register_cpu %d " | ||
826 | "failed (%d)\n", cpu, ret); | ||
827 | } | ||
828 | return 0; | ||
829 | } | ||
830 | |||
831 | subsys_initcall(topology_init); | ||
832 | |||
833 | EXPORT_SYMBOL(cpu_possible_map); | ||
834 | EXPORT_SYMBOL(lowcore_ptr); | ||
835 | EXPORT_SYMBOL(smp_ctl_set_bit); | ||
836 | EXPORT_SYMBOL(smp_ctl_clear_bit); | ||
837 | EXPORT_SYMBOL(smp_call_function); | ||
838 | EXPORT_SYMBOL(smp_get_cpu); | ||
839 | EXPORT_SYMBOL(smp_put_cpu); | ||
840 | |||
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c new file mode 100644 index 000000000000..efe6b83b53f7 --- /dev/null +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -0,0 +1,270 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/sys_s390.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Thomas Spatzier (tspat@de.ibm.com) | ||
8 | * | ||
9 | * Derived from "arch/i386/kernel/sys_i386.c" | ||
10 | * | ||
11 | * This file contains various random system calls that | ||
12 | * have a non-standard calling sequence on the Linux/s390 | ||
13 | * platform. | ||
14 | */ | ||
15 | |||
16 | #include <linux/errno.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/smp.h> | ||
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/sem.h> | ||
22 | #include <linux/msg.h> | ||
23 | #include <linux/shm.h> | ||
24 | #include <linux/stat.h> | ||
25 | #include <linux/syscalls.h> | ||
26 | #include <linux/mman.h> | ||
27 | #include <linux/file.h> | ||
28 | #include <linux/utsname.h> | ||
29 | #ifdef CONFIG_ARCH_S390X | ||
30 | #include <linux/personality.h> | ||
31 | #endif /* CONFIG_ARCH_S390X */ | ||
32 | |||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/ipc.h> | ||
35 | |||
36 | /* | ||
37 | * sys_pipe() is the normal C calling standard for creating | ||
38 | * a pipe. It's not the way Unix traditionally does this, though. | ||
39 | */ | ||
40 | asmlinkage long sys_pipe(unsigned long __user *fildes) | ||
41 | { | ||
42 | int fd[2]; | ||
43 | int error; | ||
44 | |||
45 | error = do_pipe(fd); | ||
46 | if (!error) { | ||
47 | if (copy_to_user(fildes, fd, 2*sizeof(int))) | ||
48 | error = -EFAULT; | ||
49 | } | ||
50 | return error; | ||
51 | } | ||
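From user space this entry point is simply pipe(2): the kernel fills a two-element int array and copies it back to the caller. A minimal usage example, standard POSIX and independent of this patch:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd[2];
	char buf[16] = { 0 };

	if (pipe(fd) < 0) {	/* ends up in sys_pipe() */
		perror("pipe");
		return 1;
	}
	write(fd[1], "ping", 4);		/* fd[1] is the write end */
	read(fd[0], buf, sizeof(buf) - 1);	/* fd[0] is the read end */
	printf("got: %s\n", buf);
	close(fd[0]);
	close(fd[1]);
	return 0;
}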
52 | |||
53 | /* common code for old and new mmaps */ | ||
54 | static inline long do_mmap2( | ||
55 | unsigned long addr, unsigned long len, | ||
56 | unsigned long prot, unsigned long flags, | ||
57 | unsigned long fd, unsigned long pgoff) | ||
58 | { | ||
59 | long error = -EBADF; | ||
60 | struct file * file = NULL; | ||
61 | |||
62 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
63 | if (!(flags & MAP_ANONYMOUS)) { | ||
64 | file = fget(fd); | ||
65 | if (!file) | ||
66 | goto out; | ||
67 | } | ||
68 | |||
69 | down_write(¤t->mm->mmap_sem); | ||
70 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
71 | up_write(¤t->mm->mmap_sem); | ||
72 | |||
73 | if (file) | ||
74 | fput(file); | ||
75 | out: | ||
76 | return error; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
81 | * calls. Linux for S/390 isn't able to handle more than 5 | ||
82 | * system call parameters, so these system calls use a memory | ||
83 | * block for parameter passing. | ||
84 | */ | ||
85 | |||
86 | struct mmap_arg_struct { | ||
87 | unsigned long addr; | ||
88 | unsigned long len; | ||
89 | unsigned long prot; | ||
90 | unsigned long flags; | ||
91 | unsigned long fd; | ||
92 | unsigned long offset; | ||
93 | }; | ||
94 | |||
95 | asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg) | ||
96 | { | ||
97 | struct mmap_arg_struct a; | ||
98 | int error = -EFAULT; | ||
99 | |||
100 | if (copy_from_user(&a, arg, sizeof(a))) | ||
101 | goto out; | ||
102 | error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
103 | out: | ||
104 | return error; | ||
105 | } | ||
106 | |||
107 | asmlinkage long old_mmap(struct mmap_arg_struct __user *arg) | ||
108 | { | ||
109 | struct mmap_arg_struct a; | ||
110 | long error = -EFAULT; | ||
111 | |||
112 | if (copy_from_user(&a, arg, sizeof(a))) | ||
113 | goto out; | ||
114 | |||
115 | error = -EINVAL; | ||
116 | if (a.offset & ~PAGE_MASK) | ||
117 | goto out; | ||
118 | |||
119 | error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
120 | out: | ||
121 | return error; | ||
122 | } | ||
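As a hedged illustration of the memory-block convention described in the comment above, a hypothetical user-space caller packs all six mmap arguments into one structure and passes only its address. SYS_mmap, the glibc syscall() wrapper and the mirrored struct definition are assumptions made for the sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* user-space mirror of the kernel's struct mmap_arg_struct; offset in bytes */
    struct mmap_args {
            unsigned long addr, len, prot, flags, fd, offset;
    };

    int main(void)
    {
            struct mmap_args a = {
                    .addr   = 0,
                    .len    = 4096,
                    .prot   = PROT_READ | PROT_WRITE,
                    .flags  = MAP_PRIVATE | MAP_ANONYMOUS,
                    .fd     = (unsigned long) -1,
                    .offset = 0,
            };
            /* one pointer instead of six separate syscall parameters */
            void *p = (void *) syscall(SYS_mmap, &a);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            printf("anonymous mapping at %p\n", p);
            return 0;
    }

As the two wrappers above show, sys_mmap2 consumes the same block but interprets the offset in pages rather than bytes.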
123 | |||
124 | #ifndef CONFIG_ARCH_S390X | ||
125 | struct sel_arg_struct { | ||
126 | unsigned long n; | ||
127 | fd_set *inp, *outp, *exp; | ||
128 | struct timeval *tvp; | ||
129 | }; | ||
130 | |||
131 | asmlinkage long old_select(struct sel_arg_struct __user *arg) | ||
132 | { | ||
133 | struct sel_arg_struct a; | ||
134 | |||
135 | if (copy_from_user(&a, arg, sizeof(a))) | ||
136 | return -EFAULT; | ||
137 | /* sys_select() does the appropriate kernel locking */ | ||
138 | return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); | ||
139 | |||
140 | } | ||
141 | #endif /* CONFIG_ARCH_S390X */ | ||
142 | |||
143 | /* | ||
144 | * sys_ipc() is the de-multiplexer for the SysV IPC calls. | ||
145 | * | ||
146 | * This is really horribly ugly. | ||
147 | */ | ||
148 | asmlinkage long sys_ipc(uint call, int first, unsigned long second, | ||
149 | unsigned long third, void __user *ptr) | ||
150 | { | ||
151 | struct ipc_kludge tmp; | ||
152 | int ret; | ||
153 | |||
154 | switch (call) { | ||
155 | case SEMOP: | ||
156 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
157 | (unsigned)second, NULL); | ||
158 | case SEMTIMEDOP: | ||
159 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
160 | (unsigned)second, | ||
161 | (const struct timespec __user *) third); | ||
162 | case SEMGET: | ||
163 | return sys_semget(first, (int)second, third); | ||
164 | case SEMCTL: { | ||
165 | union semun fourth; | ||
166 | if (!ptr) | ||
167 | return -EINVAL; | ||
168 | if (get_user(fourth.__pad, (void __user * __user *) ptr)) | ||
169 | return -EFAULT; | ||
170 | return sys_semctl(first, (int)second, third, fourth); | ||
171 | } | ||
172 | case MSGSND: | ||
173 | return sys_msgsnd (first, (struct msgbuf __user *) ptr, | ||
174 | (size_t)second, third); | ||
175 | break; | ||
176 | case MSGRCV: | ||
177 | if (!ptr) | ||
178 | return -EINVAL; | ||
179 | if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr, | ||
180 | sizeof (struct ipc_kludge))) | ||
181 | return -EFAULT; | ||
182 | return sys_msgrcv (first, tmp.msgp, | ||
183 | (size_t)second, tmp.msgtyp, third); | ||
184 | case MSGGET: | ||
185 | return sys_msgget((key_t)first, (int)second); | ||
186 | case MSGCTL: | ||
187 | return sys_msgctl(first, (int)second, | ||
188 | (struct msqid_ds __user *)ptr); | ||
189 | |||
190 | case SHMAT: { | ||
191 | ulong raddr; | ||
192 | ret = do_shmat(first, (char __user *)ptr, | ||
193 | (int)second, &raddr); | ||
194 | if (ret) | ||
195 | return ret; | ||
196 | return put_user (raddr, (ulong __user *) third); | ||
197 | break; | ||
198 | } | ||
199 | case SHMDT: | ||
200 | return sys_shmdt ((char __user *)ptr); | ||
201 | case SHMGET: | ||
202 | return sys_shmget(first, (size_t)second, third); | ||
203 | case SHMCTL: | ||
204 | return sys_shmctl(first, (int)second, | ||
205 | (struct shmid_ds __user *) ptr); | ||
206 | default: | ||
207 | return -ENOSYS; | ||
208 | |||
209 | } | ||
210 | |||
211 | return -EINVAL; | ||
212 | } | ||
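For illustration, a hedged sketch of how a traditional semop() call can reach the SEMOP branch above: everything is funnelled through the single ipc syscall, with the operation code first and the sembuf pointer in the ptr slot. SYS_ipc, the glibc syscall() wrapper and the literal operation code are assumptions of the sketch:

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define IPC_CALL_SEMOP 1   /* multiplexer code assumed to match SEMOP above */

    int main(void)
    {
            int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
            struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };

            if (semid < 0) {
                    perror("semget");
                    return 1;
            }
            /* equivalent of semop(semid, &op, 1): first = semid, second = nsops */
            if (syscall(SYS_ipc, IPC_CALL_SEMOP, semid, 1, 0, &op) < 0) {
                    perror("ipc(SEMOP)");
                    return 1;
            }
            return 0;
    }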
213 | |||
214 | #ifdef CONFIG_ARCH_S390X | ||
215 | asmlinkage long s390x_newuname(struct new_utsname __user *name) | ||
216 | { | ||
217 | int ret = sys_newuname(name); | ||
218 | |||
219 | if (current->personality == PER_LINUX32 && !ret) { | ||
220 | ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); | ||
221 | if (ret) ret = -EFAULT; | ||
222 | } | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | asmlinkage long s390x_personality(unsigned long personality) | ||
227 | { | ||
228 | int ret; | ||
229 | |||
230 | if (current->personality == PER_LINUX32 && personality == PER_LINUX) | ||
231 | personality = PER_LINUX32; | ||
232 | ret = sys_personality(personality); | ||
233 | if (ret == PER_LINUX32) | ||
234 | ret = PER_LINUX; | ||
235 | |||
236 | return ret; | ||
237 | } | ||
238 | #endif /* CONFIG_ARCH_S390X */ | ||
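A brief, hypothetical user-space illustration of the PER_LINUX32 handling above, assuming the standard glibc personality() and uname() wrappers: a 64-bit process that switches to the 31-bit personality is expected to see "s390" rather than "s390x" in the machine field:

    #include <stdio.h>
    #include <sys/personality.h>
    #include <sys/utsname.h>

    int main(void)
    {
            struct utsname u;

            personality(PER_LINUX32);   /* request the 31 bit personality */
            if (uname(&u) == 0)
                    printf("machine: %s\n", u.machine);
            return 0;
    }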
239 | |||
240 | /* | ||
241 | * Wrapper function for sys_fadvise64/fadvise64_64 | ||
242 | */ | ||
243 | #ifndef CONFIG_ARCH_S390X | ||
244 | |||
245 | asmlinkage long | ||
246 | s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice) | ||
247 | { | ||
248 | return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low, | ||
249 | len, advice); | ||
250 | } | ||
251 | |||
252 | #endif | ||
253 | |||
254 | struct fadvise64_64_args { | ||
255 | int fd; | ||
256 | long long offset; | ||
257 | long long len; | ||
258 | int advice; | ||
259 | }; | ||
260 | |||
261 | asmlinkage long | ||
262 | s390_fadvise64_64(struct fadvise64_64_args __user *args) | ||
263 | { | ||
264 | struct fadvise64_64_args a; | ||
265 | |||
266 | if ( copy_from_user(&a, args, sizeof(a)) ) | ||
267 | return -EFAULT; | ||
268 | return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); | ||
269 | } | ||
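A sketch of why this wrapper takes a pointer: fd, a 64-bit offset, a 64-bit length and the advice value would need six 31-bit words, one more than the five parameters the syscall path passes in registers, so a hypothetical 31-bit caller packs them into the block defined above. SYS_fadvise64_64 and the mirrored struct are assumptions of the sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* user-space mirror of the kernel's struct fadvise64_64_args */
    struct fadvise_args {
            int fd;
            long long offset;
            long long len;
            int advice;
    };

    int main(void)
    {
            int fd = open("/etc/hostname", O_RDONLY);
            struct fadvise_args a = {
                    .fd = fd, .offset = 0, .len = 0, .advice = POSIX_FADV_DONTNEED,
            };

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* one pointer argument instead of six words */
            if (syscall(SYS_fadvise64_64, &a) < 0)
                    perror("fadvise64_64");
            close(fd);
            return 0;
    }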
270 | |||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S new file mode 100644 index 000000000000..515938628f82 --- /dev/null +++ b/arch/s390/kernel/syscalls.S | |||
@@ -0,0 +1,292 @@ | |||
1 | /* | ||
2 | * definitions for sys_call_table, each line represents an | ||
3 | * entry in the table in the form | ||
4 | * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall) | ||
5 | * | ||
6 | * this file is meant to be included from entry.S and entry64.S | ||
7 | */ | ||
8 | |||
9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall) | ||
10 | |||
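A hedged sketch, not the verbatim entry.S or entry64.S, of how an including file can turn this table into an actual sys_call_table by defining SYSCALL to select one of the three columns before the #include; the label and directive are assumptions:

            .globl  sys_call_table
    sys_call_table:
    #define SYSCALL(esa,esame,emu)  .long esa      /* 31 bit kernel: first column */
    #include "syscalls.S"
    #undef SYSCALL

A 64-bit build would select the esame column instead, and the 31-bit compat table of the 64-bit kernel the emu column.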
11 | NI_SYSCALL /* 0 */ | ||
12 | SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) | ||
13 | SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) | ||
14 | SYSCALL(sys_read,sys_read,sys32_read_wrapper) | ||
15 | SYSCALL(sys_write,sys_write,sys32_write_wrapper) | ||
16 | SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ | ||
17 | SYSCALL(sys_close,sys_close,sys32_close_wrapper) | ||
18 | SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) | ||
19 | SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) | ||
20 | SYSCALL(sys_link,sys_link,sys32_link_wrapper) | ||
21 | SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ | ||
22 | SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) | ||
23 | SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) | ||
24 | SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ | ||
25 | SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) | ||
26 | SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */ | ||
27 | SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/ | ||
28 | NI_SYSCALL /* old break syscall holder */ | ||
29 | NI_SYSCALL /* old stat syscall holder */ | ||
30 | SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper) | ||
31 | SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */ | ||
32 | SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper) | ||
33 | SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper) | ||
34 | SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/ | ||
35 | SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/ | ||
36 | SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */ | ||
37 | SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper) | ||
38 | SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper) | ||
39 | NI_SYSCALL /* old fstat syscall */ | ||
40 | SYSCALL(sys_pause,sys_pause,sys32_pause) | ||
41 | SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper) /* 30 */ | ||
42 | NI_SYSCALL /* old stty syscall */ | ||
43 | NI_SYSCALL /* old gtty syscall */ | ||
44 | SYSCALL(sys_access,sys_access,sys32_access_wrapper) | ||
45 | SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper) | ||
46 | NI_SYSCALL /* 35 old ftime syscall */ | ||
47 | SYSCALL(sys_sync,sys_sync,sys_sync) | ||
48 | SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper) | ||
49 | SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper) | ||
50 | SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper) | ||
51 | SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper) /* 40 */ | ||
52 | SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper) | ||
53 | SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper) | ||
54 | SYSCALL(sys_times,sys_times,compat_sys_times_wrapper) | ||
55 | NI_SYSCALL /* old prof syscall */ | ||
56 | SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper) /* 45 */ | ||
57 | SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper) /* old setgid16 syscall*/ | ||
58 | SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/ | ||
59 | SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper) | ||
60 | SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */ | ||
61 | SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16) /* 50 old getegid16 syscall */ | ||
62 | SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper) | ||
63 | SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper) | ||
64 | NI_SYSCALL /* old lock syscall */ | ||
65 | SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper) | ||
66 | SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper) /* 55 */ | ||
67 | NI_SYSCALL /* intel mpx syscall */ | ||
68 | SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper) | ||
69 | NI_SYSCALL /* old ulimit syscall */ | ||
70 | NI_SYSCALL /* old uname syscall */ | ||
71 | SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper) /* 60 */ | ||
72 | SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper) | ||
73 | SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper) | ||
74 | SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper) | ||
75 | SYSCALL(sys_getppid,sys_getppid,sys_getppid) | ||
76 | SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ | ||
77 | SYSCALL(sys_setsid,sys_setsid,sys_setsid) | ||
78 | SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper) | ||
79 | NI_SYSCALL /* old sgetmask syscall*/ | ||
80 | NI_SYSCALL /* old ssetmask syscall*/ | ||
81 | SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */ | ||
82 | SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */ | ||
83 | SYSCALL(sys_sigsuspend_glue,sys_sigsuspend_glue,sys32_sigsuspend_glue) | ||
84 | SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper) | ||
85 | SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper) | ||
86 | SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */ | ||
87 | SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper) | ||
88 | SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper) | ||
89 | SYSCALL(sys_gettimeofday,sys_gettimeofday,sys32_gettimeofday_wrapper) | ||
90 | SYSCALL(sys_settimeofday,sys_settimeofday,sys32_settimeofday_wrapper) | ||
91 | SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */ | ||
92 | SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */ | ||
93 | NI_SYSCALL /* old select syscall */ | ||
94 | SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper) | ||
95 | NI_SYSCALL /* old lstat syscall */ | ||
96 | SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper) /* 85 */ | ||
97 | SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper) | ||
98 | SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper) | ||
99 | SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper) | ||
100 | SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */ | ||
101 | SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper) /* 90 */ | ||
102 | SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper) | ||
103 | SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper) | ||
104 | SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper) | ||
105 | SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper) | ||
106 | SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/ | ||
107 | SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper) | ||
108 | SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper) | ||
109 | NI_SYSCALL /* old profil syscall */ | ||
110 | SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper) | ||
111 | SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */ | ||
112 | NI_SYSCALL /* ioperm for i386 */ | ||
113 | SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper) | ||
114 | SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper) | ||
115 | SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper) | ||
116 | SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */ | ||
117 | SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper) | ||
118 | SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper) | ||
119 | SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper) | ||
120 | NI_SYSCALL /* old uname syscall */ | ||
121 | SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 110 */ | ||
122 | SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) | ||
123 | NI_SYSCALL /* old "idle" system call */ | ||
124 | NI_SYSCALL /* vm86old for i386 */ | ||
125 | SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper) | ||
126 | SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ | ||
127 | SYSCALL(sys_sysinfo,sys_sysinfo,sys32_sysinfo_wrapper) | ||
128 | SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) | ||
129 | SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) | ||
130 | SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) | ||
131 | SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ | ||
132 | SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) | ||
133 | SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) | ||
134 | NI_SYSCALL /* modify_ldt for i386 */ | ||
135 | SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper) | ||
136 | SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ | ||
137 | SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) | ||
138 | NI_SYSCALL /* old "create module" */ | ||
139 | SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper) | ||
140 | SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper) | ||
141 | NI_SYSCALL /* 130: old get_kernel_syms */ | ||
142 | SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper) | ||
143 | SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper) | ||
144 | SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper) | ||
145 | SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper) | ||
146 | SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */ | ||
147 | SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper) | ||
148 | NI_SYSCALL /* for afs_syscall */ | ||
149 | SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */ | ||
150 | SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */ | ||
151 | SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper) /* 140 */ | ||
152 | SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper) | ||
153 | SYSCALL(sys_select,sys_select,compat_sys_select_wrapper) | ||
154 | SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper) | ||
155 | SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper) | ||
156 | SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */ | ||
157 | SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper) | ||
158 | SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper) | ||
159 | SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper) | ||
160 | SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper) | ||
161 | SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */ | ||
162 | SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper) | ||
163 | SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper) | ||
164 | SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall) | ||
165 | SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper) | ||
166 | SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */ | ||
167 | SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper) | ||
168 | SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper) | ||
169 | SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield) | ||
170 | SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper) | ||
171 | SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */ | ||
172 | SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper) | ||
173 | SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper) | ||
174 | SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper) | ||
175 | SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */ | ||
176 | SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old getresuid16 syscall */ | ||
177 | NI_SYSCALL /* for vm86 */ | ||
178 | NI_SYSCALL /* old sys_query_module */ | ||
179 | SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper) | ||
180 | SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) | ||
181 | SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ | ||
182 | SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ | ||
183 | SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) | ||
184 | SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) | ||
185 | SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) | ||
186 | SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ | ||
187 | SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) | ||
188 | SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper) | ||
189 | SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper) | ||
190 | SYSCALL(sys_rt_sigsuspend_glue,sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue) | ||
191 | SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */ | ||
192 | SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper) | ||
193 | SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */ | ||
194 | SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) | ||
195 | SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) | ||
196 | SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ | ||
197 | SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) | ||
198 | SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) | ||
199 | NI_SYSCALL /* streams1 */ | ||
200 | NI_SYSCALL /* streams2 */ | ||
201 | SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ | ||
202 | SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) | ||
203 | SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) | ||
204 | SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) | ||
205 | SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper) | ||
206 | SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper) /* 195 */ | ||
207 | SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper) | ||
208 | SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper) | ||
209 | SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper) | ||
210 | SYSCALL(sys_getuid,sys_getuid,sys_getuid) | ||
211 | SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */ | ||
212 | SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid) | ||
213 | SYSCALL(sys_getegid,sys_getegid,sys_getegid) | ||
214 | SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper) | ||
215 | SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper) | ||
216 | SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper) /* 205 */ | ||
217 | SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper) | ||
218 | SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper) | ||
219 | SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper) | ||
220 | SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper) | ||
221 | SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper) /* 210 */ | ||
222 | SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper) | ||
223 | SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper) | ||
224 | SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper) | ||
225 | SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper) | ||
226 | SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */ | ||
227 | SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper) | ||
228 | SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper) | ||
229 | SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper) | ||
230 | SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper) | ||
231 | SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */ | ||
232 | SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper) | ||
233 | SYSCALL(sys_readahead,sys_readahead,sys32_readahead) | ||
234 | SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64) | ||
235 | SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper) | ||
236 | SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */ | ||
237 | SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper) | ||
238 | SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper) | ||
239 | SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper) | ||
240 | SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper) | ||
241 | SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper) /* 230 */ | ||
242 | SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper) | ||
243 | SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper) | ||
244 | SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper) | ||
245 | SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper) | ||
246 | SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */ | ||
247 | SYSCALL(sys_gettid,sys_gettid,sys_gettid) | ||
248 | SYSCALL(sys_tkill,sys_tkill,sys_tkill) | ||
249 | SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper) | ||
250 | SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper) | ||
251 | SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */ | ||
252 | SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill) | ||
253 | NI_SYSCALL /* reserved for TUX */ | ||
254 | SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper) | ||
255 | SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper) | ||
256 | SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper) /* 245 */ | ||
257 | SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper) | ||
258 | SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper) | ||
259 | SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper) | ||
260 | SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper) | ||
261 | SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */ | ||
262 | SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper) | ||
263 | SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper) | ||
264 | SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper) | ||
265 | SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper) | ||
266 | SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */ | ||
267 | SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper) | ||
268 | SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper) | ||
269 | SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper) | ||
270 | SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper) | ||
271 | SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */ | ||
272 | SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper) | ||
273 | SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper) | ||
274 | NI_SYSCALL /* reserved for vserver */ | ||
275 | SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper) | ||
276 | SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper) | ||
277 | SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper) | ||
278 | SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper) | ||
279 | NI_SYSCALL /* 268 sys_mbind */ | ||
280 | NI_SYSCALL /* 269 sys_get_mempolicy */ | ||
281 | NI_SYSCALL /* 270 sys_set_mempolicy */ | ||
282 | SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper) | ||
283 | SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper) | ||
284 | SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper) | ||
285 | SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper) | ||
286 | SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */ | ||
287 | SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper) | ||
288 | NI_SYSCALL /* reserved for kexec */ | ||
289 | SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper) | ||
290 | SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper) | ||
291 | SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */ | ||
292 | SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper) | ||
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c new file mode 100644 index 000000000000..061e81138dc2 --- /dev/null +++ b/arch/s390/kernel/time.c | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/time.c | ||
3 | * Time of day based timer functions. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Hartmut Penner (hp@de.ibm.com), | ||
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | ||
10 | * | ||
11 | * Derived from "arch/i386/kernel/time.c" | ||
12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/param.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/time.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/smp.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/profile.h> | ||
30 | #include <linux/timex.h> | ||
31 | #include <linux/notifier.h> | ||
32 | |||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/delay.h> | ||
35 | #include <asm/s390_ext.h> | ||
36 | #include <asm/div64.h> | ||
37 | #include <asm/irq.h> | ||
38 | #include <asm/timer.h> | ||
39 | |||
40 | /* change this if you have some constant time drift */ | ||
41 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | ||
42 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) | ||
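A worked example of these constants, assuming HZ is 100: the shift by 12 reflects the s390 TOD clock format, in which bit 51 increments once per microsecond, so one microsecond corresponds to 4096 TOD units.

    /*
     * With HZ == 100:
     *   USECS_PER_JIFFY     = 1000000 / 100 = 10000 microseconds per jiffy
     *   CLK_TICKS_PER_JIFFY = 10000 << 12   = 40960000 TOD units per jiffy
     *
     * The same << 12 / >> 12 conversion between microseconds and TOD units
     * shows up in sched_clock(), tod_to_timeval() and do_gettimeoffset() below.
     */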
43 | |||
44 | /* | ||
45 | * Create a small time difference between the timer interrupts | ||
46 | * on the different cpus to avoid lock contention. | ||
47 | */ | ||
48 | #define CPU_DEVIATION (smp_processor_id() << 12) | ||
49 | |||
50 | #define TICK_SIZE tick | ||
51 | |||
52 | u64 jiffies_64 = INITIAL_JIFFIES; | ||
53 | |||
54 | EXPORT_SYMBOL(jiffies_64); | ||
55 | |||
56 | static ext_int_info_t ext_int_info_cc; | ||
57 | static u64 init_timer_cc; | ||
58 | static u64 jiffies_timer_cc; | ||
59 | static u64 xtime_cc; | ||
60 | |||
61 | extern unsigned long wall_jiffies; | ||
62 | |||
63 | /* | ||
64 | * Scheduler clock - returns current time in nanosec units. | ||
65 | */ | ||
66 | unsigned long long sched_clock(void) | ||
67 | { | ||
68 | return ((get_clock() - jiffies_timer_cc) * 1000) >> 12; | ||
69 | } | ||
70 | |||
71 | void tod_to_timeval(__u64 todval, struct timespec *xtime) | ||
72 | { | ||
73 | unsigned long long sec; | ||
74 | |||
75 | sec = todval >> 12; | ||
76 | do_div(sec, 1000000); | ||
77 | xtime->tv_sec = sec; | ||
78 | todval -= (sec * 1000000) << 12; | ||
79 | xtime->tv_nsec = ((todval * 1000) >> 12); | ||
80 | } | ||
81 | |||
82 | static inline unsigned long do_gettimeoffset(void) | ||
83 | { | ||
84 | __u64 now; | ||
85 | |||
86 | now = (get_clock() - jiffies_timer_cc) >> 12; | ||
87 | /* We require the offset from the latest update of xtime */ | ||
88 | now -= (__u64) wall_jiffies*USECS_PER_JIFFY; | ||
89 | return (unsigned long) now; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * This version of gettimeofday has microsecond resolution. | ||
94 | */ | ||
95 | void do_gettimeofday(struct timeval *tv) | ||
96 | { | ||
97 | unsigned long flags; | ||
98 | unsigned long seq; | ||
99 | unsigned long usec, sec; | ||
100 | |||
101 | do { | ||
102 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
103 | |||
104 | sec = xtime.tv_sec; | ||
105 | usec = xtime.tv_nsec / 1000 + do_gettimeoffset(); | ||
106 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
107 | |||
108 | while (usec >= 1000000) { | ||
109 | usec -= 1000000; | ||
110 | sec++; | ||
111 | } | ||
112 | |||
113 | tv->tv_sec = sec; | ||
114 | tv->tv_usec = usec; | ||
115 | } | ||
116 | |||
117 | EXPORT_SYMBOL(do_gettimeofday); | ||
118 | |||
119 | int do_settimeofday(struct timespec *tv) | ||
120 | { | ||
121 | time_t wtm_sec, sec = tv->tv_sec; | ||
122 | long wtm_nsec, nsec = tv->tv_nsec; | ||
123 | |||
124 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | ||
125 | return -EINVAL; | ||
126 | |||
127 | write_seqlock_irq(&xtime_lock); | ||
128 | /* This is revolting. We need to set the xtime.tv_nsec | ||
129 | * correctly. However, the value in this location is | ||
130 | * the value at the last tick. | ||
131 | * Discover what correction gettimeofday | ||
132 | * would have done, and then undo it! | ||
133 | */ | ||
134 | nsec -= do_gettimeoffset() * 1000; | ||
135 | |||
136 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | ||
137 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | ||
138 | |||
139 | set_normalized_timespec(&xtime, sec, nsec); | ||
140 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | ||
141 | |||
142 | time_adjust = 0; /* stop active adjtime() */ | ||
143 | time_status |= STA_UNSYNC; | ||
144 | time_maxerror = NTP_PHASE_LIMIT; | ||
145 | time_esterror = NTP_PHASE_LIMIT; | ||
146 | write_sequnlock_irq(&xtime_lock); | ||
147 | clock_was_set(); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | EXPORT_SYMBOL(do_settimeofday); | ||
152 | |||
153 | |||
154 | #ifdef CONFIG_PROFILING | ||
155 | #define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs) | ||
156 | #else | ||
157 | #define s390_do_profile(regs) do { ; } while(0) | ||
158 | #endif /* CONFIG_PROFILING */ | ||
159 | |||
160 | |||
161 | /* | ||
162 | * timer_interrupt() needs to keep up the real-time clock, | ||
163 | * as well as call the "do_timer()" routine every clocktick | ||
164 | */ | ||
165 | void account_ticks(struct pt_regs *regs) | ||
166 | { | ||
167 | __u64 tmp; | ||
168 | __u32 ticks, xticks; | ||
169 | |||
170 | /* Calculate how many ticks have passed. */ | ||
171 | if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { | ||
172 | /* | ||
173 | * We have to program the clock comparator even if | ||
174 | * no tick has passed. That happens if e.g. an i/o | ||
175 | * interrupt wakes up an idle processor that has | ||
176 | * switched off its hz timer. | ||
177 | */ | ||
178 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
179 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
180 | return; | ||
181 | } | ||
182 | tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer; | ||
183 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ | ||
184 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; | ||
185 | S390_lowcore.jiffy_timer += | ||
186 | CLK_TICKS_PER_JIFFY * (__u64) ticks; | ||
187 | } else if (tmp >= CLK_TICKS_PER_JIFFY) { | ||
188 | ticks = 2; | ||
189 | S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY; | ||
190 | } else { | ||
191 | ticks = 1; | ||
192 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; | ||
193 | } | ||
194 | |||
195 | /* set clock comparator for next tick */ | ||
196 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
197 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
198 | |||
199 | #ifdef CONFIG_SMP | ||
200 | /* | ||
201 | * Do not rely on the boot cpu to do the calls to do_timer. | ||
202 | * Spread it over all cpus instead. | ||
203 | */ | ||
204 | write_seqlock(&xtime_lock); | ||
205 | if (S390_lowcore.jiffy_timer > xtime_cc) { | ||
206 | tmp = S390_lowcore.jiffy_timer - xtime_cc; | ||
207 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { | ||
208 | xticks = __div(tmp, CLK_TICKS_PER_JIFFY); | ||
209 | xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY; | ||
210 | } else { | ||
211 | xticks = 1; | ||
212 | xtime_cc += CLK_TICKS_PER_JIFFY; | ||
213 | } | ||
214 | while (xticks--) | ||
215 | do_timer(regs); | ||
216 | } | ||
217 | write_sequnlock(&xtime_lock); | ||
218 | #else | ||
219 | for (xticks = ticks; xticks > 0; xticks--) | ||
220 | do_timer(regs); | ||
221 | #endif | ||
222 | |||
223 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
224 | account_user_vtime(current); | ||
225 | #else | ||
226 | while (ticks--) | ||
227 | update_process_times(user_mode(regs)); | ||
228 | #endif | ||
229 | |||
230 | s390_do_profile(regs); | ||
231 | } | ||
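A worked example of the catch-up logic above; the 3.5-jiffy idle period is an arbitrary assumption chosen for the arithmetic:

    /*
     * Suppose the CPU slept for 3.5 jiffies past the last programmed tick, so
     * tmp = int_clock - jiffy_timer = 3.5 * CLK_TICKS_PER_JIFFY:
     *
     *   tmp >= 2*CLK_TICKS_PER_JIFFY  ->  ticks = __div(tmp, CLK) + 1 = 3 + 1 = 4
     *
     * i.e. the ticks that fell 0, 1, 2 and 3 jiffies after jiffy_timer are
     * accounted, jiffy_timer advances by 4 jiffies, and the clock comparator
     * is programmed half a jiffy into the future, at the next tick boundary.
     */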
232 | |||
233 | #ifdef CONFIG_NO_IDLE_HZ | ||
234 | |||
235 | #ifdef CONFIG_NO_IDLE_HZ_INIT | ||
236 | int sysctl_hz_timer = 0; | ||
237 | #else | ||
238 | int sysctl_hz_timer = 1; | ||
239 | #endif | ||
240 | |||
241 | /* | ||
242 | * Stop the HZ tick on the current CPU. | ||
243 | * Only cpu_idle may call this function. | ||
244 | */ | ||
245 | static inline void stop_hz_timer(void) | ||
246 | { | ||
247 | __u64 timer; | ||
248 | |||
249 | if (sysctl_hz_timer != 0) | ||
250 | return; | ||
251 | |||
252 | cpu_set(smp_processor_id(), nohz_cpu_mask); | ||
253 | |||
254 | /* | ||
255 | * Leave the clock comparator set up for the next timer | ||
256 | * tick if either rcu or a softirq is pending. | ||
257 | */ | ||
258 | if (rcu_pending(smp_processor_id()) || local_softirq_pending()) { | ||
259 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * This cpu is going really idle. Set up the clock comparator | ||
265 | * for the next event. | ||
266 | */ | ||
267 | timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64; | ||
268 | timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY; | ||
269 | asm volatile ("SCKC %0" : : "m" (timer)); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * Start the HZ tick on the current CPU. | ||
274 | * Only cpu_idle may call this function. | ||
275 | */ | ||
276 | static inline void start_hz_timer(void) | ||
277 | { | ||
278 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | ||
279 | return; | ||
280 | account_ticks(__KSTK_PTREGS(current)); | ||
281 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | ||
282 | } | ||
283 | |||
284 | static int nohz_idle_notify(struct notifier_block *self, | ||
285 | unsigned long action, void *hcpu) | ||
286 | { | ||
287 | switch (action) { | ||
288 | case CPU_IDLE: | ||
289 | stop_hz_timer(); | ||
290 | break; | ||
291 | case CPU_NOT_IDLE: | ||
292 | start_hz_timer(); | ||
293 | break; | ||
294 | } | ||
295 | return NOTIFY_OK; | ||
296 | } | ||
297 | |||
298 | static struct notifier_block nohz_idle_nb = { | ||
299 | .notifier_call = nohz_idle_notify, | ||
300 | }; | ||
301 | |||
302 | void __init nohz_init(void) | ||
303 | { | ||
304 | if (register_idle_notifier(&nohz_idle_nb)) | ||
305 | panic("Couldn't register idle notifier"); | ||
306 | } | ||
307 | |||
308 | #endif | ||
309 | |||
310 | /* | ||
311 | * Start the clock comparator on the current CPU. | ||
312 | */ | ||
313 | void init_cpu_timer(void) | ||
314 | { | ||
315 | unsigned long cr0; | ||
316 | __u64 timer; | ||
317 | |||
318 | timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; | ||
319 | S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; | ||
320 | timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; | ||
321 | asm volatile ("SCKC %0" : : "m" (timer)); | ||
322 | /* allow clock comparator timer interrupt */ | ||
323 | __ctl_store(cr0, 0, 0); | ||
324 | cr0 |= 0x800; | ||
325 | __ctl_load(cr0, 0, 0); | ||
326 | } | ||
327 | |||
328 | extern void vtime_init(void); | ||
329 | |||
330 | /* | ||
331 | * Initialize the TOD clock and the CPU timer of | ||
332 | * the boot cpu. | ||
333 | */ | ||
334 | void __init time_init(void) | ||
335 | { | ||
336 | __u64 set_time_cc; | ||
337 | int cc; | ||
338 | |||
339 | /* kick the TOD clock */ | ||
340 | asm volatile ("STCK 0(%1)\n\t" | ||
341 | "IPM %0\n\t" | ||
342 | "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc) | ||
343 | : "memory", "cc"); | ||
344 | switch (cc) { | ||
345 | case 0: /* clock in set state: all is fine */ | ||
346 | break; | ||
347 | case 1: /* clock in non-set state: FIXME */ | ||
348 | printk("time_init: TOD clock in non-set state\n"); | ||
349 | break; | ||
350 | case 2: /* clock in error state: FIXME */ | ||
351 | printk("time_init: TOD clock in error state\n"); | ||
352 | break; | ||
353 | case 3: /* clock in stopped or not-operational state: FIXME */ | ||
354 | printk("time_init: TOD clock stopped/non-operational\n"); | ||
355 | break; | ||
356 | } | ||
357 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; | ||
358 | |||
359 | /* set xtime */ | ||
360 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; | ||
361 | set_time_cc = init_timer_cc - 0x8126d60e46000000LL + | ||
362 | (0x3c26700LL*1000000*4096); | ||
363 | tod_to_timeval(set_time_cc, &xtime); | ||
364 | set_normalized_timespec(&wall_to_monotonic, | ||
365 | -xtime.tv_sec, -xtime.tv_nsec); | ||
366 | |||
367 | /* request the clock comparator external interrupt */ | ||
368 | if (register_early_external_interrupt(0x1004, 0, | ||
369 | &ext_int_info_cc) != 0) | ||
370 | panic("Couldn't request external interrupt 0x1004"); | ||
371 | |||
372 | init_cpu_timer(); | ||
373 | |||
374 | #ifdef CONFIG_NO_IDLE_HZ | ||
375 | nohz_init(); | ||
376 | #endif | ||
377 | |||
378 | #ifdef CONFIG_VIRT_TIMER | ||
379 | vtime_init(); | ||
380 | #endif | ||
381 | } | ||
382 | |||
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c new file mode 100644 index 000000000000..8b90e9528b91 --- /dev/null +++ b/arch/s390/kernel/traps.c | |||
@@ -0,0 +1,738 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/traps.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
8 | * | ||
9 | * Derived from "arch/i386/kernel/traps.c" | ||
10 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * 'Traps.c' handles hardware traps and faults after we have saved some | ||
15 | * state in 'entry.S'. | ||
16 | */ | ||
17 | #include <linux/config.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/timer.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/kallsyms.h> | ||
32 | |||
33 | #include <asm/system.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/atomic.h> | ||
37 | #include <asm/mathemu.h> | ||
38 | #include <asm/cpcmd.h> | ||
39 | #include <asm/s390_ext.h> | ||
40 | #include <asm/lowcore.h> | ||
41 | #include <asm/debug.h> | ||
42 | |||
43 | /* Called from entry.S only */ | ||
44 | extern void handle_per_exception(struct pt_regs *regs); | ||
45 | |||
46 | typedef void pgm_check_handler_t(struct pt_regs *, long); | ||
47 | pgm_check_handler_t *pgm_check_table[128]; | ||
48 | |||
49 | #ifdef CONFIG_SYSCTL | ||
50 | #ifdef CONFIG_PROCESS_DEBUG | ||
51 | int sysctl_userprocess_debug = 1; | ||
52 | #else | ||
53 | int sysctl_userprocess_debug = 0; | ||
54 | #endif | ||
55 | #endif | ||
56 | |||
57 | extern pgm_check_handler_t do_protection_exception; | ||
58 | extern pgm_check_handler_t do_dat_exception; | ||
59 | extern pgm_check_handler_t do_pseudo_page_fault; | ||
60 | #ifdef CONFIG_PFAULT | ||
61 | extern int pfault_init(void); | ||
62 | extern void pfault_fini(void); | ||
63 | extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code); | ||
64 | static ext_int_info_t ext_int_pfault; | ||
65 | #endif | ||
66 | extern pgm_check_handler_t do_monitor_call; | ||
67 | |||
68 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | ||
69 | |||
70 | #ifndef CONFIG_ARCH_S390X | ||
71 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | ||
72 | static int kstack_depth_to_print = 12; | ||
73 | #else /* CONFIG_ARCH_S390X */ | ||
74 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | ||
75 | static int kstack_depth_to_print = 20; | ||
76 | #endif /* CONFIG_ARCH_S390X */ | ||
77 | |||
78 | /* | ||
79 | * For show_trace we have three different stacks to consider: | ||
80 | * - the panic stack which is used if the kernel stack has overflowed | ||
81 | * - the asynchronous interrupt stack (cpu related) | ||
82 | * - the synchronous kernel stack (process related) | ||
83 | * The stack trace can start at any of the three stacks and can potentially | ||
84 | * touch all of them. The order is: panic stack, async stack, sync stack. | ||
85 | */ | ||
86 | static unsigned long | ||
87 | __show_trace(unsigned long sp, unsigned long low, unsigned long high) | ||
88 | { | ||
89 | struct stack_frame *sf; | ||
90 | struct pt_regs *regs; | ||
91 | |||
92 | while (1) { | ||
93 | sp = sp & PSW_ADDR_INSN; | ||
94 | if (sp < low || sp > high - sizeof(*sf)) | ||
95 | return sp; | ||
96 | sf = (struct stack_frame *) sp; | ||
97 | printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
98 | print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
99 | /* Follow the backchain. */ | ||
100 | while (1) { | ||
101 | low = sp; | ||
102 | sp = sf->back_chain & PSW_ADDR_INSN; | ||
103 | if (!sp) | ||
104 | break; | ||
105 | if (sp <= low || sp > high - sizeof(*sf)) | ||
106 | return sp; | ||
107 | sf = (struct stack_frame *) sp; | ||
108 | printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
109 | print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
110 | } | ||
111 | /* Zero backchain detected, check for interrupt frame. */ | ||
112 | sp = (unsigned long) (sf + 1); | ||
113 | if (sp <= low || sp > high - sizeof(*regs)) | ||
114 | return sp; | ||
115 | regs = (struct pt_regs *) sp; | ||
116 | printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); | ||
117 | print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); | ||
118 | low = sp; | ||
119 | sp = regs->gprs[15]; | ||
120 | } | ||
121 | } | ||
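For orientation, a hedged note on the frame layout __show_trace relies on; the register assignment is an assumption based on the standard s390 calling convention, not something stated in this file:

    /*
     * Each s390 stack frame starts with a back chain word pointing at the
     * caller's frame; a zero back chain ends the chain and makes __show_trace
     * check for an interrupt frame instead.  In struct stack_frame the saved
     * registers r6-r15 occupy gprs[0..9], so gprs[8] is the saved r14, the
     * caller's return address, which is what gets printed for every frame.
     */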
122 | |||
123 | void show_trace(struct task_struct *task, unsigned long * stack) | ||
124 | { | ||
125 | register unsigned long __r15 asm ("15"); | ||
126 | unsigned long sp; | ||
127 | |||
128 | sp = (unsigned long) stack; | ||
129 | if (!sp) | ||
130 | sp = task ? task->thread.ksp : __r15; | ||
131 | printk("Call Trace:\n"); | ||
132 | #ifdef CONFIG_CHECK_STACK | ||
133 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, | ||
134 | S390_lowcore.panic_stack); | ||
135 | #endif | ||
136 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, | ||
137 | S390_lowcore.async_stack); | ||
138 | if (task) | ||
139 | __show_trace(sp, (unsigned long) task->thread_info, | ||
140 | (unsigned long) task->thread_info + THREAD_SIZE); | ||
141 | else | ||
142 | __show_trace(sp, S390_lowcore.thread_info, | ||
143 | S390_lowcore.thread_info + THREAD_SIZE); | ||
144 | printk("\n"); | ||
145 | } | ||
146 | |||
147 | void show_stack(struct task_struct *task, unsigned long *sp) | ||
148 | { | ||
149 | register unsigned long * __r15 asm ("15"); | ||
150 | unsigned long *stack; | ||
151 | int i; | ||
152 | |||
153 | // debugging aid: "show_stack(NULL, NULL);" prints the | ||
154 | // back trace for this cpu. | ||
155 | |||
156 | if (!sp) | ||
157 | sp = task ? (unsigned long *) task->thread.ksp : __r15; | ||
158 | |||
159 | stack = sp; | ||
160 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
161 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | ||
162 | break; | ||
163 | if (i && ((i * sizeof (long) % 32) == 0)) | ||
164 | printk("\n "); | ||
165 | printk("%p ", (void *)*stack++); | ||
166 | } | ||
167 | printk("\n"); | ||
168 | show_trace(task, sp); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * The architecture-independent dump_stack generator | ||
173 | */ | ||
174 | void dump_stack(void) | ||
175 | { | ||
176 | show_stack(NULL, NULL); | ||
177 | } | ||
178 | |||
179 | EXPORT_SYMBOL(dump_stack); | ||
180 | |||
181 | void show_registers(struct pt_regs *regs) | ||
182 | { | ||
183 | mm_segment_t old_fs; | ||
184 | char *mode; | ||
185 | int i; | ||
186 | |||
187 | mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; | ||
188 | printk("%s PSW : %p %p", | ||
189 | mode, (void *) regs->psw.mask, | ||
190 | (void *) regs->psw.addr); | ||
191 | print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); | ||
192 | printk("%s GPRS: " FOURLONG, mode, | ||
193 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | ||
194 | printk(" " FOURLONG, | ||
195 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | ||
196 | printk(" " FOURLONG, | ||
197 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | ||
198 | printk(" " FOURLONG, | ||
199 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | ||
200 | |||
201 | #if 0 | ||
202 | /* FIXME: this isn't needed any more but it changes the ksymoops | ||
203 | * input. To remove or not to remove ... */ | ||
204 | save_access_regs(regs->acrs); | ||
205 | printk("%s ACRS: %08x %08x %08x %08x\n", mode, | ||
206 | regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); | ||
207 | printk(" %08x %08x %08x %08x\n", | ||
208 | regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); | ||
209 | printk(" %08x %08x %08x %08x\n", | ||
210 | regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); | ||
211 | printk(" %08x %08x %08x %08x\n", | ||
212 | regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * Print the first 20 bytes of the instruction stream at the | ||
217 | * time of the fault. | ||
218 | */ | ||
219 | old_fs = get_fs(); | ||
220 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
221 | set_fs(USER_DS); | ||
222 | else | ||
223 | set_fs(KERNEL_DS); | ||
224 | printk("%s Code: ", mode); | ||
225 | for (i = 0; i < 20; i++) { | ||
226 | unsigned char c; | ||
227 | if (__get_user(c, (char __user *)(regs->psw.addr + i))) { | ||
228 | printk(" Bad PSW."); | ||
229 | break; | ||
230 | } | ||
231 | printk("%02x ", c); | ||
232 | } | ||
233 | set_fs(old_fs); | ||
234 | |||
235 | printk("\n"); | ||
236 | } | ||
237 | |||
238 | /* This is called from fs/proc/array.c */ | ||
239 | char *task_show_regs(struct task_struct *task, char *buffer) | ||
240 | { | ||
241 | struct pt_regs *regs; | ||
242 | |||
243 | regs = __KSTK_PTREGS(task); | ||
244 | buffer += sprintf(buffer, "task: %p, ksp: %p\n", | ||
245 | task, (void *)task->thread.ksp); | ||
246 | buffer += sprintf(buffer, "User PSW : %p %p\n", | ||
247 | (void *) regs->psw.mask, (void *)regs->psw.addr); | ||
248 | |||
249 | buffer += sprintf(buffer, "User GPRS: " FOURLONG, | ||
250 | regs->gprs[0], regs->gprs[1], | ||
251 | regs->gprs[2], regs->gprs[3]); | ||
252 | buffer += sprintf(buffer, " " FOURLONG, | ||
253 | regs->gprs[4], regs->gprs[5], | ||
254 | regs->gprs[6], regs->gprs[7]); | ||
255 | buffer += sprintf(buffer, " " FOURLONG, | ||
256 | regs->gprs[8], regs->gprs[9], | ||
257 | regs->gprs[10], regs->gprs[11]); | ||
258 | buffer += sprintf(buffer, " " FOURLONG, | ||
259 | regs->gprs[12], regs->gprs[13], | ||
260 | regs->gprs[14], regs->gprs[15]); | ||
261 | buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n", | ||
262 | task->thread.acrs[0], task->thread.acrs[1], | ||
263 | task->thread.acrs[2], task->thread.acrs[3]); | ||
264 | buffer += sprintf(buffer, " %08x %08x %08x %08x\n", | ||
265 | task->thread.acrs[4], task->thread.acrs[5], | ||
266 | task->thread.acrs[6], task->thread.acrs[7]); | ||
267 | buffer += sprintf(buffer, " %08x %08x %08x %08x\n", | ||
268 | task->thread.acrs[8], task->thread.acrs[9], | ||
269 | task->thread.acrs[10], task->thread.acrs[11]); | ||
270 | buffer += sprintf(buffer, " %08x %08x %08x %08x\n", | ||
271 | task->thread.acrs[12], task->thread.acrs[13], | ||
272 | task->thread.acrs[14], task->thread.acrs[15]); | ||
273 | return buffer; | ||
274 | } | ||
275 | |||
276 | DEFINE_SPINLOCK(die_lock); | ||
277 | |||
278 | void die(const char * str, struct pt_regs * regs, long err) | ||
279 | { | ||
280 | static int die_counter; | ||
281 | |||
282 | debug_stop_all(); | ||
283 | console_verbose(); | ||
284 | spin_lock_irq(&die_lock); | ||
285 | bust_spinlocks(1); | ||
286 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | ||
287 | show_regs(regs); | ||
288 | bust_spinlocks(0); | ||
289 | spin_unlock_irq(&die_lock); | ||
290 | if (in_interrupt()) | ||
291 | panic("Fatal exception in interrupt"); | ||
292 | if (panic_on_oops) | ||
293 | panic("Fatal exception: panic_on_oops"); | ||
294 | do_exit(SIGSEGV); | ||
295 | } | ||
296 | |||
297 | static inline void | ||
298 | report_user_fault(long interruption_code, struct pt_regs *regs) | ||
299 | { | ||
300 | #if defined(CONFIG_SYSCTL) | ||
301 | if (!sysctl_userprocess_debug) | ||
302 | return; | ||
303 | #endif | ||
304 | #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) | ||
305 | printk("User process fault: interruption code 0x%lX\n", | ||
306 | interruption_code); | ||
307 | show_regs(regs); | ||
308 | #endif | ||
309 | } | ||
310 | |||
311 | static inline void do_trap(long interruption_code, int signr, char *str, | ||
312 | struct pt_regs *regs, siginfo_t *info) | ||
313 | { | ||
314 | /* | ||
315 | * We got all needed information from the lowcore and can | ||
316 | * now safely switch on interrupts. | ||
317 | */ | ||
318 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
319 | local_irq_enable(); | ||
320 | |||
321 | if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
322 | struct task_struct *tsk = current; | ||
323 | |||
324 | tsk->thread.trap_no = interruption_code & 0xffff; | ||
325 | force_sig_info(signr, info, tsk); | ||
326 | report_user_fault(interruption_code, regs); | ||
327 | } else { | ||
328 | const struct exception_table_entry *fixup; | ||
329 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | ||
330 | if (fixup) | ||
331 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; | ||
332 | else | ||
333 | die(str, regs, interruption_code); | ||
334 | } | ||
335 | } | ||
336 | |||
337 | static inline void *get_check_address(struct pt_regs *regs) | ||
338 | { | ||
339 | return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); | ||
340 | } | ||
341 | |||
342 | void do_single_step(struct pt_regs *regs) | ||
343 | { | ||
344 | if ((current->ptrace & PT_PTRACED) != 0) | ||
345 | force_sig(SIGTRAP, current); | ||
346 | } | ||
347 | |||
348 | asmlinkage void | ||
349 | default_trap_handler(struct pt_regs * regs, long interruption_code) | ||
350 | { | ||
351 | if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
352 | local_irq_enable(); | ||
353 | report_user_fault(interruption_code, regs); | ||
354 | do_exit(SIGSEGV); | ||
355 | } else | ||
356 | die("Unknown program exception", regs, interruption_code); | ||
357 | } | ||
358 | |||
359 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ | ||
360 | asmlinkage void name(struct pt_regs * regs, long interruption_code) \ | ||
361 | { \ | ||
362 | siginfo_t info; \ | ||
363 | info.si_signo = signr; \ | ||
364 | info.si_errno = 0; \ | ||
365 | info.si_code = sicode; \ | ||
366 | info.si_addr = (void *)siaddr; \ | ||
367 | do_trap(interruption_code, signr, str, regs, &info); \ | ||
368 | } | ||
369 | |||
370 | DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, | ||
371 | ILL_ILLADR, get_check_address(regs)) | ||
372 | DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, | ||
373 | ILL_ILLOPN, get_check_address(regs)) | ||
374 | DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, | ||
375 | FPE_INTDIV, get_check_address(regs)) | ||
376 | DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, | ||
377 | FPE_INTOVF, get_check_address(regs)) | ||
378 | DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, | ||
379 | FPE_FLTOVF, get_check_address(regs)) | ||
380 | DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, | ||
381 | FPE_FLTUND, get_check_address(regs)) | ||
382 | DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, | ||
383 | FPE_FLTRES, get_check_address(regs)) | ||
384 | DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, | ||
385 | FPE_FLTDIV, get_check_address(regs)) | ||
386 | DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, | ||
387 | FPE_FLTINV, get_check_address(regs)) | ||
388 | DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, | ||
389 | ILL_ILLOPN, get_check_address(regs)) | ||
390 | DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, | ||
391 | ILL_PRVOPC, get_check_address(regs)) | ||
392 | DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, | ||
393 | ILL_ILLOPN, get_check_address(regs)) | ||
394 | DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, | ||
395 | ILL_ILLOPN, get_check_address(regs)) | ||
396 | |||
397 | static inline void | ||
398 | do_fp_trap(struct pt_regs *regs, void *location, | ||
399 | int fpc, long interruption_code) | ||
400 | { | ||
401 | siginfo_t si; | ||
402 | |||
403 | si.si_signo = SIGFPE; | ||
404 | si.si_errno = 0; | ||
405 | si.si_addr = location; | ||
406 | si.si_code = 0; | ||
407 | /* FPC[2] is Data Exception Code */ | ||
408 | if ((fpc & 0x00000300) == 0) { | ||
409 | /* bits 6 and 7 of DXC are 0 iff IEEE exception */ | ||
410 | if (fpc & 0x8000) /* invalid fp operation */ | ||
411 | si.si_code = FPE_FLTINV; | ||
412 | else if (fpc & 0x4000) /* div by 0 */ | ||
413 | si.si_code = FPE_FLTDIV; | ||
414 | else if (fpc & 0x2000) /* overflow */ | ||
415 | si.si_code = FPE_FLTOVF; | ||
416 | else if (fpc & 0x1000) /* underflow */ | ||
417 | si.si_code = FPE_FLTUND; | ||
418 | else if (fpc & 0x0800) /* inexact */ | ||
419 | si.si_code = FPE_FLTRES; | ||
420 | } | ||
421 | current->thread.ieee_instruction_pointer = (addr_t) location; | ||
422 | do_trap(interruption_code, SIGFPE, | ||
423 | "floating point exception", regs, &si); | ||
424 | } | ||
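As a hedged worked example of the DXC decoding above, following the bit assignments in the function rather than restating the architecture:

    /*
     * Example: an IEEE divide-by-zero sets DXC = 0x40.  Since the DXC is byte
     * 2 of the FPC register, the handler sees fpc = 0x00004000:
     *
     *   (fpc & 0x00000300) == 0   ->  bits 6/7 of the DXC are zero: IEEE class
     *   fpc & 0x4000              ->  maps to si_code = FPE_FLTDIV
     *
     * An invalid operation (DXC 0x80) shows up as fpc & 0x8000 and maps to
     * FPE_FLTINV in the same way.
     */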
425 | |||
426 | asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | ||
427 | { | ||
428 | siginfo_t info; | ||
429 | __u8 opcode[6]; | ||
430 | __u16 *location; | ||
431 | int signal = 0; | ||
432 | |||
433 | location = (__u16 *) get_check_address(regs); | ||
434 | |||
435 | /* | ||
436 | * We got all needed information from the lowcore and can | ||
437 | * now safely switch on interrupts. | ||
438 | */ | ||
439 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
440 | local_irq_enable(); | ||
441 | |||
442 | if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
443 | get_user(*((__u16 *) opcode), (__u16 __user *) location); | ||
444 | if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { | ||
445 | if (current->ptrace & PT_PTRACED) | ||
446 | force_sig(SIGTRAP, current); | ||
447 | else | ||
448 | signal = SIGILL; | ||
449 | #ifdef CONFIG_MATHEMU | ||
450 | } else if (opcode[0] == 0xb3) { | ||
451 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
452 | signal = math_emu_b3(opcode, regs); | ||
453 | } else if (opcode[0] == 0xed) { | ||
454 | get_user(*((__u32 *) (opcode+2)), | ||
455 | (__u32 *)(location+1)); | ||
456 | signal = math_emu_ed(opcode, regs); | ||
457 | } else if (*((__u16 *) opcode) == 0xb299) { | ||
458 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
459 | signal = math_emu_srnm(opcode, regs); | ||
460 | } else if (*((__u16 *) opcode) == 0xb29c) { | ||
461 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
462 | signal = math_emu_stfpc(opcode, regs); | ||
463 | } else if (*((__u16 *) opcode) == 0xb29d) { | ||
464 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
465 | signal = math_emu_lfpc(opcode, regs); | ||
466 | #endif | ||
467 | } else | ||
468 | signal = SIGILL; | ||
469 | } else | ||
470 | signal = SIGILL; | ||
471 | |||
472 | #ifdef CONFIG_MATHEMU | ||
473 | if (signal == SIGFPE) | ||
474 | do_fp_trap(regs, location, | ||
475 | current->thread.fp_regs.fpc, interruption_code); | ||
476 | else if (signal == SIGSEGV) { | ||
477 | info.si_signo = signal; | ||
478 | info.si_errno = 0; | ||
479 | info.si_code = SEGV_MAPERR; | ||
480 | info.si_addr = (void *) location; | ||
481 | do_trap(interruption_code, signal, | ||
482 | "user address fault", regs, &info); | ||
483 | } else | ||
484 | #endif | ||
485 | if (signal) { | ||
486 | info.si_signo = signal; | ||
487 | info.si_errno = 0; | ||
488 | info.si_code = ILL_ILLOPC; | ||
489 | info.si_addr = (void *) location; | ||
490 | do_trap(interruption_code, signal, | ||
491 | "illegal operation", regs, &info); | ||
492 | } | ||
493 | } | ||
494 | |||
495 | |||
496 | #ifdef CONFIG_MATHEMU | ||
497 | asmlinkage void | ||
498 | specification_exception(struct pt_regs * regs, long interruption_code) | ||
499 | { | ||
500 | __u8 opcode[6]; | ||
501 | __u16 *location = NULL; | ||
502 | int signal = 0; | ||
503 | |||
504 | location = (__u16 *) get_check_address(regs); | ||
505 | |||
506 | /* | ||
507 | * We got all needed information from the lowcore and can | ||
508 | * now safely switch on interrupts. | ||
509 | */ | ||
510 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
511 | local_irq_enable(); | ||
512 | |||
513 | if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
514 | get_user(*((__u16 *) opcode), location); | ||
515 | switch (opcode[0]) { | ||
516 | case 0x28: /* LDR Rx,Ry */ | ||
517 | signal = math_emu_ldr(opcode); | ||
518 | break; | ||
519 | case 0x38: /* LER Rx,Ry */ | ||
520 | signal = math_emu_ler(opcode); | ||
521 | break; | ||
522 | case 0x60: /* STD R,D(X,B) */ | ||
523 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
524 | signal = math_emu_std(opcode, regs); | ||
525 | break; | ||
526 | case 0x68: /* LD R,D(X,B) */ | ||
527 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
528 | signal = math_emu_ld(opcode, regs); | ||
529 | break; | ||
530 | case 0x70: /* STE R,D(X,B) */ | ||
531 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
532 | signal = math_emu_ste(opcode, regs); | ||
533 | break; | ||
534 | case 0x78: /* LE R,D(X,B) */ | ||
535 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
536 | signal = math_emu_le(opcode, regs); | ||
537 | break; | ||
538 | default: | ||
539 | signal = SIGILL; | ||
540 | break; | ||
541 | } | ||
542 | } else | ||
543 | signal = SIGILL; | ||
544 | |||
545 | if (signal == SIGFPE) | ||
546 | do_fp_trap(regs, location, | ||
547 | current->thread.fp_regs.fpc, interruption_code); | ||
548 | else if (signal) { | ||
549 | siginfo_t info; | ||
550 | info.si_signo = signal; | ||
551 | info.si_errno = 0; | ||
552 | info.si_code = ILL_ILLOPN; | ||
553 | info.si_addr = location; | ||
554 | do_trap(interruption_code, signal, | ||
555 | "specification exception", regs, &info); | ||
556 | } | ||
557 | } | ||
558 | #else | ||
559 | DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, | ||
560 | ILL_ILLOPN, get_check_address(regs)); | ||
561 | #endif | ||
562 | |||
563 | asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) | ||
564 | { | ||
565 | __u16 *location; | ||
566 | int signal = 0; | ||
567 | |||
568 | location = (__u16 *) get_check_address(regs); | ||
569 | |||
570 | /* | ||
571 | * We got all needed information from the lowcore and can | ||
572 | * now safely switch on interrupts. | ||
573 | */ | ||
574 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
575 | local_irq_enable(); | ||
576 | |||
577 | if (MACHINE_HAS_IEEE) | ||
578 | __asm__ volatile ("stfpc %0\n\t" | ||
579 | : "=m" (current->thread.fp_regs.fpc)); | ||
580 | |||
581 | #ifdef CONFIG_MATHEMU | ||
582 | else if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
583 | __u8 opcode[6]; | ||
584 | get_user(*((__u16 *) opcode), location); | ||
585 | switch (opcode[0]) { | ||
586 | case 0x28: /* LDR Rx,Ry */ | ||
587 | signal = math_emu_ldr(opcode); | ||
588 | break; | ||
589 | case 0x38: /* LER Rx,Ry */ | ||
590 | signal = math_emu_ler(opcode); | ||
591 | break; | ||
592 | case 0x60: /* STD R,D(X,B) */ | ||
593 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
594 | signal = math_emu_std(opcode, regs); | ||
595 | break; | ||
596 | case 0x68: /* LD R,D(X,B) */ | ||
597 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
598 | signal = math_emu_ld(opcode, regs); | ||
599 | break; | ||
600 | case 0x70: /* STE R,D(X,B) */ | ||
601 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
602 | signal = math_emu_ste(opcode, regs); | ||
603 | break; | ||
604 | case 0x78: /* LE R,D(X,B) */ | ||
605 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
606 | signal = math_emu_le(opcode, regs); | ||
607 | break; | ||
608 | case 0xb3: | ||
609 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
610 | signal = math_emu_b3(opcode, regs); | ||
611 | break; | ||
612 | case 0xed: | ||
613 | get_user(*((__u32 *) (opcode+2)), | ||
614 | (__u32 *)(location+1)); | ||
615 | signal = math_emu_ed(opcode, regs); | ||
616 | break; | ||
617 | case 0xb2: | ||
618 | if (opcode[1] == 0x99) { | ||
619 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
620 | signal = math_emu_srnm(opcode, regs); | ||
621 | } else if (opcode[1] == 0x9c) { | ||
622 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
623 | signal = math_emu_stfpc(opcode, regs); | ||
624 | } else if (opcode[1] == 0x9d) { | ||
625 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
626 | signal = math_emu_lfpc(opcode, regs); | ||
627 | } else | ||
628 | signal = SIGILL; | ||
629 | break; | ||
630 | default: | ||
631 | signal = SIGILL; | ||
632 | break; | ||
633 | } | ||
634 | } | ||
635 | #endif | ||
636 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) | ||
637 | signal = SIGFPE; | ||
638 | else | ||
639 | signal = SIGILL; | ||
640 | if (signal == SIGFPE) | ||
641 | do_fp_trap(regs, location, | ||
642 | current->thread.fp_regs.fpc, interruption_code); | ||
643 | else if (signal) { | ||
644 | siginfo_t info; | ||
645 | info.si_signo = signal; | ||
646 | info.si_errno = 0; | ||
647 | info.si_code = ILL_ILLOPN; | ||
648 | info.si_addr = location; | ||
649 | do_trap(interruption_code, signal, | ||
650 | "data exception", regs, &info); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) | ||
655 | { | ||
656 | siginfo_t info; | ||
657 | |||
658 | /* Set user psw back to home space mode. */ | ||
659 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
660 | regs->psw.mask |= PSW_ASC_HOME; | ||
661 | /* Send SIGILL. */ | ||
662 | info.si_signo = SIGILL; | ||
663 | info.si_errno = 0; | ||
664 | info.si_code = ILL_PRVOPC; | ||
665 | info.si_addr = get_check_address(regs); | ||
666 | do_trap(int_code, SIGILL, "space switch event", regs, &info); | ||
667 | } | ||
668 | |||
669 | asmlinkage void kernel_stack_overflow(struct pt_regs * regs) | ||
670 | { | ||
671 | die("Kernel stack overflow", regs, 0); | ||
672 | panic("Corrupt kernel stack, can't continue."); | ||
673 | } | ||
674 | |||
675 | |||
676 | /* init is done in lowcore.S and head.S */ | ||
677 | |||
678 | void __init trap_init(void) | ||
679 | { | ||
680 | int i; | ||
681 | |||
682 | for (i = 0; i < 128; i++) | ||
683 | pgm_check_table[i] = &default_trap_handler; | ||
684 | pgm_check_table[1] = &illegal_op; | ||
685 | pgm_check_table[2] = &privileged_op; | ||
686 | pgm_check_table[3] = &execute_exception; | ||
687 | pgm_check_table[4] = &do_protection_exception; | ||
688 | pgm_check_table[5] = &addressing_exception; | ||
689 | pgm_check_table[6] = &specification_exception; | ||
690 | pgm_check_table[7] = &data_exception; | ||
691 | pgm_check_table[8] = &overflow_exception; | ||
692 | 	pgm_check_table[9] = &divide_exception; | ||
693 | pgm_check_table[0x0A] = &overflow_exception; | ||
694 | 	pgm_check_table[0x0B] = &divide_exception; | ||
695 | pgm_check_table[0x0C] = &hfp_overflow_exception; | ||
696 | pgm_check_table[0x0D] = &hfp_underflow_exception; | ||
697 | pgm_check_table[0x0E] = &hfp_significance_exception; | ||
698 | pgm_check_table[0x0F] = &hfp_divide_exception; | ||
699 | pgm_check_table[0x10] = &do_dat_exception; | ||
700 | pgm_check_table[0x11] = &do_dat_exception; | ||
701 | pgm_check_table[0x12] = &translation_exception; | ||
702 | pgm_check_table[0x13] = &special_op_exception; | ||
703 | #ifndef CONFIG_ARCH_S390X | ||
704 | pgm_check_table[0x14] = &do_pseudo_page_fault; | ||
705 | #else /* CONFIG_ARCH_S390X */ | ||
706 | pgm_check_table[0x38] = &do_dat_exception; | ||
707 | pgm_check_table[0x39] = &do_dat_exception; | ||
708 | pgm_check_table[0x3A] = &do_dat_exception; | ||
709 | pgm_check_table[0x3B] = &do_dat_exception; | ||
710 | #endif /* CONFIG_ARCH_S390X */ | ||
711 | pgm_check_table[0x15] = &operand_exception; | ||
712 | pgm_check_table[0x1C] = &space_switch_exception; | ||
713 | pgm_check_table[0x1D] = &hfp_sqrt_exception; | ||
714 | pgm_check_table[0x40] = &do_monitor_call; | ||
715 | |||
716 | if (MACHINE_IS_VM) { | ||
717 | /* | ||
718 | * First try to get pfault pseudo page faults going. | ||
719 | * If this isn't available turn on pagex page faults. | ||
720 | */ | ||
721 | #ifdef CONFIG_PFAULT | ||
722 | /* request the 0x2603 external interrupt */ | ||
723 | if (register_early_external_interrupt(0x2603, pfault_interrupt, | ||
724 | &ext_int_pfault) != 0) | ||
725 | panic("Couldn't request external interrupt 0x2603"); | ||
726 | |||
727 | if (pfault_init() == 0) | ||
728 | return; | ||
729 | |||
730 | /* Tough luck, no pfault. */ | ||
731 | unregister_early_external_interrupt(0x2603, pfault_interrupt, | ||
732 | &ext_int_pfault); | ||
733 | #endif | ||
734 | #ifndef CONFIG_ARCH_S390X | ||
735 | cpcmd("SET PAGEX ON", NULL, 0); | ||
736 | #endif | ||
737 | } | ||
738 | } | ||
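For orientation, pgm_check_table is the 128-entry dispatch table filled in above, indexed by the low bits of the program-interruption code; the low-level program-check handler in entry.S ends up doing, in effect, something like the sketch below (a simplification for illustration, not the actual assembly dispatch):

        /* Conceptual C sketch of the program-check dispatch set up in
         * trap_init(). Only pgm_check_table is taken from this patch;
         * the surrounding names are illustrative. */
        typedef void (*pgm_check_handler_t)(struct pt_regs *, long);
        extern pgm_check_handler_t pgm_check_table[128];

        static void dispatch_program_check(struct pt_regs *regs, long interruption_code)
        {
                /* the low seven bits of the interruption code select the handler */
                pgm_check_table[interruption_code & 0x7f](regs, interruption_code);
        }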
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..89fdb3808bc0 --- /dev/null +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,130 @@ | |||
1 | /* ld script to make s390 Linux kernel | ||
2 | * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
3 | */ | ||
4 | |||
5 | #include <asm-generic/vmlinux.lds.h> | ||
6 | #include <linux/config.h> | ||
7 | |||
8 | #ifndef CONFIG_ARCH_S390X | ||
9 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
10 | OUTPUT_ARCH(s390) | ||
11 | ENTRY(_start) | ||
12 | jiffies = jiffies_64 + 4; | ||
13 | #else | ||
14 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | ||
15 | OUTPUT_ARCH(s390:64-bit) | ||
16 | ENTRY(_start) | ||
17 | jiffies = jiffies_64; | ||
18 | #endif | ||
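The 31-bit branch defines jiffies at jiffies_64 + 4 because s390 is big-endian: the low-order 32 bits of the 64-bit counter sit in the second word. A small host-side illustration of that layout (plain C, assuming it runs on a big-endian machine such as s390):

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        int main(void)
        {
                uint64_t jiffies_64 = 0x0000000112345678ULL;
                uint32_t low;

                /* On a big-endian system the low word lives at byte offset 4,
                 * which is why the 31-bit linker script aliases
                 * jiffies = jiffies_64 + 4. */
                memcpy(&low, (const char *)&jiffies_64 + 4, sizeof(low));
                printf("low word: 0x%08x\n", low);  /* 0x12345678 on big-endian */
                return 0;
        }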
19 | |||
20 | SECTIONS | ||
21 | { | ||
22 | . = 0x00000000; | ||
23 | _text = .; /* Text and read-only data */ | ||
24 | .text : { | ||
25 | *(.text) | ||
26 | SCHED_TEXT | ||
27 | LOCK_TEXT | ||
28 | *(.fixup) | ||
29 | *(.gnu.warning) | ||
30 | } = 0x0700 | ||
31 | |||
32 | _etext = .; /* End of text section */ | ||
33 | |||
34 | . = ALIGN(16); /* Exception table */ | ||
35 | __start___ex_table = .; | ||
36 | __ex_table : { *(__ex_table) } | ||
37 | __stop___ex_table = .; | ||
38 | |||
39 | RODATA | ||
40 | |||
41 | #ifdef CONFIG_SHARED_KERNEL | ||
42 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ | ||
43 | |||
44 | _eshared = .; /* End of shareable data */ | ||
45 | #endif | ||
46 | |||
47 | .data : { /* Data */ | ||
48 | *(.data) | ||
49 | CONSTRUCTORS | ||
50 | } | ||
51 | |||
52 | . = ALIGN(4096); | ||
53 | __nosave_begin = .; | ||
54 | .data_nosave : { *(.data.nosave) } | ||
55 | . = ALIGN(4096); | ||
56 | __nosave_end = .; | ||
57 | |||
58 | . = ALIGN(4096); | ||
59 | .data.page_aligned : { *(.data.idt) } | ||
60 | |||
61 | . = ALIGN(32); | ||
62 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | ||
63 | |||
64 | _edata = .; /* End of data section */ | ||
65 | |||
66 | . = ALIGN(8192); /* init_task */ | ||
67 | .data.init_task : { *(.data.init_task) } | ||
68 | |||
69 | /* will be freed after init */ | ||
70 | . = ALIGN(4096); /* Init code and data */ | ||
71 | __init_begin = .; | ||
72 | .init.text : { | ||
73 | _sinittext = .; | ||
74 | *(.init.text) | ||
75 | _einittext = .; | ||
76 | } | ||
77 | .init.data : { *(.init.data) } | ||
78 | . = ALIGN(256); | ||
79 | __setup_start = .; | ||
80 | .init.setup : { *(.init.setup) } | ||
81 | __setup_end = .; | ||
82 | __initcall_start = .; | ||
83 | .initcall.init : { | ||
84 | *(.initcall1.init) | ||
85 | *(.initcall2.init) | ||
86 | *(.initcall3.init) | ||
87 | *(.initcall4.init) | ||
88 | *(.initcall5.init) | ||
89 | *(.initcall6.init) | ||
90 | *(.initcall7.init) | ||
91 | } | ||
92 | __initcall_end = .; | ||
93 | __con_initcall_start = .; | ||
94 | .con_initcall.init : { *(.con_initcall.init) } | ||
95 | __con_initcall_end = .; | ||
96 | SECURITY_INIT | ||
97 | . = ALIGN(256); | ||
98 | __initramfs_start = .; | ||
99 | .init.ramfs : { *(.init.initramfs) } | ||
100 | . = ALIGN(2); | ||
101 | __initramfs_end = .; | ||
102 | . = ALIGN(256); | ||
103 | __per_cpu_start = .; | ||
104 | .data.percpu : { *(.data.percpu) } | ||
105 | __per_cpu_end = .; | ||
106 | . = ALIGN(4096); | ||
107 | __init_end = .; | ||
108 | /* freed after init ends here */ | ||
109 | |||
110 | __bss_start = .; /* BSS */ | ||
111 | .bss : { *(.bss) } | ||
112 | . = ALIGN(2); | ||
113 | __bss_stop = .; | ||
114 | |||
115 | _end = . ; | ||
116 | |||
117 | /* Sections to be discarded */ | ||
118 | /DISCARD/ : { | ||
119 | *(.exitcall.exit) | ||
120 | } | ||
121 | |||
122 | /* Stabs debugging sections. */ | ||
123 | .stab 0 : { *(.stab) } | ||
124 | .stabstr 0 : { *(.stabstr) } | ||
125 | .stab.excl 0 : { *(.stab.excl) } | ||
126 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
127 | .stab.index 0 : { *(.stab.index) } | ||
128 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
129 | .comment 0 : { *(.comment) } | ||
130 | } | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c new file mode 100644 index 000000000000..bb6cf02418a2 --- /dev/null +++ b/arch/s390/kernel/vtime.c | |||
@@ -0,0 +1,565 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/vtime.c | ||
3 | * Virtual cpu timer based timer functions. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/time.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/timex.h> | ||
19 | #include <linux/notifier.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/rcupdate.h> | ||
22 | #include <linux/posix-timers.h> | ||
23 | |||
24 | #include <asm/s390_ext.h> | ||
25 | #include <asm/timer.h> | ||
26 | |||
27 | #define VTIMER_MAGIC (TIMER_MAGIC + 1) | ||
28 | static ext_int_info_t ext_int_info_timer; | ||
29 | DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | ||
30 | |||
31 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
32 | /* | ||
33 | * Update process times based on virtual cpu times stored by entry.S | ||
34 | * to the lowcore fields user_timer, system_timer & steal_clock. | ||
35 | */ | ||
36 | void account_user_vtime(struct task_struct *tsk) | ||
37 | { | ||
38 | cputime_t cputime; | ||
39 | __u64 timer, clock; | ||
40 | int rcu_user_flag; | ||
41 | |||
42 | timer = S390_lowcore.last_update_timer; | ||
43 | clock = S390_lowcore.last_update_clock; | ||
44 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ | ||
45 | " STCK %1" /* Store current tod clock value */ | ||
46 | : "=m" (S390_lowcore.last_update_timer), | ||
47 | "=m" (S390_lowcore.last_update_clock) ); | ||
48 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | ||
49 | S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock; | ||
50 | |||
51 | cputime = S390_lowcore.user_timer >> 12; | ||
52 | rcu_user_flag = cputime != 0; | ||
53 | S390_lowcore.user_timer -= cputime << 12; | ||
54 | S390_lowcore.steal_clock -= cputime << 12; | ||
55 | account_user_time(tsk, cputime); | ||
56 | |||
57 | cputime = S390_lowcore.system_timer >> 12; | ||
58 | S390_lowcore.system_timer -= cputime << 12; | ||
59 | S390_lowcore.steal_clock -= cputime << 12; | ||
60 | account_system_time(tsk, HARDIRQ_OFFSET, cputime); | ||
61 | |||
62 | cputime = S390_lowcore.steal_clock; | ||
63 | if ((__s64) cputime > 0) { | ||
64 | cputime >>= 12; | ||
65 | S390_lowcore.steal_clock -= cputime << 12; | ||
66 | account_steal_time(tsk, cputime); | ||
67 | } | ||
68 | |||
69 | run_local_timers(); | ||
70 | if (rcu_pending(smp_processor_id())) | ||
71 | rcu_check_callbacks(smp_processor_id(), rcu_user_flag); | ||
72 | scheduler_tick(); | ||
73 | run_posix_cpu_timers(tsk); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Update process times based on virtual cpu times stored by entry.S | ||
78 | * to the lowcore fields user_timer, system_timer & steal_clock. | ||
79 | */ | ||
80 | void account_system_vtime(struct task_struct *tsk) | ||
81 | { | ||
82 | cputime_t cputime; | ||
83 | __u64 timer; | ||
84 | |||
85 | timer = S390_lowcore.last_update_timer; | ||
86 | asm volatile (" STPT %0" /* Store current cpu timer value */ | ||
87 | : "=m" (S390_lowcore.last_update_timer) ); | ||
88 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | ||
89 | |||
90 | cputime = S390_lowcore.system_timer >> 12; | ||
91 | S390_lowcore.system_timer -= cputime << 12; | ||
92 | S390_lowcore.steal_clock -= cputime << 12; | ||
93 | account_system_time(tsk, 0, cputime); | ||
94 | } | ||
95 | |||
96 | static inline void set_vtimer(__u64 expires) | ||
97 | { | ||
98 | __u64 timer; | ||
99 | |||
100 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ | ||
101 | 		      " SPT %1"      /* Set new value immediately afterwards */ | ||
102 | : "=m" (timer) : "m" (expires) ); | ||
103 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; | ||
104 | S390_lowcore.last_update_timer = expires; | ||
105 | |||
106 | /* store expire time for this CPU timer */ | ||
107 | per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; | ||
108 | } | ||
109 | #else | ||
110 | static inline void set_vtimer(__u64 expires) | ||
111 | { | ||
112 | S390_lowcore.last_update_timer = expires; | ||
113 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | ||
114 | |||
115 | /* store expire time for this CPU timer */ | ||
116 | per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | static void start_cpu_timer(void) | ||
121 | { | ||
122 | struct vtimer_queue *vt_list; | ||
123 | |||
124 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | ||
125 | set_vtimer(vt_list->idle); | ||
126 | } | ||
127 | |||
128 | static void stop_cpu_timer(void) | ||
129 | { | ||
130 | __u64 done; | ||
131 | struct vtimer_queue *vt_list; | ||
132 | |||
133 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | ||
134 | |||
135 | /* nothing to do */ | ||
136 | if (list_empty(&vt_list->list)) { | ||
137 | vt_list->idle = VTIMER_MAX_SLICE; | ||
138 | goto fire; | ||
139 | } | ||
140 | |||
141 | /* store progress */ | ||
142 | asm volatile ("STPT %0" : "=m" (done)); | ||
143 | |||
144 | /* | ||
145 | * If done is negative we do not stop the CPU timer | ||
146 | 	 * because we will instantly get an interrupt that | ||
147 | * will start the CPU timer again. | ||
148 | */ | ||
149 | if (done & 1LL<<63) | ||
150 | return; | ||
151 | else | ||
152 | vt_list->offset += vt_list->to_expire - done; | ||
153 | |||
154 | /* save the actual expire value */ | ||
155 | vt_list->idle = done; | ||
156 | |||
157 | /* | ||
158 | 	 * We cannot halt the CPU timer, so we just write a value that | ||
159 | * nearly never expires (only after 71 years) and re-write | ||
160 | * the stored expire value if we continue the timer | ||
161 | */ | ||
162 | fire: | ||
163 | set_vtimer(VTIMER_MAX_SLICE); | ||
164 | } | ||
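The "71 years" in the comment follows from the CPU-timer format: bit 51 corresponds to one microsecond (the same 4096-units-per-microsecond scaling used in lib/delay.c below), so a value on the order of 2^63 timer units is about 2^51 microseconds. A quick check of that arithmetic (plain C; the exact definition of VTIMER_MAX_SLICE is assumed to be of that magnitude):

        #include <stdio.h>

        int main(void)
        {
                double units = 9223372036854775808.0;  /* 2^63 CPU-timer units */
                double usecs = units / 4096.0;         /* bit 51 == 1 microsecond */
                double years = usecs / 1e6 / 3600.0 / 24.0 / 365.25;

                printf("%.1f years\n", years);         /* prints about 71.4 */
                return 0;
        }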
165 | |||
166 | /* | ||
167 | * Sorted add to a list. List is linear searched until first bigger | ||
168 | * element is found. | ||
169 | */ | ||
170 | static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) | ||
171 | { | ||
172 | struct vtimer_list *event; | ||
173 | |||
174 | list_for_each_entry(event, head, entry) { | ||
175 | if (event->expires > timer->expires) { | ||
176 | list_add_tail(&timer->entry, &event->entry); | ||
177 | return; | ||
178 | } | ||
179 | } | ||
180 | list_add_tail(&timer->entry, head); | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * Do the callback functions of expired vtimer events. | ||
185 | * Called from within the interrupt handler. | ||
186 | */ | ||
187 | static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) | ||
188 | { | ||
189 | struct vtimer_queue *vt_list; | ||
190 | struct vtimer_list *event, *tmp; | ||
191 | void (*fn)(unsigned long, struct pt_regs*); | ||
192 | unsigned long data; | ||
193 | |||
194 | if (list_empty(cb_list)) | ||
195 | return; | ||
196 | |||
197 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | ||
198 | |||
199 | list_for_each_entry_safe(event, tmp, cb_list, entry) { | ||
200 | fn = event->function; | ||
201 | data = event->data; | ||
202 | fn(data, regs); | ||
203 | |||
204 | if (!event->interval) | ||
205 | /* delete one shot timer */ | ||
206 | list_del_init(&event->entry); | ||
207 | else { | ||
208 | /* move interval timer back to list */ | ||
209 | spin_lock(&vt_list->lock); | ||
210 | list_del_init(&event->entry); | ||
211 | list_add_sorted(event, &vt_list->list); | ||
212 | spin_unlock(&vt_list->lock); | ||
213 | } | ||
214 | } | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Handler for the virtual CPU timer. | ||
219 | */ | ||
220 | static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) | ||
221 | { | ||
222 | int cpu; | ||
223 | __u64 next, delta; | ||
224 | struct vtimer_queue *vt_list; | ||
225 | struct vtimer_list *event, *tmp; | ||
226 | struct list_head *ptr; | ||
227 | /* the callback queue */ | ||
228 | struct list_head cb_list; | ||
229 | |||
230 | INIT_LIST_HEAD(&cb_list); | ||
231 | cpu = smp_processor_id(); | ||
232 | vt_list = &per_cpu(virt_cpu_timer, cpu); | ||
233 | |||
234 | /* walk timer list, fire all expired events */ | ||
235 | spin_lock(&vt_list->lock); | ||
236 | |||
237 | if (vt_list->to_expire < VTIMER_MAX_SLICE) | ||
238 | vt_list->offset += vt_list->to_expire; | ||
239 | |||
240 | list_for_each_entry_safe(event, tmp, &vt_list->list, entry) { | ||
241 | if (event->expires > vt_list->offset) | ||
242 | /* found first unexpired event, leave */ | ||
243 | break; | ||
244 | |||
245 | /* re-charge interval timer, we have to add the offset */ | ||
246 | if (event->interval) | ||
247 | event->expires = event->interval + vt_list->offset; | ||
248 | |||
249 | /* move expired timer to the callback queue */ | ||
250 | list_move_tail(&event->entry, &cb_list); | ||
251 | } | ||
252 | spin_unlock(&vt_list->lock); | ||
253 | do_callbacks(&cb_list, regs); | ||
254 | |||
255 | /* next event is first in list */ | ||
256 | spin_lock(&vt_list->lock); | ||
257 | if (!list_empty(&vt_list->list)) { | ||
258 | ptr = vt_list->list.next; | ||
259 | event = list_entry(ptr, struct vtimer_list, entry); | ||
260 | next = event->expires - vt_list->offset; | ||
261 | |||
262 | /* add the expired time from this interrupt handler | ||
263 | * and the callback functions | ||
264 | */ | ||
265 | asm volatile ("STPT %0" : "=m" (delta)); | ||
266 | delta = 0xffffffffffffffffLL - delta + 1; | ||
267 | vt_list->offset += delta; | ||
268 | next -= delta; | ||
269 | } else { | ||
270 | vt_list->offset = 0; | ||
271 | next = VTIMER_MAX_SLICE; | ||
272 | } | ||
273 | spin_unlock(&vt_list->lock); | ||
274 | set_vtimer(next); | ||
275 | } | ||
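The delta computation above works because the CPU timer counts down and keeps running past zero, so the value STPT stores after expiry is the negative of the time elapsed since then; adding one to the bitwise complement is just two's-complement negation. A minimal illustration (plain C, not kernel code):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* Pretend the timer has run 5000 units past zero; STPT would
                 * then store the two's-complement representation of -5000. */
                uint64_t stpt = (uint64_t)-5000;
                uint64_t elapsed = 0xffffffffffffffffULL - stpt + 1;  /* == -stpt */

                printf("elapsed: %llu units\n", (unsigned long long)elapsed); /* 5000 */
                return 0;
        }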
276 | |||
277 | void init_virt_timer(struct vtimer_list *timer) | ||
278 | { | ||
279 | timer->magic = VTIMER_MAGIC; | ||
280 | timer->function = NULL; | ||
281 | INIT_LIST_HEAD(&timer->entry); | ||
282 | spin_lock_init(&timer->lock); | ||
283 | } | ||
284 | EXPORT_SYMBOL(init_virt_timer); | ||
285 | |||
286 | static inline int check_vtimer(struct vtimer_list *timer) | ||
287 | { | ||
288 | if (timer->magic != VTIMER_MAGIC) | ||
289 | return -EINVAL; | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static inline int vtimer_pending(struct vtimer_list *timer) | ||
294 | { | ||
295 | return (!list_empty(&timer->entry)); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * this function should only run on the specified CPU | ||
300 | */ | ||
301 | static void internal_add_vtimer(struct vtimer_list *timer) | ||
302 | { | ||
303 | unsigned long flags; | ||
304 | __u64 done; | ||
305 | struct vtimer_list *event; | ||
306 | struct vtimer_queue *vt_list; | ||
307 | |||
308 | vt_list = &per_cpu(virt_cpu_timer, timer->cpu); | ||
309 | spin_lock_irqsave(&vt_list->lock, flags); | ||
310 | |||
311 | if (timer->cpu != smp_processor_id()) | ||
312 | 		printk("internal_add_vtimer: BUG, running on wrong CPU\n"); | ||
313 | |||
314 | /* if list is empty we only have to set the timer */ | ||
315 | if (list_empty(&vt_list->list)) { | ||
316 | /* reset the offset, this may happen if the last timer was | ||
317 | * just deleted by mod_virt_timer and the interrupt | ||
318 | * didn't happen until here | ||
319 | */ | ||
320 | vt_list->offset = 0; | ||
321 | goto fire; | ||
322 | } | ||
323 | |||
324 | /* save progress */ | ||
325 | asm volatile ("STPT %0" : "=m" (done)); | ||
326 | |||
327 | /* calculate completed work */ | ||
328 | done = vt_list->to_expire - done + vt_list->offset; | ||
329 | vt_list->offset = 0; | ||
330 | |||
331 | list_for_each_entry(event, &vt_list->list, entry) | ||
332 | event->expires -= done; | ||
333 | |||
334 | fire: | ||
335 | list_add_sorted(timer, &vt_list->list); | ||
336 | |||
337 | /* get first element, which is the next vtimer slice */ | ||
338 | event = list_entry(vt_list->list.next, struct vtimer_list, entry); | ||
339 | |||
340 | set_vtimer(event->expires); | ||
341 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
342 | 	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */ | ||
343 | put_cpu(); | ||
344 | } | ||
345 | |||
346 | static inline int prepare_vtimer(struct vtimer_list *timer) | ||
347 | { | ||
348 | if (check_vtimer(timer) || !timer->function) { | ||
349 | printk("add_virt_timer: uninitialized timer\n"); | ||
350 | return -EINVAL; | ||
351 | } | ||
352 | |||
353 | if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) { | ||
354 | printk("add_virt_timer: invalid timer expire value!\n"); | ||
355 | return -EINVAL; | ||
356 | } | ||
357 | |||
358 | if (vtimer_pending(timer)) { | ||
359 | printk("add_virt_timer: timer pending\n"); | ||
360 | return -EBUSY; | ||
361 | } | ||
362 | |||
363 | timer->cpu = get_cpu(); | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | * add_virt_timer - add an oneshot virtual CPU timer | ||
369 | */ | ||
370 | void add_virt_timer(void *new) | ||
371 | { | ||
372 | struct vtimer_list *timer; | ||
373 | |||
374 | timer = (struct vtimer_list *)new; | ||
375 | |||
376 | if (prepare_vtimer(timer) < 0) | ||
377 | return; | ||
378 | |||
379 | timer->interval = 0; | ||
380 | internal_add_vtimer(timer); | ||
381 | } | ||
382 | EXPORT_SYMBOL(add_virt_timer); | ||
383 | |||
384 | /* | ||
385 | * add_virt_timer_int - add an interval virtual CPU timer | ||
386 | */ | ||
387 | void add_virt_timer_periodic(void *new) | ||
388 | { | ||
389 | struct vtimer_list *timer; | ||
390 | |||
391 | timer = (struct vtimer_list *)new; | ||
392 | |||
393 | if (prepare_vtimer(timer) < 0) | ||
394 | return; | ||
395 | |||
396 | timer->interval = timer->expires; | ||
397 | internal_add_vtimer(timer); | ||
398 | } | ||
399 | EXPORT_SYMBOL(add_virt_timer_periodic); | ||
400 | |||
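A hypothetical caller of this interface might look as follows. This is only a sketch: it assumes <asm/timer.h> exports struct vtimer_list with the fields used in this file (function, data, expires), and that one microsecond corresponds to 4096 CPU-timer units as in lib/delay.c.

        #include <asm/timer.h>

        /* The callback runs from the 0x1005 external interrupt handler,
         * so it should be short. */
        static void my_tick(unsigned long data, struct pt_regs *regs)
        {
                /* ... per-tick work ... */
        }

        static struct vtimer_list my_timer;

        static void start_my_tick(void)
        {
                init_virt_timer(&my_timer);
                my_timer.function = my_tick;
                my_timer.data = 0;
                my_timer.expires = 10 * 1000 * 4096ULL;  /* ~10ms of virtual CPU time */
                add_virt_timer_periodic(&my_timer);      /* interval = expires */
        }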
401 | /* | ||
402 | * If we change a pending timer the function must be called on the CPU | ||
403 |  * where the timer is running, e.g. by using smp_call_function_on() | ||
404 | * | ||
405 | * The original mod_timer adds the timer if it is not pending. For compatibility | ||
406 |  * we do the same. The timer will be added on the current CPU as a one-shot timer. | ||
407 | * | ||
408 | * returns whether it has modified a pending timer (1) or not (0) | ||
409 | */ | ||
410 | int mod_virt_timer(struct vtimer_list *timer, __u64 expires) | ||
411 | { | ||
412 | struct vtimer_queue *vt_list; | ||
413 | unsigned long flags; | ||
414 | int cpu; | ||
415 | |||
416 | if (check_vtimer(timer) || !timer->function) { | ||
417 | printk("mod_virt_timer: uninitialized timer\n"); | ||
418 | return -EINVAL; | ||
419 | } | ||
420 | |||
421 | if (!expires || expires > VTIMER_MAX_SLICE) { | ||
422 | printk("mod_virt_timer: invalid expire range\n"); | ||
423 | return -EINVAL; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * This is a common optimization triggered by the | ||
428 | * networking code - if the timer is re-modified | ||
429 | * to be the same thing then just return: | ||
430 | */ | ||
431 | if (timer->expires == expires && vtimer_pending(timer)) | ||
432 | return 1; | ||
433 | |||
434 | cpu = get_cpu(); | ||
435 | vt_list = &per_cpu(virt_cpu_timer, cpu); | ||
436 | |||
437 | /* disable interrupts before test if timer is pending */ | ||
438 | spin_lock_irqsave(&vt_list->lock, flags); | ||
439 | |||
440 | /* if timer isn't pending add it on the current CPU */ | ||
441 | if (!vtimer_pending(timer)) { | ||
442 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
443 | /* we do not activate an interval timer with mod_virt_timer */ | ||
444 | timer->interval = 0; | ||
445 | timer->expires = expires; | ||
446 | timer->cpu = cpu; | ||
447 | internal_add_vtimer(timer); | ||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* check if we run on the right CPU */ | ||
452 | if (timer->cpu != cpu) { | ||
453 | printk("mod_virt_timer: running on wrong CPU, check your code\n"); | ||
454 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
455 | put_cpu(); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | list_del_init(&timer->entry); | ||
460 | timer->expires = expires; | ||
461 | |||
462 | /* also change the interval if we have an interval timer */ | ||
463 | if (timer->interval) | ||
464 | timer->interval = expires; | ||
465 | |||
466 | /* the timer can't expire anymore so we can release the lock */ | ||
467 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
468 | internal_add_vtimer(timer); | ||
469 | return 1; | ||
470 | } | ||
471 | EXPORT_SYMBOL(mod_virt_timer); | ||
472 | |||
473 | /* | ||
474 | * delete a virtual timer | ||
475 | * | ||
476 | * returns whether the deleted timer was pending (1) or not (0) | ||
477 | */ | ||
478 | int del_virt_timer(struct vtimer_list *timer) | ||
479 | { | ||
480 | unsigned long flags; | ||
481 | struct vtimer_queue *vt_list; | ||
482 | |||
483 | if (check_vtimer(timer)) { | ||
484 | printk("del_virt_timer: timer not initialized\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | /* check if timer is pending */ | ||
489 | if (!vtimer_pending(timer)) | ||
490 | return 0; | ||
491 | |||
492 | vt_list = &per_cpu(virt_cpu_timer, timer->cpu); | ||
493 | spin_lock_irqsave(&vt_list->lock, flags); | ||
494 | |||
495 | /* we don't interrupt a running timer, just let it expire! */ | ||
496 | list_del_init(&timer->entry); | ||
497 | |||
498 | /* last timer removed */ | ||
499 | if (list_empty(&vt_list->list)) { | ||
500 | vt_list->to_expire = 0; | ||
501 | vt_list->offset = 0; | ||
502 | } | ||
503 | |||
504 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
505 | return 1; | ||
506 | } | ||
507 | EXPORT_SYMBOL(del_virt_timer); | ||
508 | |||
509 | /* | ||
510 | * Start the virtual CPU timer on the current CPU. | ||
511 | */ | ||
512 | void init_cpu_vtimer(void) | ||
513 | { | ||
514 | struct vtimer_queue *vt_list; | ||
515 | unsigned long cr0; | ||
516 | |||
517 | /* kick the virtual timer */ | ||
518 | S390_lowcore.exit_timer = VTIMER_MAX_SLICE; | ||
519 | S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; | ||
520 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | ||
521 | asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); | ||
522 | __ctl_store(cr0, 0, 0); | ||
523 | cr0 |= 0x400; | ||
524 | __ctl_load(cr0, 0, 0); | ||
525 | |||
526 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | ||
527 | INIT_LIST_HEAD(&vt_list->list); | ||
528 | spin_lock_init(&vt_list->lock); | ||
529 | vt_list->to_expire = 0; | ||
530 | vt_list->offset = 0; | ||
531 | vt_list->idle = 0; | ||
532 | |||
533 | } | ||
534 | |||
535 | static int vtimer_idle_notify(struct notifier_block *self, | ||
536 | unsigned long action, void *hcpu) | ||
537 | { | ||
538 | switch (action) { | ||
539 | case CPU_IDLE: | ||
540 | stop_cpu_timer(); | ||
541 | break; | ||
542 | case CPU_NOT_IDLE: | ||
543 | start_cpu_timer(); | ||
544 | break; | ||
545 | } | ||
546 | return NOTIFY_OK; | ||
547 | } | ||
548 | |||
549 | static struct notifier_block vtimer_idle_nb = { | ||
550 | .notifier_call = vtimer_idle_notify, | ||
551 | }; | ||
552 | |||
553 | void __init vtime_init(void) | ||
554 | { | ||
555 | /* request the cpu timer external interrupt */ | ||
556 | if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt, | ||
557 | &ext_int_info_timer) != 0) | ||
558 | panic("Couldn't request external interrupt 0x1005"); | ||
559 | |||
560 | if (register_idle_notifier(&vtimer_idle_nb)) | ||
561 | panic("Couldn't register idle notifier"); | ||
562 | |||
563 | init_cpu_vtimer(); | ||
564 | } | ||
565 | |||
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile new file mode 100644 index 000000000000..a8758b1d20a9 --- /dev/null +++ b/arch/s390/lib/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | # | ||
2 | # Makefile for s390-specific library files. | ||
3 | # | ||
4 | |||
5 | EXTRA_AFLAGS := -traditional | ||
6 | |||
7 | lib-y += delay.o string.o | ||
8 | lib-$(CONFIG_ARCH_S390_31) += uaccess.o | ||
9 | lib-$(CONFIG_ARCH_S390X) += uaccess64.o | ||
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c new file mode 100644 index 000000000000..e96c35bddac7 --- /dev/null +++ b/arch/s390/lib/delay.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 |  *  arch/s390/lib/delay.c | ||
3 | * Precise Delay Loops for S390 | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * | ||
9 | * Derived from "arch/i386/lib/delay.c" | ||
10 | * Copyright (C) 1993 Linus Torvalds | ||
11 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/delay.h> | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | #include <asm/smp.h> | ||
20 | #endif | ||
21 | |||
22 | void __delay(unsigned long loops) | ||
23 | { | ||
24 | /* | ||
25 |          * To end the bloody stupid and useless discussion about the | ||
26 |          * BogoMips number I took the liberty to define the __delay | ||
27 |          * function in a way that the resulting BogoMips number will | ||
28 | * yield the megahertz number of the cpu. The important function | ||
29 | * is udelay and that is done using the tod clock. -- martin. | ||
30 | */ | ||
31 | __asm__ __volatile__( | ||
32 | "0: brct %0,0b" | ||
33 | : /* no outputs */ : "r" (loops/2) ); | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * Waits for 'usecs' microseconds using the tod clock, giving up the time slice | ||
38 |  * of the virtual CPU in between to avoid congestion. | ||
39 | */ | ||
40 | void __udelay(unsigned long usecs) | ||
41 | { | ||
42 | uint64_t start_cc, end_cc; | ||
43 | |||
44 | if (usecs == 0) | ||
45 | return; | ||
46 | asm volatile ("STCK %0" : "=m" (start_cc)); | ||
47 | do { | ||
48 | cpu_relax(); | ||
49 | asm volatile ("STCK %0" : "=m" (end_cc)); | ||
50 | } while (((end_cc - start_cc)/4096) < usecs); | ||
51 | } | ||
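The division by 4096 converts TOD-clock units to microseconds (bit 51 of the clock increments once per microsecond). A small helper in the same spirit, shown only as a sketch of that conversion rather than anything defined by this patch:

        #include <linux/types.h>

        /* Sketch: convert a TOD-clock difference to microseconds;
         * 4096 TOD-clock units correspond to one microsecond (bit 51). */
        static inline unsigned long tod_delta_to_usecs(__u64 start_cc, __u64 end_cc)
        {
                return (unsigned long)((end_cc - start_cc) / 4096);
        }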
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c new file mode 100644 index 000000000000..8240cc77e06e --- /dev/null +++ b/arch/s390/lib/string.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * arch/s390/lib/string.c | ||
3 | * Optimized string functions | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | */ | ||
9 | |||
10 | #define IN_ARCH_STRING_C 1 | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/module.h> | ||
14 | |||
15 | /* | ||
16 | * Helper functions to find the end of a string | ||
17 | */ | ||
18 | static inline char *__strend(const char *s) | ||
19 | { | ||
20 | register unsigned long r0 asm("0") = 0; | ||
21 | |||
22 | asm volatile ("0: srst %0,%1\n" | ||
23 | " jo 0b" | ||
24 | : "+d" (r0), "+a" (s) : : "cc" ); | ||
25 | return (char *) r0; | ||
26 | } | ||
27 | |||
28 | static inline char *__strnend(const char *s, size_t n) | ||
29 | { | ||
30 | register unsigned long r0 asm("0") = 0; | ||
31 | const char *p = s + n; | ||
32 | |||
33 | asm volatile ("0: srst %0,%1\n" | ||
34 | " jo 0b" | ||
35 | : "+d" (p), "+a" (s) : "d" (r0) : "cc" ); | ||
36 | return (char *) p; | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * strlen - Find the length of a string | ||
41 | * @s: The string to be sized | ||
42 | * | ||
43 | * returns the length of @s | ||
44 | */ | ||
45 | size_t strlen(const char *s) | ||
46 | { | ||
47 | return __strend(s) - s; | ||
48 | } | ||
49 | EXPORT_SYMBOL(strlen); | ||
50 | |||
51 | /** | ||
52 | * strnlen - Find the length of a length-limited string | ||
53 | * @s: The string to be sized | ||
54 | * @n: The maximum number of bytes to search | ||
55 | * | ||
56 | * returns the minimum of the length of @s and @n | ||
57 | */ | ||
58 | size_t strnlen(const char * s, size_t n) | ||
59 | { | ||
60 | return __strnend(s, n) - s; | ||
61 | } | ||
62 | EXPORT_SYMBOL(strnlen); | ||
63 | |||
64 | /** | ||
65 | * strcpy - Copy a %NUL terminated string | ||
66 | * @dest: Where to copy the string to | ||
67 | * @src: Where to copy the string from | ||
68 | * | ||
69 | * returns a pointer to @dest | ||
70 | */ | ||
71 | char *strcpy(char *dest, const char *src) | ||
72 | { | ||
73 | register int r0 asm("0") = 0; | ||
74 | char *ret = dest; | ||
75 | |||
76 | asm volatile ("0: mvst %0,%1\n" | ||
77 | " jo 0b" | ||
78 | : "+&a" (dest), "+&a" (src) : "d" (r0) | ||
79 | : "cc", "memory" ); | ||
80 | return ret; | ||
81 | } | ||
82 | EXPORT_SYMBOL(strcpy); | ||
83 | |||
84 | /** | ||
85 | * strlcpy - Copy a %NUL terminated string into a sized buffer | ||
86 | * @dest: Where to copy the string to | ||
87 | * @src: Where to copy the string from | ||
88 | * @size: size of destination buffer | ||
89 | * | ||
90 | * Compatible with *BSD: the result is always a valid | ||
91 | * NUL-terminated string that fits in the buffer (unless, | ||
92 | * of course, the buffer size is zero). It does not pad | ||
93 | * out the result like strncpy() does. | ||
94 | */ | ||
95 | size_t strlcpy(char *dest, const char *src, size_t size) | ||
96 | { | ||
97 | size_t ret = __strend(src) - src; | ||
98 | |||
99 | if (size) { | ||
100 | size_t len = (ret >= size) ? size-1 : ret; | ||
101 | dest[len] = '\0'; | ||
102 | __builtin_memcpy(dest, src, len); | ||
103 | } | ||
104 | return ret; | ||
105 | } | ||
106 | EXPORT_SYMBOL(strlcpy); | ||
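Because the return value is the length of the source rather than of what was copied, a caller can detect truncation by comparing it with the buffer size. A minimal usage sketch (the surrounding function and names are illustrative only):

        /* Usage sketch: detect truncation via strlcpy's return value. */
        static void copy_name(const char *name)
        {
                char buf[16];

                if (strlcpy(buf, name, sizeof(buf)) >= sizeof(buf)) {
                        /* source did not fit; buf holds a truncated,
                         * NUL-terminated prefix of name */
                }
        }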
107 | |||
108 | /** | ||
109 | * strncpy - Copy a length-limited, %NUL-terminated string | ||
110 | * @dest: Where to copy the string to | ||
111 | * @src: Where to copy the string from | ||
112 | * @n: The maximum number of bytes to copy | ||
113 | * | ||
114 | * The result is not %NUL-terminated if the source exceeds | ||
115 | * @n bytes. | ||
116 | */ | ||
117 | char *strncpy(char *dest, const char *src, size_t n) | ||
118 | { | ||
119 | size_t len = __strnend(src, n) - src; | ||
120 | __builtin_memset(dest + len, 0, n - len); | ||
121 | __builtin_memcpy(dest, src, len); | ||
122 | return dest; | ||
123 | } | ||
124 | EXPORT_SYMBOL(strncpy); | ||
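The non-termination noted in the comment is the classic pitfall: if the source is at least @n bytes long, no NUL is stored. A short sketch of the usual defensive pattern (illustrative only, assuming the declarations from linux/string.h):

        /* Usage sketch: strncpy does not terminate when src fills dest,
         * so terminate explicitly. */
        static void copy_label(char *dest, size_t dest_size, const char *src)
        {
                strncpy(dest, src, dest_size - 1);
                dest[dest_size - 1] = '\0';
        }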
125 | |||
126 | /** | ||
127 | * strcat - Append one %NUL-terminated string to another | ||
128 | * @dest: The string to be appended to | ||
129 | * @src: The string to append to it | ||
130 | * | ||
131 | * returns a pointer to @dest | ||
132 | */ | ||
133 | char *strcat(char *dest, const char *src) | ||
134 | { | ||
135 | register int r0 asm("0") = 0; | ||
136 | unsigned long dummy; | ||
137 | char *ret = dest; | ||
138 | |||
139 | asm volatile ("0: srst %0,%1\n" | ||
140 | " jo 0b\n" | ||
141 | "1: mvst %0,%2\n" | ||
142 | " jo 1b" | ||
143 | : "=&a" (dummy), "+a" (dest), "+a" (src) | ||
144 | : "d" (r0), "0" (0UL) : "cc", "memory" ); | ||
145 | return ret; | ||
146 | } | ||
147 | EXPORT_SYMBOL(strcat); | ||
148 | |||
149 | /** | ||
150 | * strlcat - Append a length-limited, %NUL-terminated string to another | ||
151 | * @dest: The string to be appended to | ||
152 | * @src: The string to append to it | ||
153 | * @n: The size of the destination buffer. | ||
154 | */ | ||
155 | size_t strlcat(char *dest, const char *src, size_t n) | ||
156 | { | ||
157 | size_t dsize = __strend(dest) - dest; | ||
158 | size_t len = __strend(src) - src; | ||
159 | size_t res = dsize + len; | ||
160 | |||
161 | if (dsize < n) { | ||
162 | dest += dsize; | ||
163 | n -= dsize; | ||
164 | if (len >= n) | ||
165 | len = n - 1; | ||
166 | dest[len] = '\0'; | ||
167 | __builtin_memcpy(dest, src, len); | ||
168 | } | ||
169 | return res; | ||
170 | } | ||
171 | EXPORT_SYMBOL(strlcat); | ||
172 | |||
173 | /** | ||
174 | * strncat - Append a length-limited, %NUL-terminated string to another | ||
175 | * @dest: The string to be appended to | ||
176 | * @src: The string to append to it | ||
177 | * @n: The maximum numbers of bytes to copy | ||
178 | * | ||
179 | * returns a pointer to @dest | ||
180 | * | ||
181 | * Note that in contrast to strncpy, strncat ensures the result is | ||
182 | * terminated. | ||
183 | */ | ||
184 | char *strncat(char *dest, const char *src, size_t n) | ||
185 | { | ||
186 | size_t len = __strnend(src, n) - src; | ||
187 | char *p = __strend(dest); | ||
188 | |||
189 | p[len] = '\0'; | ||
190 | __builtin_memcpy(p, src, len); | ||
191 | return dest; | ||
192 | } | ||
193 | EXPORT_SYMBOL(strncat); | ||
194 | |||
195 | /** | ||
196 | * strcmp - Compare two strings | ||
197 | * @cs: One string | ||
198 | * @ct: Another string | ||
199 | * | ||
200 | * returns 0 if @cs and @ct are equal, | ||
201 | * < 0 if @cs is less than @ct | ||
202 | * > 0 if @cs is greater than @ct | ||
203 | */ | ||
204 | int strcmp(const char *cs, const char *ct) | ||
205 | { | ||
206 | register int r0 asm("0") = 0; | ||
207 | int ret = 0; | ||
208 | |||
209 | asm volatile ("0: clst %2,%3\n" | ||
210 | " jo 0b\n" | ||
211 | " je 1f\n" | ||
212 | " ic %0,0(%2)\n" | ||
213 | " ic %1,0(%3)\n" | ||
214 | " sr %0,%1\n" | ||
215 | "1:" | ||
216 | : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) | ||
217 | : : "cc" ); | ||
218 | return ret; | ||
219 | } | ||
220 | EXPORT_SYMBOL(strcmp); | ||
221 | |||
222 | /** | ||
223 | * strrchr - Find the last occurrence of a character in a string | ||
224 | * @s: The string to be searched | ||
225 | * @c: The character to search for | ||
226 | */ | ||
227 | char * strrchr(const char * s, int c) | ||
228 | { | ||
229 | size_t len = __strend(s) - s; | ||
230 | |||
231 | if (len) | ||
232 | do { | ||
233 | if (s[len] == (char) c) | ||
234 | return (char *) s + len; | ||
235 | } while (--len > 0); | ||
236 | return 0; | ||
237 | } | ||
238 | EXPORT_SYMBOL(strrchr); | ||
239 | |||
240 | /** | ||
241 | * strstr - Find the first substring in a %NUL terminated string | ||
242 | * @s1: The string to be searched | ||
243 | * @s2: The string to search for | ||
244 | */ | ||
245 | char * strstr(const char * s1,const char * s2) | ||
246 | { | ||
247 | int l1, l2; | ||
248 | |||
249 | l2 = __strend(s2) - s2; | ||
250 | if (!l2) | ||
251 | return (char *) s1; | ||
252 | l1 = __strend(s1) - s1; | ||
253 | while (l1-- >= l2) { | ||
254 | register unsigned long r2 asm("2") = (unsigned long) s1; | ||
255 | register unsigned long r3 asm("3") = (unsigned long) l2; | ||
256 | register unsigned long r4 asm("4") = (unsigned long) s2; | ||
257 | register unsigned long r5 asm("5") = (unsigned long) l2; | ||
258 | int cc; | ||
259 | |||
260 | asm volatile ("0: clcle %1,%3,0\n" | ||
261 | " jo 0b\n" | ||
262 | " ipm %0\n" | ||
263 | " srl %0,28" | ||
264 | : "=&d" (cc), "+a" (r2), "+a" (r3), | ||
265 | "+a" (r4), "+a" (r5) : : "cc" ); | ||
266 | if (!cc) | ||
267 | return (char *) s1; | ||
268 | s1++; | ||
269 | } | ||
270 | return 0; | ||
271 | } | ||
272 | EXPORT_SYMBOL(strstr); | ||
273 | |||
274 | /** | ||
275 | * memchr - Find a character in an area of memory. | ||
276 | * @s: The memory area | ||
277 | * @c: The byte to search for | ||
278 | * @n: The size of the area. | ||
279 | * | ||
280 | * returns the address of the first occurrence of @c, or %NULL | ||
281 | * if @c is not found | ||
282 | */ | ||
283 | void *memchr(const void *s, int c, size_t n) | ||
284 | { | ||
285 | register int r0 asm("0") = (char) c; | ||
286 | const void *ret = s + n; | ||
287 | |||
288 | asm volatile ("0: srst %0,%1\n" | ||
289 | " jo 0b\n" | ||
290 | " jl 1f\n" | ||
291 | " la %0,0\n" | ||
292 | "1:" | ||
293 | : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); | ||
294 | return (void *) ret; | ||
295 | } | ||
296 | EXPORT_SYMBOL(memchr); | ||
297 | |||
298 | /** | ||
299 | * memcmp - Compare two areas of memory | ||
300 | * @cs: One area of memory | ||
301 | * @ct: Another area of memory | ||
302 | * @count: The size of the area. | ||
303 | */ | ||
304 | int memcmp(const void *cs, const void *ct, size_t n) | ||
305 | { | ||
306 | register unsigned long r2 asm("2") = (unsigned long) cs; | ||
307 | register unsigned long r3 asm("3") = (unsigned long) n; | ||
308 | register unsigned long r4 asm("4") = (unsigned long) ct; | ||
309 | register unsigned long r5 asm("5") = (unsigned long) n; | ||
310 | int ret; | ||
311 | |||
312 | asm volatile ("0: clcle %1,%3,0\n" | ||
313 | " jo 0b\n" | ||
314 | " ipm %0\n" | ||
315 | " srl %0,28" | ||
316 | : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5) | ||
317 | : : "cc" ); | ||
318 | if (ret) | ||
319 | ret = *(char *) r2 - *(char *) r4; | ||
320 | return ret; | ||
321 | } | ||
322 | EXPORT_SYMBOL(memcmp); | ||
323 | |||
324 | /** | ||
325 | * memscan - Find a character in an area of memory. | ||
326 | * @s: The memory area | ||
327 | * @c: The byte to search for | ||
328 | * @n: The size of the area. | ||
329 | * | ||
330 | * returns the address of the first occurrence of @c, or 1 byte past | ||
331 | * the area if @c is not found | ||
332 | */ | ||
333 | void *memscan(void *s, int c, size_t n) | ||
334 | { | ||
335 | register int r0 asm("0") = (char) c; | ||
336 | const void *ret = s + n; | ||
337 | |||
338 | asm volatile ("0: srst %0,%1\n" | ||
339 | " jo 0b\n" | ||
340 | : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); | ||
341 | return (void *) ret; | ||
342 | } | ||
343 | EXPORT_SYMBOL(memscan); | ||
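The difference from memchr is only in the miss case: memchr returns NULL, while memscan returns a pointer one byte past the searched area. A small sketch contrasting the two (illustrative only):

        /* Usage sketch: memchr vs. memscan when the byte is not found. */
        static void scan_example(void)
        {
                static const char area[] = "abcdefgh";

                const void *p1 = memchr(area, 'z', sizeof(area) - 1);          /* NULL */
                const void *p2 = memscan((void *) area, 'z', sizeof(area) - 1); /* area + 8 */

                (void) p1;
                (void) p2;
        }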
344 | |||
345 | /** | ||
346 | * memcpy - Copy one area of memory to another | ||
347 | * @dest: Where to copy to | ||
348 | * @src: Where to copy from | ||
349 | * @n: The size of the area. | ||
350 | * | ||
351 | * returns a pointer to @dest | ||
352 | */ | ||
353 | void *memcpy(void *dest, const void *src, size_t n) | ||
354 | { | ||
355 | return __builtin_memcpy(dest, src, n); | ||
356 | } | ||
357 | EXPORT_SYMBOL(memcpy); | ||
358 | |||
359 | /** | ||
360 | * memset - Fill a region of memory with the given value | ||
361 | * @s: Pointer to the start of the area. | ||
362 | * @c: The byte to fill the area with | ||
363 | * @n: The size of the area. | ||
364 | * | ||
365 | * returns a pointer to @s | ||
366 | */ | ||
367 | void *memset(void *s, int c, size_t n) | ||
368 | { | ||
369 | char *xs; | ||
370 | |||
371 | if (c == 0) | ||
372 | return __builtin_memset(s, 0, n); | ||
373 | |||
374 | xs = (char *) s; | ||
375 | if (n > 0) | ||
376 | do { | ||
377 | *xs++ = c; | ||
378 | } while (--n > 0); | ||
379 | return s; | ||
380 | } | ||
381 | EXPORT_SYMBOL(memset); | ||
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S new file mode 100644 index 000000000000..e8029ef42ef2 --- /dev/null +++ b/arch/s390/lib/uaccess.S | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * arch/s390/lib/uaccess.S | ||
3 | * __copy_{from|to}_user functions. | ||
4 | * | ||
5 | * s390 | ||
6 | * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 |  *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * These functions have standard call interface | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <asm/lowcore.h> | ||
14 | #include <asm/offsets.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __copy_from_user_asm | ||
19 | # %r2 = to, %r3 = n, %r4 = from | ||
20 | __copy_from_user_asm: | ||
21 | slr %r0,%r0 | ||
22 | 0: mvcp 0(%r3,%r2),0(%r4),%r0 | ||
23 | jnz 1f | ||
24 | slr %r2,%r2 | ||
25 | br %r14 | ||
26 | 1: la %r2,256(%r2) | ||
27 | la %r4,256(%r4) | ||
28 | ahi %r3,-256 | ||
29 | 2: mvcp 0(%r3,%r2),0(%r4),%r0 | ||
30 | jnz 1b | ||
31 | 3: slr %r2,%r2 | ||
32 | br %r14 | ||
33 | 4: lhi %r0,-4096 | ||
34 | lr %r5,%r4 | ||
35 | slr %r5,%r0 | ||
36 | nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 | ||
37 | slr %r5,%r4 # %r5 = #bytes to next user page boundary | ||
38 | clr %r3,%r5 # copy crosses next page boundary ? | ||
39 | jnh 6f # no, the current page faulted | ||
40 | # move with the reduced length which is < 256 | ||
41 | 5: mvcp 0(%r5,%r2),0(%r4),%r0 | ||
42 | slr %r3,%r5 | ||
43 | 6: lr %r2,%r3 | ||
44 | br %r14 | ||
45 | .section __ex_table,"a" | ||
46 | .long 0b,4b | ||
47 | .long 2b,4b | ||
48 | .long 5b,6b | ||
49 | .previous | ||
50 | |||
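Each .long pair in the __ex_table section above names a potentially faulting instruction and its fixup: if the mvcp at label 0 or 2 faults, execution resumes at label 4, which works out how many bytes were not copied. Conceptually an entry looks like the sketch below (the struct layout matches the usual kernel convention and is assumed here, not quoted from this patch):

        /* Conceptual view of one 31-bit exception-table entry as emitted by
         * the .long pairs: "if a fault hits insn, continue at fixup". */
        struct exception_table_entry {
                unsigned long insn;     /* address of the faulting instruction */
                unsigned long fixup;    /* address execution resumes at        */
        };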
51 | .align 4 | ||
52 | .text | ||
53 | .globl __copy_to_user_asm | ||
54 | # %r2 = from, %r3 = n, %r4 = to | ||
55 | __copy_to_user_asm: | ||
56 | slr %r0,%r0 | ||
57 | 0: mvcs 0(%r3,%r4),0(%r2),%r0 | ||
58 | jnz 1f | ||
59 | slr %r2,%r2 | ||
60 | br %r14 | ||
61 | 1: la %r2,256(%r2) | ||
62 | la %r4,256(%r4) | ||
63 | ahi %r3,-256 | ||
64 | 2: mvcs 0(%r3,%r4),0(%r2),%r0 | ||
65 | jnz 1b | ||
66 | 3: slr %r2,%r2 | ||
67 | br %r14 | ||
68 | 4: lhi %r0,-4096 | ||
69 | lr %r5,%r4 | ||
70 | slr %r5,%r0 | ||
71 | nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 | ||
72 | slr %r5,%r4 # %r5 = #bytes to next user page boundary | ||
73 | clr %r3,%r5 # copy crosses next page boundary ? | ||
74 | jnh 6f # no, the current page faulted | ||
75 | # move with the reduced length which is < 256 | ||
76 | 5: mvcs 0(%r5,%r4),0(%r2),%r0 | ||
77 | slr %r3,%r5 | ||
78 | 6: lr %r2,%r3 | ||
79 | br %r14 | ||
80 | .section __ex_table,"a" | ||
81 | .long 0b,4b | ||
82 | .long 2b,4b | ||
83 | .long 5b,6b | ||
84 | .previous | ||
85 | |||
86 | .align 4 | ||
87 | .text | ||
88 | .globl __copy_in_user_asm | ||
89 | # %r2 = from, %r3 = n, %r4 = to | ||
90 | __copy_in_user_asm: | ||
91 | sacf 256 | ||
92 | bras 1,1f | ||
93 | mvc 0(1,%r4),0(%r2) | ||
94 | 0: mvc 0(256,%r4),0(%r2) | ||
95 | la %r2,256(%r2) | ||
96 | la %r4,256(%r4) | ||
97 | 1: ahi %r3,-256 | ||
98 | jnm 0b | ||
99 | 2: ex %r3,0(%r1) | ||
100 | sacf 0 | ||
101 | slr %r2,%r2 | ||
102 | 	br	%r14 | ||
103 | 3: mvc 0(1,%r4),0(%r2) | ||
104 | la %r2,1(%r2) | ||
105 | la %r4,1(%r4) | ||
106 | ahi %r3,-1 | ||
107 | jnm 3b | ||
108 | 4: lr %r2,%r3 | ||
109 | sacf 0 | ||
110 | br %r14 | ||
111 | .section __ex_table,"a" | ||
112 | .long 0b,3b | ||
113 | .long 2b,3b | ||
114 | .long 3b,4b | ||
115 | .previous | ||
116 | |||
117 | .align 4 | ||
118 | .text | ||
119 | .globl __clear_user_asm | ||
120 | # %r2 = to, %r3 = n | ||
121 | __clear_user_asm: | ||
122 | bras %r5,0f | ||
123 | .long empty_zero_page | ||
124 | 0: l %r5,0(%r5) | ||
125 | slr %r0,%r0 | ||
126 | 1: mvcs 0(%r3,%r2),0(%r5),%r0 | ||
127 | jnz 2f | ||
128 | slr %r2,%r2 | ||
129 | br %r14 | ||
130 | 2: la %r2,256(%r2) | ||
131 | ahi %r3,-256 | ||
132 | 3: mvcs 0(%r3,%r2),0(%r5),%r0 | ||
133 | jnz 2b | ||
134 | 4: slr %r2,%r2 | ||
135 | br %r14 | ||
136 | 5: lhi %r0,-4096 | ||
137 | lr %r4,%r2 | ||
138 | slr %r4,%r0 | ||
139 | nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096 | ||
140 | slr %r4,%r2 # %r4 = #bytes to next user page boundary | ||
141 | clr %r3,%r4 # clear crosses next page boundary ? | ||
142 | jnh 7f # no, the current page faulted | ||
143 | # clear with the reduced length which is < 256 | ||
144 | 6: mvcs 0(%r4,%r2),0(%r5),%r0 | ||
145 | slr %r3,%r4 | ||
146 | 7: lr %r2,%r3 | ||
147 | br %r14 | ||
148 | .section __ex_table,"a" | ||
149 | .long 1b,5b | ||
150 | .long 3b,5b | ||
151 | .long 6b,7b | ||
152 | .previous | ||
153 | |||
154 | .align 4 | ||
155 | .text | ||
156 | .globl __strncpy_from_user_asm | ||
157 | # %r2 = count, %r3 = dst, %r4 = src | ||
158 | __strncpy_from_user_asm: | ||
159 | lhi %r0,0 | ||
160 | lr %r1,%r4 | ||
161 | la %r4,0(%r4) # clear high order bit from %r4 | ||
162 | la %r2,0(%r2,%r4) # %r2 points to first byte after string | ||
163 | sacf 256 | ||
164 | 0: srst %r2,%r1 | ||
165 | jo 0b | ||
166 | sacf 0 | ||
167 | lr %r1,%r2 | ||
168 | jh 1f # \0 found in string ? | ||
169 | ahi %r1,1 # include \0 in copy | ||
170 | 1: slr %r1,%r4 # %r1 = copy length (without \0) | ||
171 | slr %r2,%r4 # %r2 = return length (including \0) | ||
172 | 2: mvcp 0(%r1,%r3),0(%r4),%r0 | ||
173 | jnz 3f | ||
174 | br %r14 | ||
175 | 3: la %r3,256(%r3) | ||
176 | la %r4,256(%r4) | ||
177 | ahi %r1,-256 | ||
178 | mvcp 0(%r1,%r3),0(%r4),%r0 | ||
179 | jnz 3b | ||
180 | br %r14 | ||
181 | 4: sacf 0 | ||
182 | lhi %r2,-EFAULT | ||
183 | br %r14 | ||
184 | .section __ex_table,"a" | ||
185 | .long 0b,4b | ||
186 | .previous | ||
187 | |||
188 | .align 4 | ||
189 | .text | ||
190 | .globl __strnlen_user_asm | ||
191 | # %r2 = count, %r3 = src | ||
192 | __strnlen_user_asm: | ||
193 | lhi %r0,0 | ||
194 | lr %r1,%r3 | ||
195 | 	la	%r3,0(%r3)	# clear high order bit from %r3 | ||
196 | la %r2,0(%r2,%r3) # %r2 points to first byte after string | ||
197 | sacf 256 | ||
198 | 0: srst %r2,%r1 | ||
199 | jo 0b | ||
200 | sacf 0 | ||
201 | jh 1f # \0 found in string ? | ||
202 | ahi %r2,1 # strnlen_user result includes the \0 | ||
203 | 1: slr %r2,%r3 | ||
204 | br %r14 | ||
205 | 2: sacf 0 | ||
206 | lhi %r2,-EFAULT | ||
207 | br %r14 | ||
208 | .section __ex_table,"a" | ||
209 | .long 0b,2b | ||
210 | .previous | ||
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S new file mode 100644 index 000000000000..0ca56972f4f0 --- /dev/null +++ b/arch/s390/lib/uaccess64.S | |||
@@ -0,0 +1,206 @@ | |||
1 | /* | ||
2 |  *  arch/s390/lib/uaccess64.S | ||
3 | * __copy_{from|to}_user functions. | ||
4 | * | ||
5 | * s390 | ||
6 | * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 |  *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * These functions have standard call interface | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <asm/lowcore.h> | ||
14 | #include <asm/offsets.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __copy_from_user_asm | ||
19 | # %r2 = to, %r3 = n, %r4 = from | ||
20 | __copy_from_user_asm: | ||
21 | slgr %r0,%r0 | ||
22 | 0: mvcp 0(%r3,%r2),0(%r4),%r0 | ||
23 | jnz 1f | ||
24 | slgr %r2,%r2 | ||
25 | br %r14 | ||
26 | 1: la %r2,256(%r2) | ||
27 | la %r4,256(%r4) | ||
28 | aghi %r3,-256 | ||
29 | 2: mvcp 0(%r3,%r2),0(%r4),%r0 | ||
30 | jnz 1b | ||
31 | 3: slgr %r2,%r2 | ||
32 | br %r14 | ||
33 | 4: lghi %r0,-4096 | ||
34 | lgr %r5,%r4 | ||
35 | slgr %r5,%r0 | ||
36 | ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 | ||
37 | slgr %r5,%r4 # %r5 = #bytes to next user page boundary | ||
38 | clgr %r3,%r5 # copy crosses next page boundary ? | ||
39 | jnh 6f # no, the current page faulted | ||
40 | # move with the reduced length which is < 256 | ||
41 | 5: mvcp 0(%r5,%r2),0(%r4),%r0 | ||
42 | slgr %r3,%r5 | ||
43 | 6: lgr %r2,%r3 | ||
44 | br %r14 | ||
45 | .section __ex_table,"a" | ||
46 | .quad 0b,4b | ||
47 | .quad 2b,4b | ||
48 | .quad 5b,6b | ||
49 | .previous | ||
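
All of the mvcp/mvcs routines in this file share the same contract: %r2 is cleared on success, and after a fault the fixup retries up to the page boundary and leaves the number of bytes that could not be copied in %r2. A hedged C-level sketch of that contract (the wrapper below is illustrative, not the kernel's actual uaccess code):

/* Sketch: the return-value convention of __copy_from_user_asm. */
extern unsigned long __copy_from_user_asm(void *to, unsigned long n,
                                          const void *from);

static long read_from_user_sketch(void *buf, unsigned long n, const void *uptr)
{
        /* a non-zero result is the count of bytes left uncopied */
        return __copy_from_user_asm(buf, n, uptr) ? -EFAULT : 0;
}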
50 | |||
51 | .align 4 | ||
52 | .text | ||
53 | .globl __copy_to_user_asm | ||
54 | # %r2 = from, %r3 = n, %r4 = to | ||
55 | __copy_to_user_asm: | ||
56 | slgr %r0,%r0 | ||
57 | 0: mvcs 0(%r3,%r4),0(%r2),%r0 | ||
58 | jnz 1f | ||
59 | slgr %r2,%r2 | ||
60 | br %r14 | ||
61 | 1: la %r2,256(%r2) | ||
62 | la %r4,256(%r4) | ||
63 | aghi %r3,-256 | ||
64 | 2: mvcs 0(%r3,%r4),0(%r2),%r0 | ||
65 | jnz 1b | ||
66 | 3: slgr %r2,%r2 | ||
67 | br %r14 | ||
68 | 4: lghi %r0,-4096 | ||
69 | lgr %r5,%r4 | ||
70 | slgr %r5,%r0 | ||
71 | ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 | ||
72 | slgr %r5,%r4 # %r5 = #bytes to next user page boundary | ||
73 | clgr %r3,%r5 # copy crosses next page boundary ? | ||
74 | jnh 6f # no, the current page faulted | ||
75 | # move with the reduced length which is < 256 | ||
76 | 5: mvcs 0(%r5,%r4),0(%r2),%r0 | ||
77 | slgr %r3,%r5 | ||
78 | 6: lgr %r2,%r3 | ||
79 | br %r14 | ||
80 | .section __ex_table,"a" | ||
81 | .quad 0b,4b | ||
82 | .quad 2b,4b | ||
83 | .quad 5b,6b | ||
84 | .previous | ||
85 | |||
86 | .align 4 | ||
87 | .text | ||
88 | .globl __copy_in_user_asm | ||
89 | # %r2 = from, %r3 = n, %r4 = to | ||
90 | __copy_in_user_asm: | ||
91 | sacf 256 | ||
92 | bras %r1,1f | ||
93 | mvc 0(1,%r4),0(%r2) | ||
94 | 0: mvc 0(256,%r4),0(%r2) | ||
95 | la %r2,256(%r2) | ||
96 | la %r4,256(%r4) | ||
97 | 1: aghi %r3,-256 | ||
98 | jnm 0b | ||
99 | 2: ex %r3,0(%r1) | ||
100 | sacf 0 | ||
101 | slgr %r2,%r2 | ||
102 | br %r14 | ||
103 | 3: mvc 0(1,%r4),0(%r2) | ||
104 | la %r2,1(%r2) | ||
105 | la %r4,1(%r4) | ||
106 | aghi %r3,-1 | ||
107 | jnm 3b | ||
108 | 4: lgr %r2,%r3 | ||
109 | sacf 0 | ||
110 | br %r14 | ||
111 | .section __ex_table,"a" | ||
112 | .quad 0b,3b | ||
113 | .quad 2b,3b | ||
114 | .quad 3b,4b | ||
115 | .previous | ||
116 | |||
117 | .align 4 | ||
118 | .text | ||
119 | .globl __clear_user_asm | ||
120 | # %r2 = to, %r3 = n | ||
121 | __clear_user_asm: | ||
122 | slgr %r0,%r0 | ||
123 | larl %r5,empty_zero_page | ||
124 | 1: mvcs 0(%r3,%r2),0(%r5),%r0 | ||
125 | jnz 2f | ||
126 | slgr %r2,%r2 | ||
127 | br %r14 | ||
128 | 2: la %r2,256(%r2) | ||
129 | aghi %r3,-256 | ||
130 | 3: mvcs 0(%r3,%r2),0(%r5),%r0 | ||
131 | jnz 2b | ||
132 | 4: slgr %r2,%r2 | ||
133 | br %r14 | ||
134 | 5: lghi %r0,-4096 | ||
135 | lgr %r4,%r2 | ||
136 | slgr %r4,%r0 | ||
137 | ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096 | ||
138 | slgr %r4,%r2 # %r4 = #bytes to next user page boundary | ||
139 | clgr %r3,%r4 # clear crosses next page boundary ? | ||
140 | jnh 7f # no, the current page faulted | ||
141 | # clear with the reduced length which is < 256 | ||
142 | 6: mvcs 0(%r4,%r2),0(%r5),%r0 | ||
143 | slgr %r3,%r4 | ||
144 | 7: lgr %r2,%r3 | ||
145 | br %r14 | ||
146 | .section __ex_table,"a" | ||
147 | .quad 1b,5b | ||
148 | .quad 3b,5b | ||
149 | .quad 6b,7b | ||
150 | .previous | ||
151 | |||
152 | .align 4 | ||
153 | .text | ||
154 | .globl __strncpy_from_user_asm | ||
155 | # %r2 = count, %r3 = dst, %r4 = src | ||
156 | __strncpy_from_user_asm: | ||
157 | lghi %r0,0 | ||
158 | lgr %r1,%r4 | ||
159 | la %r2,0(%r2,%r4) # %r2 points to first byte after string | ||
160 | sacf 256 | ||
161 | 0: srst %r2,%r1 | ||
162 | jo 0b | ||
163 | sacf 0 | ||
164 | lgr %r1,%r2 | ||
165 | jh 1f # \0 found in string ? | ||
166 | aghi %r1,1 # include \0 in copy | ||
167 | 1: slgr %r1,%r4 # %r1 = copy length (including \0) | ||
168 | slgr %r2,%r4 # %r2 = return length (without \0) | ||
169 | 2: mvcp 0(%r1,%r3),0(%r4),%r0 | ||
170 | jnz 3f | ||
171 | br %r14 | ||
172 | 3: la %r3,256(%r3) | ||
173 | la %r4,256(%r4) | ||
174 | aghi %r1,-256 | ||
175 | mvcp 0(%r1,%r3),0(%r4),%r0 | ||
176 | jnz 3b | ||
177 | br %r14 | ||
178 | 4: sacf 0 | ||
179 | lghi %r2,-EFAULT | ||
180 | br %r14 | ||
181 | .section __ex_table,"a" | ||
182 | .quad 0b,4b | ||
183 | .previous | ||
184 | |||
185 | .align 4 | ||
186 | .text | ||
187 | .globl __strnlen_user_asm | ||
188 | # %r2 = count, %r3 = src | ||
189 | __strnlen_user_asm: | ||
190 | lghi %r0,0 | ||
191 | lgr %r1,%r3 | ||
192 | la %r2,0(%r2,%r3) # %r2 points to first byte after string | ||
193 | sacf 256 | ||
194 | 0: srst %r2,%r1 | ||
195 | jo 0b | ||
196 | sacf 0 | ||
197 | jh 1f # \0 found in string ? | ||
198 | aghi %r2,1 # strnlen_user result includes the \0 | ||
199 | 1: slgr %r2,%r3 | ||
200 | br %r14 | ||
201 | 2: sacf 0 | ||
202 | lghi %r2,-EFAULT | ||
203 | br %r14 | ||
204 | .section __ex_table,"a" | ||
205 | .quad 0b,2b | ||
206 | .previous | ||
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile new file mode 100644 index 000000000000..c10df144f2ab --- /dev/null +++ b/arch/s390/math-emu/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for the FPU instruction emulation. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MATHEMU) := math.o qrnnd.o | ||
6 | |||
7 | EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w | ||
8 | EXTRA_AFLAGS := -traditional | ||
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c new file mode 100644 index 000000000000..648df7140335 --- /dev/null +++ b/arch/s390/math-emu/math.c | |||
@@ -0,0 +1,2258 @@ | |||
1 | /* | ||
2 | * arch/s390/math-emu/math.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * | ||
8 | * 'math.c' emulates IEEE instructions on an S390 processor | ||
9 | * that does not have the IEEE fpu (all processors before G5). | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <asm/lowcore.h> | ||
18 | |||
19 | #include "sfp-util.h" | ||
20 | #include <math-emu/soft-fp.h> | ||
21 | #include <math-emu/single.h> | ||
22 | #include <math-emu/double.h> | ||
23 | #include <math-emu/quad.h> | ||
24 | |||
25 | /* | ||
26 | * soft-fp lacks a macro that rounds a floating point number to the | ||
27 | * nearest integer in the same floating point format, so define one here. | ||
28 | */ | ||
29 | #define _FP_TO_FPINT_ROUND(fs, wc, X) \ | ||
30 | do { \ | ||
31 | switch (X##_c) \ | ||
32 | { \ | ||
33 | case FP_CLS_NORMAL: \ | ||
34 | if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \ | ||
35 | { /* floating point number has no bits after the dot. */ \ | ||
36 | } \ | ||
37 | else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \ | ||
38 | X##_e > _FP_EXPBIAS_##fs) \ | ||
39 | { /* some bits before the dot, some after it. */ \ | ||
40 | _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \ | ||
41 | X##_e - _FP_EXPBIAS_##fs \ | ||
42 | + _FP_FRACBITS_##fs); \ | ||
43 | _FP_ROUND(wc, X); \ | ||
44 | _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \ | ||
45 | + _FP_FRACBITS_##fs); \ | ||
46 | } \ | ||
47 | else \ | ||
48 | { /* all bits after the dot. */ \ | ||
49 | FP_SET_EXCEPTION(FP_EX_INEXACT); \ | ||
50 | X##_c = FP_CLS_ZERO; \ | ||
51 | } \ | ||
52 | break; \ | ||
53 | case FP_CLS_NAN: \ | ||
54 | case FP_CLS_INF: \ | ||
55 | case FP_CLS_ZERO: \ | ||
56 | break; \ | ||
57 | } \ | ||
58 | } while (0) | ||
59 | |||
60 | #define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X) | ||
61 | #define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X) | ||
62 | #define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X) | ||
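
The effect of _FP_TO_FPINT_ROUND is the usual round-to-integral-value operation: the result stays in the same floating point format and only the fraction bits below the binary point are rounded away, under the active rounding mode. A host-side illustration of the intended semantics (C99 library code, not part of the emulator):

#include <math.h>

/* rint() rounds to an integral value in the same format under the
 * current rounding mode, e.g. 2.5 -> 2.0 with round-to-nearest-even;
 * the FIEBR/FIDBR/FIXBR emulation below computes the same kind of result. */
double fp_to_fpint_example(double x)
{
        return rint(x);
}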
63 | |||
64 | typedef union { | ||
65 | long double ld; | ||
66 | struct { | ||
67 | __u64 high; | ||
68 | __u64 low; | ||
69 | } w; | ||
70 | } mathemu_ldcv; | ||
71 | |||
72 | #ifdef CONFIG_SYSCTL | ||
73 | int sysctl_ieee_emulation_warnings=1; | ||
74 | #endif | ||
75 | |||
76 | #define mathemu_put_user(x, p) \ | ||
77 | do { \ | ||
78 | if (put_user((x),(p))) \ | ||
79 | return SIGSEGV; \ | ||
80 | } while (0) | ||
81 | |||
82 | #define mathemu_get_user(x, p) \ | ||
83 | do { \ | ||
84 | if (get_user((x),(p))) \ | ||
85 | return SIGSEGV; \ | ||
86 | } while (0) | ||
87 | |||
88 | #define mathemu_copy_from_user(d, s, n)\ | ||
89 | do { \ | ||
90 | if (copy_from_user((d),(s),(n)) != 0) \ | ||
91 | return SIGSEGV; \ | ||
92 | } while (0) | ||
93 | |||
94 | #define mathemu_copy_to_user(d, s, n) \ | ||
95 | do { \ | ||
96 | if (copy_to_user((d),(s),(n)) != 0) \ | ||
97 | return SIGSEGV; \ | ||
98 | } while (0) | ||
99 | |||
100 | static void display_emulation_not_implemented(struct pt_regs *regs, char *instr) | ||
101 | { | ||
102 | __u16 *location; | ||
103 | |||
104 | #ifdef CONFIG_SYSCTL | ||
105 | if(sysctl_ieee_emulation_warnings) | ||
106 | #endif | ||
107 | { | ||
108 | location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); | ||
109 | printk("%s ieee fpu instruction not emulated " | ||
110 | "process name: %s pid: %d \n", | ||
111 | instr, current->comm, current->pid); | ||
112 | printk("%s's PSW: %08lx %08lx\n", instr, | ||
113 | (unsigned long) regs->psw.mask, | ||
114 | (unsigned long) location); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | static inline void emu_set_CC (struct pt_regs *regs, int cc) | ||
119 | { | ||
120 | regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Set the condition code in the user psw. | ||
125 | * 0 : Result is zero | ||
126 | * 1 : Result is less than zero | ||
127 | * 2 : Result is greater than zero | ||
128 | * 3 : Result is a NaN | ||
129 | */ | ||
130 | static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign) | ||
131 | { | ||
132 | switch (class) { | ||
133 | case FP_CLS_NORMAL: | ||
134 | case FP_CLS_INF: | ||
135 | emu_set_CC(regs, sign ? 1 : 2); | ||
136 | break; | ||
137 | case FP_CLS_ZERO: | ||
138 | emu_set_CC(regs, 0); | ||
139 | break; | ||
140 | case FP_CLS_NAN: | ||
141 | emu_set_CC(regs, 3); | ||
142 | break; | ||
143 | } | ||
144 | } | ||
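
In the 31-bit PSW layout used here the condition code occupies bits 12-13 of psw.mask (counting from the least significant bit), which is why emu_set_CC masks with 0xFFFFCFFF and shifts by 12. Reading the code back is the mirror image; a small illustrative helper (hypothetical, not used by the emulator):

/* Hypothetical inverse of emu_set_CC(), for illustration only. */
static inline int emu_get_CC(struct pt_regs *regs)
{
        return (regs->psw.mask >> 12) & 3;
}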
145 | |||
146 | /* Add long double */ | ||
147 | static int emu_axbr (struct pt_regs *regs, int rx, int ry) { | ||
148 | FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
149 | FP_DECL_EX; | ||
150 | mathemu_ldcv cvt; | ||
151 | int mode; | ||
152 | |||
153 | mode = current->thread.fp_regs.fpc & 3; | ||
154 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
155 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
156 | FP_UNPACK_QP(QA, &cvt.ld); | ||
157 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
158 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
159 | FP_UNPACK_QP(QB, &cvt.ld); | ||
160 | FP_ADD_Q(QR, QA, QB); | ||
161 | FP_PACK_QP(&cvt.ld, QR); | ||
162 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
163 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
164 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
165 | return _fex; | ||
166 | } | ||
167 | |||
168 | /* Add double */ | ||
169 | static int emu_adbr (struct pt_regs *regs, int rx, int ry) { | ||
170 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
171 | FP_DECL_EX; | ||
172 | int mode; | ||
173 | |||
174 | mode = current->thread.fp_regs.fpc & 3; | ||
175 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
176 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
177 | FP_ADD_D(DR, DA, DB); | ||
178 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
179 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
180 | return _fex; | ||
181 | } | ||
182 | |||
183 | /* Add double */ | ||
184 | static int emu_adb (struct pt_regs *regs, int rx, double *val) { | ||
185 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
186 | FP_DECL_EX; | ||
187 | int mode; | ||
188 | |||
189 | mode = current->thread.fp_regs.fpc & 3; | ||
190 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
191 | FP_UNPACK_DP(DB, val); | ||
192 | FP_ADD_D(DR, DA, DB); | ||
193 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
194 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
195 | return _fex; | ||
196 | } | ||
197 | |||
198 | /* Add float */ | ||
199 | static int emu_aebr (struct pt_regs *regs, int rx, int ry) { | ||
200 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
201 | FP_DECL_EX; | ||
202 | int mode; | ||
203 | |||
204 | mode = current->thread.fp_regs.fpc & 3; | ||
205 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
206 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
207 | FP_ADD_S(SR, SA, SB); | ||
208 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
209 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
210 | return _fex; | ||
211 | } | ||
212 | |||
213 | /* Add float */ | ||
214 | static int emu_aeb (struct pt_regs *regs, int rx, float *val) { | ||
215 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
216 | FP_DECL_EX; | ||
217 | int mode; | ||
218 | |||
219 | mode = current->thread.fp_regs.fpc & 3; | ||
220 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
221 | FP_UNPACK_SP(SB, val); | ||
222 | FP_ADD_S(SR, SA, SB); | ||
223 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
224 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
225 | return _fex; | ||
226 | } | ||
227 | |||
228 | /* Compare long double */ | ||
229 | static int emu_cxbr (struct pt_regs *regs, int rx, int ry) { | ||
230 | FP_DECL_Q(QA); FP_DECL_Q(QB); | ||
231 | mathemu_ldcv cvt; | ||
232 | int IR; | ||
233 | |||
234 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
235 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
236 | FP_UNPACK_RAW_QP(QA, &cvt.ld); | ||
237 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
238 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
239 | FP_UNPACK_RAW_QP(QB, &cvt.ld); | ||
240 | FP_CMP_Q(IR, QA, QB, 3); | ||
241 | /* | ||
242 | * IR == -1 if QA < QB, IR == 0 if QA == QB, | ||
243 | * IR == 1 if QA > QB and IR == 3 if unordered | ||
244 | */ | ||
245 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | /* Compare double */ | ||
250 | static int emu_cdbr (struct pt_regs *regs, int rx, int ry) { | ||
251 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
252 | int IR; | ||
253 | |||
254 | FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
255 | FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
256 | FP_CMP_D(IR, DA, DB, 3); | ||
257 | /* | ||
258 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
259 | * IR == 1 if DA > DB and IR == 3 if unordered | ||
260 | */ | ||
261 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* Compare double */ | ||
266 | static int emu_cdb (struct pt_regs *regs, int rx, double *val) { | ||
267 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
268 | int IR; | ||
269 | |||
270 | FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
271 | FP_UNPACK_RAW_DP(DB, val); | ||
272 | FP_CMP_D(IR, DA, DB, 3); | ||
273 | /* | ||
274 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
275 | * IR == 1 if DA > DB and IR == 3 if unordered | ||
276 | */ | ||
277 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /* Compare float */ | ||
282 | static int emu_cebr (struct pt_regs *regs, int rx, int ry) { | ||
283 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
284 | int IR; | ||
285 | |||
286 | FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
287 | FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
288 | FP_CMP_S(IR, SA, SB, 3); | ||
289 | /* | ||
290 | * IR == -1 if SA < SB, IR == 0 if SA == SB, | ||
291 | * IR == 1 if SA > SB and IR == 3 if unordered | ||
292 | */ | ||
293 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | /* Compare float */ | ||
298 | static int emu_ceb (struct pt_regs *regs, int rx, float *val) { | ||
299 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
300 | int IR; | ||
301 | |||
302 | FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
303 | FP_UNPACK_RAW_SP(SB, val); | ||
304 | FP_CMP_S(IR, SA, SB, 3); | ||
305 | /* | ||
306 | * IR == -1 if SA < SB, IR == 0 if SA == SB, | ||
307 | * IR == 1 if SA > SB and IR == 3 if unordered | ||
308 | */ | ||
309 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | /* Compare and signal long double */ | ||
314 | static int emu_kxbr (struct pt_regs *regs, int rx, int ry) { | ||
315 | FP_DECL_Q(QA); FP_DECL_Q(QB); | ||
316 | FP_DECL_EX; | ||
317 | mathemu_ldcv cvt; | ||
318 | int IR; | ||
319 | |||
320 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
321 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
322 | FP_UNPACK_RAW_QP(QA, &cvt.ld); | ||
323 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
324 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
325 | FP_UNPACK_QP(QB, &cvt.ld); | ||
326 | FP_CMP_Q(IR, QA, QB, 3); | ||
327 | /* | ||
328 | * IR == -1 if QA < QB, IR == 0 if QA == QB, | ||
329 | * IR == 1 if QA > QB and IR == 3 if unordered | ||
330 | */ | ||
331 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
332 | if (IR == 3) | ||
333 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
334 | return _fex; | ||
335 | } | ||
336 | |||
337 | /* Compare and signal double */ | ||
338 | static int emu_kdbr (struct pt_regs *regs, int rx, int ry) { | ||
339 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
340 | FP_DECL_EX; | ||
341 | int IR; | ||
342 | |||
343 | FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
344 | FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
345 | FP_CMP_D(IR, DA, DB, 3); | ||
346 | /* | ||
347 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
348 | * IR == 1 if DA > DB and IR == 3 if unordered | ||
349 | */ | ||
350 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
351 | if (IR == 3) | ||
352 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
353 | return _fex; | ||
354 | } | ||
355 | |||
356 | /* Compare and signal double */ | ||
357 | static int emu_kdb (struct pt_regs *regs, int rx, double *val) { | ||
358 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
359 | FP_DECL_EX; | ||
360 | int IR; | ||
361 | |||
362 | FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
363 | FP_UNPACK_RAW_DP(DB, val); | ||
364 | FP_CMP_D(IR, DA, DB, 3); | ||
365 | /* | ||
366 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
367 | * IR == 1 if DA > DB and IR == 3 if unordered | ||
368 | */ | ||
369 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
370 | if (IR == 3) | ||
371 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
372 | return _fex; | ||
373 | } | ||
374 | |||
375 | /* Compare and signal float */ | ||
376 | static int emu_kebr (struct pt_regs *regs, int rx, int ry) { | ||
377 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
378 | FP_DECL_EX; | ||
379 | int IR; | ||
380 | |||
381 | FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
382 | FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
383 | FP_CMP_S(IR, SA, SB, 3); | ||
384 | /* | ||
385 | * IR == -1 if SA < SB, IR == 0 if SA == SB, | ||
386 | * IR == 1 if SA > SB and IR == 3 if unordered | ||
387 | */ | ||
388 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
389 | if (IR == 3) | ||
390 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
391 | return _fex; | ||
392 | } | ||
393 | |||
394 | /* Compare and signal float */ | ||
395 | static int emu_keb (struct pt_regs *regs, int rx, float *val) { | ||
396 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
397 | FP_DECL_EX; | ||
398 | int IR; | ||
399 | |||
400 | FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
401 | FP_UNPACK_RAW_SP(SB, val); | ||
402 | FP_CMP_S(IR, SA, SB, 3); | ||
403 | /* | ||
404 | * IR == -1 if SA < SB, IR == 0 if SA == SB, | ||
405 | * IR == 1 if SA > SB and IR == 3 if unordered | ||
406 | */ | ||
407 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
408 | if (IR == 3) | ||
409 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
410 | return _fex; | ||
411 | } | ||
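
Every compare routine above repeats the same ternary to translate the soft-fp comparison result into a condition code. Written out as a hypothetical helper, the mapping is:

/* Hypothetical helper equivalent to the ternary used above.
 * FP_CMP_* yields -1 (less), 0 (equal), 1 (greater) or 3 (unordered). */
static inline int fcmp_to_cc(int IR)
{
        if (IR == -1)
                return 1;       /* less than    -> CC 1 */
        if (IR == 1)
                return 2;       /* greater than -> CC 2 */
        return IR;              /* 0 -> CC 0, 3 -> CC 3 (NaN operand) */
}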
412 | |||
413 | /* Convert from fixed long double */ | ||
414 | static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) { | ||
415 | FP_DECL_Q(QR); | ||
416 | FP_DECL_EX; | ||
417 | mathemu_ldcv cvt; | ||
418 | __s32 si; | ||
419 | int mode; | ||
420 | |||
421 | mode = current->thread.fp_regs.fpc & 3; | ||
422 | si = regs->gprs[ry]; | ||
423 | FP_FROM_INT_Q(QR, si, 32, int); | ||
424 | FP_PACK_QP(&cvt.ld, QR); | ||
425 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
426 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
427 | return _fex; | ||
428 | } | ||
429 | |||
430 | /* Convert from fixed double */ | ||
431 | static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) { | ||
432 | FP_DECL_D(DR); | ||
433 | FP_DECL_EX; | ||
434 | __s32 si; | ||
435 | int mode; | ||
436 | |||
437 | mode = current->thread.fp_regs.fpc & 3; | ||
438 | si = regs->gprs[ry]; | ||
439 | FP_FROM_INT_D(DR, si, 32, int); | ||
440 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
441 | return _fex; | ||
442 | } | ||
443 | |||
444 | /* Convert from fixed float */ | ||
445 | static int emu_cefbr (struct pt_regs *regs, int rx, int ry) { | ||
446 | FP_DECL_S(SR); | ||
447 | FP_DECL_EX; | ||
448 | __s32 si; | ||
449 | int mode; | ||
450 | |||
451 | mode = current->thread.fp_regs.fpc & 3; | ||
452 | si = regs->gprs[ry]; | ||
453 | FP_FROM_INT_S(SR, si, 32, int); | ||
454 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
455 | return _fex; | ||
456 | } | ||
457 | |||
458 | /* Convert to fixed long double */ | ||
459 | static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
460 | FP_DECL_Q(QA); | ||
461 | FP_DECL_EX; | ||
462 | mathemu_ldcv cvt; | ||
463 | __s32 si; | ||
464 | int mode; | ||
465 | |||
466 | if (mask == 0) | ||
467 | mode = current->thread.fp_regs.fpc & 3; | ||
468 | else if (mask == 1) | ||
469 | mode = FP_RND_NEAREST; | ||
470 | else | ||
471 | mode = mask - 4; | ||
472 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
473 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
474 | FP_UNPACK_QP(QA, &cvt.ld); | ||
475 | FP_TO_INT_ROUND_Q(si, QA, 32, 1); | ||
476 | regs->gprs[rx] = si; | ||
477 | emu_set_CC_cs(regs, QA_c, QA_s); | ||
478 | return _fex; | ||
479 | } | ||
480 | |||
481 | /* Convert to fixed double */ | ||
482 | static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
483 | FP_DECL_D(DA); | ||
484 | FP_DECL_EX; | ||
485 | __s32 si; | ||
486 | int mode; | ||
487 | |||
488 | if (mask == 0) | ||
489 | mode = current->thread.fp_regs.fpc & 3; | ||
490 | else if (mask == 1) | ||
491 | mode = FP_RND_NEAREST; | ||
492 | else | ||
493 | mode = mask - 4; | ||
494 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
495 | FP_TO_INT_ROUND_D(si, DA, 32, 1); | ||
496 | regs->gprs[rx] = si; | ||
497 | emu_set_CC_cs(regs, DA_c, DA_s); | ||
498 | return _fex; | ||
499 | } | ||
500 | |||
501 | /* Convert to fixed float */ | ||
502 | static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
503 | FP_DECL_S(SA); | ||
504 | FP_DECL_EX; | ||
505 | __s32 si; | ||
506 | int mode; | ||
507 | |||
508 | if (mask == 0) | ||
509 | mode = current->thread.fp_regs.fpc & 3; | ||
510 | else if (mask == 1) | ||
511 | mode = FP_RND_NEAREST; | ||
512 | else | ||
513 | mode = mask - 4; | ||
514 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
515 | FP_TO_INT_ROUND_S(si, SA, 32, 1); | ||
516 | regs->gprs[rx] = si; | ||
517 | emu_set_CC_cs(regs, SA_c, SA_s); | ||
518 | return _fex; | ||
519 | } | ||
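
The three convert-to-fixed routines above select the soft-fp rounding mode the same way: mask 0 defers to the rounding field of the FPC, mask 1 is treated as round-to-nearest, and masks 4-7 map directly onto the soft-fp encodings. A sketch of that shared selection, assuming the usual soft-fp values (FP_RND_NEAREST=0, FP_RND_ZERO=1, FP_RND_PINF=2, FP_RND_MINF=3):

/* Sketch of the mode selection shared by emu_cfxbr/emu_cfdbr/emu_cfebr. */
static inline int bfp_mask_to_rnd(int mask, unsigned int fpc)
{
        if (mask == 0)
                return fpc & 3;         /* rounding-mode field of the FPC */
        if (mask == 1)
                return FP_RND_NEAREST;
        return mask - 4;                /* masks 4..7 -> 0..3 */
}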
520 | |||
521 | /* Divide long double */ | ||
522 | static int emu_dxbr (struct pt_regs *regs, int rx, int ry) { | ||
523 | FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
524 | FP_DECL_EX; | ||
525 | mathemu_ldcv cvt; | ||
526 | int mode; | ||
527 | |||
528 | mode = current->thread.fp_regs.fpc & 3; | ||
529 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
530 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
531 | FP_UNPACK_QP(QA, &cvt.ld); | ||
532 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
533 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
534 | FP_UNPACK_QP(QB, &cvt.ld); | ||
535 | FP_DIV_Q(QR, QA, QB); | ||
536 | FP_PACK_QP(&cvt.ld, QR); | ||
537 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
538 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
539 | return _fex; | ||
540 | } | ||
541 | |||
542 | /* Divide double */ | ||
543 | static int emu_ddbr (struct pt_regs *regs, int rx, int ry) { | ||
544 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
545 | FP_DECL_EX; | ||
546 | int mode; | ||
547 | |||
548 | mode = current->thread.fp_regs.fpc & 3; | ||
549 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
550 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
551 | FP_DIV_D(DR, DA, DB); | ||
552 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
553 | return _fex; | ||
554 | } | ||
555 | |||
556 | /* Divide double */ | ||
557 | static int emu_ddb (struct pt_regs *regs, int rx, double *val) { | ||
558 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
559 | FP_DECL_EX; | ||
560 | int mode; | ||
561 | |||
562 | mode = current->thread.fp_regs.fpc & 3; | ||
563 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
564 | FP_UNPACK_DP(DB, val); | ||
565 | FP_DIV_D(DR, DA, DB); | ||
566 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
567 | return _fex; | ||
568 | } | ||
569 | |||
570 | /* Divide float */ | ||
571 | static int emu_debr (struct pt_regs *regs, int rx, int ry) { | ||
572 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
573 | FP_DECL_EX; | ||
574 | int mode; | ||
575 | |||
576 | mode = current->thread.fp_regs.fpc & 3; | ||
577 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
578 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
579 | FP_DIV_S(SR, SA, SB); | ||
580 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
581 | return _fex; | ||
582 | } | ||
583 | |||
584 | /* Divide float */ | ||
585 | static int emu_deb (struct pt_regs *regs, int rx, float *val) { | ||
586 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
587 | FP_DECL_EX; | ||
588 | int mode; | ||
589 | |||
590 | mode = current->thread.fp_regs.fpc & 3; | ||
591 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
592 | FP_UNPACK_SP(SB, val); | ||
593 | FP_DIV_S(SR, SA, SB); | ||
594 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
595 | return _fex; | ||
596 | } | ||
597 | |||
598 | /* Divide to integer double */ | ||
599 | static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
600 | display_emulation_not_implemented(regs, "didbr"); | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | /* Divide to integer float */ | ||
605 | static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
606 | display_emulation_not_implemented(regs, "diebr"); | ||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | /* Extract fpc */ | ||
611 | static int emu_efpc (struct pt_regs *regs, int rx, int ry) { | ||
612 | regs->gprs[rx] = current->thread.fp_regs.fpc; | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | /* Load and test long double */ | ||
617 | static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) { | ||
618 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
619 | mathemu_ldcv cvt; | ||
620 | FP_DECL_Q(QA); | ||
621 | FP_DECL_EX; | ||
622 | |||
623 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
624 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
625 | FP_UNPACK_QP(QA, &cvt.ld); | ||
626 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
627 | fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui; | ||
628 | emu_set_CC_cs(regs, QA_c, QA_s); | ||
629 | return _fex; | ||
630 | } | ||
631 | |||
632 | /* Load and test double */ | ||
633 | static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) { | ||
634 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
635 | FP_DECL_D(DA); | ||
636 | FP_DECL_EX; | ||
637 | |||
638 | FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); | ||
639 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
640 | emu_set_CC_cs(regs, DA_c, DA_s); | ||
641 | return _fex; | ||
642 | } | ||
643 | |||
644 | /* Load and test float */ | ||
645 | static int emu_ltebr (struct pt_regs *regs, int rx, int ry) { | ||
646 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
647 | FP_DECL_S(SA); | ||
648 | FP_DECL_EX; | ||
649 | |||
650 | FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); | ||
651 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
652 | emu_set_CC_cs(regs, SA_c, SA_s); | ||
653 | return _fex; | ||
654 | } | ||
655 | |||
656 | /* Load complement long double */ | ||
657 | static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) { | ||
658 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
659 | FP_DECL_EX; | ||
660 | mathemu_ldcv cvt; | ||
661 | int mode; | ||
662 | |||
663 | mode = current->thread.fp_regs.fpc & 3; | ||
664 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
665 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
666 | FP_UNPACK_QP(QA, &cvt.ld); | ||
667 | FP_NEG_Q(QR, QA); | ||
668 | FP_PACK_QP(&cvt.ld, QR); | ||
669 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
670 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
671 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
672 | return _fex; | ||
673 | } | ||
674 | |||
675 | /* Load complement double */ | ||
676 | static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) { | ||
677 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
678 | FP_DECL_EX; | ||
679 | int mode; | ||
680 | |||
681 | mode = current->thread.fp_regs.fpc & 3; | ||
682 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
683 | FP_NEG_D(DR, DA); | ||
684 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
685 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
686 | return _fex; | ||
687 | } | ||
688 | |||
689 | /* Load complement float */ | ||
690 | static int emu_lcebr (struct pt_regs *regs, int rx, int ry) { | ||
691 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
692 | FP_DECL_EX; | ||
693 | int mode; | ||
694 | |||
695 | mode = current->thread.fp_regs.fpc & 3; | ||
696 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
697 | FP_NEG_S(SR, SA); | ||
698 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
699 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
700 | return _fex; | ||
701 | } | ||
702 | |||
703 | /* Load floating point integer long double */ | ||
704 | static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
705 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
706 | FP_DECL_Q(QA); | ||
707 | FP_DECL_EX; | ||
708 | mathemu_ldcv cvt; | ||
709 | __s32 si; | ||
710 | int mode; | ||
711 | |||
712 | if (mask == 0) | ||
713 | mode = fp_regs->fpc & 3; | ||
714 | else if (mask == 1) | ||
715 | mode = FP_RND_NEAREST; | ||
716 | else | ||
717 | mode = mask - 4; | ||
718 | cvt.w.high = fp_regs->fprs[ry].ui; | ||
719 | cvt.w.low = fp_regs->fprs[ry+2].ui; | ||
720 | FP_UNPACK_QP(QA, &cvt.ld); | ||
721 | FP_TO_FPINT_ROUND_Q(QA); | ||
722 | FP_PACK_QP(&cvt.ld, QA); | ||
723 | fp_regs->fprs[rx].ui = cvt.w.high; | ||
724 | fp_regs->fprs[rx+2].ui = cvt.w.low; | ||
725 | return _fex; | ||
726 | } | ||
727 | |||
728 | /* Load floating point integer double */ | ||
729 | static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
730 | /* FIXME: rounding mode !! */ | ||
731 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
732 | FP_DECL_D(DA); | ||
733 | FP_DECL_EX; | ||
734 | __s32 si; | ||
735 | int mode; | ||
736 | |||
737 | if (mask == 0) | ||
738 | mode = fp_regs->fpc & 3; | ||
739 | else if (mask == 1) | ||
740 | mode = FP_RND_NEAREST; | ||
741 | else | ||
742 | mode = mask - 4; | ||
743 | FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); | ||
744 | FP_TO_FPINT_ROUND_D(DA); | ||
745 | FP_PACK_DP(&fp_regs->fprs[rx].d, DA); | ||
746 | return _fex; | ||
747 | } | ||
748 | |||
749 | /* Load floating point integer float */ | ||
750 | static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
751 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
752 | FP_DECL_S(SA); | ||
753 | FP_DECL_EX; | ||
754 | __s32 si; | ||
755 | int mode; | ||
756 | |||
757 | if (mask == 0) | ||
758 | mode = fp_regs->fpc & 3; | ||
759 | else if (mask == 1) | ||
760 | mode = FP_RND_NEAREST; | ||
761 | else | ||
762 | mode = mask - 4; | ||
763 | FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); | ||
764 | FP_TO_FPINT_ROUND_S(SA); | ||
765 | FP_PACK_SP(&fp_regs->fprs[rx].f, SA); | ||
766 | return _fex; | ||
767 | } | ||
768 | |||
769 | /* Load lengthened double to long double */ | ||
770 | static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) { | ||
771 | FP_DECL_D(DA); FP_DECL_Q(QR); | ||
772 | FP_DECL_EX; | ||
773 | mathemu_ldcv cvt; | ||
774 | int mode; | ||
775 | |||
776 | mode = current->thread.fp_regs.fpc & 3; | ||
777 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
778 | FP_CONV (Q, D, 4, 2, QR, DA); | ||
779 | FP_PACK_QP(&cvt.ld, QR); | ||
780 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
781 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
782 | return _fex; | ||
783 | } | ||
784 | |||
785 | /* Load lengthened double to long double */ | ||
786 | static int emu_lxdb (struct pt_regs *regs, int rx, double *val) { | ||
787 | FP_DECL_D(DA); FP_DECL_Q(QR); | ||
788 | FP_DECL_EX; | ||
789 | mathemu_ldcv cvt; | ||
790 | int mode; | ||
791 | |||
792 | mode = current->thread.fp_regs.fpc & 3; | ||
793 | FP_UNPACK_DP(DA, val); | ||
794 | FP_CONV (Q, D, 4, 2, QR, DA); | ||
795 | FP_PACK_QP(&cvt.ld, QR); | ||
796 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
797 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
798 | return _fex; | ||
799 | } | ||
800 | |||
801 | /* Load lengthened float to long double */ | ||
802 | static int emu_lxebr (struct pt_regs *regs, int rx, int ry) { | ||
803 | FP_DECL_S(SA); FP_DECL_Q(QR); | ||
804 | FP_DECL_EX; | ||
805 | mathemu_ldcv cvt; | ||
806 | int mode; | ||
807 | |||
808 | mode = current->thread.fp_regs.fpc & 3; | ||
809 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
810 | FP_CONV (Q, S, 4, 1, QR, SA); | ||
811 | FP_PACK_QP(&cvt.ld, QR); | ||
812 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
813 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
814 | return _fex; | ||
815 | } | ||
816 | |||
817 | /* Load lengthened float to long double */ | ||
818 | static int emu_lxeb (struct pt_regs *regs, int rx, float *val) { | ||
819 | FP_DECL_S(SA); FP_DECL_Q(QR); | ||
820 | FP_DECL_EX; | ||
821 | mathemu_ldcv cvt; | ||
822 | int mode; | ||
823 | |||
824 | mode = current->thread.fp_regs.fpc & 3; | ||
825 | FP_UNPACK_SP(SA, val); | ||
826 | FP_CONV (Q, S, 4, 1, QR, SA); | ||
827 | FP_PACK_QP(&cvt.ld, QR); | ||
828 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
829 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
830 | return _fex; | ||
831 | } | ||
832 | |||
833 | /* Load lengthened float to double */ | ||
834 | static int emu_ldebr (struct pt_regs *regs, int rx, int ry) { | ||
835 | FP_DECL_S(SA); FP_DECL_D(DR); | ||
836 | FP_DECL_EX; | ||
837 | int mode; | ||
838 | |||
839 | mode = current->thread.fp_regs.fpc & 3; | ||
840 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
841 | FP_CONV (D, S, 2, 1, DR, SA); | ||
842 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
843 | return _fex; | ||
844 | } | ||
845 | |||
846 | /* Load lengthened float to double */ | ||
847 | static int emu_ldeb (struct pt_regs *regs, int rx, float *val) { | ||
848 | FP_DECL_S(SA); FP_DECL_D(DR); | ||
849 | FP_DECL_EX; | ||
850 | int mode; | ||
851 | |||
852 | mode = current->thread.fp_regs.fpc & 3; | ||
853 | FP_UNPACK_SP(SA, val); | ||
854 | FP_CONV (D, S, 2, 1, DR, SA); | ||
855 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
856 | return _fex; | ||
857 | } | ||
858 | |||
859 | /* Load negative long double */ | ||
860 | static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) { | ||
861 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
862 | FP_DECL_EX; | ||
863 | mathemu_ldcv cvt; | ||
864 | int mode; | ||
865 | |||
866 | mode = current->thread.fp_regs.fpc & 3; | ||
867 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
868 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
869 | FP_UNPACK_QP(QA, &cvt.ld); | ||
870 | if (QA_s == 0) { | ||
871 | FP_NEG_Q(QR, QA); | ||
872 | FP_PACK_QP(&cvt.ld, QR); | ||
873 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
874 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
875 | } else { | ||
876 | current->thread.fp_regs.fprs[rx].ui = | ||
877 | current->thread.fp_regs.fprs[ry].ui; | ||
878 | current->thread.fp_regs.fprs[rx+2].ui = | ||
879 | current->thread.fp_regs.fprs[ry+2].ui; | ||
880 | } | ||
881 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
882 | return _fex; | ||
883 | } | ||
884 | |||
885 | /* Load negative double */ | ||
886 | static int emu_lndbr (struct pt_regs *regs, int rx, int ry) { | ||
887 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
888 | FP_DECL_EX; | ||
889 | int mode; | ||
890 | |||
891 | mode = current->thread.fp_regs.fpc & 3; | ||
892 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
893 | if (DA_s == 0) { | ||
894 | FP_NEG_D(DR, DA); | ||
895 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
896 | } else | ||
897 | current->thread.fp_regs.fprs[rx].ui = | ||
898 | current->thread.fp_regs.fprs[ry].ui; | ||
899 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
900 | return _fex; | ||
901 | } | ||
902 | |||
903 | /* Load negative float */ | ||
904 | static int emu_lnebr (struct pt_regs *regs, int rx, int ry) { | ||
905 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
906 | FP_DECL_EX; | ||
907 | int mode; | ||
908 | |||
909 | mode = current->thread.fp_regs.fpc & 3; | ||
910 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
911 | if (SA_s == 0) { | ||
912 | FP_NEG_S(SR, SA); | ||
913 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
914 | } else | ||
915 | current->thread.fp_regs.fprs[rx].ui = | ||
916 | current->thread.fp_regs.fprs[ry].ui; | ||
917 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
918 | return _fex; | ||
919 | } | ||
920 | |||
921 | /* Load positive long double */ | ||
922 | static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) { | ||
923 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
924 | FP_DECL_EX; | ||
925 | mathemu_ldcv cvt; | ||
926 | int mode; | ||
927 | |||
928 | mode = current->thread.fp_regs.fpc & 3; | ||
929 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
930 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
931 | FP_UNPACK_QP(QA, &cvt.ld); | ||
932 | if (QA_s != 0) { | ||
933 | FP_NEG_Q(QR, QA); | ||
934 | FP_PACK_QP(&cvt.ld, QR); | ||
935 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
936 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
937 | } else{ | ||
938 | current->thread.fp_regs.fprs[rx].ui = | ||
939 | current->thread.fp_regs.fprs[ry].ui; | ||
940 | current->thread.fp_regs.fprs[rx+2].ui = | ||
941 | current->thread.fp_regs.fprs[ry+2].ui; | ||
942 | } | ||
943 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
944 | return _fex; | ||
945 | } | ||
946 | |||
947 | /* Load positive double */ | ||
948 | static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) { | ||
949 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
950 | FP_DECL_EX; | ||
951 | int mode; | ||
952 | |||
953 | mode = current->thread.fp_regs.fpc & 3; | ||
954 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
955 | if (DA_s != 0) { | ||
956 | FP_NEG_D(DR, DA); | ||
957 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
958 | } else | ||
959 | current->thread.fp_regs.fprs[rx].ui = | ||
960 | current->thread.fp_regs.fprs[ry].ui; | ||
961 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
962 | return _fex; | ||
963 | } | ||
964 | |||
965 | /* Load positive float */ | ||
966 | static int emu_lpebr (struct pt_regs *regs, int rx, int ry) { | ||
967 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
968 | FP_DECL_EX; | ||
969 | int mode; | ||
970 | |||
971 | mode = current->thread.fp_regs.fpc & 3; | ||
972 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
973 | if (SA_s != 0) { | ||
974 | FP_NEG_S(SR, SA); | ||
975 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
976 | } else | ||
977 | current->thread.fp_regs.fprs[rx].ui = | ||
978 | current->thread.fp_regs.fprs[ry].ui; | ||
979 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
980 | return _fex; | ||
981 | } | ||
982 | |||
983 | /* Load rounded long double to double */ | ||
984 | static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) { | ||
985 | FP_DECL_Q(QA); FP_DECL_D(DR); | ||
986 | FP_DECL_EX; | ||
987 | mathemu_ldcv cvt; | ||
988 | int mode; | ||
989 | |||
990 | mode = current->thread.fp_regs.fpc & 3; | ||
991 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
992 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
993 | FP_UNPACK_QP(QA, &cvt.ld); | ||
994 | FP_CONV (D, Q, 2, 4, DR, QA); | ||
995 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
996 | return _fex; | ||
997 | } | ||
998 | |||
999 | /* Load rounded long double to float */ | ||
1000 | static int emu_lexbr (struct pt_regs *regs, int rx, int ry) { | ||
1001 | FP_DECL_Q(QA); FP_DECL_S(SR); | ||
1002 | FP_DECL_EX; | ||
1003 | mathemu_ldcv cvt; | ||
1004 | int mode; | ||
1005 | |||
1006 | mode = current->thread.fp_regs.fpc & 3; | ||
1007 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
1008 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
1009 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1010 | FP_CONV (S, Q, 1, 4, SR, QA); | ||
1011 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1012 | return _fex; | ||
1013 | } | ||
1014 | |||
1015 | /* Load rounded double to float */ | ||
1016 | static int emu_ledbr (struct pt_regs *regs, int rx, int ry) { | ||
1017 | FP_DECL_D(DA); FP_DECL_S(SR); | ||
1018 | FP_DECL_EX; | ||
1019 | int mode; | ||
1020 | |||
1021 | mode = current->thread.fp_regs.fpc & 3; | ||
1022 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
1023 | FP_CONV (S, D, 1, 2, SR, DA); | ||
1024 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1025 | return _fex; | ||
1026 | } | ||
1027 | |||
1028 | /* Multiply long double */ | ||
1029 | static int emu_mxbr (struct pt_regs *regs, int rx, int ry) { | ||
1030 | FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
1031 | FP_DECL_EX; | ||
1032 | mathemu_ldcv cvt; | ||
1033 | int mode; | ||
1034 | |||
1035 | mode = current->thread.fp_regs.fpc & 3; | ||
1036 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
1037 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
1038 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1039 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
1040 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
1041 | FP_UNPACK_QP(QB, &cvt.ld); | ||
1042 | FP_MUL_Q(QR, QA, QB); | ||
1043 | FP_PACK_QP(&cvt.ld, QR); | ||
1044 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
1045 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
1046 | return _fex; | ||
1047 | } | ||
1048 | |||
1049 | /* Multiply double */ | ||
1050 | static int emu_mdbr (struct pt_regs *regs, int rx, int ry) { | ||
1051 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1052 | FP_DECL_EX; | ||
1053 | int mode; | ||
1054 | |||
1055 | mode = current->thread.fp_regs.fpc & 3; | ||
1056 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1057 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
1058 | FP_MUL_D(DR, DA, DB); | ||
1059 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1060 | return _fex; | ||
1061 | } | ||
1062 | |||
1063 | /* Multiply double */ | ||
1064 | static int emu_mdb (struct pt_regs *regs, int rx, double *val) { | ||
1065 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1066 | FP_DECL_EX; | ||
1067 | int mode; | ||
1068 | |||
1069 | mode = current->thread.fp_regs.fpc & 3; | ||
1070 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1071 | FP_UNPACK_DP(DB, val); | ||
1072 | FP_MUL_D(DR, DA, DB); | ||
1073 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1074 | return _fex; | ||
1075 | } | ||
1076 | |||
1077 | /* Multiply double to long double */ | ||
1078 | static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) { | ||
1079 | FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
1080 | FP_DECL_EX; | ||
1081 | mathemu_ldcv cvt; | ||
1082 | int mode; | ||
1083 | |||
1084 | mode = current->thread.fp_regs.fpc & 3; | ||
1085 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1086 | FP_CONV (Q, D, 4, 2, QA, DA); | ||
1087 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
1088 | FP_CONV (Q, D, 4, 2, QB, DA); | ||
1089 | FP_MUL_Q(QR, QA, QB); | ||
1090 | FP_PACK_QP(&cvt.ld, QR); | ||
1091 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
1092 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
1093 | return _fex; | ||
1094 | } | ||
1095 | |||
1096 | /* Multiply double to long double */ | ||
1097 | static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) { | ||
1098 | FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
1099 | FP_DECL_EX; | ||
1100 | mathemu_ldcv cvt; | ||
1101 | int mode; | ||
1102 | |||
1103 | mode = current->thread.fp_regs.fpc & 3; | ||
1104 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
1105 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
1106 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1107 | FP_UNPACK_QP(QB, val); | ||
1108 | FP_MUL_Q(QR, QA, QB); | ||
1109 | FP_PACK_QP(&cvt.ld, QR); | ||
1110 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
1111 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
1112 | return _fex; | ||
1113 | } | ||
1114 | |||
1115 | /* Multiply float */ | ||
1116 | static int emu_meebr (struct pt_regs *regs, int rx, int ry) { | ||
1117 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
1118 | FP_DECL_EX; | ||
1119 | int mode; | ||
1120 | |||
1121 | mode = current->thread.fp_regs.fpc & 3; | ||
1122 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1123 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
1124 | FP_MUL_S(SR, SA, SB); | ||
1125 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1126 | return _fex; | ||
1127 | } | ||
1128 | |||
1129 | /* Multiply float */ | ||
1130 | static int emu_meeb (struct pt_regs *regs, int rx, float *val) { | ||
1131 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
1132 | FP_DECL_EX; | ||
1133 | int mode; | ||
1134 | |||
1135 | mode = current->thread.fp_regs.fpc & 3; | ||
1136 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1137 | FP_UNPACK_SP(SB, val); | ||
1138 | FP_MUL_S(SR, SA, SB); | ||
1139 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1140 | return _fex; | ||
1141 | } | ||
1142 | |||
1143 | /* Multiply float to double */ | ||
1144 | static int emu_mdebr (struct pt_regs *regs, int rx, int ry) { | ||
1145 | FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1146 | FP_DECL_EX; | ||
1147 | int mode; | ||
1148 | |||
1149 | mode = current->thread.fp_regs.fpc & 3; | ||
1150 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1151 | FP_CONV (D, S, 2, 1, DA, SA); | ||
1152 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
1153 | FP_CONV (D, S, 2, 1, DB, SA); | ||
1154 | FP_MUL_D(DR, DA, DB); | ||
1155 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1156 | return _fex; | ||
1157 | } | ||
1158 | |||
1159 | /* Multiply float to double */ | ||
1160 | static int emu_mdeb (struct pt_regs *regs, int rx, float *val) { | ||
1161 | FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1162 | FP_DECL_EX; | ||
1163 | int mode; | ||
1164 | |||
1165 | mode = current->thread.fp_regs.fpc & 3; | ||
1166 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1167 | FP_CONV (D, S, 2, 1, DA, SA); | ||
1168 | FP_UNPACK_SP(SA, val); | ||
1169 | FP_CONV (D, S, 2, 1, DB, SA); | ||
1170 | FP_MUL_D(DR, DA, DB); | ||
1171 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1172 | return _fex; | ||
1173 | } | ||
1174 | |||
1175 | /* Multiply and add double */ | ||
1176 | static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) { | ||
1177 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); | ||
1178 | FP_DECL_EX; | ||
1179 | int mode; | ||
1180 | |||
1181 | mode = current->thread.fp_regs.fpc & 3; | ||
1182 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1183 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
1184 | FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); | ||
1185 | FP_MUL_D(DR, DA, DB); | ||
1186 | FP_ADD_D(DR, DR, DC); | ||
1187 | FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); | ||
1188 | return _fex; | ||
1189 | } | ||
1190 | |||
1191 | /* Multiply and add double */ | ||
1192 | static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) { | ||
1193 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); | ||
1194 | FP_DECL_EX; | ||
1195 | int mode; | ||
1196 | |||
1197 | mode = current->thread.fp_regs.fpc & 3; | ||
1198 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1199 | FP_UNPACK_DP(DB, val); | ||
1200 | FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); | ||
1201 | FP_MUL_D(DR, DA, DB); | ||
1202 | FP_ADD_D(DR, DR, DC); | ||
1203 | FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); | ||
1204 | return _fex; | ||
1205 | } | ||
1206 | |||
1207 | /* Multiply and add float */ | ||
1208 | static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) { | ||
1209 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); | ||
1210 | FP_DECL_EX; | ||
1211 | int mode; | ||
1212 | |||
1213 | mode = current->thread.fp_regs.fpc & 3; | ||
1214 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1215 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
1216 | FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); | ||
1217 | FP_MUL_S(SR, SA, SB); | ||
1218 | FP_ADD_S(SR, SR, SC); | ||
1219 | FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); | ||
1220 | return _fex; | ||
1221 | } | ||
1222 | |||
1223 | /* Multiply and add float */ | ||
1224 | static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) { | ||
1225 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); | ||
1226 | FP_DECL_EX; | ||
1227 | int mode; | ||
1228 | |||
1229 | mode = current->thread.fp_regs.fpc & 3; | ||
1230 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1231 | FP_UNPACK_SP(SB, val); | ||
1232 | FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); | ||
1233 | FP_MUL_S(SR, SA, SB); | ||
1234 | FP_ADD_S(SR, SR, SC); | ||
1235 | FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); | ||
1236 | return _fex; | ||
1237 | } | ||
1238 | |||
1239 | /* Multiply and subtract double */ | ||
1240 | static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) { | ||
1241 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); | ||
1242 | FP_DECL_EX; | ||
1243 | int mode; | ||
1244 | |||
1245 | mode = current->thread.fp_regs.fpc & 3; | ||
1246 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1247 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
1248 | FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); | ||
1249 | FP_MUL_D(DR, DA, DB); | ||
1250 | FP_SUB_D(DR, DR, DC); | ||
1251 | FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); | ||
1252 | return _fex; | ||
1253 | } | ||
1254 | |||
1255 | /* Multiply and subtract double */ | ||
1256 | static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) { | ||
1257 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); | ||
1258 | FP_DECL_EX; | ||
1259 | int mode; | ||
1260 | |||
1261 | mode = current->thread.fp_regs.fpc & 3; | ||
1262 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1263 | FP_UNPACK_DP(DB, val); | ||
1264 | FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); | ||
1265 | FP_MUL_D(DR, DA, DB); | ||
1266 | FP_SUB_D(DR, DR, DC); | ||
1267 | FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); | ||
1268 | return _fex; | ||
1269 | } | ||
1270 | |||
1271 | /* Multiply and subtract float */ | ||
1272 | static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) { | ||
1273 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); | ||
1274 | FP_DECL_EX; | ||
1275 | int mode; | ||
1276 | |||
1277 | mode = current->thread.fp_regs.fpc & 3; | ||
1278 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1279 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
1280 | FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); | ||
1281 | FP_MUL_S(SR, SA, SB); | ||
1282 | FP_SUB_S(SR, SR, SC); | ||
1283 | FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); | ||
1284 | return _fex; | ||
1285 | } | ||
1286 | |||
1287 | /* Multiply and subtract float */ | ||
1288 | static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) { | ||
1289 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); | ||
1290 | FP_DECL_EX; | ||
1291 | int mode; | ||
1292 | |||
1293 | mode = current->thread.fp_regs.fpc & 3; | ||
1294 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1295 | FP_UNPACK_SP(SB, val); | ||
1296 | FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); | ||
1297 | FP_MUL_S(SR, SA, SB); | ||
1298 | FP_SUB_S(SR, SR, SC); | ||
1299 | FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); | ||
1300 | return _fex; | ||
1301 | } | ||
1302 | |||
1303 | /* Set floating point control word */ | ||
1304 | static int emu_sfpc (struct pt_regs *regs, int rx, int ry) { | ||
1305 | __u32 temp; | ||
1306 | |||
1307 | temp = regs->gprs[rx]; | ||
1308 | if ((temp & ~FPC_VALID_MASK) != 0) | ||
1309 | return SIGILL; | ||
1310 | current->thread.fp_regs.fpc = temp; | ||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | /* Square root long double */ | ||
1315 | static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) { | ||
1316 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
1317 | FP_DECL_EX; | ||
1318 | mathemu_ldcv cvt; | ||
1319 | int mode; | ||
1320 | |||
1321 | mode = current->thread.fp_regs.fpc & 3; | ||
1322 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
1323 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
1324 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1325 | FP_SQRT_Q(QR, QA); | ||
1326 | FP_PACK_QP(&cvt.ld, QR); | ||
1327 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
1328 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
1329 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
1330 | return _fex; | ||
1331 | } | ||
1332 | |||
1333 | /* Square root double */ | ||
1334 | static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) { | ||
1335 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
1336 | FP_DECL_EX; | ||
1337 | int mode; | ||
1338 | |||
1339 | mode = current->thread.fp_regs.fpc & 3; | ||
1340 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); | ||
1341 | FP_SQRT_D(DR, DA); | ||
1342 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1343 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
1344 | return _fex; | ||
1345 | } | ||
1346 | |||
1347 | /* Square root double */ | ||
1348 | static int emu_sqdb (struct pt_regs *regs, int rx, double *val) { | ||
1349 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
1350 | FP_DECL_EX; | ||
1351 | int mode; | ||
1352 | |||
1353 | mode = current->thread.fp_regs.fpc & 3; | ||
1354 | FP_UNPACK_DP(DA, val); | ||
1355 | FP_SQRT_D(DR, DA); | ||
1356 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1357 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
1358 | return _fex; | ||
1359 | } | ||
1360 | |||
1361 | /* Square root float */ | ||
1362 | static int emu_sqebr (struct pt_regs *regs, int rx, int ry) { | ||
1363 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
1364 | FP_DECL_EX; | ||
1365 | int mode; | ||
1366 | |||
1367 | mode = current->thread.fp_regs.fpc & 3; | ||
1368 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); | ||
1369 | FP_SQRT_S(SR, SA); | ||
1370 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1371 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
1372 | return _fex; | ||
1373 | } | ||
1374 | |||
1375 | /* Square root float */ | ||
1376 | static int emu_sqeb (struct pt_regs *regs, int rx, float *val) { | ||
1377 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
1378 | FP_DECL_EX; | ||
1379 | int mode; | ||
1380 | |||
1381 | mode = current->thread.fp_regs.fpc & 3; | ||
1382 | FP_UNPACK_SP(SA, val); | ||
1383 | FP_SQRT_S(SR, SA); | ||
1384 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1385 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
1386 | return _fex; | ||
1387 | } | ||
1388 | |||
1389 | /* Subtract long double */ | ||
1390 | static int emu_sxbr (struct pt_regs *regs, int rx, int ry) { | ||
1391 | FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); | ||
1392 | FP_DECL_EX; | ||
1393 | mathemu_ldcv cvt; | ||
1394 | int mode; | ||
1395 | |||
1396 | mode = current->thread.fp_regs.fpc & 3; | ||
1397 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
1398 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
1399 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1400 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
1401 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
1402 | FP_UNPACK_QP(QB, &cvt.ld); | ||
1403 | FP_SUB_Q(QR, QA, QB); | ||
1404 | FP_PACK_QP(&cvt.ld, QR); | ||
1405 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
1406 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
1407 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
1408 | return _fex; | ||
1409 | } | ||
1410 | |||
1411 | /* Subtract double */ | ||
1412 | static int emu_sdbr (struct pt_regs *regs, int rx, int ry) { | ||
1413 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1414 | FP_DECL_EX; | ||
1415 | int mode; | ||
1416 | |||
1417 | mode = current->thread.fp_regs.fpc & 3; | ||
1418 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1419 | FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); | ||
1420 | FP_SUB_D(DR, DA, DB); | ||
1421 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1422 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
1423 | return _fex; | ||
1424 | } | ||
1425 | |||
1426 | /* Subtract double */ | ||
1427 | static int emu_sdb (struct pt_regs *regs, int rx, double *val) { | ||
1428 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
1429 | FP_DECL_EX; | ||
1430 | int mode; | ||
1431 | |||
1432 | mode = current->thread.fp_regs.fpc & 3; | ||
1433 | FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1434 | FP_UNPACK_DP(DB, val); | ||
1435 | FP_SUB_D(DR, DA, DB); | ||
1436 | FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); | ||
1437 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
1438 | return _fex; | ||
1439 | } | ||
1440 | |||
1441 | /* Subtract float */ | ||
1442 | static int emu_sebr (struct pt_regs *regs, int rx, int ry) { | ||
1443 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
1444 | FP_DECL_EX; | ||
1445 | int mode; | ||
1446 | |||
1447 | mode = current->thread.fp_regs.fpc & 3; | ||
1448 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1449 | FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); | ||
1450 | FP_SUB_S(SR, SA, SB); | ||
1451 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1452 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
1453 | return _fex; | ||
1454 | } | ||
1455 | |||
1456 | /* Subtract float */ | ||
1457 | static int emu_seb (struct pt_regs *regs, int rx, float *val) { | ||
1458 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
1459 | FP_DECL_EX; | ||
1460 | int mode; | ||
1461 | |||
1462 | mode = current->thread.fp_regs.fpc & 3; | ||
1463 | FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1464 | FP_UNPACK_SP(SB, val); | ||
1465 | FP_SUB_S(SR, SA, SB); | ||
1466 | FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); | ||
1467 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
1468 | return _fex; | ||
1469 | } | ||
1470 | |||
1471 | /* Test data class long double */ | ||
1472 | static int emu_tcxb (struct pt_regs *regs, int rx, long val) { | ||
1473 | FP_DECL_Q(QA); | ||
1474 | mathemu_ldcv cvt; | ||
1475 | int bit; | ||
1476 | |||
1477 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
1478 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
1479 | FP_UNPACK_RAW_QP(QA, &cvt.ld); | ||
1480 | switch (QA_e) { | ||
1481 | default: | ||
1482 | bit = 8; /* normalized number */ | ||
1483 | break; | ||
1484 | case 0: | ||
1485 | if (_FP_FRAC_ZEROP_4(QA)) | ||
1486 | bit = 10; /* zero */ | ||
1487 | else | ||
1488 | bit = 6; /* denormalized number */ | ||
1489 | break; | ||
1490 | case _FP_EXPMAX_Q: | ||
1491 | if (_FP_FRAC_ZEROP_4(QA)) | ||
1492 | bit = 4; /* infinity */ | ||
1493 | else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q) | ||
1494 | bit = 2; /* quiet NAN */ | ||
1495 | else | ||
1496 | bit = 0; /* signaling NAN */ | ||
1497 | break; | ||
1498 | } | ||
1499 | if (!QA_s) | ||
1500 | bit++; | ||
1501 | emu_set_CC(regs, ((__u32) val >> bit) & 1); | ||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1505 | /* Test data class double */ | ||
1506 | static int emu_tcdb (struct pt_regs *regs, int rx, long val) { | ||
1507 | FP_DECL_D(DA); | ||
1508 | int bit; | ||
1509 | |||
1510 | FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); | ||
1511 | switch (DA_e) { | ||
1512 | default: | ||
1513 | bit = 8; /* normalized number */ | ||
1514 | break; | ||
1515 | case 0: | ||
1516 | if (_FP_FRAC_ZEROP_2(DA)) | ||
1517 | bit = 10; /* zero */ | ||
1518 | else | ||
1519 | bit = 6; /* denormalized number */ | ||
1520 | break; | ||
1521 | case _FP_EXPMAX_D: | ||
1522 | if (_FP_FRAC_ZEROP_2(DA)) | ||
1523 | bit = 4; /* infinity */ | ||
1524 | else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D) | ||
1525 | bit = 2; /* quiet NAN */ | ||
1526 | else | ||
1527 | bit = 0; /* signaling NAN */ | ||
1528 | break; | ||
1529 | } | ||
1530 | if (!DA_s) | ||
1531 | bit++; | ||
1532 | emu_set_CC(regs, ((__u32) val >> bit) & 1); | ||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | /* Test data class float */ | ||
1537 | static int emu_tceb (struct pt_regs *regs, int rx, long val) { | ||
1538 | FP_DECL_S(SA); | ||
1539 | int bit; | ||
1540 | |||
1541 | FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); | ||
1542 | switch (SA_e) { | ||
1543 | default: | ||
1544 | bit = 8; /* normalized number */ | ||
1545 | break; | ||
1546 | case 0: | ||
1547 | if (_FP_FRAC_ZEROP_1(SA)) | ||
1548 | bit = 10; /* zero */ | ||
1549 | else | ||
1550 | bit = 6; /* denormalized number */ | ||
1551 | break; | ||
1552 | case _FP_EXPMAX_S: | ||
1553 | if (_FP_FRAC_ZEROP_1(SA)) | ||
1554 | bit = 4; /* infinity */ | ||
1555 | else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S) | ||
1556 | bit = 2; /* quiet NAN */ | ||
1557 | else | ||
1558 | bit = 0; /* signaling NAN */ | ||
1559 | break; | ||
1560 | } | ||
1561 | if (!SA_s) | ||
1562 | bit++; | ||
1563 | emu_set_CC(regs, ((__u32) val >> bit) & 1); | ||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
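The three TEST DATA CLASS emulators above map the operand's class to a bit position (10 zero, 8 normalized, 6 denormalized, 4 infinity, 2 quiet NaN, 0 signaling NaN, plus 1 when the sign is positive) and then test that bit of the second-operand mask. A rough sketch of the same selection logic in portable C, for illustration only: tdc_bit() is an invented helper name, it uses C99 fpclassify()/signbit() instead of the soft-float internals, and it cannot tell quiet from signaling NaNs.

#include <math.h>
#include <stdio.h>

/* Illustrative analogue of the class-to-bit mapping in emu_tcdb() above. */
static int tdc_bit(double d)
{
        int bit;

        switch (fpclassify(d)) {
        case FP_ZERO:      bit = 10; break;  /* zero */
        case FP_NORMAL:    bit = 8;  break;  /* normalized number */
        case FP_SUBNORMAL: bit = 6;  break;  /* denormalized number */
        case FP_INFINITE:  bit = 4;  break;  /* infinity */
        default:           bit = 2;  break;  /* NaN (quiet assumed here) */
        }
        if (!signbit(d))
                bit++;                       /* positive operands use the odd bit */
        return bit;
}

int main(void)
{
        /* The instruction then sets cc = (mask >> bit) & 1, cf. emu_set_CC(). */
        printf("%d %d %d\n", tdc_bit(0.0), tdc_bit(-1.5), tdc_bit(INFINITY));
        return 0;
}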
1567 | static inline void emu_load_regd(int reg) { | ||
1568 | if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ | ||
1569 | return; | ||
1570 | asm volatile ( /* load reg from fp_regs.fprs[reg] */ | ||
1571 | " bras 1,0f\n" | ||
1572 | " ld 0,0(%1)\n" | ||
1573 | "0: ex %0,0(1)" | ||
1574 | : /* no output */ | ||
1575 | : "a" (reg<<4),"a" (¤t->thread.fp_regs.fprs[reg].d) | ||
1576 | : "1" ); | ||
1577 | } | ||
1578 | |||
1579 | static inline void emu_load_rege(int reg) { | ||
1580 | if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ | ||
1581 | return; | ||
1582 | asm volatile ( /* load reg from fp_regs.fprs[reg] */ | ||
1583 | " bras 1,0f\n" | ||
1584 | " le 0,0(%1)\n" | ||
1585 | "0: ex %0,0(1)" | ||
1586 | : /* no output */ | ||
1587 | : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].f) | ||
1588 | : "1" ); | ||
1589 | } | ||
1590 | |||
1591 | static inline void emu_store_regd(int reg) { | ||
1592 | if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ | ||
1593 | return; | ||
1594 | asm volatile ( /* store reg to fp_regs.fprs[reg] */ | ||
1595 | " bras 1,0f\n" | ||
1596 | " std 0,0(%1)\n" | ||
1597 | "0: ex %0,0(1)" | ||
1598 | : /* no output */ | ||
1599 | : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].d) | ||
1600 | : "1" ); | ||
1601 | } | ||
1602 | |||
1603 | |||
1604 | static inline void emu_store_rege(int reg) { | ||
1605 | if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ | ||
1606 | return; | ||
1607 | asm volatile ( /* store reg to fp_regs.fprs[reg] */ | ||
1608 | " bras 1,0f\n" | ||
1609 | " ste 0,0(%1)\n" | ||
1610 | "0: ex %0,0(1)" | ||
1611 | : /* no output */ | ||
1612 | : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].f) | ||
1613 | : "1" ); | ||
1614 | } | ||
1615 | |||
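The four helpers above patch the floating-point register number into a single ld/le/std/ste template at run time: bras points r1 at the template instruction and ex ORs the low byte of its first operand, reg<<4, into bits 8-15 of that instruction, so the hard-coded register 0 becomes the wanted register while the index nibble stays zero. A rough C model of just that byte arithmetic, with an assumed RX encoding and invented names, purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Model of "ex reg<<4, <template>": OR (reg << 4) into the R1/X2 byte
 * (bits 8-15) of a 4-byte RX-format instruction. */
static uint32_t patch_rx_r1(uint32_t insn, unsigned int reg)
{
        return insn | (uint32_t) ((reg & 0xf) << 4) << 16;
}

int main(void)
{
        uint32_t ld_f0 = 0x68001000;    /* assumed: ld %f0,0(%r1) (opcode 0x68) */

        printf("%08x\n", patch_rx_r1(ld_f0, 4));  /* 0x68401000, i.e. ld %f4,0(%r1) */
        return 0;
}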
1616 | int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { | ||
1617 | int _fex = 0; | ||
1618 | static const __u8 format_table[256] = { | ||
1619 | [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03, | ||
1620 | [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d, | ||
1621 | [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03, | ||
1622 | [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06, | ||
1623 | [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02, | ||
1624 | [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03, | ||
1625 | [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02, | ||
1626 | [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05, | ||
1627 | [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01, | ||
1628 | [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04, | ||
1629 | [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01, | ||
1630 | [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06, | ||
1631 | [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13, | ||
1632 | [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c, | ||
1633 | [0x99] = 0x0b,[0x9a] = 0x0a | ||
1634 | }; | ||
1635 | static const void *jump_table[256]= { | ||
1636 | [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr, | ||
1637 | [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr, | ||
1638 | [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr, | ||
1639 | [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr, | ||
1640 | [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr, | ||
1641 | [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr, | ||
1642 | [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr, | ||
1643 | [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr, | ||
1644 | [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr, | ||
1645 | [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr, | ||
1646 | [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr, | ||
1647 | [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr, | ||
1648 | [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr, | ||
1649 | [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr, | ||
1650 | [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr, | ||
1651 | [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr, | ||
1652 | [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc, | ||
1653 | [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr, | ||
1654 | [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr, | ||
1655 | [0x9a] = emu_cfxbr | ||
1656 | }; | ||
1657 | |||
1658 | switch (format_table[opcode[1]]) { | ||
1659 | case 1: /* RRE format, long double operation */ | ||
1660 | if (opcode[3] & 0x22) | ||
1661 | return SIGILL; | ||
1662 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1663 | emu_store_regd(((opcode[3] >> 4) & 15) + 2); | ||
1664 | emu_store_regd(opcode[3] & 15); | ||
1665 | emu_store_regd((opcode[3] & 15) + 2); | ||
1666 | /* call the emulation function */ | ||
1667 | _fex = ((int (*)(struct pt_regs *,int, int)) | ||
1668 | jump_table[opcode[1]]) | ||
1669 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1670 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1671 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1672 | emu_load_regd(opcode[3] & 15); | ||
1673 | emu_load_regd((opcode[3] & 15) + 2); | ||
1674 | break; | ||
1675 | case 2: /* RRE format, double operation */ | ||
1676 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1677 | emu_store_regd(opcode[3] & 15); | ||
1678 | /* call the emulation function */ | ||
1679 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1680 | jump_table[opcode[1]]) | ||
1681 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1682 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1683 | emu_load_regd(opcode[3] & 15); | ||
1684 | break; | ||
1685 | case 3: /* RRE format, float operation */ | ||
1686 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1687 | emu_store_rege(opcode[3] & 15); | ||
1688 | /* call the emulation function */ | ||
1689 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1690 | jump_table[opcode[1]]) | ||
1691 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1692 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1693 | emu_load_rege(opcode[3] & 15); | ||
1694 | break; | ||
1695 | case 4: /* RRF format, long double operation */ | ||
1696 | if (opcode[3] & 0x22) | ||
1697 | return SIGILL; | ||
1698 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1699 | emu_store_regd(((opcode[3] >> 4) & 15) + 2); | ||
1700 | emu_store_regd(opcode[3] & 15); | ||
1701 | emu_store_regd((opcode[3] & 15) + 2); | ||
1702 | /* call the emulation function */ | ||
1703 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1704 | jump_table[opcode[1]]) | ||
1705 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1706 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1707 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1708 | emu_load_regd(opcode[3] & 15); | ||
1709 | emu_load_regd((opcode[3] & 15) + 2); | ||
1710 | break; | ||
1711 | case 5: /* RRF format, double operation */ | ||
1712 | emu_store_regd((opcode[2] >> 4) & 15); | ||
1713 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1714 | emu_store_regd(opcode[3] & 15); | ||
1715 | /* call the emulation function */ | ||
1716 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1717 | jump_table[opcode[1]]) | ||
1718 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1719 | emu_load_regd((opcode[2] >> 4) & 15); | ||
1720 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1721 | emu_load_regd(opcode[3] & 15); | ||
1722 | break; | ||
1723 | case 6: /* RRF format, float operation */ | ||
1724 | emu_store_rege((opcode[2] >> 4) & 15); | ||
1725 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1726 | emu_store_rege(opcode[3] & 15); | ||
1727 | /* call the emulation function */ | ||
1728 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1729 | jump_table[opcode[1]]) | ||
1730 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1731 | emu_load_rege((opcode[2] >> 4) & 15); | ||
1732 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1733 | emu_load_rege(opcode[3] & 15); | ||
1734 | break; | ||
1735 | case 7: /* RRE format, cxfbr instruction */ | ||
1736 | /* call the emulation function */ | ||
1737 | if (opcode[3] & 0x20) | ||
1738 | return SIGILL; | ||
1739 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1740 | jump_table[opcode[1]]) | ||
1741 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1742 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1743 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1744 | break; | ||
1745 | case 8: /* RRE format, cdfbr instruction */ | ||
1746 | /* call the emulation function */ | ||
1747 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1748 | jump_table[opcode[1]]) | ||
1749 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1750 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1751 | break; | ||
1752 | case 9: /* RRE format, cefbr instruction */ | ||
1753 | /* call the emulation function */ | ||
1754 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1755 | jump_table[opcode[1]]) | ||
1756 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1757 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1758 | break; | ||
1759 | case 10: /* RRF format, cfxbr instruction */ | ||
1760 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1761 | /* mask of { 2,3,8-15 } is invalid */ | ||
1762 | return SIGILL; | ||
1763 | if (opcode[3] & 2) | ||
1764 | return SIGILL; | ||
1765 | emu_store_regd(opcode[3] & 15); | ||
1766 | emu_store_regd((opcode[3] & 15) + 2); | ||
1767 | /* call the emulation function */ | ||
1768 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1769 | jump_table[opcode[1]]) | ||
1770 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1771 | break; | ||
1772 | case 11: /* RRF format, cfdbr instruction */ | ||
1773 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1774 | /* mask of { 2,3,8-15 } is invalid */ | ||
1775 | return SIGILL; | ||
1776 | emu_store_regd(opcode[3] & 15); | ||
1777 | /* call the emulation function */ | ||
1778 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1779 | jump_table[opcode[1]]) | ||
1780 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1781 | break; | ||
1782 | case 12: /* RRF format, cfebr instruction */ | ||
1783 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1784 | /* mask of { 2,3,8-15 } is invalid */ | ||
1785 | return SIGILL; | ||
1786 | emu_store_rege(opcode[3] & 15); | ||
1787 | /* call the emulation function */ | ||
1788 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1789 | jump_table[opcode[1]]) | ||
1790 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1791 | break; | ||
1792 | case 13: /* RRE format, ldxbr & mdxbr instruction */ | ||
1793 | /* double store but long double load */ | ||
1794 | if (opcode[3] & 0x20) | ||
1795 | return SIGILL; | ||
1796 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1797 | emu_store_regd(opcode[3] & 15); | ||
1798 | /* call the emulation function */ | ||
1799 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1800 | jump_table[opcode[1]]) | ||
1801 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1802 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1803 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1804 | break; | ||
1805 | case 14: /* RRE format, lxebr instruction */ | ||
1806 | /* float store but long double load */ | ||
1807 | if (opcode[3] & 0x20) | ||
1808 | return SIGILL; | ||
1809 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1810 | emu_store_rege(opcode[3] & 15); | ||
1811 | /* call the emulation function */ | ||
1812 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1813 | jump_table[opcode[1]]) | ||
1814 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1815 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1816 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1817 | break; | ||
1818 | case 15: /* RRE format, ldebr & mdebr instruction */ | ||
1819 | /* float store but double load */ | ||
1820 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1821 | emu_store_rege(opcode[3] & 15); | ||
1822 | /* call the emulation function */ | ||
1823 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1824 | jump_table[opcode[1]]) | ||
1825 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1826 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1827 | break; | ||
1828 | case 16: /* RRE format, ldxbr instruction */ | ||
1829 | /* long double store but double load */ | ||
1830 | if (opcode[3] & 2) | ||
1831 | return SIGILL; | ||
1832 | emu_store_regd(opcode[3] & 15); | ||
1833 | emu_store_regd((opcode[3] & 15) + 2); | ||
1834 | /* call the emulation function */ | ||
1835 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1836 | jump_table[opcode[1]]) | ||
1837 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1838 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1839 | break; | ||
1840 | case 17: /* RRE format, lexbr instruction */ | ||
1841 | /* long double store but float load */ | ||
1842 | if (opcode[3] & 2) | ||
1843 | return SIGILL; | ||
1844 | emu_store_regd(opcode[3] & 15); | ||
1845 | emu_store_regd((opcode[3] & 15) + 2); | ||
1846 | /* call the emulation function */ | ||
1847 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1848 | jump_table[opcode[1]]) | ||
1849 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1850 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1851 | break; | ||
1852 | case 18: /* RRE format, ledbr instruction */ | ||
1853 | /* double store but float load */ | ||
1854 | emu_store_regd(opcode[3] & 15); | ||
1855 | /* call the emulation function */ | ||
1856 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1857 | jump_table[opcode[1]]) | ||
1858 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1859 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1860 | break; | ||
1861 | case 19: /* RRE format, efpc & sfpc instruction */ | ||
1862 | /* call the emulation function */ | ||
1863 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1864 | jump_table[opcode[1]]) | ||
1865 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1866 | break; | ||
1867 | default: /* invalid operation */ | ||
1868 | return SIGILL; | ||
1869 | } | ||
1870 | if (_fex != 0) { | ||
1871 | current->thread.fp_regs.fpc |= _fex; | ||
1872 | if (current->thread.fp_regs.fpc & (_fex << 8)) | ||
1873 | return SIGFPE; | ||
1874 | } | ||
1875 | return 0; | ||
1876 | } | ||
1877 | |||
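The check at the end of math_emu_b3() (and again at the end of math_emu_ed() below) merges the soft-float exception bits into the FPC flag byte and shifts them left by 8 to line them up with the corresponding enable masks, so SIGFPE is only delivered for exceptions the program has unmasked. A hedged model of that decision; the two constants are illustrative values, not the kernel's FPC definitions:

#include <stdio.h>

/* In the FPC image the exception flags sit one byte below their enable
 * masks, so "flags << 8" selects exactly the masks that would trap. */
static int fpc_should_trap(unsigned int *fpc, unsigned int fex)
{
        *fpc |= fex;                      /* record the exceptions as flags */
        return (*fpc & (fex << 8)) != 0;  /* any matching enable mask set? */
}

int main(void)
{
        unsigned int fpc = 0x08000000;    /* one enable mask set (assumed layout) */
        unsigned int fex = 0x00080000;    /* the matching exception flag raised */

        printf("%d\n", fpc_should_trap(&fpc, fex));   /* 1 -> deliver SIGFPE */
        return 0;
}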
1878 | static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp) | ||
1879 | { | ||
1880 | addr_t addr; | ||
1881 | |||
1882 | rx &= 15; | ||
1883 | rb &= 15; | ||
1884 | addr = disp & 0xfff; | ||
1885 | addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */ | ||
1886 | addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */ | ||
1887 | return (void*) addr; | ||
1888 | } | ||
1889 | |||
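calc_addr() forms the usual base-plus-index-plus-displacement effective address: the 12-bit displacement, plus the index register unless it is r0, plus the base register unless it is r0. A small example of the same arithmetic outside the kernel, with an invented register struct and made-up register values:

#include <stdio.h>

/* Illustrative stand-in for struct pt_regs: just the 16 GPRs. */
struct fake_regs { unsigned long gprs[16]; };

static unsigned long effective_addr(struct fake_regs *regs, int rx, int rb, int disp)
{
        unsigned long addr = disp & 0xfff;      /* 12-bit displacement */

        rx &= 15;
        rb &= 15;
        if (rx != 0)
                addr += regs->gprs[rx];         /* + index; r0 means "no index" */
        if (rb != 0)
                addr += regs->gprs[rb];         /* + base; r0 means "no base" */
        return addr;
}

int main(void)
{
        struct fake_regs regs = { .gprs = { [3] = 0x1000, [12] = 0x20000 } };

        /* D(X,B) = 0x010(3,12) -> 0x20000 + 0x1000 + 0x10 */
        printf("%lx\n", effective_addr(&regs, 3, 12, 0x010));
        return 0;
}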
1890 | int math_emu_ed(__u8 *opcode, struct pt_regs * regs) { | ||
1891 | int _fex = 0; | ||
1892 | |||
1893 | static const __u8 format_table[256] = { | ||
1894 | [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05, | ||
1895 | [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02, | ||
1896 | [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04, | ||
1897 | [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02, | ||
1898 | [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01, | ||
1899 | [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01, | ||
1900 | [0x1e] = 0x03,[0x1f] = 0x03, | ||
1901 | }; | ||
1902 | static const void *jump_table[]= { | ||
1903 | [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb, | ||
1904 | [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb, | ||
1905 | [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb, | ||
1906 | [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb, | ||
1907 | [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb, | ||
1908 | [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb, | ||
1909 | [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb, | ||
1910 | [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb, | ||
1911 | [0x1e] = emu_madb,[0x1f] = emu_msdb | ||
1912 | }; | ||
1913 | |||
1914 | switch (format_table[opcode[5]]) { | ||
1915 | case 1: /* RXE format, double constant */ { | ||
1916 | __u64 *dxb, temp; | ||
1917 | __u32 opc; | ||
1918 | |||
1919 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1920 | opc = *((__u32 *) opcode); | ||
1921 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1922 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1923 | /* call the emulation function */ | ||
1924 | _fex = ((int (*)(struct pt_regs *, int, double *)) | ||
1925 | jump_table[opcode[5]]) | ||
1926 | (regs, opcode[1] >> 4, (double *) &temp); | ||
1927 | emu_load_regd((opcode[1] >> 4) & 15); | ||
1928 | break; | ||
1929 | } | ||
1930 | case 2: /* RXE format, float constant */ { | ||
1931 | __u32 *dxb, temp; | ||
1932 | __u32 opc; | ||
1933 | |||
1934 | emu_store_rege((opcode[1] >> 4) & 15); | ||
1935 | opc = *((__u32 *) opcode); | ||
1936 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1937 | mathemu_get_user(temp, dxb); | ||
1938 | /* call the emulation function */ | ||
1939 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
1940 | jump_table[opcode[5]]) | ||
1941 | (regs, opcode[1] >> 4, (float *) &temp); | ||
1942 | emu_load_rege((opcode[1] >> 4) & 15); | ||
1943 | break; | ||
1944 | } | ||
1945 | case 3: /* RXF format, double constant */ { | ||
1946 | __u64 *dxb, temp; | ||
1947 | __u32 opc; | ||
1948 | |||
1949 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1950 | emu_store_regd((opcode[4] >> 4) & 15); | ||
1951 | opc = *((__u32 *) opcode); | ||
1952 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1953 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1954 | /* call the emulation function */ | ||
1955 | _fex = ((int (*)(struct pt_regs *, int, double *, int)) | ||
1956 | jump_table[opcode[5]]) | ||
1957 | (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4); | ||
1958 | emu_load_regd((opcode[4] >> 4) & 15); | ||
1959 | break; | ||
1960 | } | ||
1961 | case 4: /* RXF format, float constant */ { | ||
1962 | __u32 *dxb, temp; | ||
1963 | __u32 opc; | ||
1964 | |||
1965 | emu_store_rege((opcode[1] >> 4) & 15); | ||
1966 | emu_store_rege((opcode[4] >> 4) & 15); | ||
1967 | opc = *((__u32 *) opcode); | ||
1968 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1969 | mathemu_get_user(temp, dxb); | ||
1970 | /* call the emulation function */ | ||
1971 | _fex = ((int (*)(struct pt_regs *, int, float *, int)) | ||
1972 | jump_table[opcode[5]]) | ||
1973 | (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4); | ||
1974 | emu_load_rege((opcode[4] >> 4) & 15); | ||
1975 | break; | ||
1976 | } | ||
1977 | case 5: /* RXE format, double constant */ | ||
1978 | /* store double and load long double */ | ||
1979 | { | ||
1980 | __u64 *dxb, temp; | ||
1981 | __u32 opc; | ||
1982 | if ((opcode[1] >> 4) & 2) | ||
1983 | return SIGILL; | ||
1984 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1985 | opc = *((__u32 *) opcode); | ||
1986 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1987 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1988 | /* call the emulation function */ | ||
1989 | _fex = ((int (*)(struct pt_regs *, int, double *)) | ||
1990 | jump_table[opcode[5]]) | ||
1991 | (regs, opcode[1] >> 4, (double *) &temp); | ||
1992 | emu_load_regd((opcode[1] >> 4) & 15); | ||
1993 | emu_load_regd(((opcode[1] >> 4) & 15) + 2); | ||
1994 | break; | ||
1995 | } | ||
1996 | case 6: /* RXE format, float constant */ | ||
1997 | /* store float and load double */ | ||
1998 | { | ||
1999 | __u32 *dxb, temp; | ||
2000 | __u32 opc; | ||
2001 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2002 | opc = *((__u32 *) opcode); | ||
2003 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2004 | mathemu_get_user(temp, dxb); | ||
2005 | /* call the emulation function */ | ||
2006 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
2007 | jump_table[opcode[5]]) | ||
2008 | (regs, opcode[1] >> 4, (float *) &temp); | ||
2009 | emu_load_regd((opcode[1] >> 4) & 15); | ||
2010 | break; | ||
2011 | } | ||
2012 | case 7: /* RXE format, float constant */ | ||
2013 | /* store float and load long double */ | ||
2014 | { | ||
2015 | __u32 *dxb, temp; | ||
2016 | __u32 opc; | ||
2017 | if ((opcode[1] >> 4) & 2) | ||
2018 | return SIGILL; | ||
2019 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2020 | opc = *((__u32 *) opcode); | ||
2021 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2022 | mathemu_get_user(temp, dxb); | ||
2023 | /* call the emulation function */ | ||
2024 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
2025 | jump_table[opcode[5]]) | ||
2026 | (regs, opcode[1] >> 4, (float *) &temp); | ||
2027 | emu_load_regd((opcode[1] >> 4) & 15); | ||
2028 | emu_load_regd(((opcode[1] >> 4) & 15) + 2); | ||
2029 | break; | ||
2030 | } | ||
2031 | case 8: /* RXE format, RX address used as int value */ { | ||
2032 | __u64 dxb; | ||
2033 | __u32 opc; | ||
2034 | |||
2035 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2036 | opc = *((__u32 *) opcode); | ||
2037 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2038 | /* call the emulation function */ | ||
2039 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2040 | jump_table[opcode[5]]) | ||
2041 | (regs, opcode[1] >> 4, dxb); | ||
2042 | break; | ||
2043 | } | ||
2044 | case 9: /* RXE format, RX address used as int value */ { | ||
2045 | __u64 dxb; | ||
2046 | __u32 opc; | ||
2047 | |||
2048 | emu_store_regd((opcode[1] >> 4) & 15); | ||
2049 | opc = *((__u32 *) opcode); | ||
2050 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2051 | /* call the emulation function */ | ||
2052 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2053 | jump_table[opcode[5]]) | ||
2054 | (regs, opcode[1] >> 4, dxb); | ||
2055 | break; | ||
2056 | } | ||
2057 | case 10: /* RXE format, RX address used as int value */ { | ||
2058 | __u64 dxb; | ||
2059 | __u32 opc; | ||
2060 | |||
2061 | if ((opcode[1] >> 4) & 2) | ||
2062 | return SIGILL; | ||
2063 | emu_store_regd((opcode[1] >> 4) & 15); | ||
2064 | emu_store_regd(((opcode[1] >> 4) & 15) + 2); | ||
2065 | opc = *((__u32 *) opcode); | ||
2066 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2067 | /* call the emulation function */ | ||
2068 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2069 | jump_table[opcode[5]]) | ||
2070 | (regs, opcode[1] >> 4, dxb); | ||
2071 | break; | ||
2072 | } | ||
2073 | default: /* invalid operation */ | ||
2074 | return SIGILL; | ||
2075 | } | ||
2076 | if (_fex != 0) { | ||
2077 | current->thread.fp_regs.fpc |= _fex; | ||
2078 | if (current->thread.fp_regs.fpc & (_fex << 8)) | ||
2079 | return SIGFPE; | ||
2080 | } | ||
2081 | return 0; | ||
2082 | } | ||
2083 | |||
2084 | /* | ||
2085 | * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6} | ||
2086 | */ | ||
2087 | int math_emu_ldr(__u8 *opcode) { | ||
2088 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2089 | __u16 opc = *((__u16 *) opcode); | ||
2090 | |||
2091 | if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ | ||
2092 | /* we got an exception therefore ry can't be in {0,2,4,6} */ | ||
2093 | __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ | ||
2094 | " bras 1,0f\n" | ||
2095 | " ld 0,0(%1)\n" | ||
2096 | "0: ex %0,0(1)" | ||
2097 | : /* no output */ | ||
2098 | : "a" (opc & 0xf0), | ||
2099 | "a" (&fp_regs->fprs[opc & 0xf].d) | ||
2100 | : "1" ); | ||
2101 | } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ | ||
2102 | __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ | ||
2103 | " bras 1,0f\n" | ||
2104 | " std 0,0(%1)\n" | ||
2105 | "0: ex %0,0(1)" | ||
2106 | : /* no output */ | ||
2107 | : "a" ((opc & 0xf) << 4), | ||
2108 | "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) | ||
2109 | : "1" ); | ||
2110 | } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ | ||
2111 | fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; | ||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | /* | ||
2116 | * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6} | ||
2117 | */ | ||
2118 | int math_emu_ler(__u8 *opcode) { | ||
2119 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2120 | __u16 opc = *((__u16 *) opcode); | ||
2121 | |||
2122 | if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ | ||
2123 | /* we got an exception therefore ry can't be in {0,2,4,6} */ | ||
2124 | __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ | ||
2125 | " bras 1,0f\n" | ||
2126 | " le 0,0(%1)\n" | ||
2127 | "0: ex %0,0(1)" | ||
2128 | : /* no output */ | ||
2129 | : "a" (opc & 0xf0), | ||
2130 | "a" (&fp_regs->fprs[opc & 0xf].f) | ||
2131 | : "1" ); | ||
2132 | } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ | ||
2133 | __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ | ||
2134 | " bras 1,0f\n" | ||
2135 | " ste 0,0(%1)\n" | ||
2136 | "0: ex %0,0(1)" | ||
2137 | : /* no output */ | ||
2138 | : "a" ((opc & 0xf) << 4), | ||
2139 | "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) | ||
2140 | : "1" ); | ||
2141 | } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ | ||
2142 | fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; | ||
2143 | return 0; | ||
2144 | } | ||
2145 | |||
2146 | /* | ||
2147 | * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6} | ||
2148 | */ | ||
2149 | int math_emu_ld(__u8 *opcode, struct pt_regs * regs) { | ||
2150 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2151 | __u32 opc = *((__u32 *) opcode); | ||
2152 | __u64 *dxb; | ||
2153 | |||
2154 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2155 | mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8); | ||
2156 | return 0; | ||
2157 | } | ||
2158 | |||
2159 | /* | ||
2160 | * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6} | ||
2161 | */ | ||
2162 | int math_emu_le(__u8 *opcode, struct pt_regs * regs) { | ||
2163 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2164 | __u32 opc = *((__u32 *) opcode); | ||
2165 | __u32 *mem, *dxb; | ||
2166 | |||
2167 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2168 | mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); | ||
2169 | mathemu_get_user(mem[0], dxb); | ||
2170 | return 0; | ||
2171 | } | ||
2172 | |||
2173 | /* | ||
2174 | * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6} | ||
2175 | */ | ||
2176 | int math_emu_std(__u8 *opcode, struct pt_regs * regs) { | ||
2177 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2178 | __u32 opc = *((__u32 *) opcode); | ||
2179 | __u64 *dxb; | ||
2180 | |||
2181 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2182 | mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8); | ||
2183 | return 0; | ||
2184 | } | ||
2185 | |||
2186 | /* | ||
2187 | * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6} | ||
2188 | */ | ||
2189 | int math_emu_ste(__u8 *opcode, struct pt_regs * regs) { | ||
2190 | s390_fp_regs *fp_regs = &current->thread.fp_regs; | ||
2191 | __u32 opc = *((__u32 *) opcode); | ||
2192 | __u32 *mem, *dxb; | ||
2193 | |||
2194 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2195 | mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); | ||
2196 | mathemu_put_user(mem[0], dxb); | ||
2197 | return 0; | ||
2198 | } | ||
2199 | |||
2200 | /* | ||
2201 | * Emulate LFPC D(B) | ||
2202 | */ | ||
2203 | int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) { | ||
2204 | __u32 opc = *((__u32 *) opcode); | ||
2205 | __u32 *dxb, temp; | ||
2206 | |||
2207 | dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); | ||
2208 | mathemu_get_user(temp, dxb); | ||
2209 | if ((temp & ~FPC_VALID_MASK) != 0) | ||
2210 | return SIGILL; | ||
2211 | current->thread.fp_regs.fpc = temp; | ||
2212 | return 0; | ||
2213 | } | ||
2214 | |||
2215 | /* | ||
2216 | * Emulate STFPC D(B) | ||
2217 | */ | ||
2218 | int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) { | ||
2219 | __u32 opc = *((__u32 *) opcode); | ||
2220 | __u32 *dxb; | ||
2221 | |||
2222 | dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); | ||
2223 | mathemu_put_user(current->thread.fp_regs.fpc, dxb); | ||
2224 | return 0; | ||
2225 | } | ||
2226 | |||
2227 | /* | ||
2228 | * Emulate SRNM D(B) | ||
2229 | */ | ||
2230 | int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) { | ||
2231 | __u32 opc = *((__u32 *) opcode); | ||
2232 | __u32 temp; | ||
2233 | |||
2234 | temp = (unsigned long) calc_addr(regs, 0, opc>>12, opc); | ||
2235 | current->thread.fp_regs.fpc &= ~3; | ||
2236 | current->thread.fp_regs.fpc |= (temp & 3); | ||
2237 | return 0; | ||
2238 | } | ||
2239 | |||
2240 | /* broken compiler ... */ | ||
2241 | long long | ||
2242 | __negdi2 (long long u) | ||
2243 | { | ||
2244 | |||
2245 | union lll { | ||
2246 | long long ll; | ||
2247 | long s[2]; | ||
2248 | }; | ||
2249 | |||
2250 | union lll w,uu; | ||
2251 | |||
2252 | uu.ll = u; | ||
2253 | |||
2254 | w.s[1] = -uu.s[1]; | ||
2255 | w.s[0] = -uu.s[0] - ((int) w.s[1] != 0); | ||
2256 | |||
2257 | return w.ll; | ||
2258 | } | ||
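__negdi2() negates a 64-bit value using only 32-bit operations: negate both halves, then subtract one more from the upper half whenever the negated lower half is non-zero, i.e. whenever a borrow occurred. A hedged, portable check of that identity; it uses 64-bit types directly, so it is only a test harness, not the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Redo the two-halves negation with explicit 32-bit types and compare
 * against plain 64-bit negation. */
static int64_t neg64_by_halves(int64_t u)
{
        uint32_t lo = (uint32_t) u;
        uint32_t hi = (uint32_t) (u >> 32);
        uint32_t nlo = -lo;
        uint32_t nhi = -hi - (nlo != 0);        /* borrow when the low half was non-zero */

        return (int64_t) (((uint64_t) nhi << 32) | nlo);
}

int main(void)
{
        int64_t samples[] = { 0, 1, -1, 0x100000000LL, -12345678901LL };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%d\n", neg64_by_halves(samples[i]) == -samples[i]);  /* all 1 */
        return 0;
}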
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S new file mode 100644 index 000000000000..b01c2b648e22 --- /dev/null +++ b/arch/s390/math-emu/qrnnd.S | |||
@@ -0,0 +1,77 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | # r2 : &__r | ||
4 | # r3 : upper half of 64 bit word n | ||
5 | # r4 : lower half of 64 bit word n | ||
6 | # r5 : divisor d | ||
7 | # the remainder r of the division is to be stored to &__r and | ||
8 | # the quotient q is to be returned | ||
9 | |||
10 | .text | ||
11 | .globl __udiv_qrnnd | ||
12 | __udiv_qrnnd: | ||
13 | st %r2,24(%r15) # store pointer to remainder for later | ||
14 | lr %r0,%r3 # reload n | ||
15 | lr %r1,%r4 | ||
16 | ltr %r2,%r5 # reload and test divisor | ||
17 | jp 5f | ||
18 | # divisor >= 0x80000000 | ||
19 | srdl %r0,2 # n/4 | ||
20 | srl %r2,1 # d/2 | ||
21 | slr %r1,%r2 # special case if last bit of d is set | ||
22 | brc 3,0f # (n/4) div (d/2) can overflow by 1 | ||
23 | ahi %r0,-1 # trick: subtract d/2, then divide | ||
24 | 0: dr %r0,%r2 # signed division | ||
25 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
26 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
27 | lhi %r3,1 | ||
28 | nr %r3,%r1 # test last bit of q | ||
29 | jz 1f | ||
30 | alr %r0,%r2 # add (d>>1) to r | ||
31 | 1: srl %r1,1 # q >>= 1 | ||
32 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
33 | lhi %r3,1 | ||
34 | nr %r3,%r5 # test last bit of d | ||
35 | jz 2f | ||
36 | slr %r0,%r1 # r -= q | ||
37 | brc 3,2f # borrow ? | ||
38 | alr %r0,%r5 # r += d | ||
39 | ahi %r1,-1 | ||
40 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
41 | alr %r1,%r1 # q <<= 1 | ||
42 | alr %r0,%r0 # r <<= 1 | ||
43 | brc 12,3f # overflow on r ? | ||
44 | slr %r0,%r5 # r -= d | ||
45 | ahi %r1,1 # q += 1 | ||
46 | 3: lhi %r3,2 | ||
47 | nr %r3,%r4 # test next to last bit of n | ||
48 | jz 4f | ||
49 | ahi %r0,1 # r += 1 | ||
50 | 4: clr %r0,%r5 # r >= d ? | ||
51 | jl 6f | ||
52 | slr %r0,%r5 # r -= d | ||
53 | ahi %r1,1 # q += 1 | ||
54 | # now (n >> 1) = d * %r1 + %r0 | ||
55 | j 6f | ||
56 | 5: # divisor < 0x80000000 | ||
57 | srdl %r0,1 | ||
58 | dr %r0,%r2 # signed division | ||
59 | # now (n >> 1) = d * %r1 + %r0 | ||
60 | 6: alr %r1,%r1 # q <<= 1 | ||
61 | alr %r0,%r0 # r <<= 1 | ||
62 | brc 12,7f # overflow on r ? | ||
63 | slr %r0,%r5 # r -= d | ||
64 | ahi %r1,1 # q += 1 | ||
65 | 7: lhi %r3,1 | ||
66 | nr %r3,%r4 # isolate last bit of n | ||
67 | alr %r0,%r3 # r += (n & 1) | ||
68 | clr %r0,%r5 # r >= d ? | ||
69 | jl 8f | ||
70 | slr %r0,%r5 # r -= d | ||
71 | ahi %r1,1 # q += 1 | ||
72 | 8: # now n = d * %r1 + %r0 | ||
73 | l %r2,24(%r15) | ||
74 | st %r0,0(%r2) | ||
75 | lr %r2,%r1 | ||
76 | br %r14 | ||
77 | .end __udiv_qrnnd | ||
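The assembler routine above implements the udiv_qrnnd primitive used by the soft-float code: divide the 64-bit value n, passed as two 32-bit halves, by a 32-bit divisor, return the quotient and store the remainder through the pointer. A hedged C reference model of that contract (udiv_qrnnd_ref is an invented name; it leans on 64-bit arithmetic, which is exactly what the hand-written 31-bit code cannot do):

#include <stdio.h>

/* Reference model only: same interface shape as __udiv_qrnnd, computed
 * with 64-bit arithmetic instead of the shifted 32-bit division tricks. */
static unsigned int udiv_qrnnd_ref(unsigned int *r,
                                   unsigned int n_hi, unsigned int n_lo,
                                   unsigned int d)
{
        unsigned long long n = ((unsigned long long) n_hi << 32) | n_lo;

        *r = (unsigned int) (n % d);
        return (unsigned int) (n / d);  /* caller ensures n_hi < d so q fits */
}

int main(void)
{
        unsigned int r, q = udiv_qrnnd_ref(&r, 0x12, 0x34567890u, 0x9abcdef0u);

        printf("q=%08x r=%08x\n", q, r);
        return 0;
}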
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h new file mode 100644 index 000000000000..ab556b600f73 --- /dev/null +++ b/arch/s390/math-emu/sfp-util.h | |||
@@ -0,0 +1,63 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/types.h> | ||
4 | #include <asm/byteorder.h> | ||
5 | |||
6 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \ | ||
7 | unsigned int __sh = (ah); \ | ||
8 | unsigned int __sl = (al); \ | ||
9 | __asm__ (" alr %1,%3\n" \ | ||
10 | " brc 12,0f\n" \ | ||
11 | " ahi %0,1\n" \ | ||
12 | "0: alr %0,%2" \ | ||
13 | : "+&d" (__sh), "+d" (__sl) \ | ||
14 | : "d" (bh), "d" (bl) : "cc" ); \ | ||
15 | (sh) = __sh; \ | ||
16 | (sl) = __sl; \ | ||
17 | }) | ||
18 | |||
19 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \ | ||
20 | unsigned int __sh = (ah); \ | ||
21 | unsigned int __sl = (al); \ | ||
22 | __asm__ (" slr %1,%3\n" \ | ||
23 | " brc 3,0f\n" \ | ||
24 | " ahi %0,-1\n" \ | ||
25 | "0: slr %0,%2" \ | ||
26 | : "+&d" (__sh), "+d" (__sl) \ | ||
27 | : "d" (bh), "d" (bl) : "cc" ); \ | ||
28 | (sh) = __sh; \ | ||
29 | (sl) = __sl; \ | ||
30 | }) | ||
31 | |||
32 | /* a umul b = a mul b + ((a >= 2^31) ? b<<32 : 0) + ((b >= 2^31) ? a<<32 : 0) */ | ||
33 | #define umul_ppmm(wh, wl, u, v) ({ \ | ||
34 | unsigned int __wh = u; \ | ||
35 | unsigned int __wl = v; \ | ||
36 | __asm__ (" ltr 1,%0\n" \ | ||
37 | " mr 0,%1\n" \ | ||
38 | " jnm 0f\n" \ | ||
39 | " alr 0,%1\n" \ | ||
40 | "0: ltr %1,%1\n" \ | ||
41 | " jnm 1f\n" \ | ||
42 | " alr 0,%0\n" \ | ||
43 | "1: lr %0,0\n" \ | ||
44 | " lr %1,1\n" \ | ||
45 | : "+d" (__wh), "+d" (__wl) \ | ||
46 | : : "0", "1", "cc" ); \ | ||
47 | wh = __wh; \ | ||
48 | wl = __wl; \ | ||
49 | }) | ||
50 | |||
51 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
52 | do { unsigned long __r; \ | ||
53 | (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ | ||
54 | (r) = __r; \ | ||
55 | } while (0) | ||
56 | extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, | ||
57 | unsigned long , unsigned long); | ||
58 | |||
59 | #define UDIV_NEEDS_NORMALIZATION 0 | ||
60 | |||
61 | #define abort() return 0 | ||
62 | |||
63 | #define __BYTE_ORDER __BIG_ENDIAN | ||
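The macros in this header are the usual GMP-style longlong primitives: add_ssaaaa/sub_ddmmss do 64-bit add/subtract on (high, low) 32-bit pairs, and umul_ppmm builds a 32x32->64 unsigned multiply out of the signed mr instruction by adding back the two correction terms described in the comment above. A hedged, portable sketch of the same semantics (pure C, no inline assembly, the *_ref names are invented):

#include <stdio.h>
#include <stdint.h>

/* (sh,sl) = (ah,al) + (bh,bl) over 32-bit halves. */
static void add_ssaaaa_ref(uint32_t *sh, uint32_t *sl,
                           uint32_t ah, uint32_t al, uint32_t bh, uint32_t bl)
{
        uint64_t s = (((uint64_t) ah << 32) | al) + (((uint64_t) bh << 32) | bl);

        *sh = (uint32_t) (s >> 32);
        *sl = (uint32_t) s;
}

/* (wh,wl) = u * v as a full 64-bit unsigned product, written the way the
 * umul_ppmm comment describes: start from the signed product and add
 * v<<32 / u<<32 back in when the respective sign bit was set. */
static void umul_ppmm_ref(uint32_t *wh, uint32_t *wl, uint32_t u, uint32_t v)
{
        uint64_t p = (uint64_t) ((int64_t) (int32_t) u * (int32_t) v);

        if ((int32_t) u < 0)
                p += (uint64_t) v << 32;        /* u >= 2^31: add v<<32 */
        if ((int32_t) v < 0)
                p += (uint64_t) u << 32;        /* v >= 2^31: add u<<32 */
        *wh = (uint32_t) (p >> 32);
        *wl = (uint32_t) p;
}

int main(void)
{
        uint32_t h, l;

        add_ssaaaa_ref(&h, &l, 0x00000001u, 0xffffffffu, 0x00000000u, 0x00000001u);
        printf("%08x%08x\n", h, l);     /* 0000000200000000 */
        umul_ppmm_ref(&h, &l, 0x80000001u, 0xfffffffeu);
        printf("%08x%08x\n", h, l);     /* equals the plain 64-bit product */
        return 0;
}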
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile new file mode 100644 index 000000000000..aa9a42b6e62d --- /dev/null +++ b/arch/s390/mm/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for the linux s390-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o fault.o ioremap.o extmem.o mmap.o | ||
6 | obj-$(CONFIG_CMM) += cmm.o | ||
7 | |||
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c new file mode 100644 index 000000000000..d30cdb4248a9 --- /dev/null +++ b/arch/s390/mm/cmm.c | |||
@@ -0,0 +1,443 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/cmm.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * Collaborative memory management interface. | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/sysctl.h> | ||
18 | #include <linux/ctype.h> | ||
19 | |||
20 | #include <asm/pgalloc.h> | ||
21 | #include <asm/uaccess.h> | ||
22 | |||
23 | #include "../../../drivers/s390/net/smsgiucv.h" | ||
24 | |||
25 | #define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2) | ||
26 | |||
27 | struct cmm_page_array { | ||
28 | struct cmm_page_array *next; | ||
29 | unsigned long index; | ||
30 | unsigned long pages[CMM_NR_PAGES]; | ||
31 | }; | ||
32 | |||
33 | static long cmm_pages = 0; | ||
34 | static long cmm_timed_pages = 0; | ||
35 | static volatile long cmm_pages_target = 0; | ||
36 | static volatile long cmm_timed_pages_target = 0; | ||
37 | static long cmm_timeout_pages = 0; | ||
38 | static long cmm_timeout_seconds = 0; | ||
39 | |||
40 | static struct cmm_page_array *cmm_page_list = 0; | ||
41 | static struct cmm_page_array *cmm_timed_page_list = 0; | ||
42 | |||
43 | static unsigned long cmm_thread_active = 0; | ||
44 | static struct work_struct cmm_thread_starter; | ||
45 | static wait_queue_head_t cmm_thread_wait; | ||
46 | static struct timer_list cmm_timer; | ||
47 | |||
48 | static void cmm_timer_fn(unsigned long); | ||
49 | static void cmm_set_timer(void); | ||
50 | |||
51 | static long | ||
52 | cmm_strtoul(const char *cp, char **endp) | ||
53 | { | ||
54 | unsigned int base = 10; | ||
55 | |||
56 | if (*cp == '0') { | ||
57 | base = 8; | ||
58 | cp++; | ||
59 | if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) { | ||
60 | base = 16; | ||
61 | cp++; | ||
62 | } | ||
63 | } | ||
64 | return simple_strtoul(cp, endp, base); | ||
65 | } | ||
66 | |||
67 | static long | ||
68 | cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list) | ||
69 | { | ||
70 | struct cmm_page_array *pa; | ||
71 | unsigned long page; | ||
72 | |||
73 | pa = *list; | ||
74 | while (pages) { | ||
75 | page = __get_free_page(GFP_NOIO); | ||
76 | if (!page) | ||
77 | break; | ||
78 | if (!pa || pa->index >= CMM_NR_PAGES) { | ||
79 | /* Need a new page for the page list. */ | ||
80 | pa = (struct cmm_page_array *) | ||
81 | __get_free_page(GFP_NOIO); | ||
82 | if (!pa) { | ||
83 | free_page(page); | ||
84 | break; | ||
85 | } | ||
86 | pa->next = *list; | ||
87 | pa->index = 0; | ||
88 | *list = pa; | ||
89 | } | ||
90 | diag10(page); | ||
91 | pa->pages[pa->index++] = page; | ||
92 | (*counter)++; | ||
93 | pages--; | ||
94 | } | ||
95 | return pages; | ||
96 | } | ||
97 | |||
98 | static void | ||
99 | cmm_free_pages(long pages, long *counter, struct cmm_page_array **list) | ||
100 | { | ||
101 | struct cmm_page_array *pa; | ||
102 | unsigned long page; | ||
103 | |||
104 | pa = *list; | ||
105 | while (pages) { | ||
106 | if (!pa || pa->index <= 0) | ||
107 | break; | ||
108 | page = pa->pages[--pa->index]; | ||
109 | if (pa->index == 0) { | ||
110 | pa = pa->next; | ||
111 | free_page((unsigned long) *list); | ||
112 | *list = pa; | ||
113 | } | ||
114 | free_page(page); | ||
115 | (*counter)--; | ||
116 | pages--; | ||
117 | } | ||
118 | } | ||
119 | |||
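cmm_alloc_pages()/cmm_free_pages() above keep the pages taken from Linux in a simple chained stack: each tracking page holds up to CMM_NR_PAGES entries plus a next pointer and a fill index, and entries are pushed and popped at the head. A hedged user-space model of that bookkeeping (malloc stands in for __get_free_page, the diag10 hint to the hypervisor is left out, and all names here are invented):

#include <stdlib.h>

#define NR_PER_NODE 16                  /* stands in for CMM_NR_PAGES */

struct page_array {
        struct page_array *next;
        unsigned long index;
        void *pages[NR_PER_NODE];
};

/* Push one "page" onto the head node, allocating a new tracking node when
 * the current one is full; mirrors the loop body of cmm_alloc_pages(). */
static int push_page(struct page_array **list, void *page)
{
        struct page_array *pa = *list;

        if (!pa || pa->index >= NR_PER_NODE) {
                pa = calloc(1, sizeof(*pa));
                if (!pa)
                        return -1;
                pa->next = *list;
                *list = pa;
        }
        pa->pages[pa->index++] = page;
        return 0;
}

/* Pop one "page", freeing the tracking node once it drains, as in
 * cmm_free_pages(); returns NULL when the list is empty. */
static void *pop_page(struct page_array **list)
{
        struct page_array *pa = *list;
        void *page;

        if (!pa || pa->index == 0)
                return NULL;
        page = pa->pages[--pa->index];
        if (pa->index == 0) {
                *list = pa->next;
                free(pa);
        }
        return page;
}

int main(void)
{
        struct page_array *list = NULL;
        char a, b;

        push_page(&list, &a);
        push_page(&list, &b);
        while (pop_page(&list))
                ;                       /* drains and frees the tracking node */
        return 0;
}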
120 | static int | ||
121 | cmm_thread(void *dummy) | ||
122 | { | ||
123 | int rc; | ||
124 | |||
125 | daemonize("cmmthread"); | ||
126 | while (1) { | ||
127 | rc = wait_event_interruptible(cmm_thread_wait, | ||
128 | (cmm_pages != cmm_pages_target || | ||
129 | cmm_timed_pages != cmm_timed_pages_target)); | ||
130 | if (rc == -ERESTARTSYS) { | ||
131 | /* Got kill signal. End thread. */ | ||
132 | clear_bit(0, &cmm_thread_active); | ||
133 | cmm_pages_target = cmm_pages; | ||
134 | cmm_timed_pages_target = cmm_timed_pages; | ||
135 | break; | ||
136 | } | ||
137 | if (cmm_pages_target > cmm_pages) { | ||
138 | if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list)) | ||
139 | cmm_pages_target = cmm_pages; | ||
140 | } else if (cmm_pages_target < cmm_pages) { | ||
141 | cmm_free_pages(1, &cmm_pages, &cmm_page_list); | ||
142 | } | ||
143 | if (cmm_timed_pages_target > cmm_timed_pages) { | ||
144 | if (cmm_alloc_pages(1, &cmm_timed_pages, | ||
145 | &cmm_timed_page_list)) | ||
146 | cmm_timed_pages_target = cmm_timed_pages; | ||
147 | } else if (cmm_timed_pages_target < cmm_timed_pages) { | ||
148 | cmm_free_pages(1, &cmm_timed_pages, | ||
149 | &cmm_timed_page_list); | ||
150 | } | ||
151 | if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer)) | ||
152 | cmm_set_timer(); | ||
153 | } | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static void | ||
158 | cmm_start_thread(void) | ||
159 | { | ||
160 | kernel_thread(cmm_thread, 0, 0); | ||
161 | } | ||
162 | |||
163 | static void | ||
164 | cmm_kick_thread(void) | ||
165 | { | ||
166 | if (!test_and_set_bit(0, &cmm_thread_active)) | ||
167 | schedule_work(&cmm_thread_starter); | ||
168 | wake_up(&cmm_thread_wait); | ||
169 | } | ||
170 | |||
171 | static void | ||
172 | cmm_set_timer(void) | ||
173 | { | ||
174 | if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { | ||
175 | if (timer_pending(&cmm_timer)) | ||
176 | del_timer(&cmm_timer); | ||
177 | return; | ||
178 | } | ||
179 | if (timer_pending(&cmm_timer)) { | ||
180 | if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ)) | ||
181 | return; | ||
182 | } | ||
183 | cmm_timer.function = cmm_timer_fn; | ||
184 | cmm_timer.data = 0; | ||
185 | cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ; | ||
186 | add_timer(&cmm_timer); | ||
187 | } | ||
188 | |||
189 | static void | ||
190 | cmm_timer_fn(unsigned long ignored) | ||
191 | { | ||
192 | long pages; | ||
193 | |||
194 | pages = cmm_timed_pages_target - cmm_timeout_pages; | ||
195 | if (pages < 0) | ||
196 | cmm_timed_pages_target = 0; | ||
197 | else | ||
198 | cmm_timed_pages_target = pages; | ||
199 | cmm_kick_thread(); | ||
200 | cmm_set_timer(); | ||
201 | } | ||
202 | |||
203 | void | ||
204 | cmm_set_pages(long pages) | ||
205 | { | ||
206 | cmm_pages_target = pages; | ||
207 | cmm_kick_thread(); | ||
208 | } | ||
209 | |||
210 | long | ||
211 | cmm_get_pages(void) | ||
212 | { | ||
213 | return cmm_pages; | ||
214 | } | ||
215 | |||
216 | void | ||
217 | cmm_add_timed_pages(long pages) | ||
218 | { | ||
219 | cmm_timed_pages_target += pages; | ||
220 | cmm_kick_thread(); | ||
221 | } | ||
222 | |||
223 | long | ||
224 | cmm_get_timed_pages(void) | ||
225 | { | ||
226 | return cmm_timed_pages; | ||
227 | } | ||
228 | |||
229 | void | ||
230 | cmm_set_timeout(long pages, long seconds) | ||
231 | { | ||
232 | cmm_timeout_pages = pages; | ||
233 | cmm_timeout_seconds = seconds; | ||
234 | cmm_set_timer(); | ||
235 | } | ||
236 | |||
237 | static inline int | ||
238 | cmm_skip_blanks(char *cp, char **endp) | ||
239 | { | ||
240 | char *str; | ||
241 | |||
242 | for (str = cp; *str == ' ' || *str == '\t'; str++); | ||
243 | *endp = str; | ||
244 | return str != cp; | ||
245 | } | ||
246 | |||
247 | #ifdef CONFIG_CMM_PROC | ||
248 | /* These will someday get removed. */ | ||
249 | #define VM_CMM_PAGES 1111 | ||
250 | #define VM_CMM_TIMED_PAGES 1112 | ||
251 | #define VM_CMM_TIMEOUT 1113 | ||
252 | |||
253 | static struct ctl_table cmm_table[]; | ||
254 | |||
255 | static int | ||
256 | cmm_pages_handler(ctl_table *ctl, int write, struct file *filp, | ||
257 | void *buffer, size_t *lenp, loff_t *ppos) | ||
258 | { | ||
259 | char buf[16], *p; | ||
260 | long pages; | ||
261 | int len; | ||
262 | |||
263 | if (!*lenp || (*ppos && !write)) { | ||
264 | *lenp = 0; | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | if (write) { | ||
269 | len = *lenp; | ||
270 | if (copy_from_user(buf, buffer, | ||
271 | len > sizeof(buf) ? sizeof(buf) : len)) | ||
272 | return -EFAULT; | ||
273 | buf[sizeof(buf) - 1] = '\0'; | ||
274 | cmm_skip_blanks(buf, &p); | ||
275 | pages = cmm_strtoul(p, &p); | ||
276 | if (ctl == &cmm_table[0]) | ||
277 | cmm_set_pages(pages); | ||
278 | else | ||
279 | cmm_add_timed_pages(pages); | ||
280 | } else { | ||
281 | if (ctl == &cmm_table[0]) | ||
282 | pages = cmm_get_pages(); | ||
283 | else | ||
284 | pages = cmm_get_timed_pages(); | ||
285 | len = sprintf(buf, "%ld\n", pages); | ||
286 | if (len > *lenp) | ||
287 | len = *lenp; | ||
288 | if (copy_to_user(buffer, buf, len)) | ||
289 | return -EFAULT; | ||
290 | } | ||
291 | *lenp = len; | ||
292 | *ppos += len; | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static int | ||
297 | cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp, | ||
298 | void *buffer, size_t *lenp, loff_t *ppos) | ||
299 | { | ||
300 | char buf[64], *p; | ||
301 | long pages, seconds; | ||
302 | int len; | ||
303 | |||
304 | if (!*lenp || (*ppos && !write)) { | ||
305 | *lenp = 0; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | if (write) { | ||
310 | len = *lenp; | ||
311 | if (copy_from_user(buf, buffer, | ||
312 | len > sizeof(buf) ? sizeof(buf) : len)) | ||
313 | return -EFAULT; | ||
314 | buf[sizeof(buf) - 1] = '\0'; | ||
315 | cmm_skip_blanks(buf, &p); | ||
316 | pages = cmm_strtoul(p, &p); | ||
317 | cmm_skip_blanks(p, &p); | ||
318 | seconds = cmm_strtoul(p, &p); | ||
319 | cmm_set_timeout(pages, seconds); | ||
320 | } else { | ||
321 | len = sprintf(buf, "%ld %ld\n", | ||
322 | cmm_timeout_pages, cmm_timeout_seconds); | ||
323 | if (len > *lenp) | ||
324 | len = *lenp; | ||
325 | if (copy_to_user(buffer, buf, len)) | ||
326 | return -EFAULT; | ||
327 | } | ||
328 | *lenp = len; | ||
329 | *ppos += len; | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static struct ctl_table cmm_table[] = { | ||
334 | { | ||
335 | .ctl_name = VM_CMM_PAGES, | ||
336 | .procname = "cmm_pages", | ||
337 | .mode = 0600, | ||
338 | .proc_handler = &cmm_pages_handler, | ||
339 | }, | ||
340 | { | ||
341 | .ctl_name = VM_CMM_TIMED_PAGES, | ||
342 | .procname = "cmm_timed_pages", | ||
343 | .mode = 0600, | ||
344 | .proc_handler = &cmm_pages_handler, | ||
345 | }, | ||
346 | { | ||
347 | .ctl_name = VM_CMM_TIMEOUT, | ||
348 | .procname = "cmm_timeout", | ||
349 | .mode = 0600, | ||
350 | .proc_handler = &cmm_timeout_handler, | ||
351 | }, | ||
352 | { .ctl_name = 0 } | ||
353 | }; | ||
354 | |||
355 | static struct ctl_table cmm_dir_table[] = { | ||
356 | { | ||
357 | .ctl_name = CTL_VM, | ||
358 | .procname = "vm", | ||
359 | .maxlen = 0, | ||
360 | .mode = 0555, | ||
361 | .child = cmm_table, | ||
362 | }, | ||
363 | { .ctl_name = 0 } | ||
364 | }; | ||
365 | #endif | ||
366 | |||
367 | #ifdef CONFIG_CMM_IUCV | ||
368 | #define SMSG_PREFIX "CMM" | ||
369 | static void | ||
370 | cmm_smsg_target(char *msg) | ||
371 | { | ||
372 | long pages, seconds; | ||
373 | |||
374 | if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg)) | ||
375 | return; | ||
376 | if (strncmp(msg, "SHRINK", 6) == 0) { | ||
377 | if (!cmm_skip_blanks(msg + 6, &msg)) | ||
378 | return; | ||
379 | pages = cmm_strtoul(msg, &msg); | ||
380 | cmm_skip_blanks(msg, &msg); | ||
381 | if (*msg == '\0') | ||
382 | cmm_set_pages(pages); | ||
383 | } else if (strncmp(msg, "RELEASE", 7) == 0) { | ||
384 | if (!cmm_skip_blanks(msg + 7, &msg)) | ||
385 | return; | ||
386 | pages = cmm_strtoul(msg, &msg); | ||
387 | cmm_skip_blanks(msg, &msg); | ||
388 | if (*msg == '\0') | ||
389 | cmm_add_timed_pages(pages); | ||
390 | } else if (strncmp(msg, "REUSE", 5) == 0) { | ||
391 | if (!cmm_skip_blanks(msg + 5, &msg)) | ||
392 | return; | ||
393 | pages = cmm_strtoul(msg, &msg); | ||
394 | if (!cmm_skip_blanks(msg, &msg)) | ||
395 | return; | ||
396 | seconds = cmm_strtoul(msg, &msg); | ||
397 | cmm_skip_blanks(msg, &msg); | ||
398 | if (*msg == '\0') | ||
399 | cmm_set_timeout(pages, seconds); | ||
400 | } | ||
401 | } | ||
402 | #endif | ||
403 | |||
404 | struct ctl_table_header *cmm_sysctl_header; | ||
405 | |||
406 | static int | ||
407 | cmm_init (void) | ||
408 | { | ||
409 | #ifdef CONFIG_CMM_PROC | ||
410 | cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1); | ||
411 | #endif | ||
412 | #ifdef CONFIG_CMM_IUCV | ||
413 | smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); | ||
414 | #endif | ||
415 | INIT_WORK(&cmm_thread_starter, (void *) cmm_start_thread, 0); | ||
416 | init_waitqueue_head(&cmm_thread_wait); | ||
417 | init_timer(&cmm_timer); | ||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | static void | ||
422 | cmm_exit(void) | ||
423 | { | ||
424 | cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); | ||
425 | cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); | ||
426 | #ifdef CONFIG_CMM_PROC | ||
427 | unregister_sysctl_table(cmm_sysctl_header); | ||
428 | #endif | ||
429 | #ifdef CONFIG_CMM_IUCV | ||
430 | smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); | ||
431 | #endif | ||
432 | } | ||
433 | |||
434 | module_init(cmm_init); | ||
435 | module_exit(cmm_exit); | ||
436 | |||
437 | EXPORT_SYMBOL(cmm_set_pages); | ||
438 | EXPORT_SYMBOL(cmm_get_pages); | ||
439 | EXPORT_SYMBOL(cmm_add_timed_pages); | ||
440 | EXPORT_SYMBOL(cmm_get_timed_pages); | ||
441 | EXPORT_SYMBOL(cmm_set_timeout); | ||
442 | |||
443 | MODULE_LICENSE("GPL"); | ||
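With CONFIG_CMM_PROC the module exposes three sysctls under /proc/sys/vm (cmm_pages, cmm_timed_pages and cmm_timeout, as registered by cmm_dir_table/cmm_table above), and with CONFIG_CMM_IUCV it accepts the same requests as z/VM SMSG messages of the form "CMM SHRINK <pages>", "CMM RELEASE <pages>" and "CMM REUSE <pages> <seconds>". A minimal user-space sketch of driving the sysctl side, with error handling kept to a bare minimum:

#include <stdio.h>

/* Ask cmm to take 1024 pages away from Linux and to return timed pages
 * at a rate of 256 pages every 30 seconds. */
static int write_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        write_sysctl("/proc/sys/vm/cmm_pages", "1024\n");
        write_sysctl("/proc/sys/vm/cmm_timeout", "256 30\n");
        return 0;
}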
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c new file mode 100644 index 000000000000..648deed17e25 --- /dev/null +++ b/arch/s390/mm/extmem.c | |||
@@ -0,0 +1,588 @@ | |||
1 | /* | ||
2 | * File...........: arch/s390/mm/extmem.c | ||
3 | * Author(s)......: Carsten Otte <cotte@de.ibm.com> | ||
4 | * Rob M van der Heij <rvdheij@nl.ibm.com> | ||
5 | * Steven Shultz <shultzss@us.ibm.com> | ||
6 | * Bugreports.to..: <Linux390@de.ibm.com> | ||
7 | * (C) IBM Corporation 2002-2004 | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/list.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/bootmem.h> | ||
17 | #include <asm/page.h> | ||
18 | #include <asm/ebcdic.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include <asm/extmem.h> | ||
21 | #include <asm/cpcmd.h> | ||
22 | #include <linux/ctype.h> | ||
23 | |||
24 | #define DCSS_DEBUG /* Debug messages on/off */ | ||
25 | |||
26 | #define DCSS_NAME "extmem" | ||
27 | #ifdef DCSS_DEBUG | ||
28 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x) | ||
29 | #else | ||
30 | #define PRINT_DEBUG(x...) do {} while (0) | ||
31 | #endif | ||
32 | #define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x) | ||
33 | #define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x) | ||
34 | #define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x) | ||
35 | |||
36 | |||
37 | #define DCSS_LOADSHR 0x00 | ||
38 | #define DCSS_LOADNSR 0x04 | ||
39 | #define DCSS_PURGESEG 0x08 | ||
40 | #define DCSS_FINDSEG 0x0c | ||
41 | #define DCSS_LOADNOLY 0x10 | ||
42 | #define DCSS_SEGEXT 0x18 | ||
43 | #define DCSS_FINDSEGA 0x0c | ||
44 | |||
45 | struct qrange { | ||
46 | unsigned int start; // 3byte start address, 1 byte type | ||
47 | unsigned int end; // 3byte end address, 1 byte reserved | ||
48 | }; | ||
49 | |||
50 | struct qout64 { | ||
51 | int segstart; | ||
52 | int segend; | ||
53 | int segcnt; | ||
54 | int segrcnt; | ||
55 | struct qrange range[6]; | ||
56 | }; | ||
57 | |||
58 | struct qin64 { | ||
59 | char qopcode; | ||
60 | char rsrv1[3]; | ||
61 | char qrcode; | ||
62 | char rsrv2[3]; | ||
63 | char qname[8]; | ||
64 | unsigned int qoutptr; | ||
65 | short int qoutlen; | ||
66 | }; | ||
67 | |||
68 | struct dcss_segment { | ||
69 | struct list_head list; | ||
70 | char dcss_name[8]; | ||
71 | unsigned long start_addr; | ||
72 | unsigned long end; | ||
73 | atomic_t ref_count; | ||
74 | int do_nonshared; | ||
75 | unsigned int vm_segtype; | ||
76 | struct qrange range[6]; | ||
77 | int segcnt; | ||
78 | }; | ||
79 | |||
80 | static DEFINE_SPINLOCK(dcss_lock); | ||
81 | static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); | ||
82 | static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", | ||
83 | "EW/EN-MIXED" }; | ||
84 | |||
85 | extern struct { | ||
86 | unsigned long addr, size, type; | ||
87 | } memory_chunk[MEMORY_CHUNKS]; | ||
88 | |||
89 | /* | ||
90 |  * Create the 8-byte, EBCDIC VM segment name from | ||
91 |  * an ASCII name. | ||
92 | */ | ||
93 | static inline void | ||
94 | dcss_mkname(char *name, char *dcss_name) | ||
95 | { | ||
96 | int i; | ||
97 | |||
98 | for (i = 0; i < 8; i++) { | ||
99 | if (name[i] == '\0') | ||
100 | break; | ||
101 | dcss_name[i] = toupper(name[i]); | ||
102 | 	} | ||
103 | for (; i < 8; i++) | ||
104 | dcss_name[i] = ' '; | ||
105 | ASCEBC(dcss_name, 8); | ||
106 | } | ||
107 | |||
108 | |||
109 | /* | ||
110 | * search all segments in dcss_list, and return the one | ||
111 |  * named *name. If not found, return NULL. | ||
112 | */ | ||
113 | static struct dcss_segment * | ||
114 | segment_by_name (char *name) | ||
115 | { | ||
116 | char dcss_name[9]; | ||
117 | struct list_head *l; | ||
118 | struct dcss_segment *tmp, *retval = NULL; | ||
119 | |||
120 | assert_spin_locked(&dcss_lock); | ||
121 | dcss_mkname (name, dcss_name); | ||
122 | list_for_each (l, &dcss_list) { | ||
123 | tmp = list_entry (l, struct dcss_segment, list); | ||
124 | if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) { | ||
125 | retval = tmp; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | return retval; | ||
130 | } | ||
131 | |||
132 | |||
133 | /* | ||
134 | * Perform a function on a dcss segment. | ||
135 | */ | ||
136 | static inline int | ||
137 | dcss_diag (__u8 func, void *parameter, | ||
138 | unsigned long *ret1, unsigned long *ret2) | ||
139 | { | ||
140 | unsigned long rx, ry; | ||
141 | int rc; | ||
142 | |||
143 | rx = (unsigned long) parameter; | ||
144 | ry = (unsigned long) func; | ||
145 | __asm__ __volatile__( | ||
146 | #ifdef CONFIG_ARCH_S390X | ||
147 | " sam31\n" // switch to 31 bit | ||
148 | " diag %0,%1,0x64\n" | ||
149 | " sam64\n" // switch back to 64 bit | ||
150 | #else | ||
151 | " diag %0,%1,0x64\n" | ||
152 | #endif | ||
153 | " ipm %2\n" | ||
154 | " srl %2,28\n" | ||
155 | : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" ); | ||
156 | *ret1 = rx; | ||
157 | *ret2 = ry; | ||
158 | return rc; | ||
159 | } | ||
160 | |||
161 | static inline int | ||
162 | dcss_diag_translate_rc (int vm_rc) { | ||
163 | if (vm_rc == 44) | ||
164 | return -ENOENT; | ||
165 | return -EIO; | ||
166 | } | ||
167 | |||
168 | |||
169 | /* do a diag to get info about a segment. | ||
170 |  * fills start_addr, end and vm_segtype fields | ||
171 | */ | ||
172 | static int | ||
173 | query_segment_type (struct dcss_segment *seg) | ||
174 | { | ||
175 | struct qin64 *qin = kmalloc (sizeof(struct qin64), GFP_DMA); | ||
176 | struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA); | ||
177 | |||
178 | int diag_cc, rc, i; | ||
179 | unsigned long dummy, vmrc; | ||
180 | |||
181 | if ((qin == NULL) || (qout == NULL)) { | ||
182 | rc = -ENOMEM; | ||
183 | goto out_free; | ||
184 | } | ||
185 | |||
186 | /* initialize diag input parameters */ | ||
187 | qin->qopcode = DCSS_FINDSEGA; | ||
188 | qin->qoutptr = (unsigned long) qout; | ||
189 | qin->qoutlen = sizeof(struct qout64); | ||
190 | memcpy (qin->qname, seg->dcss_name, 8); | ||
191 | |||
192 | diag_cc = dcss_diag (DCSS_SEGEXT, qin, &dummy, &vmrc); | ||
193 | |||
194 | if (diag_cc > 1) { | ||
195 | rc = dcss_diag_translate_rc (vmrc); | ||
196 | goto out_free; | ||
197 | } | ||
198 | |||
199 | if (qout->segcnt > 6) { | ||
200 | rc = -ENOTSUPP; | ||
201 | goto out_free; | ||
202 | } | ||
203 | |||
204 | if (qout->segcnt == 1) { | ||
205 | seg->vm_segtype = qout->range[0].start & 0xff; | ||
206 | } else { | ||
207 | /* multi-part segment. only one type supported here: | ||
208 | - all parts are contiguous | ||
209 | - all parts are either EW or EN type | ||
210 | - maximum 6 parts allowed */ | ||
211 | unsigned long start = qout->segstart >> PAGE_SHIFT; | ||
212 | for (i=0; i<qout->segcnt; i++) { | ||
213 | if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && | ||
214 | ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { | ||
215 | rc = -ENOTSUPP; | ||
216 | goto out_free; | ||
217 | } | ||
218 | if (start != qout->range[i].start >> PAGE_SHIFT) { | ||
219 | rc = -ENOTSUPP; | ||
220 | goto out_free; | ||
221 | } | ||
222 | start = (qout->range[i].end >> PAGE_SHIFT) + 1; | ||
223 | } | ||
224 | seg->vm_segtype = SEG_TYPE_EWEN; | ||
225 | } | ||
226 | |||
227 | /* analyze diag output and update seg */ | ||
228 | seg->start_addr = qout->segstart; | ||
229 | seg->end = qout->segend; | ||
230 | |||
231 | memcpy (seg->range, qout->range, 6*sizeof(struct qrange)); | ||
232 | seg->segcnt = qout->segcnt; | ||
233 | |||
234 | rc = 0; | ||
235 | |||
236 | out_free: | ||
237 | 	kfree(qin); | ||
238 | 	kfree(qout); | ||
239 | return rc; | ||
240 | } | ||
241 | |||
242 | /* | ||
243 | * check if the given segment collides with guest storage. | ||
244 | * returns 1 if this is the case, 0 if no collision was found | ||
245 | */ | ||
246 | static int | ||
247 | segment_overlaps_storage(struct dcss_segment *seg) | ||
248 | { | ||
249 | int i; | ||
250 | |||
251 | for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
252 | if (memory_chunk[i].type != 0) | ||
253 | continue; | ||
254 | if ((memory_chunk[i].addr >> 20) > (seg->end >> 20)) | ||
255 | continue; | ||
256 | if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20) | ||
257 | < (seg->start_addr >> 20)) | ||
258 | continue; | ||
259 | return 1; | ||
260 | } | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * check if segment collides with other segments that are currently loaded | ||
266 | * returns 1 if this is the case, 0 if no collision was found | ||
267 | */ | ||
268 | static int | ||
269 | segment_overlaps_others (struct dcss_segment *seg) | ||
270 | { | ||
271 | struct list_head *l; | ||
272 | struct dcss_segment *tmp; | ||
273 | |||
274 | assert_spin_locked(&dcss_lock); | ||
275 | list_for_each(l, &dcss_list) { | ||
276 | tmp = list_entry(l, struct dcss_segment, list); | ||
277 | if ((tmp->start_addr >> 20) > (seg->end >> 20)) | ||
278 | continue; | ||
279 | if ((tmp->end >> 20) < (seg->start_addr >> 20)) | ||
280 | continue; | ||
281 | if (seg == tmp) | ||
282 | continue; | ||
283 | return 1; | ||
284 | } | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * check if segment exceeds the kernel mapping range (detected or set via mem=) | ||
290 | * returns 1 if this is the case, 0 if segment fits into the range | ||
291 | */ | ||
292 | static inline int | ||
293 | segment_exceeds_range (struct dcss_segment *seg) | ||
294 | { | ||
295 | int seg_last_pfn = (seg->end) >> PAGE_SHIFT; | ||
296 | if (seg_last_pfn > max_pfn) | ||
297 | return 1; | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * get info about a segment | ||
303 | * possible return values: | ||
304 | * -ENOSYS : we are not running on VM | ||
305 | * -EIO : could not perform query diagnose | ||
306 | * -ENOENT : no such segment | ||
307 | * -ENOTSUPP: multi-part segment cannot be used with linux | ||
308 | * -ENOSPC : segment cannot be used (overlaps with storage) | ||
309 | * -ENOMEM : out of memory | ||
310 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h | ||
311 | */ | ||
312 | int | ||
313 | segment_type (char* name) | ||
314 | { | ||
315 | int rc; | ||
316 | struct dcss_segment seg; | ||
317 | |||
318 | if (!MACHINE_IS_VM) | ||
319 | return -ENOSYS; | ||
320 | |||
321 | dcss_mkname(name, seg.dcss_name); | ||
322 | rc = query_segment_type (&seg); | ||
323 | if (rc < 0) | ||
324 | return rc; | ||
325 | return seg.vm_segtype; | ||
326 | } | ||
327 | |||
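/*
 * Illustrative use only (segment name and surrounding code invented for
 * this sketch): a caller could probe a segment before trying to load it,
 * e.g.
 *
 *	rc = segment_type("MYDCSS");
 *	if (rc == -ENOENT)
 *		... no such segment defined on the VM host ...
 *	else if (rc >= 0)
 *		... rc is one of the SEG_TYPE_* values from asm/extmem.h ...
 */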
328 | /* | ||
329 | * real segment loading function, called from segment_load | ||
330 | */ | ||
331 | static int | ||
332 | __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) | ||
333 | { | ||
334 | struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment), | ||
335 | GFP_DMA); | ||
336 | int dcss_command, rc, diag_cc; | ||
337 | |||
338 | if (seg == NULL) { | ||
339 | rc = -ENOMEM; | ||
340 | goto out; | ||
341 | } | ||
342 | dcss_mkname (name, seg->dcss_name); | ||
343 | rc = query_segment_type (seg); | ||
344 | if (rc < 0) | ||
345 | goto out_free; | ||
346 | if (segment_exceeds_range(seg)) { | ||
347 | PRINT_WARN ("segment_load: not loading segment %s - exceeds" | ||
348 | " kernel mapping range\n",name); | ||
349 | rc = -ERANGE; | ||
350 | goto out_free; | ||
351 | } | ||
352 | if (segment_overlaps_storage(seg)) { | ||
353 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | ||
354 | " storage\n",name); | ||
355 | rc = -ENOSPC; | ||
356 | goto out_free; | ||
357 | } | ||
358 | if (segment_overlaps_others(seg)) { | ||
359 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | ||
360 | " other segments\n",name); | ||
361 | rc = -EBUSY; | ||
362 | goto out_free; | ||
363 | } | ||
364 | if (do_nonshared) | ||
365 | dcss_command = DCSS_LOADNSR; | ||
366 | else | ||
367 | dcss_command = DCSS_LOADNOLY; | ||
368 | |||
369 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, | ||
370 | &seg->start_addr, &seg->end); | ||
371 | if (diag_cc > 1) { | ||
372 | PRINT_WARN ("segment_load: could not load segment %s - " | ||
373 | "diag returned error (%ld)\n",name,seg->end); | ||
374 | rc = dcss_diag_translate_rc (seg->end); | ||
375 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | ||
376 | &seg->start_addr, &seg->end); | ||
377 | goto out_free; | ||
378 | } | ||
379 | seg->do_nonshared = do_nonshared; | ||
380 | atomic_set(&seg->ref_count, 1); | ||
381 | list_add(&seg->list, &dcss_list); | ||
382 | rc = seg->vm_segtype; | ||
383 | *addr = seg->start_addr; | ||
384 | *end = seg->end; | ||
385 | if (do_nonshared) | ||
386 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | ||
387 | "type %s in non-shared mode\n", name, | ||
388 | (void*)seg->start_addr, (void*)seg->end, | ||
389 | segtype_string[seg->vm_segtype]); | ||
390 | else | ||
391 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | ||
392 | "type %s in shared mode\n", name, | ||
393 | (void*)seg->start_addr, (void*)seg->end, | ||
394 | segtype_string[seg->vm_segtype]); | ||
395 | goto out; | ||
396 | out_free: | ||
397 | kfree (seg); | ||
398 | out: | ||
399 | return rc; | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * this function loads a DCSS segment | ||
404 | * name : name of the DCSS | ||
405 | * do_nonshared : 0 indicates that the dcss should be shared with other linux images | ||
406 | * 1 indicates that the dcss should be exclusive for this linux image | ||
407 | * addr : will be filled with start address of the segment | ||
408 | * end : will be filled with end address of the segment | ||
409 | * return values: | ||
410 | * -ENOSYS : we are not running on VM | ||
411 | * -EIO : could not perform query or load diagnose | ||
412 | * -ENOENT : no such segment | ||
413 | * -ENOTSUPP: multi-part segment cannot be used with linux | ||
414 | * -ENOSPC : segment cannot be used (overlaps with storage) | ||
415 | * -EBUSY : segment can temporarily not be used (overlaps with dcss) | ||
416 | * -ERANGE : segment cannot be used (exceeds kernel mapping range) | ||
417 | * -EPERM : segment is currently loaded with incompatible permissions | ||
418 | * -ENOMEM : out of memory | ||
419 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h | ||
420 | */ | ||
421 | int | ||
422 | segment_load (char *name, int do_nonshared, unsigned long *addr, | ||
423 | unsigned long *end) | ||
424 | { | ||
425 | struct dcss_segment *seg; | ||
426 | int rc; | ||
427 | |||
428 | if (!MACHINE_IS_VM) | ||
429 | return -ENOSYS; | ||
430 | |||
431 | spin_lock (&dcss_lock); | ||
432 | seg = segment_by_name (name); | ||
433 | if (seg == NULL) | ||
434 | rc = __segment_load (name, do_nonshared, addr, end); | ||
435 | else { | ||
436 | if (do_nonshared == seg->do_nonshared) { | ||
437 | atomic_inc(&seg->ref_count); | ||
438 | *addr = seg->start_addr; | ||
439 | *end = seg->end; | ||
440 | rc = seg->vm_segtype; | ||
441 | } else { | ||
442 | *addr = *end = 0; | ||
443 | rc = -EPERM; | ||
444 | } | ||
445 | } | ||
446 | spin_unlock (&dcss_lock); | ||
447 | return rc; | ||
448 | } | ||
449 | |||
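/*
 * Illustrative use only: a driver that wants a shared DCSS called "MYDCSS"
 * (name, error handling and surrounding code invented for this sketch)
 * would typically do something like
 *
 *	unsigned long start, end;
 *	int type;
 *
 *	type = segment_load("MYDCSS", 0, &start, &end);
 *	if (type < 0)
 *		return type;		// -ENOENT, -ENOSPC, -EPERM, ...
 *	// ... use the memory between start and end ...
 *	segment_unload("MYDCSS");	// drop the reference again
 */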
450 | /* | ||
451 |  * this function modifies the shared state of a DCSS segment. | ||
452 | * name : name of the DCSS | ||
453 | * do_nonshared : 0 indicates that the dcss should be shared with other linux images | ||
454 | * 1 indicates that the dcss should be exclusive for this linux image | ||
455 | * return values: | ||
456 | * -EIO : could not perform load diagnose (segment gone!) | ||
457 | * -ENOENT : no such segment (segment gone!) | ||
458 | * -EAGAIN : segment is in use by other exploiters, try later | ||
459 | * -EINVAL : no segment with the given name is currently loaded - name invalid | ||
460 | * 0 : operation succeeded | ||
461 | */ | ||
462 | int | ||
463 | segment_modify_shared (char *name, int do_nonshared) | ||
464 | { | ||
465 | struct dcss_segment *seg; | ||
466 | unsigned long dummy; | ||
467 | int dcss_command, rc, diag_cc; | ||
468 | |||
469 | spin_lock (&dcss_lock); | ||
470 | seg = segment_by_name (name); | ||
471 | if (seg == NULL) { | ||
472 | rc = -EINVAL; | ||
473 | goto out_unlock; | ||
474 | } | ||
475 | if (do_nonshared == seg->do_nonshared) { | ||
476 | PRINT_INFO ("segment_modify_shared: not reloading segment %s" | ||
477 | " - already in requested mode\n",name); | ||
478 | rc = 0; | ||
479 | goto out_unlock; | ||
480 | } | ||
481 | if (atomic_read (&seg->ref_count) != 1) { | ||
482 | PRINT_WARN ("segment_modify_shared: not reloading segment %s - " | ||
483 | "segment is in use by other driver(s)\n",name); | ||
484 | rc = -EAGAIN; | ||
485 | goto out_unlock; | ||
486 | } | ||
487 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | ||
488 | &dummy, &dummy); | ||
489 | if (do_nonshared) | ||
490 | dcss_command = DCSS_LOADNSR; | ||
491 | else | ||
492 | dcss_command = DCSS_LOADNOLY; | ||
493 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, | ||
494 | &seg->start_addr, &seg->end); | ||
495 | if (diag_cc > 1) { | ||
496 | PRINT_WARN ("segment_modify_shared: could not reload segment %s" | ||
497 | " - diag returned error (%ld)\n",name,seg->end); | ||
498 | rc = dcss_diag_translate_rc (seg->end); | ||
499 | goto out_del; | ||
500 | } | ||
501 | seg->do_nonshared = do_nonshared; | ||
502 | rc = 0; | ||
503 | goto out_unlock; | ||
504 | out_del: | ||
505 | list_del(&seg->list); | ||
506 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | ||
507 | &dummy, &dummy); | ||
508 | kfree (seg); | ||
509 | out_unlock: | ||
510 | spin_unlock(&dcss_lock); | ||
511 | return rc; | ||
512 | } | ||
513 | |||
514 | /* | ||
515 | * Decrease the use count of a DCSS segment and remove | ||
516 | * it from the address space if nobody is using it | ||
517 | * any longer. | ||
518 | */ | ||
519 | void | ||
520 | segment_unload(char *name) | ||
521 | { | ||
522 | unsigned long dummy; | ||
523 | struct dcss_segment *seg; | ||
524 | |||
525 | if (!MACHINE_IS_VM) | ||
526 | return; | ||
527 | |||
528 | spin_lock(&dcss_lock); | ||
529 | seg = segment_by_name (name); | ||
530 | if (seg == NULL) { | ||
531 | PRINT_ERR ("could not find segment %s in segment_unload, " | ||
532 | "please report to linux390@de.ibm.com\n",name); | ||
533 | goto out_unlock; | ||
534 | } | ||
535 | if (atomic_dec_return(&seg->ref_count) == 0) { | ||
536 | list_del(&seg->list); | ||
537 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | ||
538 | &dummy, &dummy); | ||
539 | kfree(seg); | ||
540 | } | ||
541 | out_unlock: | ||
542 | spin_unlock(&dcss_lock); | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * save segment content permanently | ||
547 | */ | ||
548 | void | ||
549 | segment_save(char *name) | ||
550 | { | ||
551 | struct dcss_segment *seg; | ||
552 | int startpfn = 0; | ||
553 | int endpfn = 0; | ||
554 | char cmd1[160]; | ||
555 | char cmd2[80]; | ||
556 | int i; | ||
557 | |||
558 | if (!MACHINE_IS_VM) | ||
559 | return; | ||
560 | |||
561 | spin_lock(&dcss_lock); | ||
562 | seg = segment_by_name (name); | ||
563 | |||
564 | 	if (seg == NULL) { | ||
565 | 		PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name); | ||
566 | 		spin_unlock(&dcss_lock); | ||
567 | 		return; | ||
568 | 	} | ||
569 | startpfn = seg->start_addr >> PAGE_SHIFT; | ||
570 | endpfn = (seg->end) >> PAGE_SHIFT; | ||
571 | sprintf(cmd1, "DEFSEG %s", name); | ||
572 | for (i=0; i<seg->segcnt; i++) { | ||
573 | sprintf(cmd1+strlen(cmd1), " %X-%X %s", | ||
574 | seg->range[i].start >> PAGE_SHIFT, | ||
575 | seg->range[i].end >> PAGE_SHIFT, | ||
576 | segtype_string[seg->range[i].start & 0xff]); | ||
577 | } | ||
578 | sprintf(cmd2, "SAVESEG %s", name); | ||
579 | cpcmd(cmd1, NULL, 0); | ||
580 | cpcmd(cmd2, NULL, 0); | ||
581 | spin_unlock(&dcss_lock); | ||
582 | } | ||
583 | |||
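/*
 * Worked example (all values invented): assuming SEG_TYPE_SR selects the
 * "SR" entry of segtype_string[], a single-part SR segment named "MYDCSS"
 * covering page frames 0x20000..0x2003f would be saved by issuing the CP
 * commands
 *
 *	DEFSEG MYDCSS 20000-2003F SR
 *	SAVESEG MYDCSS
 *
 * through cpcmd() above.
 */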
584 | EXPORT_SYMBOL(segment_load); | ||
585 | EXPORT_SYMBOL(segment_unload); | ||
586 | EXPORT_SYMBOL(segment_save); | ||
587 | EXPORT_SYMBOL(segment_type); | ||
588 | EXPORT_SYMBOL(segment_modify_shared); | ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c new file mode 100644 index 000000000000..80306bc8c799 --- /dev/null +++ b/arch/s390/mm/fault.c | |||
@@ -0,0 +1,586 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/fault.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | ||
7 | * Ulrich Weigand (uweigand@de.ibm.com) | ||
8 | * | ||
9 | * Derived from "arch/i386/mm/fault.c" | ||
10 | * Copyright (C) 1995 Linus Torvalds | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/signal.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/mman.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/console.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/hardirq.h> | ||
29 | |||
30 | #include <asm/system.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | |||
34 | #ifndef CONFIG_ARCH_S390X | ||
35 | #define __FAIL_ADDR_MASK 0x7ffff000 | ||
36 | #define __FIXUP_MASK 0x7fffffff | ||
37 | #define __SUBCODE_MASK 0x0200 | ||
38 | #define __PF_RES_FIELD 0ULL | ||
39 | #else /* CONFIG_ARCH_S390X */ | ||
40 | #define __FAIL_ADDR_MASK -4096L | ||
41 | #define __FIXUP_MASK ~0L | ||
42 | #define __SUBCODE_MASK 0x0600 | ||
43 | #define __PF_RES_FIELD 0x8000000000000000ULL | ||
44 | #endif /* CONFIG_ARCH_S390X */ | ||
45 | |||
46 | #ifdef CONFIG_SYSCTL | ||
47 | extern int sysctl_userprocess_debug; | ||
48 | #endif | ||
49 | |||
50 | extern void die(const char *,struct pt_regs *,long); | ||
51 | |||
52 | extern spinlock_t timerlist_lock; | ||
53 | |||
54 | /* | ||
55 | * Unlock any spinlocks which will prevent us from getting the | ||
56 | * message out (timerlist_lock is acquired through the | ||
57 | * console unblank code) | ||
58 | */ | ||
59 | void bust_spinlocks(int yes) | ||
60 | { | ||
61 | if (yes) { | ||
62 | oops_in_progress = 1; | ||
63 | } else { | ||
64 | int loglevel_save = console_loglevel; | ||
65 | console_unblank(); | ||
66 | oops_in_progress = 0; | ||
67 | /* | ||
68 | * OK, the message is on the console. Now we call printk() | ||
69 | * without oops_in_progress set so that printk will give klogd | ||
70 | * a poke. Hold onto your hats... | ||
71 | */ | ||
72 | console_loglevel = 15; | ||
73 | printk(" "); | ||
74 | console_loglevel = loglevel_save; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Check which address space is addressed by the access | ||
80 | * register in S390_lowcore.exc_access_id. | ||
81 | * Returns 1 for user space and 0 for kernel space. | ||
82 | */ | ||
83 | static int __check_access_register(struct pt_regs *regs, int error_code) | ||
84 | { | ||
85 | int areg = S390_lowcore.exc_access_id; | ||
86 | |||
87 | if (areg == 0) | ||
88 | /* Access via access register 0 -> kernel address */ | ||
89 | return 0; | ||
90 | save_access_regs(current->thread.acrs); | ||
91 | if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1) | ||
92 | /* | ||
93 | * access register contains 0 -> kernel address, | ||
94 | * access register contains 1 -> user space address | ||
95 | */ | ||
96 | return current->thread.acrs[areg]; | ||
97 | |||
98 | /* Something unhealthy was done with the access registers... */ | ||
99 | die("page fault via unknown access register", regs, error_code); | ||
100 | do_exit(SIGKILL); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Check which address space the address belongs to. | ||
106 | * Returns 1 for user space and 0 for kernel space. | ||
107 | */ | ||
108 | static inline int check_user_space(struct pt_regs *regs, int error_code) | ||
109 | { | ||
110 | /* | ||
111 | * The lowest two bits of S390_lowcore.trans_exc_code indicate | ||
112 | * which paging table was used: | ||
113 | * 0: Primary Segment Table Descriptor | ||
114 | * 1: STD determined via access register | ||
115 | * 2: Secondary Segment Table Descriptor | ||
116 | * 3: Home Segment Table Descriptor | ||
117 | */ | ||
118 | int descriptor = S390_lowcore.trans_exc_code & 3; | ||
119 | if (unlikely(descriptor == 1)) | ||
120 | return __check_access_register(regs, error_code); | ||
121 | if (descriptor == 2) | ||
122 | return current->thread.mm_segment.ar4; | ||
123 | return descriptor != 0; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Send SIGSEGV to task. This is an external routine | ||
128 | * to keep the stack usage of do_page_fault small. | ||
129 | */ | ||
130 | static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, | ||
131 | int si_code, unsigned long address) | ||
132 | { | ||
133 | struct siginfo si; | ||
134 | |||
135 | #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) | ||
136 | #if defined(CONFIG_SYSCTL) | ||
137 | if (sysctl_userprocess_debug) | ||
138 | #endif | ||
139 | { | ||
140 | printk("User process fault: interruption code 0x%lX\n", | ||
141 | error_code); | ||
142 | printk("failing address: %lX\n", address); | ||
143 | show_regs(regs); | ||
144 | } | ||
145 | #endif | ||
146 | si.si_signo = SIGSEGV; | ||
147 | si.si_code = si_code; | ||
148 | si.si_addr = (void *) address; | ||
149 | force_sig_info(SIGSEGV, &si, current); | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * This routine handles page faults. It determines the address, | ||
154 | * and the problem, and then passes it off to one of the appropriate | ||
155 | * routines. | ||
156 | * | ||
157 | * error_code: | ||
158 |  * 04 Protection -> Write-Protection (suppression) | ||
159 | * 10 Segment translation -> Not present (nullification) | ||
160 | * 11 Page translation -> Not present (nullification) | ||
161 | * 3b Region third trans. -> Not present (nullification) | ||
162 | */ | ||
163 | extern inline void | ||
164 | do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | ||
165 | { | ||
166 | struct task_struct *tsk; | ||
167 | struct mm_struct *mm; | ||
168 | struct vm_area_struct * vma; | ||
169 | unsigned long address; | ||
170 | int user_address; | ||
171 | const struct exception_table_entry *fixup; | ||
172 | int si_code = SEGV_MAPERR; | ||
173 | |||
174 | tsk = current; | ||
175 | mm = tsk->mm; | ||
176 | |||
177 | /* | ||
178 | * Check for low-address protection. This needs to be treated | ||
179 | * as a special case because the translation exception code | ||
180 | * field is not guaranteed to contain valid data in this case. | ||
181 | */ | ||
182 | if (is_protection && !(S390_lowcore.trans_exc_code & 4)) { | ||
183 | |||
184 | /* Low-address protection hit in kernel mode means | ||
185 | NULL pointer write access in kernel mode. */ | ||
186 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) { | ||
187 | address = 0; | ||
188 | user_address = 0; | ||
189 | goto no_context; | ||
190 | } | ||
191 | |||
192 | /* Low-address protection hit in user mode 'cannot happen'. */ | ||
193 | die ("Low-address protection", regs, error_code); | ||
194 | do_exit(SIGKILL); | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * get the failing address | ||
199 |  * more specifically, the segment and page table portion of | ||
200 | * the address | ||
201 | */ | ||
202 | address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; | ||
203 | user_address = check_user_space(regs, error_code); | ||
204 | |||
205 | /* | ||
206 | * Verify that the fault happened in user space, that | ||
207 | * we are not in an interrupt and that there is a | ||
208 | * user context. | ||
209 | */ | ||
210 | if (user_address == 0 || in_interrupt() || !mm) | ||
211 | goto no_context; | ||
212 | |||
213 | /* | ||
214 | * When we get here, the fault happened in the current | ||
215 | * task's user address space, so we can switch on the | ||
216 | * interrupts again and then search the VMAs | ||
217 | */ | ||
218 | local_irq_enable(); | ||
219 | |||
220 | down_read(&mm->mmap_sem); | ||
221 | |||
222 | vma = find_vma(mm, address); | ||
223 | if (!vma) | ||
224 | goto bad_area; | ||
225 | if (vma->vm_start <= address) | ||
226 | goto good_area; | ||
227 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
228 | goto bad_area; | ||
229 | if (expand_stack(vma, address)) | ||
230 | goto bad_area; | ||
231 | /* | ||
232 | * Ok, we have a good vm_area for this memory access, so | ||
233 | * we can handle it.. | ||
234 | */ | ||
235 | good_area: | ||
236 | si_code = SEGV_ACCERR; | ||
237 | if (!is_protection) { | ||
238 | /* page not present, check vm flags */ | ||
239 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
240 | goto bad_area; | ||
241 | } else { | ||
242 | if (!(vma->vm_flags & VM_WRITE)) | ||
243 | goto bad_area; | ||
244 | } | ||
245 | |||
246 | survive: | ||
247 | /* | ||
248 | * If for any reason at all we couldn't handle the fault, | ||
249 | * make sure we exit gracefully rather than endlessly redo | ||
250 | * the fault. | ||
251 | */ | ||
252 | switch (handle_mm_fault(mm, vma, address, is_protection)) { | ||
253 | case VM_FAULT_MINOR: | ||
254 | tsk->min_flt++; | ||
255 | break; | ||
256 | case VM_FAULT_MAJOR: | ||
257 | tsk->maj_flt++; | ||
258 | break; | ||
259 | case VM_FAULT_SIGBUS: | ||
260 | goto do_sigbus; | ||
261 | case VM_FAULT_OOM: | ||
262 | goto out_of_memory; | ||
263 | default: | ||
264 | BUG(); | ||
265 | } | ||
266 | |||
267 | up_read(&mm->mmap_sem); | ||
268 | /* | ||
269 | * The instruction that caused the program check will | ||
270 | * be repeated. Don't signal single step via SIGTRAP. | ||
271 | */ | ||
272 | clear_tsk_thread_flag(current, TIF_SINGLE_STEP); | ||
273 | return; | ||
274 | |||
275 | /* | ||
276 | * Something tried to access memory that isn't in our memory map.. | ||
277 | * Fix it, but check if it's kernel or user first.. | ||
278 | */ | ||
279 | bad_area: | ||
280 | up_read(&mm->mmap_sem); | ||
281 | |||
282 | /* User mode accesses just cause a SIGSEGV */ | ||
283 | if (regs->psw.mask & PSW_MASK_PSTATE) { | ||
284 | tsk->thread.prot_addr = address; | ||
285 | tsk->thread.trap_no = error_code; | ||
286 | do_sigsegv(regs, error_code, si_code, address); | ||
287 | return; | ||
288 | } | ||
289 | |||
290 | no_context: | ||
291 | /* Are we prepared to handle this kernel fault? */ | ||
292 | fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); | ||
293 | if (fixup) { | ||
294 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; | ||
295 | return; | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Oops. The kernel tried to access some bad page. We'll have to | ||
300 | * terminate things with extreme prejudice. | ||
301 | */ | ||
302 | if (user_address == 0) | ||
303 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" | ||
304 | " at virtual kernel address %p\n", (void *)address); | ||
305 | else | ||
306 | printk(KERN_ALERT "Unable to handle kernel paging request" | ||
307 | " at virtual user address %p\n", (void *)address); | ||
308 | |||
309 | die("Oops", regs, error_code); | ||
310 | do_exit(SIGKILL); | ||
311 | |||
312 | |||
313 | /* | ||
314 | * We ran out of memory, or some other thing happened to us that made | ||
315 | * us unable to handle the page fault gracefully. | ||
316 | */ | ||
317 | out_of_memory: | ||
318 | up_read(&mm->mmap_sem); | ||
319 | if (tsk->pid == 1) { | ||
320 | yield(); | ||
321 | goto survive; | ||
322 | } | ||
323 | printk("VM: killing process %s\n", tsk->comm); | ||
324 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
325 | do_exit(SIGKILL); | ||
326 | goto no_context; | ||
327 | |||
328 | do_sigbus: | ||
329 | up_read(&mm->mmap_sem); | ||
330 | |||
331 | /* | ||
332 | * Send a sigbus, regardless of whether we were in kernel | ||
333 | * or user mode. | ||
334 | */ | ||
335 | tsk->thread.prot_addr = address; | ||
336 | tsk->thread.trap_no = error_code; | ||
337 | force_sig(SIGBUS, tsk); | ||
338 | |||
339 | /* Kernel mode? Handle exceptions or die */ | ||
340 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | ||
341 | goto no_context; | ||
342 | } | ||
343 | |||
344 | void do_protection_exception(struct pt_regs *regs, unsigned long error_code) | ||
345 | { | ||
346 | regs->psw.addr -= (error_code >> 16); | ||
347 | do_exception(regs, 4, 1); | ||
348 | } | ||
349 | |||
350 | void do_dat_exception(struct pt_regs *regs, unsigned long error_code) | ||
351 | { | ||
352 | do_exception(regs, error_code & 0xff, 0); | ||
353 | } | ||
354 | |||
355 | #ifndef CONFIG_ARCH_S390X | ||
356 | |||
357 | typedef struct _pseudo_wait_t { | ||
358 | struct _pseudo_wait_t *next; | ||
359 | wait_queue_head_t queue; | ||
360 | unsigned long address; | ||
361 | int resolved; | ||
362 | } pseudo_wait_t; | ||
363 | |||
364 | static pseudo_wait_t *pseudo_lock_queue = NULL; | ||
365 | static spinlock_t pseudo_wait_spinlock; /* spinlock to protect lock queue */ | ||
366 | |||
367 | /* | ||
368 | * This routine handles 'pagex' pseudo page faults. | ||
369 | */ | ||
370 | asmlinkage void | ||
371 | do_pseudo_page_fault(struct pt_regs *regs, unsigned long error_code) | ||
372 | { | ||
373 | pseudo_wait_t wait_struct; | ||
374 | pseudo_wait_t *ptr, *last, *next; | ||
375 | unsigned long address; | ||
376 | |||
377 | /* | ||
378 | * get the failing address | ||
379 |  * more specifically, the segment and page table portion of | ||
380 | * the address | ||
381 | */ | ||
382 | address = S390_lowcore.trans_exc_code & 0xfffff000; | ||
383 | |||
384 | if (address & 0x80000000) { | ||
385 | /* high bit set -> a page has been swapped in by VM */ | ||
386 | address &= 0x7fffffff; | ||
387 | spin_lock(&pseudo_wait_spinlock); | ||
388 | last = NULL; | ||
389 | ptr = pseudo_lock_queue; | ||
390 | while (ptr != NULL) { | ||
391 | next = ptr->next; | ||
392 | if (address == ptr->address) { | ||
393 | /* | ||
394 | * This is one of the processes waiting | ||
395 | * for the page. Unchain from the queue. | ||
396 | * There can be more than one process | ||
397 | * waiting for the same page. VM presents | ||
398 | * an initial and a completion interrupt for | ||
399 | * every process that tries to access a | ||
400 | * page swapped out by VM. | ||
401 | */ | ||
402 | if (last == NULL) | ||
403 | pseudo_lock_queue = next; | ||
404 | else | ||
405 | last->next = next; | ||
406 | /* now wake up the process */ | ||
407 | ptr->resolved = 1; | ||
408 | wake_up(&ptr->queue); | ||
409 | } else | ||
410 | last = ptr; | ||
411 | ptr = next; | ||
412 | } | ||
413 | spin_unlock(&pseudo_wait_spinlock); | ||
414 | } else { | ||
415 | 		/* Pseudo page faults in kernel mode are a bad idea */ | ||
416 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) { | ||
417 | /* | ||
418 | * VM presents pseudo page faults if the interrupted | ||
419 | * state was not disabled for interrupts. So we can | ||
420 | * get pseudo page fault interrupts while running | ||
421 | * in kernel mode. We simply access the page here | ||
422 | * while we are running disabled. VM will then swap | ||
423 | * in the page synchronously. | ||
424 | */ | ||
425 | if (check_user_space(regs, error_code) == 0) | ||
426 | /* dereference a virtual kernel address */ | ||
427 | __asm__ __volatile__ ( | ||
428 | " ic 0,0(%0)" | ||
429 | : : "a" (address) : "0"); | ||
430 | else | ||
431 | /* dereference a virtual user address */ | ||
432 | __asm__ __volatile__ ( | ||
433 | " la 2,0(%0)\n" | ||
434 | " sacf 512\n" | ||
435 | " ic 2,0(2)\n" | ||
436 | "0:sacf 0\n" | ||
437 | ".section __ex_table,\"a\"\n" | ||
438 | " .align 4\n" | ||
439 | " .long 0b,0b\n" | ||
440 | ".previous" | ||
441 | : : "a" (address) : "2" ); | ||
442 | |||
443 | return; | ||
444 | } | ||
445 | /* initialize and add element to pseudo_lock_queue */ | ||
446 | init_waitqueue_head (&wait_struct.queue); | ||
447 | wait_struct.address = address; | ||
448 | wait_struct.resolved = 0; | ||
449 | spin_lock(&pseudo_wait_spinlock); | ||
450 | wait_struct.next = pseudo_lock_queue; | ||
451 | pseudo_lock_queue = &wait_struct; | ||
452 | spin_unlock(&pseudo_wait_spinlock); | ||
453 | /* | ||
454 | * The instruction that caused the program check will | ||
455 | * be repeated. Don't signal single step via SIGTRAP. | ||
456 | */ | ||
457 | clear_tsk_thread_flag(current, TIF_SINGLE_STEP); | ||
458 | /* go to sleep */ | ||
459 | wait_event(wait_struct.queue, wait_struct.resolved); | ||
460 | } | ||
461 | } | ||
462 | #endif /* !CONFIG_ARCH_S390X */ | ||
463 | |||
464 | #ifdef CONFIG_PFAULT | ||
465 | /* | ||
466 | * 'pfault' pseudo page faults routines. | ||
467 | */ | ||
468 | static int pfault_disable = 0; | ||
469 | |||
470 | static int __init nopfault(char *str) | ||
471 | { | ||
472 | pfault_disable = 1; | ||
473 | return 1; | ||
474 | } | ||
475 | |||
476 | __setup("nopfault", nopfault); | ||
477 | |||
478 | typedef struct { | ||
479 | __u16 refdiagc; | ||
480 | __u16 reffcode; | ||
481 | __u16 refdwlen; | ||
482 | __u16 refversn; | ||
483 | __u64 refgaddr; | ||
484 | __u64 refselmk; | ||
485 | __u64 refcmpmk; | ||
486 | __u64 reserved; | ||
487 | } __attribute__ ((packed)) pfault_refbk_t; | ||
488 | |||
489 | int pfault_init(void) | ||
490 | { | ||
491 | pfault_refbk_t refbk = | ||
492 | { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48, | ||
493 | __PF_RES_FIELD }; | ||
494 | int rc; | ||
495 | |||
496 | if (pfault_disable) | ||
497 | return -1; | ||
498 | __asm__ __volatile__( | ||
499 | " diag %1,%0,0x258\n" | ||
500 | "0: j 2f\n" | ||
501 | "1: la %0,8\n" | ||
502 | "2:\n" | ||
503 | ".section __ex_table,\"a\"\n" | ||
504 | " .align 4\n" | ||
505 | #ifndef CONFIG_ARCH_S390X | ||
506 | " .long 0b,1b\n" | ||
507 | #else /* CONFIG_ARCH_S390X */ | ||
508 | " .quad 0b,1b\n" | ||
509 | #endif /* CONFIG_ARCH_S390X */ | ||
510 | ".previous" | ||
511 | : "=d" (rc) : "a" (&refbk) : "cc" ); | ||
512 | __ctl_set_bit(0, 9); | ||
513 | return rc; | ||
514 | } | ||
515 | |||
516 | void pfault_fini(void) | ||
517 | { | ||
518 | pfault_refbk_t refbk = | ||
519 | { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; | ||
520 | |||
521 | if (pfault_disable) | ||
522 | return; | ||
523 | __ctl_clear_bit(0,9); | ||
524 | __asm__ __volatile__( | ||
525 | " diag %0,0,0x258\n" | ||
526 | "0:\n" | ||
527 | ".section __ex_table,\"a\"\n" | ||
528 | " .align 4\n" | ||
529 | #ifndef CONFIG_ARCH_S390X | ||
530 | " .long 0b,0b\n" | ||
531 | #else /* CONFIG_ARCH_S390X */ | ||
532 | " .quad 0b,0b\n" | ||
533 | #endif /* CONFIG_ARCH_S390X */ | ||
534 | ".previous" | ||
535 | : : "a" (&refbk) : "cc" ); | ||
536 | } | ||
537 | |||
538 | asmlinkage void | ||
539 | pfault_interrupt(struct pt_regs *regs, __u16 error_code) | ||
540 | { | ||
541 | struct task_struct *tsk; | ||
542 | __u16 subcode; | ||
543 | |||
544 | /* | ||
545 | * Get the external interruption subcode & pfault | ||
546 | * initial/completion signal bit. VM stores this | ||
547 | * in the 'cpu address' field associated with the | ||
548 | * external interrupt. | ||
549 | */ | ||
550 | subcode = S390_lowcore.cpu_addr; | ||
551 | if ((subcode & 0xff00) != __SUBCODE_MASK) | ||
552 | return; | ||
553 | |||
554 | /* | ||
555 | * Get the token (= address of the task structure of the affected task). | ||
556 | */ | ||
557 | tsk = *(struct task_struct **) __LC_PFAULT_INTPARM; | ||
558 | |||
559 | if (subcode & 0x0080) { | ||
560 | /* signal bit is set -> a page has been swapped in by VM */ | ||
561 | if (xchg(&tsk->thread.pfault_wait, -1) != 0) { | ||
562 | /* Initial interrupt was faster than the completion | ||
563 | * interrupt. pfault_wait is valid. Set pfault_wait | ||
564 | * back to zero and wake up the process. This can | ||
565 | * safely be done because the task is still sleeping | ||
566 | 			 * and can't produce new pfaults. */ | ||
567 | tsk->thread.pfault_wait = 0; | ||
568 | wake_up_process(tsk); | ||
569 | } | ||
570 | } else { | ||
571 | /* signal bit not set -> a real page is missing. */ | ||
572 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
573 | if (xchg(&tsk->thread.pfault_wait, 1) != 0) { | ||
574 | /* Completion interrupt was faster than the initial | ||
575 | * interrupt (swapped in a -1 for pfault_wait). Set | ||
576 | * pfault_wait back to zero and exit. This can be | ||
577 | * done safely because tsk is running in kernel | ||
578 | * mode and can't produce new pfaults. */ | ||
579 | tsk->thread.pfault_wait = 0; | ||
580 | set_task_state(tsk, TASK_RUNNING); | ||
581 | } else | ||
582 | set_tsk_need_resched(tsk); | ||
583 | } | ||
584 | } | ||
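/*
 * Informal summary of the pfault_wait handshake above: when the initial
 * interrupt arrives first, pfault_wait goes 0 -> 1 and the task is put to
 * sleep until the completion interrupt takes it back to 0 and wakes it up;
 * when the completion interrupt overtakes the initial one, pfault_wait goes
 * 0 -> -1 and the later initial interrupt resets it to 0 without sleeping.
 */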
585 | #endif | ||
586 | |||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c new file mode 100644 index 000000000000..8e723bc7f795 --- /dev/null +++ b/arch/s390/mm/init.c | |||
@@ -0,0 +1,310 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/init.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | ||
7 | * | ||
8 | * Derived from "arch/i386/mm/init.c" | ||
9 | * Copyright (C) 1995 Linus Torvalds | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/mman.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/swap.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/pagemap.h> | ||
26 | #include <linux/bootmem.h> | ||
27 | |||
28 | #include <asm/processor.h> | ||
29 | #include <asm/system.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/pgalloc.h> | ||
33 | #include <asm/dma.h> | ||
34 | #include <asm/lowcore.h> | ||
35 | #include <asm/tlb.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | |||
38 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
39 | |||
40 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | ||
41 | char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | ||
42 | |||
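/*
 * Release the page at addr back to the hypervisor via diagnose 0x10 so
 * that VM can reclaim the backing frame; used, for instance, by the
 * cooperative memory management module.
 */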
43 | void diag10(unsigned long addr) | ||
44 | { | ||
45 | if (addr >= 0x7ff00000) | ||
46 | return; | ||
47 | #ifdef __s390x__ | ||
48 | asm volatile ( | ||
49 | " sam31\n" | ||
50 | " diag %0,%0,0x10\n" | ||
51 | "0: sam64\n" | ||
52 | ".section __ex_table,\"a\"\n" | ||
53 | " .align 8\n" | ||
54 | " .quad 0b, 0b\n" | ||
55 | ".previous\n" | ||
56 | : : "a" (addr)); | ||
57 | #else | ||
58 | asm volatile ( | ||
59 | " diag %0,%0,0x10\n" | ||
60 | "0:\n" | ||
61 | ".section __ex_table,\"a\"\n" | ||
62 | " .align 4\n" | ||
63 | " .long 0b, 0b\n" | ||
64 | ".previous\n" | ||
65 | : : "a" (addr)); | ||
66 | #endif | ||
67 | } | ||
68 | |||
69 | void show_mem(void) | ||
70 | { | ||
71 | int i, total = 0, reserved = 0; | ||
72 | int shared = 0, cached = 0; | ||
73 | |||
74 | printk("Mem-info:\n"); | ||
75 | show_free_areas(); | ||
76 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | ||
77 | i = max_mapnr; | ||
78 | while (i-- > 0) { | ||
79 | total++; | ||
80 | if (PageReserved(mem_map+i)) | ||
81 | reserved++; | ||
82 | else if (PageSwapCache(mem_map+i)) | ||
83 | cached++; | ||
84 | else if (page_count(mem_map+i)) | ||
85 | shared += page_count(mem_map+i) - 1; | ||
86 | } | ||
87 | printk("%d pages of RAM\n",total); | ||
88 | printk("%d reserved pages\n",reserved); | ||
89 | printk("%d pages shared\n",shared); | ||
90 | printk("%d pages swap cached\n",cached); | ||
91 | } | ||
92 | |||
93 | /* References to section boundaries */ | ||
94 | |||
95 | extern unsigned long _text; | ||
96 | extern unsigned long _etext; | ||
97 | extern unsigned long _edata; | ||
98 | extern unsigned long __bss_start; | ||
99 | extern unsigned long _end; | ||
100 | |||
101 | extern unsigned long __init_begin; | ||
102 | extern unsigned long __init_end; | ||
103 | |||
104 | /* | ||
105 | * paging_init() sets up the page tables | ||
106 | */ | ||
107 | |||
108 | #ifndef CONFIG_ARCH_S390X | ||
109 | void __init paging_init(void) | ||
110 | { | ||
111 | pgd_t * pg_dir; | ||
112 | pte_t * pg_table; | ||
113 | pte_t pte; | ||
114 | int i; | ||
115 | unsigned long tmp; | ||
116 | unsigned long pfn = 0; | ||
117 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; | ||
118 | static const int ssm_mask = 0x04000000L; | ||
119 | |||
120 | /* unmap whole virtual address space */ | ||
121 | |||
122 | pg_dir = swapper_pg_dir; | ||
123 | |||
124 | for (i=0;i<KERNEL_PGD_PTRS;i++) | ||
125 | pmd_clear((pmd_t*)pg_dir++); | ||
126 | |||
127 | /* | ||
128 | * map whole physical memory to virtual memory (identity mapping) | ||
129 | */ | ||
130 | |||
131 | pg_dir = swapper_pg_dir; | ||
132 | |||
133 | while (pfn < max_low_pfn) { | ||
134 | /* | ||
135 | * pg_table is physical at this point | ||
136 | */ | ||
137 | pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | ||
138 | |||
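		/*
		 * A 31 bit page table is only 1 KB (256 entries mapping
		 * 1 MB), so the 4 KB page allocated above holds four of
		 * them; pgd0..pgd3 hook those four quarters into four
		 * consecutive segment table entries, and the loop below
		 * then fills all 1024 ptes of the page in one go.
		 */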
139 | pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table)); | ||
140 | pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024)); | ||
141 | pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048)); | ||
142 | pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072)); | ||
143 | pg_dir++; | ||
144 | |||
145 | for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { | ||
146 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
147 | if (pfn >= max_low_pfn) | ||
148 | pte_clear(&init_mm, 0, &pte); | ||
149 | set_pte(pg_table, pte); | ||
150 | pfn++; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | S390_lowcore.kernel_asce = pgdir_k; | ||
155 | |||
156 | /* enable virtual mapping in kernel mode */ | ||
157 | __asm__ __volatile__(" LCTL 1,1,%0\n" | ||
158 | " LCTL 7,7,%0\n" | ||
159 | " LCTL 13,13,%0\n" | ||
160 | " SSM %1" | ||
161 | : : "m" (pgdir_k), "m" (ssm_mask)); | ||
162 | |||
163 | local_flush_tlb(); | ||
164 | |||
165 | { | ||
166 | unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0}; | ||
167 | |||
168 | zones_size[ZONE_DMA] = max_low_pfn; | ||
169 | free_area_init(zones_size); | ||
170 | } | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | #else /* CONFIG_ARCH_S390X */ | ||
175 | void __init paging_init(void) | ||
176 | { | ||
177 | pgd_t * pg_dir; | ||
178 | pmd_t * pm_dir; | ||
179 | pte_t * pt_dir; | ||
180 | pte_t pte; | ||
181 | int i,j,k; | ||
182 | unsigned long pfn = 0; | ||
183 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | | ||
184 | _KERN_REGION_TABLE; | ||
185 | static const int ssm_mask = 0x04000000L; | ||
186 | |||
187 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | ||
188 | unsigned long dma_pfn, high_pfn; | ||
189 | |||
190 | dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; | ||
191 | high_pfn = max_low_pfn; | ||
192 | |||
193 | if (dma_pfn > high_pfn) | ||
194 | zones_size[ZONE_DMA] = high_pfn; | ||
195 | else { | ||
196 | zones_size[ZONE_DMA] = dma_pfn; | ||
197 | zones_size[ZONE_NORMAL] = high_pfn - dma_pfn; | ||
198 | } | ||
199 | |||
200 | /* Initialize mem_map[]. */ | ||
201 | free_area_init(zones_size); | ||
202 | |||
203 | |||
204 | /* | ||
205 | * map whole physical memory to virtual memory (identity mapping) | ||
206 | */ | ||
207 | |||
208 | pg_dir = swapper_pg_dir; | ||
209 | |||
210 | for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) { | ||
211 | |||
212 | if (pfn >= max_low_pfn) { | ||
213 | pgd_clear(pg_dir); | ||
214 | continue; | ||
215 | } | ||
216 | |||
217 | pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4); | ||
218 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
219 | |||
220 | for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { | ||
221 | if (pfn >= max_low_pfn) { | ||
222 | pmd_clear(pm_dir); | ||
223 | continue; | ||
224 | } | ||
225 | |||
226 | pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | ||
227 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
228 | |||
229 | for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { | ||
230 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
231 | if (pfn >= max_low_pfn) { | ||
232 | pte_clear(&init_mm, 0, &pte); | ||
233 | continue; | ||
234 | } | ||
235 | set_pte(pt_dir, pte); | ||
236 | pfn++; | ||
237 | } | ||
238 | } | ||
239 | } | ||
240 | |||
241 | S390_lowcore.kernel_asce = pgdir_k; | ||
242 | |||
243 | /* enable virtual mapping in kernel mode */ | ||
244 | __asm__ __volatile__("lctlg 1,1,%0\n\t" | ||
245 | "lctlg 7,7,%0\n\t" | ||
246 | "lctlg 13,13,%0\n\t" | ||
247 | "ssm %1" | ||
248 | : :"m" (pgdir_k), "m" (ssm_mask)); | ||
249 | |||
250 | local_flush_tlb(); | ||
251 | |||
252 | return; | ||
253 | } | ||
254 | #endif /* CONFIG_ARCH_S390X */ | ||
255 | |||
256 | void __init mem_init(void) | ||
257 | { | ||
258 | unsigned long codesize, reservedpages, datasize, initsize; | ||
259 | |||
260 | max_mapnr = num_physpages = max_low_pfn; | ||
261 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | ||
262 | |||
263 | /* clear the zero-page */ | ||
264 | memset(empty_zero_page, 0, PAGE_SIZE); | ||
265 | |||
266 | /* this will put all low memory onto the freelists */ | ||
267 | totalram_pages += free_all_bootmem(); | ||
268 | |||
269 | reservedpages = 0; | ||
270 | |||
271 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
272 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
273 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
274 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", | ||
275 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
276 | max_mapnr << (PAGE_SHIFT-10), | ||
277 | codesize >> 10, | ||
278 | reservedpages << (PAGE_SHIFT-10), | ||
279 | datasize >>10, | ||
280 | initsize >> 10); | ||
281 | } | ||
282 | |||
283 | void free_initmem(void) | ||
284 | { | ||
285 | unsigned long addr; | ||
286 | |||
287 | addr = (unsigned long)(&__init_begin); | ||
288 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | ||
289 | ClearPageReserved(virt_to_page(addr)); | ||
290 | set_page_count(virt_to_page(addr), 1); | ||
291 | free_page(addr); | ||
292 | totalram_pages++; | ||
293 | } | ||
294 | printk ("Freeing unused kernel memory: %ldk freed\n", | ||
295 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); | ||
296 | } | ||
297 | |||
298 | #ifdef CONFIG_BLK_DEV_INITRD | ||
299 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
300 | { | ||
301 | if (start < end) | ||
302 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
303 | for (; start < end; start += PAGE_SIZE) { | ||
304 | ClearPageReserved(virt_to_page(start)); | ||
305 | set_page_count(virt_to_page(start), 1); | ||
306 | free_page(start); | ||
307 | totalram_pages++; | ||
308 | } | ||
309 | } | ||
310 | #endif | ||
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c new file mode 100644 index 000000000000..c6c39d868bc8 --- /dev/null +++ b/arch/s390/mm/ioremap.c | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/ioremap.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | ||
7 | * | ||
8 | * Derived from "arch/i386/mm/extable.c" | ||
9 | * (C) Copyright 1995 1996 Linus Torvalds | ||
10 | * | ||
11 | * Re-map IO memory to kernel address space so that we can access it. | ||
12 | * This is needed for high PCI addresses that aren't mapped in the | ||
13 | * 640k-1MB IO memory area on PC's | ||
14 | */ | ||
15 | |||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/tlbflush.h> | ||
22 | |||
23 | static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, | ||
24 | unsigned long phys_addr, unsigned long flags) | ||
25 | { | ||
26 | unsigned long end; | ||
27 | unsigned long pfn; | ||
28 | |||
29 | address &= ~PMD_MASK; | ||
30 | end = address + size; | ||
31 | if (end > PMD_SIZE) | ||
32 | end = PMD_SIZE; | ||
33 | if (address >= end) | ||
34 | BUG(); | ||
35 | pfn = phys_addr >> PAGE_SHIFT; | ||
36 | do { | ||
37 | if (!pte_none(*pte)) { | ||
38 | printk("remap_area_pte: page already exists\n"); | ||
39 | BUG(); | ||
40 | } | ||
41 | set_pte(pte, pfn_pte(pfn, __pgprot(flags))); | ||
42 | address += PAGE_SIZE; | ||
43 | pfn++; | ||
44 | pte++; | ||
45 | } while (address && (address < end)); | ||
46 | } | ||
47 | |||
48 | static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, | ||
49 | unsigned long phys_addr, unsigned long flags) | ||
50 | { | ||
51 | unsigned long end; | ||
52 | |||
53 | address &= ~PGDIR_MASK; | ||
54 | end = address + size; | ||
55 | if (end > PGDIR_SIZE) | ||
56 | end = PGDIR_SIZE; | ||
57 | phys_addr -= address; | ||
58 | if (address >= end) | ||
59 | BUG(); | ||
60 | do { | ||
61 | pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); | ||
62 | if (!pte) | ||
63 | return -ENOMEM; | ||
64 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | ||
65 | address = (address + PMD_SIZE) & PMD_MASK; | ||
66 | pmd++; | ||
67 | } while (address && (address < end)); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int remap_area_pages(unsigned long address, unsigned long phys_addr, | ||
72 | unsigned long size, unsigned long flags) | ||
73 | { | ||
74 | int error; | ||
75 | pgd_t * dir; | ||
76 | unsigned long end = address + size; | ||
77 | |||
78 | phys_addr -= address; | ||
79 | dir = pgd_offset(&init_mm, address); | ||
80 | flush_cache_all(); | ||
81 | if (address >= end) | ||
82 | BUG(); | ||
83 | spin_lock(&init_mm.page_table_lock); | ||
84 | do { | ||
85 | pmd_t *pmd; | ||
86 | pmd = pmd_alloc(&init_mm, dir, address); | ||
87 | error = -ENOMEM; | ||
88 | if (!pmd) | ||
89 | break; | ||
90 | if (remap_area_pmd(pmd, address, end - address, | ||
91 | phys_addr + address, flags)) | ||
92 | break; | ||
93 | error = 0; | ||
94 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
95 | dir++; | ||
96 | } while (address && (address < end)); | ||
97 | spin_unlock(&init_mm.page_table_lock); | ||
98 | flush_tlb_all(); | ||
99 | 	return error; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Generic mapping function (not visible outside): | ||
104 | */ | ||
105 | |||
106 | /* | ||
107 | * Remap an arbitrary physical address space into the kernel virtual | ||
108 | * address space. Needed when the kernel wants to access high addresses | ||
109 | * directly. | ||
110 | */ | ||
111 | void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) | ||
112 | { | ||
113 | void * addr; | ||
114 | struct vm_struct * area; | ||
115 | |||
116 | if (phys_addr < virt_to_phys(high_memory)) | ||
117 | return phys_to_virt(phys_addr); | ||
118 | if (phys_addr & ~PAGE_MASK) | ||
119 | return NULL; | ||
120 | size = PAGE_ALIGN(size); | ||
121 | if (!size || size > phys_addr + size) | ||
122 | return NULL; | ||
123 | area = get_vm_area(size, VM_IOREMAP); | ||
124 | if (!area) | ||
125 | return NULL; | ||
126 | addr = area->addr; | ||
127 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | ||
128 | vfree(addr); | ||
129 | return NULL; | ||
130 | } | ||
131 | return addr; | ||
132 | } | ||
133 | |||
134 | void iounmap(void *addr) | ||
135 | { | ||
136 | if (addr > high_memory) | ||
137 | vfree(addr); | ||
138 | } | ||
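/*
 * Minimal usage sketch (address invented for illustration):
 *
 *	void *virt = __ioremap(0x80000000UL, PAGE_SIZE, 0);
 *	if (virt) {
 *		// access the remapped range through virt ...
 *		iounmap(virt);
 *	}
 *
 * For physical addresses below high_memory, __ioremap() simply returns
 * phys_to_virt(phys_addr) and iounmap() leaves the mapping alone.
 */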
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c new file mode 100644 index 000000000000..fb187e5a54b4 --- /dev/null +++ b/arch/s390/mm/mmap.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * linux/arch/s390/mm/mmap.c | ||
3 | * | ||
4 | * flexible mmap layout support | ||
5 | * | ||
6 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. | ||
7 | * All Rights Reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | * | ||
23 | * | ||
24 | * Started by Ingo Molnar <mingo@elte.hu> | ||
25 | */ | ||
26 | |||
27 | #include <linux/personality.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/module.h> | ||
30 | |||
31 | /* | ||
32 | * Top of mmap area (just below the process stack). | ||
33 | * | ||
34 |  * Leave at least a ~128 MB hole. | ||
35 | */ | ||
36 | #define MIN_GAP (128*1024*1024) | ||
37 | #define MAX_GAP (TASK_SIZE/6*5) | ||
38 | |||
39 | static inline unsigned long mmap_base(void) | ||
40 | { | ||
41 | unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; | ||
42 | |||
43 | if (gap < MIN_GAP) | ||
44 | gap = MIN_GAP; | ||
45 | else if (gap > MAX_GAP) | ||
46 | gap = MAX_GAP; | ||
47 | |||
48 | return TASK_SIZE - (gap & PAGE_MASK); | ||
49 | } | ||
50 | |||
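/*
 * Worked example (numbers illustrative): for a 31 bit process with a 2 GB
 * TASK_SIZE and an 8 MB stack rlimit, the gap is clamped up to MIN_GAP,
 * so mmap_base() returns 2 GB - 128 MB = 0x78000000 and the flexible
 * layout places mappings top-down from there.
 */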
51 | static inline int mmap_is_legacy(void) | ||
52 | { | ||
53 | #ifdef CONFIG_ARCH_S390X | ||
54 | /* | ||
55 | * Force standard allocation for 64 bit programs. | ||
56 | */ | ||
57 | if (!test_thread_flag(TIF_31BIT)) | ||
58 | return 1; | ||
59 | #endif | ||
60 | return sysctl_legacy_va_layout || | ||
61 | (current->personality & ADDR_COMPAT_LAYOUT) || | ||
62 | current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * This function, called very early during the creation of a new | ||
67 | * process VM image, sets up which VM layout function to use: | ||
68 | */ | ||
69 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
70 | { | ||
71 | /* | ||
72 | * Fall back to the standard layout if the personality | ||
73 | * bit is set, or if the expected stack growth is unlimited: | ||
74 | */ | ||
75 | if (mmap_is_legacy()) { | ||
76 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
77 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
78 | mm->unmap_area = arch_unmap_area; | ||
79 | } else { | ||
80 | mm->mmap_base = mmap_base(); | ||
81 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
82 | mm->unmap_area = arch_unmap_area_topdown; | ||
83 | } | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); | ||
86 | |||
diff --git a/arch/s390/oprofile/Kconfig b/arch/s390/oprofile/Kconfig new file mode 100644 index 000000000000..208220a5f23f --- /dev/null +++ b/arch/s390/oprofile/Kconfig | |||
@@ -0,0 +1,22 @@ | |||
1 | |||
2 | menu "Profiling support" | ||
3 | |||
4 | config PROFILING | ||
5 | bool "Profiling support" | ||
6 | help | ||
7 | Say Y here to enable profiling support mechanisms used by | ||
8 | profilers such as readprofile or OProfile. | ||
9 | |||
10 | |||
11 | config OPROFILE | ||
12 | tristate "OProfile system profiling" | ||
13 | depends on PROFILING | ||
14 | help | ||
15 | OProfile is a profiling system capable of profiling the | ||
16 | 	  whole system, including the kernel, kernel modules, libraries, | ||
17 | and applications. | ||
18 | |||
19 | If unsure, say N. | ||
20 | |||
21 | endmenu | ||
22 | |||
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile new file mode 100644 index 000000000000..ec349276258a --- /dev/null +++ b/arch/s390/oprofile/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | obj-$(CONFIG_OPROFILE) += oprofile.o | ||
2 | |||
3 | DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ | ||
4 | oprof.o cpu_buffer.o buffer_sync.o \ | ||
5 | event_buffer.o oprofile_files.o \ | ||
6 | oprofilefs.o oprofile_stats.o \ | ||
7 | timer_int.o ) | ||
8 | |||
9 | oprofile-y := $(DRIVER_OBJS) init.o | ||
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c new file mode 100644 index 000000000000..a65ead0e200a --- /dev/null +++ b/arch/s390/oprofile/init.c | |||
@@ -0,0 +1,22 @@ | |||
1 | /** | ||
2 | * arch/s390/oprofile/init.c | ||
3 | * | ||
4 | * S390 Version | ||
5 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
7 | * | ||
8 | * @remark Copyright 2002 OProfile authors | ||
9 | */ | ||
10 | |||
11 | #include <linux/oprofile.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/errno.h> | ||
14 | |||
15 | int __init oprofile_arch_init(struct oprofile_operations* ops) | ||
16 | { | ||
17 | return -ENODEV; | ||
18 | } | ||
19 | |||
20 | void oprofile_arch_exit(void) | ||
21 | { | ||
22 | } | ||