author: Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
commit: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree: 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/s390/kernel
tags: Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- arch/s390/kernel/Makefile | 31
-rw-r--r-- arch/s390/kernel/asm-offsets.c | 49
-rw-r--r-- arch/s390/kernel/binfmt_elf32.c | 210
-rw-r--r-- arch/s390/kernel/bitmap.S | 56
-rw-r--r-- arch/s390/kernel/compat_exec_domain.c | 30
-rw-r--r-- arch/s390/kernel/compat_ioctl.c | 73
-rw-r--r-- arch/s390/kernel/compat_linux.c | 1045
-rw-r--r-- arch/s390/kernel/compat_linux.h | 197
-rw-r--r-- arch/s390/kernel/compat_ptrace.h | 83
-rw-r--r-- arch/s390/kernel/compat_signal.c | 648
-rw-r--r-- arch/s390/kernel/compat_wrapper.S | 1443
-rw-r--r-- arch/s390/kernel/cpcmd.c | 111
-rw-r--r-- arch/s390/kernel/debug.c | 1286
-rw-r--r-- arch/s390/kernel/ebcdic.c | 400
-rw-r--r-- arch/s390/kernel/entry.S | 868
-rw-r--r-- arch/s390/kernel/entry64.S | 881
-rw-r--r-- arch/s390/kernel/head.S | 772
-rw-r--r-- arch/s390/kernel/head64.S | 769
-rw-r--r-- arch/s390/kernel/init_task.c | 44
-rw-r--r-- arch/s390/kernel/irq.c | 105
-rw-r--r-- arch/s390/kernel/module.c | 405
-rw-r--r-- arch/s390/kernel/process.c | 416
-rw-r--r-- arch/s390/kernel/profile.c | 20
-rw-r--r-- arch/s390/kernel/ptrace.c | 738
-rw-r--r-- arch/s390/kernel/reipl.S | 78
-rw-r--r-- arch/s390/kernel/reipl64.S | 96
-rw-r--r-- arch/s390/kernel/s390_ext.c | 135
-rw-r--r-- arch/s390/kernel/s390_ksyms.c | 65
-rw-r--r-- arch/s390/kernel/semaphore.c | 108
-rw-r--r-- arch/s390/kernel/setup.c | 632
-rw-r--r-- arch/s390/kernel/signal.c | 527
-rw-r--r-- arch/s390/kernel/smp.c | 840
-rw-r--r-- arch/s390/kernel/sys_s390.c | 270
-rw-r--r-- arch/s390/kernel/syscalls.S | 292
-rw-r--r-- arch/s390/kernel/time.c | 382
-rw-r--r-- arch/s390/kernel/traps.c | 738
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 130
-rw-r--r-- arch/s390/kernel/vtime.c | 565
38 files changed, 15538 insertions(+), 0 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
new file mode 100644
index 000000000000..b41e0e199a7c
--- /dev/null
+++ b/arch/s390/kernel/Makefile
@@ -0,0 +1,31 @@
1#
2# Makefile for the linux kernel.
3#
4
5EXTRA_AFLAGS := -traditional
6
7obj-y := bitmap.o traps.o time.o process.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o
10
11extra-$(CONFIG_ARCH_S390_31) += head.o
12extra-$(CONFIG_ARCH_S390X) += head64.o
13extra-y += init_task.o vmlinux.lds
14
15obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
16obj-$(CONFIG_SMP) += smp.o
17
18obj-$(CONFIG_S390_SUPPORT) += compat_linux.o compat_signal.o \
19 compat_ioctl.o compat_wrapper.o \
20 compat_exec_domain.o
21obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
22
23obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o
24obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o
25
26obj-$(CONFIG_VIRT_TIMER) += vtime.o
27
28#
29# This is just to get the dependencies...
30#
31binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
new file mode 100644
index 000000000000..3f7018e9dbe4
--- /dev/null
+++ b/arch/s390/kernel/asm-offsets.c
@@ -0,0 +1,49 @@
1/*
2 * Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed to extract
4 * and format the required data.
5 */
6
7#include <linux/config.h>
8#include <linux/sched.h>
9
10/* Use a marker if you need to separate the values later */
11
12#define DEFINE(sym, val, marker) \
13 asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
14
15#define BLANK() asm volatile("\n->" : : )
16
17int main(void)
18{
19 DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
20 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
21 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
22 DEFINE(__THREAD_mm_segment,
23 offsetof(struct task_struct, thread.mm_segment),);
24 BLANK();
25 DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
26 BLANK();
27 DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
28 DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
29 DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
30 BLANK();
31 DEFINE(__TI_task, offsetof(struct thread_info, task),);
32 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
33 DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
34 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
35 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
36 BLANK();
37 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
38 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
39 DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
40 DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
41 DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
42 DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
43 DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
44 BLANK();
45 DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
46 DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
47 DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
48 return 0;
49}
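
The DEFINE() macro above never runs; compiling this file with -S leaves "->symbol value" markers in the assembler output, which the build post-processes (with sed) into a header of plain #define constants that entry.S and friends can include. A hypothetical excerpt of that generated header follows — the offset values are invented, and the exact header path (asm/offsets.h in this era) should be treated as an assumption:

	/* Hypothetical excerpt of the generated offsets header. */
	#define __THREAD_ksp 0x58 /* offsetof(struct task_struct, thread.ksp) */
	#define __TI_flags 0x8 /* offsetof(struct thread_info, flags) */
	#define __PT_PSW 0x8 /* offsetof(struct pt_regs, psw) */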
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..03ba5893f17b
--- /dev/null
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -0,0 +1,210 @@
1/*
2 * Support for 32-bit Linux for S390 ELF binaries.
3 *
4 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Gerhard Tonn (ton@de.ibm.com)
6 *
7 * Heavily inspired by the 32-bit Sparc compat code which is
8 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
9 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
10 */
11
12#define __ASMS390_ELF_H
13
14#include <linux/time.h>
15
16/*
17 * These are used to set parameters in the core dumps.
18 */
19#define ELF_CLASS ELFCLASS32
20#define ELF_DATA ELFDATA2MSB
21#define ELF_ARCH EM_S390
22
23/*
24 * This is used to ensure we don't load something for the wrong architecture.
25 */
26#define elf_check_arch(x) \
27 (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
28 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
29
30/* ELF register definitions */
31#define NUM_GPRS 16
32#define NUM_FPRS 16
33#define NUM_ACRS 16
34
35/* For SVR4/S390 the function pointer to be registered with `atexit` is
36 passed in R14. */
37#define ELF_PLAT_INIT(_r, load_addr) \
38 do { \
39 _r->gprs[14] = 0; \
40 } while(0)
41
42#define USE_ELF_CORE_DUMP
43#define ELF_EXEC_PAGESIZE 4096
44
45/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
46 use of this is to invoke "./ld.so someprog" to test out a new version of
47 the loader. We need to make sure that it is out of the way of the program
48 that it will "exec", and that there is sufficient room for the brk. */
49
50#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
51
52/* Wow, the "main" arch needs arch dependent functions too.. :) */
53
54/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
55 now struct_user_regs, they are different) */
56
57#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
58
59#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
60
61#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
62
63/* This yields a mask that user programs can use to figure out what
64 instruction set this CPU supports. */
65
66#define ELF_HWCAP (0)
67
68/* This yields a string that ld.so will use to load implementation
69 specific libraries for optimization. This is more specific in
70 intent than poking at uname or /proc/cpuinfo.
71
72 For the moment, we have only optimizations for the Intel generations,
73 but that could change... */
74
75#define ELF_PLATFORM (NULL)
76
77#define SET_PERSONALITY(ex, ibcs2) \
78do { \
79 if (ibcs2) \
80 set_personality(PER_SVR4); \
81 else if (current->personality != PER_LINUX32) \
82 set_personality(PER_LINUX); \
83 set_thread_flag(TIF_31BIT); \
84} while (0)
85
86#include "compat_linux.h"
87
88typedef _s390_fp_regs32 elf_fpregset_t;
89
90typedef struct
91{
92
93 _psw_t32 psw;
94 __u32 gprs[__NUM_GPRS];
95 __u32 acrs[__NUM_ACRS];
96 __u32 orig_gpr2;
97} s390_regs32;
98typedef s390_regs32 elf_gregset_t;
99
100static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
101{
102 int i;
103
104 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
105 memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
106 for (i = 0; i < NUM_GPRS; i++)
107 regs->gprs[i] = ptregs->gprs[i];
108 save_access_regs(regs->acrs);
109 regs->orig_gpr2 = ptregs->orig_gpr2;
110 return 1;
111}
112
113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
114{
115 struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
116 int i;
117
118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
119 memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
120 for (i = 0; i < NUM_GPRS; i++)
121 regs->gprs[i] = ptregs->gprs[i];
122 memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
123 regs->orig_gpr2 = ptregs->orig_gpr2;
124 return 1;
125}
126
127static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
128{
129 if (tsk == current)
130 save_fp_regs((s390_fp_regs *) fpregs);
131 else
132 memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
133 return 1;
134}
135
136#include <asm/processor.h>
137#include <linux/module.h>
138#include <linux/config.h>
139#include <linux/elfcore.h>
140#include <linux/binfmts.h>
141#include <linux/compat.h>
142
143#define elf_prstatus elf_prstatus32
144struct elf_prstatus32
145{
146 struct elf_siginfo pr_info; /* Info associated with signal */
147 short pr_cursig; /* Current signal */
148 u32 pr_sigpend; /* Set of pending signals */
149 u32 pr_sighold; /* Set of held signals */
150 pid_t pr_pid;
151 pid_t pr_ppid;
152 pid_t pr_pgrp;
153 pid_t pr_sid;
154 struct compat_timeval pr_utime; /* User time */
155 struct compat_timeval pr_stime; /* System time */
156 struct compat_timeval pr_cutime; /* Cumulative user time */
157 struct compat_timeval pr_cstime; /* Cumulative system time */
158 elf_gregset_t pr_reg; /* GP registers */
159 int pr_fpvalid; /* True if math co-processor being used. */
160};
161
162#define elf_prpsinfo elf_prpsinfo32
163struct elf_prpsinfo32
164{
165 char pr_state; /* numeric process state */
166 char pr_sname; /* char for pr_state */
167 char pr_zomb; /* zombie */
168 char pr_nice; /* nice val */
169 u32 pr_flag; /* flags */
170 u16 pr_uid;
171 u16 pr_gid;
172 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
173 /* Lots missing */
174 char pr_fname[16]; /* filename of executable */
175 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
176};
177
178#include <linux/highuid.h>
179
180#undef NEW_TO_OLD_UID
181#undef NEW_TO_OLD_GID
182#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
183#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
184
185#define elf_addr_t u32
186/*
187#define init_elf_binfmt init_elf32_binfmt
188*/
189
190#undef start_thread
191#define start_thread start_thread31
192
193MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
194 " Copyright 2000 IBM Corporation");
195MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
196
197#undef MODULE_DESCRIPTION
198#undef MODULE_AUTHOR
199
200#undef cputime_to_timeval
201#define cputime_to_timeval cputime_to_compat_timeval
202static __inline__ void
203cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
204{
205 value->tv_usec = cputime % 1000000;
206 value->tv_sec = cputime / 1000000;
207}
208
209#include "../../../fs/binfmt_elf.c"
210
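
Two things are worth spelling out here. First, the file redefines the ELF macros and types and then #includes the generic loader (fs/binfmt_elf.c), so the generic code is compiled a second time against 31-bit layouts. Second, the NEW_TO_OLD_UID/GID overrides clamp 32-bit IDs into the 16-bit core-dump fields; a worked sketch, assuming the kernel-default overflowuid of 65534:

	/* Sketch only: overflowuid/overflowgid default to 65534 and are
	 * tunable via /proc/sys/kernel/overflowuid. */
	u16 a = NEW_TO_OLD_UID(1000);	/* fits in 16 bits: stays 1000 */
	u16 b = NEW_TO_OLD_UID(70000);	/* > 65535: clamped to overflowuid */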
diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S
new file mode 100644
index 000000000000..dfb41f946e23
--- /dev/null
+++ b/arch/s390/kernel/bitmap.S
@@ -0,0 +1,56 @@
1/*
2 * arch/s390/kernel/bitmap.S
3 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
4 * See include/asm-s390/{bitops.h|posix_types.h} for details
5 *
6 * S390 version
7 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
9 */
10
11 .globl _oi_bitmap
12_oi_bitmap:
13 .byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
14
15 .globl _ni_bitmap
16_ni_bitmap:
17 .byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
18
19 .globl _zb_findmap
20_zb_findmap:
21 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
22 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
23 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
24 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
25 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
26 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
27 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
28 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
29 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
30 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
31 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
32 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
33 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
34 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
35 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
36 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
37
38 .globl _sb_findmap
39_sb_findmap:
40 .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
41 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
42 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
43 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
44 .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
45 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
46 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
47 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
48 .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
49 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
50 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
51 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
52 .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
53 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
54 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
55 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
56
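
Both tables map a byte value to a bit index: _zb_findmap yields the position of the lowest zero bit (8 when the byte is 0xff), _sb_findmap the position of the lowest set bit (8 when the byte is 0x00). A minimal C sketch of the kind of scan the bitops helpers build on these tables — the helper below is illustrative, not the kernel's actual implementation:

	extern const unsigned char _zb_findmap[256];

	/* Illustrative find-first-zero-bit over a byte array. */
	static unsigned long find_first_zero(const unsigned char *map,
					     unsigned long bytes)
	{
		unsigned long i;

		for (i = 0; i < bytes; i++)
			if (map[i] != 0xff)
				return i * 8 + _zb_findmap[map[i]];
		return bytes * 8;	/* no zero bit found */
	}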
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
new file mode 100644
index 000000000000..71d27c493568
--- /dev/null
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -0,0 +1,30 @@
1/*
2 * Support for 32-bit Linux for S390 personality.
3 *
4 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Gerhard Tonn (ton@de.ibm.com)
6 *
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/personality.h>
13#include <linux/sched.h>
14
15struct exec_domain s390_exec_domain;
16
17static int __init
18s390_init (void)
19{
20 s390_exec_domain.name = "Linux/s390";
21 s390_exec_domain.handler = NULL;
22 s390_exec_domain.pers_low = PER_LINUX32;
23 s390_exec_domain.pers_high = PER_LINUX32;
24 s390_exec_domain.signal_map = default_exec_domain.signal_map;
25 s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
26 register_exec_domain(&s390_exec_domain);
27 return 0;
28}
29
30__initcall(s390_init);
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
new file mode 100644
index 000000000000..96571ff7115d
--- /dev/null
+++ b/arch/s390/kernel/compat_ioctl.c
@@ -0,0 +1,73 @@
1/*
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * S390 version
5 * Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Gerhard Tonn (ton@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com)
8 *
9 * Original implementation from 32-bit Sparc compat code which is
10 * Copyright (C) 2000 Silicon Graphics, Inc.
11 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
12 */
13
14#include "compat_linux.h"
15#define INCLUDES
16#define CODE
17#include "../../../fs/compat_ioctl.c"
18#include <asm/dasd.h>
19#include <asm/tape390.h>
20
21static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
22 unsigned long arg, struct file *f)
23{
24 return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
25}
26
27static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd,
28 unsigned long arg, struct file *f)
29{
30 return sys_ioctl(fd, cmd, arg);
31}
32
33#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer)
34#define ULONG_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong)
35#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
36
37struct ioctl_trans ioctl_start[] = {
38/* architecture independent ioctls */
39#include <linux/compat_ioctl.h>
40#define DECLARES
41#include "../../../fs/compat_ioctl.c"
42
43/* s390 only ioctls */
44#if defined(CONFIG_DASD) || defined(CONFIG_DASD_MODULE)
45COMPATIBLE_IOCTL(DASDAPIVER)
46COMPATIBLE_IOCTL(BIODASDDISABLE)
47COMPATIBLE_IOCTL(BIODASDENABLE)
48COMPATIBLE_IOCTL(BIODASDRSRV)
49COMPATIBLE_IOCTL(BIODASDRLSE)
50COMPATIBLE_IOCTL(BIODASDSLCK)
51COMPATIBLE_IOCTL(BIODASDINFO)
52COMPATIBLE_IOCTL(BIODASDINFO2)
53COMPATIBLE_IOCTL(BIODASDFMT)
54COMPATIBLE_IOCTL(BIODASDPRRST)
55COMPATIBLE_IOCTL(BIODASDQUIESCE)
56COMPATIBLE_IOCTL(BIODASDRESUME)
57COMPATIBLE_IOCTL(BIODASDPRRD)
58COMPATIBLE_IOCTL(BIODASDPSRD)
59COMPATIBLE_IOCTL(BIODASDGATTR)
60COMPATIBLE_IOCTL(BIODASDSATTR)
61
62#endif
63
64#if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE)
65COMPATIBLE_IOCTL(TAPE390_DISPLAY)
66#endif
67
68/* s390 doesn't need handlers here */
69COMPATIBLE_IOCTL(TIOCGSERIAL)
70COMPATIBLE_IOCTL(TIOCSSERIAL)
71};
72
73int ioctl_table_size = ARRAY_SIZE(ioctl_start);
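
Each COMPATIBLE_IOCTL(cmd) line above is a table initializer, not a statement: with HANDLE_IOCTL defined as shown, the preprocessor reduces it to roughly the entry below (a sketch of the expansion):

	/* Expanded form of one ioctl_start[] entry. */
	{ TAPE390_DISPLAY, (ioctl_trans_handler_t)do_ioctl32_pointer, NULL },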
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
new file mode 100644
index 000000000000..614056222875
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.c
@@ -0,0 +1,1045 @@
1/*
2 * arch/s390x/kernel/linux32.c
3 *
4 * S390 version
5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Gerhard Tonn (ton@de.ibm.com)
8 * Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * Conversion between 31bit and 64bit native syscalls.
11 *
12 * Heavily inspired by the 32-bit Sparc compat code which is
13 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
14 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
15 *
16 */
17
18
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/fs.h>
23#include <linux/mm.h>
24#include <linux/file.h>
25#include <linux/signal.h>
26#include <linux/resource.h>
27#include <linux/times.h>
28#include <linux/utsname.h>
29#include <linux/timex.h>
30#include <linux/smp.h>
31#include <linux/smp_lock.h>
32#include <linux/sem.h>
33#include <linux/msg.h>
34#include <linux/shm.h>
35#include <linux/slab.h>
36#include <linux/uio.h>
37#include <linux/nfs_fs.h>
38#include <linux/quota.h>
39#include <linux/module.h>
40#include <linux/sunrpc/svc.h>
41#include <linux/nfsd/nfsd.h>
42#include <linux/nfsd/cache.h>
43#include <linux/nfsd/xdr.h>
44#include <linux/nfsd/syscall.h>
45#include <linux/poll.h>
46#include <linux/personality.h>
47#include <linux/stat.h>
48#include <linux/filter.h>
49#include <linux/highmem.h>
50#include <linux/highuid.h>
51#include <linux/mman.h>
52#include <linux/ipv6.h>
53#include <linux/in.h>
54#include <linux/icmpv6.h>
55#include <linux/syscalls.h>
56#include <linux/sysctl.h>
57#include <linux/binfmts.h>
58#include <linux/compat.h>
59#include <linux/vfs.h>
60#include <linux/ptrace.h>
61
62#include <asm/types.h>
63#include <asm/ipc.h>
64#include <asm/uaccess.h>
65#include <asm/semaphore.h>
66
67#include <net/scm.h>
68#include <net/sock.h>
69
70#include "compat_linux.h"
71
72
73/* For this source file, we want overflow handling. */
74
75#undef high2lowuid
76#undef high2lowgid
77#undef low2highuid
78#undef low2highgid
79#undef SET_UID16
80#undef SET_GID16
81#undef NEW_TO_OLD_UID
82#undef NEW_TO_OLD_GID
83#undef SET_OLDSTAT_UID
84#undef SET_OLDSTAT_GID
85#undef SET_STAT_UID
86#undef SET_STAT_GID
87
88#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
89#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
90#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
91#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
92#define SET_UID16(var, uid) var = high2lowuid(uid)
93#define SET_GID16(var, gid) var = high2lowgid(gid)
94#define NEW_TO_OLD_UID(uid) high2lowuid(uid)
95#define NEW_TO_OLD_GID(gid) high2lowgid(gid)
96#define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
97#define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
98#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
99#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
100
101asmlinkage long sys32_chown16(const char * filename, u16 user, u16 group)
102{
103 return sys_chown(filename, low2highuid(user), low2highgid(group));
104}
105
106asmlinkage long sys32_lchown16(const char * filename, u16 user, u16 group)
107{
108 return sys_lchown(filename, low2highuid(user), low2highgid(group));
109}
110
111asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
112{
113 return sys_fchown(fd, low2highuid(user), low2highgid(group));
114}
115
116asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
117{
118 return sys_setregid(low2highgid(rgid), low2highgid(egid));
119}
120
121asmlinkage long sys32_setgid16(u16 gid)
122{
123 return sys_setgid((gid_t)gid);
124}
125
126asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
127{
128 return sys_setreuid(low2highuid(ruid), low2highuid(euid));
129}
130
131asmlinkage long sys32_setuid16(u16 uid)
132{
133 return sys_setuid((uid_t)uid);
134}
135
136asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
137{
138 return sys_setresuid(low2highuid(ruid), low2highuid(euid),
139 low2highuid(suid));
140}
141
142asmlinkage long sys32_getresuid16(u16 *ruid, u16 *euid, u16 *suid)
143{
144 int retval;
145
146 if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
147 !(retval = put_user(high2lowuid(current->euid), euid)))
148 retval = put_user(high2lowuid(current->suid), suid);
149
150 return retval;
151}
152
153asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
154{
155 return sys_setresgid(low2highgid(rgid), low2highgid(egid),
156 low2highgid(sgid));
157}
158
159asmlinkage long sys32_getresgid16(u16 *rgid, u16 *egid, u16 *sgid)
160{
161 int retval;
162
163 if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
164 !(retval = put_user(high2lowgid(current->egid), egid)))
165 retval = put_user(high2lowgid(current->sgid), sgid);
166
167 return retval;
168}
169
170asmlinkage long sys32_setfsuid16(u16 uid)
171{
172 return sys_setfsuid((uid_t)uid);
173}
174
175asmlinkage long sys32_setfsgid16(u16 gid)
176{
177 return sys_setfsgid((gid_t)gid);
178}
179
180static int groups16_to_user(u16 *grouplist, struct group_info *group_info)
181{
182 int i;
183 u16 group;
184
185 for (i = 0; i < group_info->ngroups; i++) {
186 group = (u16)GROUP_AT(group_info, i);
187 if (put_user(group, grouplist+i))
188 return -EFAULT;
189 }
190
191 return 0;
192}
193
194static int groups16_from_user(struct group_info *group_info, u16 *grouplist)
195{
196 int i;
197 u16 group;
198
199 for (i = 0; i < group_info->ngroups; i++) {
200 if (get_user(group, grouplist+i))
201 return -EFAULT;
202 GROUP_AT(group_info, i) = (gid_t)group;
203 }
204
205 return 0;
206}
207
208asmlinkage long sys32_getgroups16(int gidsetsize, u16 *grouplist)
209{
210 int i;
211
212 if (gidsetsize < 0)
213 return -EINVAL;
214
215 get_group_info(current->group_info);
216 i = current->group_info->ngroups;
217 if (gidsetsize) {
218 if (i > gidsetsize) {
219 i = -EINVAL;
220 goto out;
221 }
222 if (groups16_to_user(grouplist, current->group_info)) {
223 i = -EFAULT;
224 goto out;
225 }
226 }
227out:
228 put_group_info(current->group_info);
229 return i;
230}
231
232asmlinkage long sys32_setgroups16(int gidsetsize, u16 *grouplist)
233{
234 struct group_info *group_info;
235 int retval;
236
237 if (!capable(CAP_SETGID))
238 return -EPERM;
239 if ((unsigned)gidsetsize > NGROUPS_MAX)
240 return -EINVAL;
241
242 group_info = groups_alloc(gidsetsize);
243 if (!group_info)
244 return -ENOMEM;
245 retval = groups16_from_user(group_info, grouplist);
246 if (retval) {
247 put_group_info(group_info);
248 return retval;
249 }
250
251 retval = set_current_groups(group_info);
252 put_group_info(group_info);
253
254 return retval;
255}
256
257asmlinkage long sys32_getuid16(void)
258{
259 return high2lowuid(current->uid);
260}
261
262asmlinkage long sys32_geteuid16(void)
263{
264 return high2lowuid(current->euid);
265}
266
267asmlinkage long sys32_getgid16(void)
268{
269 return high2lowgid(current->gid);
270}
271
272asmlinkage long sys32_getegid16(void)
273{
274 return high2lowgid(current->egid);
275}
276
277/* 32-bit timeval and related flotsam. */
278
279static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
280{
281 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
282 (__get_user(o->tv_sec, &i->tv_sec) ||
283 __get_user(o->tv_usec, &i->tv_usec)));
284}
285
286static inline long put_tv32(struct compat_timeval *o, struct timeval *i)
287{
288 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
289 (__put_user(i->tv_sec, &o->tv_sec) ||
290 __put_user(i->tv_usec, &o->tv_usec)));
291}
292
293/*
294 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
295 *
296 * This is really horribly ugly.
297 */
298asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
299{
300 if (call >> 16) /* hack for backward compatibility */
301 return -EINVAL;
302
303 call &= 0xffff;
304
305 switch (call) {
306 case SEMTIMEDOP:
307 return compat_sys_semtimedop(first, compat_ptr(ptr),
308 second, compat_ptr(third));
309 case SEMOP:
310 /* struct sembuf is the same on 32 and 64bit :)) */
311 return sys_semtimedop(first, compat_ptr(ptr),
312 second, NULL);
313 case SEMGET:
314 return sys_semget(first, second, third);
315 case SEMCTL:
316 return compat_sys_semctl(first, second, third,
317 compat_ptr(ptr));
318 case MSGSND:
319 return compat_sys_msgsnd(first, second, third,
320 compat_ptr(ptr));
321 case MSGRCV:
322 return compat_sys_msgrcv(first, second, 0, third,
323 0, compat_ptr(ptr));
324 case MSGGET:
325 return sys_msgget((key_t) first, second);
326 case MSGCTL:
327 return compat_sys_msgctl(first, second, compat_ptr(ptr));
328 case SHMAT:
329 return compat_sys_shmat(first, second, third,
330 0, compat_ptr(ptr));
331 case SHMDT:
332 return sys_shmdt(compat_ptr(ptr));
333 case SHMGET:
334 return sys_shmget(first, (unsigned)second, third);
335 case SHMCTL:
336 return compat_sys_shmctl(first, second, compat_ptr(ptr));
337 }
338
339 return -ENOSYS;
340}
341
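
For orientation, the argument order is fixed by the historical ipc(2) ABI; a semop(semid, sops, nsops) call from a 31-bit libc reaches this demultiplexer roughly as below (a sketch, argument names invented):

	/* How sys32_ipc sees a 31-bit semop() call. */
	sys32_ipc(SEMOP, semid, nsops, 0, sops_user_ptr32);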
342asmlinkage long sys32_truncate64(const char * path, unsigned long high, unsigned long low)
343{
344 if ((int)high < 0)
345 return -EINVAL;
346 else
347 return sys_truncate(path, (high << 32) | low);
348}
349
350asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
351{
352 if ((int)high < 0)
353 return -EINVAL;
354 else
355 return sys_ftruncate(fd, (high << 32) | low);
356}
357
358int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
359{
360 int err;
361
362 if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
363 return -EOVERFLOW;
364
365 err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
366 err |= put_user(stat->ino, &statbuf->st_ino);
367 err |= put_user(stat->mode, &statbuf->st_mode);
368 err |= put_user(stat->nlink, &statbuf->st_nlink);
369 err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
370 err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
371 err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
372 err |= put_user(stat->size, &statbuf->st_size);
373 err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
374 err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
375 err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
376 err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
377 err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
378 err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
379 err |= put_user(stat->blksize, &statbuf->st_blksize);
380 err |= put_user(stat->blocks, &statbuf->st_blocks);
381/* fixme
382 err |= put_user(0, &statbuf->__unused4[0]);
383 err |= put_user(0, &statbuf->__unused4[1]);
384*/
385 return err;
386}
387
388struct sysinfo32 {
389 s32 uptime;
390 u32 loads[3];
391 u32 totalram;
392 u32 freeram;
393 u32 sharedram;
394 u32 bufferram;
395 u32 totalswap;
396 u32 freeswap;
397 unsigned short procs;
398 unsigned short pads;
399 u32 totalhigh;
400 u32 freehigh;
401 unsigned int mem_unit;
402 char _f[8];
403};
404
405asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
406{
407 struct sysinfo s;
408 int ret, err;
409 mm_segment_t old_fs = get_fs ();
410
411 set_fs (KERNEL_DS);
412 ret = sys_sysinfo(&s);
413 set_fs (old_fs);
414 err = put_user (s.uptime, &info->uptime);
415 err |= __put_user (s.loads[0], &info->loads[0]);
416 err |= __put_user (s.loads[1], &info->loads[1]);
417 err |= __put_user (s.loads[2], &info->loads[2]);
418 err |= __put_user (s.totalram, &info->totalram);
419 err |= __put_user (s.freeram, &info->freeram);
420 err |= __put_user (s.sharedram, &info->sharedram);
421 err |= __put_user (s.bufferram, &info->bufferram);
422 err |= __put_user (s.totalswap, &info->totalswap);
423 err |= __put_user (s.freeswap, &info->freeswap);
424 err |= __put_user (s.procs, &info->procs);
425 err |= __put_user (s.totalhigh, &info->totalhigh);
426 err |= __put_user (s.freehigh, &info->freehigh);
427 err |= __put_user (s.mem_unit, &info->mem_unit);
428 if (err)
429 return -EFAULT;
430 return ret;
431}
432
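
sys32_sysinfo is the first of many uses of the same conversion pattern in this file: widen the uaccess limit with set_fs(KERNEL_DS) so the native syscall accepts a buffer on the kernel stack, restore it, then copy the result field by field into the 32-bit layout. A stripped-down sketch of the pattern (native_syscall and struct native_type are placeholders):

	mm_segment_t old_fs = get_fs();
	struct native_type kbuf;
	long ret;

	set_fs(KERNEL_DS);		/* uaccess now accepts kernel pointers */
	ret = native_syscall(&kbuf);
	set_fs(old_fs);			/* restore before touching user memory */
	/* ...then convert kbuf into the 32-bit user structure via put_user() */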
433asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
434 struct compat_timespec __user *interval)
435{
436 struct timespec t;
437 int ret;
438 mm_segment_t old_fs = get_fs ();
439
440 set_fs (KERNEL_DS);
441 ret = sys_sched_rr_get_interval(pid, &t);
442 set_fs (old_fs);
443 if (put_compat_timespec(&t, interval))
444 return -EFAULT;
445 return ret;
446}
447
448asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
449 compat_sigset_t __user *oset, size_t sigsetsize)
450{
451 sigset_t s;
452 compat_sigset_t s32;
453 int ret;
454 mm_segment_t old_fs = get_fs();
455
456 if (set) {
457 if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
458 return -EFAULT;
459 switch (_NSIG_WORDS) {
460 case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
461 case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
462 case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
463 case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
464 }
465 }
466 set_fs (KERNEL_DS);
467 ret = sys_rt_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL, sigsetsize);
468 set_fs (old_fs);
469 if (ret) return ret;
470 if (oset) {
471 switch (_NSIG_WORDS) {
472 case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
473 case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
474 case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
475 case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
476 }
477 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
478 return -EFAULT;
479 }
480 return 0;
481}
482
483asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
484 size_t sigsetsize)
485{
486 sigset_t s;
487 compat_sigset_t s32;
488 int ret;
489 mm_segment_t old_fs = get_fs();
490
491 set_fs (KERNEL_DS);
492 ret = sys_rt_sigpending(&s, sigsetsize);
493 set_fs (old_fs);
494 if (!ret) {
495 switch (_NSIG_WORDS) {
496 case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
497 case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
498 case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
499 case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
500 }
501 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
502 return -EFAULT;
503 }
504 return ret;
505}
506
507asmlinkage long
508sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
509{
510 siginfo_t info;
511 int ret;
512 mm_segment_t old_fs = get_fs();
513
514 if (copy_siginfo_from_user32(&info, uinfo))
515 return -EFAULT;
516 set_fs (KERNEL_DS);
517 ret = sys_rt_sigqueueinfo(pid, sig, &info);
518 set_fs (old_fs);
519 return ret;
520}
521
522/*
523 * sys32_execve() executes a new program after the asm stub has set
524 * things up for us. This should basically do what I want it to.
525 */
526asmlinkage long
527sys32_execve(struct pt_regs regs)
528{
529 int error;
530 char * filename;
531
532 filename = getname(compat_ptr(regs.orig_gpr2));
533 error = PTR_ERR(filename);
534 if (IS_ERR(filename))
535 goto out;
536 error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
537 compat_ptr(regs.gprs[4]), &regs);
538 if (error == 0)
539 {
540 task_lock(current);
541 current->ptrace &= ~PT_DTRACE;
542 task_unlock(current);
543 current->thread.fp_regs.fpc=0;
544 __asm__ __volatile__
545 ("sr 0,0\n\t"
546 "sfpc 0,0\n\t"
547 : : :"0");
548 }
549 putname(filename);
550out:
551 return error;
552}
553
554
555#ifdef CONFIG_MODULES
556
557asmlinkage long
558sys32_init_module(void __user *umod, unsigned long len,
559 const char __user *uargs)
560{
561 return sys_init_module(umod, len, uargs);
562}
563
564asmlinkage long
565sys32_delete_module(const char __user *name_user, unsigned int flags)
566{
567 return sys_delete_module(name_user, flags);
568}
569
570#else /* CONFIG_MODULES */
571
572asmlinkage long
573sys32_init_module(void __user *umod, unsigned long len,
574 const char __user *uargs)
575{
576 return -ENOSYS;
577}
578
579asmlinkage long
580sys32_delete_module(const char __user *name_user, unsigned int flags)
581{
582 return -ENOSYS;
583}
584
585#endif /* CONFIG_MODULES */
586
587/* Translations due to time_t size differences, which affect all
588 sorts of things, like timeval and itimerval. */
589
590extern struct timezone sys_tz;
591
592asmlinkage long sys32_gettimeofday(struct compat_timeval *tv, struct timezone *tz)
593{
594 if (tv) {
595 struct timeval ktv;
596 do_gettimeofday(&ktv);
597 if (put_tv32(tv, &ktv))
598 return -EFAULT;
599 }
600 if (tz) {
601 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
602 return -EFAULT;
603 }
604 return 0;
605}
606
607static inline long get_ts32(struct timespec *o, struct compat_timeval *i)
608{
609 long usec;
610
611 if (!access_ok(VERIFY_READ, i, sizeof(*i)))
612 return -EFAULT;
613 if (__get_user(o->tv_sec, &i->tv_sec))
614 return -EFAULT;
615 if (__get_user(usec, &i->tv_usec))
616 return -EFAULT;
617 o->tv_nsec = usec * 1000;
618 return 0;
619}
620
621asmlinkage long sys32_settimeofday(struct compat_timeval *tv, struct timezone *tz)
622{
623 struct timespec kts;
624 struct timezone ktz;
625
626 if (tv) {
627 if (get_ts32(&kts, tv))
628 return -EFAULT;
629 }
630 if (tz) {
631 if (copy_from_user(&ktz, tz, sizeof(ktz)))
632 return -EFAULT;
633 }
634
635 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
636}
637
638/* These are here just in case some old sparc32 binary calls it. */
639asmlinkage long sys32_pause(void)
640{
641 current->state = TASK_INTERRUPTIBLE;
642 schedule();
643 return -ERESTARTNOHAND;
644}
645
646asmlinkage long sys32_pread64(unsigned int fd, char *ubuf,
647 size_t count, u32 poshi, u32 poslo)
648{
649 if ((compat_ssize_t) count < 0)
650 return -EINVAL;
651 return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
652}
653
654asmlinkage long sys32_pwrite64(unsigned int fd, const char *ubuf,
655 size_t count, u32 poshi, u32 poslo)
656{
657 if ((compat_ssize_t) count < 0)
658 return -EINVAL;
659 return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
660}
661
662asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
663{
664 return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
665}
666
667asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t *offset, size_t count)
668{
669 mm_segment_t old_fs = get_fs();
670 int ret;
671 off_t of;
672
673 if (offset && get_user(of, offset))
674 return -EFAULT;
675
676 set_fs(KERNEL_DS);
677 ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
678 set_fs(old_fs);
679
680 if (!ret && offset && put_user(of, offset))
681 return -EFAULT;
682
683 return ret;
684}
685
686asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
687 compat_loff_t *offset, s32 count)
688{
689 mm_segment_t old_fs = get_fs();
690 int ret;
691 loff_t lof;
692
693 if (offset && get_user(lof, offset))
694 return -EFAULT;
695
696 set_fs(KERNEL_DS);
697 ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count);
698 set_fs(old_fs);
699
700 if (offset && put_user(lof, offset))
701 return -EFAULT;
702
703 return ret;
704}
705
706/* Handle adjtimex compatibility. */
707
708struct timex32 {
709 u32 modes;
710 s32 offset, freq, maxerror, esterror;
711 s32 status, constant, precision, tolerance;
712 struct compat_timeval time;
713 s32 tick;
714 s32 ppsfreq, jitter, shift, stabil;
715 s32 jitcnt, calcnt, errcnt, stbcnt;
716 s32 :32; s32 :32; s32 :32; s32 :32;
717 s32 :32; s32 :32; s32 :32; s32 :32;
718 s32 :32; s32 :32; s32 :32; s32 :32;
719};
720
721extern int do_adjtimex(struct timex *);
722
723asmlinkage long sys32_adjtimex(struct timex32 *utp)
724{
725 struct timex txc;
726 int ret;
727
728 memset(&txc, 0, sizeof(struct timex));
729
730 if(get_user(txc.modes, &utp->modes) ||
731 __get_user(txc.offset, &utp->offset) ||
732 __get_user(txc.freq, &utp->freq) ||
733 __get_user(txc.maxerror, &utp->maxerror) ||
734 __get_user(txc.esterror, &utp->esterror) ||
735 __get_user(txc.status, &utp->status) ||
736 __get_user(txc.constant, &utp->constant) ||
737 __get_user(txc.precision, &utp->precision) ||
738 __get_user(txc.tolerance, &utp->tolerance) ||
739 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
740 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
741 __get_user(txc.tick, &utp->tick) ||
742 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
743 __get_user(txc.jitter, &utp->jitter) ||
744 __get_user(txc.shift, &utp->shift) ||
745 __get_user(txc.stabil, &utp->stabil) ||
746 __get_user(txc.jitcnt, &utp->jitcnt) ||
747 __get_user(txc.calcnt, &utp->calcnt) ||
748 __get_user(txc.errcnt, &utp->errcnt) ||
749 __get_user(txc.stbcnt, &utp->stbcnt))
750 return -EFAULT;
751
752 ret = do_adjtimex(&txc);
753
754 if(put_user(txc.modes, &utp->modes) ||
755 __put_user(txc.offset, &utp->offset) ||
756 __put_user(txc.freq, &utp->freq) ||
757 __put_user(txc.maxerror, &utp->maxerror) ||
758 __put_user(txc.esterror, &utp->esterror) ||
759 __put_user(txc.status, &utp->status) ||
760 __put_user(txc.constant, &utp->constant) ||
761 __put_user(txc.precision, &utp->precision) ||
762 __put_user(txc.tolerance, &utp->tolerance) ||
763 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
764 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
765 __put_user(txc.tick, &utp->tick) ||
766 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
767 __put_user(txc.jitter, &utp->jitter) ||
768 __put_user(txc.shift, &utp->shift) ||
769 __put_user(txc.stabil, &utp->stabil) ||
770 __put_user(txc.jitcnt, &utp->jitcnt) ||
771 __put_user(txc.calcnt, &utp->calcnt) ||
772 __put_user(txc.errcnt, &utp->errcnt) ||
773 __put_user(txc.stbcnt, &utp->stbcnt))
774 ret = -EFAULT;
775
776 return ret;
777}
778
779#ifdef CONFIG_SYSCTL
780struct __sysctl_args32 {
781 u32 name;
782 int nlen;
783 u32 oldval;
784 u32 oldlenp;
785 u32 newval;
786 u32 newlen;
787 u32 __unused[4];
788};
789
790asmlinkage long sys32_sysctl(struct __sysctl_args32 *args)
791{
792 struct __sysctl_args32 tmp;
793 int error;
794 size_t oldlen, *oldlenp = NULL;
795 unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7;
796
797 if (copy_from_user(&tmp, args, sizeof(tmp)))
798 return -EFAULT;
799
800 if (tmp.oldval && tmp.oldlenp) {
801 /* Duh, this is ugly and might not work if sysctl_args
802 is in read-only memory, but do_sysctl does indirectly
803 a lot of uaccess in both directions and we'd have to
804 basically copy the whole sysctl.c here, and
805 glibc's __sysctl uses rw memory for the structure
806 anyway. */
807 if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
808 put_user(oldlen, (size_t *)addr))
809 return -EFAULT;
810 oldlenp = (size_t *)addr;
811 }
812
813 lock_kernel();
814 error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
815 oldlenp, (void *)A(tmp.newval), tmp.newlen);
816 unlock_kernel();
817 if (oldlenp) {
818 if (!error) {
819 if (get_user(oldlen, (size_t *)addr) ||
820 put_user(oldlen, (u32 *)A(tmp.oldlenp)))
821 error = -EFAULT;
822 }
823 copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
824 }
825 return error;
826}
827#endif
828
829struct stat64_emu31 {
830 unsigned long long st_dev;
831 unsigned int __pad1;
832#define STAT64_HAS_BROKEN_ST_INO 1
833 u32 __st_ino;
834 unsigned int st_mode;
835 unsigned int st_nlink;
836 u32 st_uid;
837 u32 st_gid;
838 unsigned long long st_rdev;
839 unsigned int __pad3;
840 long st_size;
841 u32 st_blksize;
842 unsigned char __pad4[4];
843 u32 __pad5; /* future possible st_blocks high bits */
844 u32 st_blocks; /* Number 512-byte blocks allocated. */
845 u32 st_atime;
846 u32 __pad6;
847 u32 st_mtime;
848 u32 __pad7;
849 u32 st_ctime;
850 u32 __pad8; /* will be high 32 bits of ctime someday */
851 unsigned long st_ino;
852};
853
854static int cp_stat64(struct stat64_emu31 *ubuf, struct kstat *stat)
855{
856 struct stat64_emu31 tmp;
857
858 memset(&tmp, 0, sizeof(tmp));
859
860 tmp.st_dev = huge_encode_dev(stat->dev);
861 tmp.st_ino = stat->ino;
862 tmp.__st_ino = (u32)stat->ino;
863 tmp.st_mode = stat->mode;
864 tmp.st_nlink = (unsigned int)stat->nlink;
865 tmp.st_uid = stat->uid;
866 tmp.st_gid = stat->gid;
867 tmp.st_rdev = huge_encode_dev(stat->rdev);
868 tmp.st_size = stat->size;
869 tmp.st_blksize = (u32)stat->blksize;
870 tmp.st_blocks = (u32)stat->blocks;
871 tmp.st_atime = (u32)stat->atime.tv_sec;
872 tmp.st_mtime = (u32)stat->mtime.tv_sec;
873 tmp.st_ctime = (u32)stat->ctime.tv_sec;
874
875 return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
876}
877
878asmlinkage long sys32_stat64(char * filename, struct stat64_emu31 * statbuf)
879{
880 struct kstat stat;
881 int ret = vfs_stat(filename, &stat);
882 if (!ret)
883 ret = cp_stat64(statbuf, &stat);
884 return ret;
885}
886
887asmlinkage long sys32_lstat64(char * filename, struct stat64_emu31 * statbuf)
888{
889 struct kstat stat;
890 int ret = vfs_lstat(filename, &stat);
891 if (!ret)
892 ret = cp_stat64(statbuf, &stat);
893 return ret;
894}
895
896asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 * statbuf)
897{
898 struct kstat stat;
899 int ret = vfs_fstat(fd, &stat);
900 if (!ret)
901 ret = cp_stat64(statbuf, &stat);
902 return ret;
903}
904
905/*
906 * Linux/i386 didn't use to be able to handle more than
907 * 4 system call parameters, so these system calls used a memory
908 * block for parameter passing.
909 */
910
911struct mmap_arg_struct_emu31 {
912 u32 addr;
913 u32 len;
914 u32 prot;
915 u32 flags;
916 u32 fd;
917 u32 offset;
918};
919
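
As the comment above says, this struct exists because the old mmap() ABI passed a single pointer to an argument block instead of six separate parameters. A sketch of how a 31-bit caller would fill it in (values invented):

	struct mmap_arg_struct_emu31 a = {
		.addr	= 0,
		.len	= 8192,
		.prot	= PROT_READ | PROT_WRITE,
		.flags	= MAP_PRIVATE | MAP_ANONYMOUS,
		.fd	= (u32)-1,
		.offset	= 0,	/* bytes for old mmap, 4K pages for mmap2 */
	};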
920/* common code for old and new mmaps */
921static inline long do_mmap2(
922 unsigned long addr, unsigned long len,
923 unsigned long prot, unsigned long flags,
924 unsigned long fd, unsigned long pgoff)
925{
926 struct file * file = NULL;
927 unsigned long error = -EBADF;
928
929 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
930 if (!(flags & MAP_ANONYMOUS)) {
931 file = fget(fd);
932 if (!file)
933 goto out;
934 }
935
936 down_write(&current->mm->mmap_sem);
937 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
938 if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
939 /* Result is out of bounds. */
940 do_munmap(current->mm, addr, len);
941 error = -ENOMEM;
942 }
943 up_write(&current->mm->mmap_sem);
944
945 if (file)
946 fput(file);
947out:
948 return error;
949}
950
951
952asmlinkage unsigned long
953old32_mmap(struct mmap_arg_struct_emu31 *arg)
954{
955 struct mmap_arg_struct_emu31 a;
956 int error = -EFAULT;
957
958 if (copy_from_user(&a, arg, sizeof(a)))
959 goto out;
960
961 error = -EINVAL;
962 if (a.offset & ~PAGE_MASK)
963 goto out;
964
965 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
966out:
967 return error;
968}
969
970asmlinkage long
971sys32_mmap2(struct mmap_arg_struct_emu31 *arg)
972{
973 struct mmap_arg_struct_emu31 a;
974 int error = -EFAULT;
975
976 if (copy_from_user(&a, arg, sizeof(a)))
977 goto out;
978 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
979out:
980 return error;
981}
982
983asmlinkage long sys32_read(unsigned int fd, char * buf, size_t count)
984{
985 if ((compat_ssize_t) count < 0)
986 return -EINVAL;
987
988 return sys_read(fd, buf, count);
989}
990
991asmlinkage long sys32_write(unsigned int fd, char * buf, size_t count)
992{
993 if ((compat_ssize_t) count < 0)
994 return -EINVAL;
995
996 return sys_write(fd, buf, count);
997}
998
999asmlinkage long sys32_clone(struct pt_regs regs)
1000{
1001 unsigned long clone_flags;
1002 unsigned long newsp;
1003 int *parent_tidptr, *child_tidptr;
1004
1005 clone_flags = regs.gprs[3] & 0xffffffffUL;
1006 newsp = regs.orig_gpr2 & 0x7fffffffUL;
1007 parent_tidptr = (int *) (regs.gprs[4] & 0x7fffffffUL);
1008 child_tidptr = (int *) (regs.gprs[5] & 0x7fffffffUL);
1009 if (!newsp)
1010 newsp = regs.gprs[15];
1011 return do_fork(clone_flags, newsp, &regs, 0,
1012 parent_tidptr, child_tidptr);
1013}
1014
1015/*
1016 * Wrapper function for sys_timer_create.
1017 */
1018extern asmlinkage long
1019sys_timer_create(clockid_t, struct sigevent *, timer_t *);
1020
1021asmlinkage long
1022sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
1023 timer_t *timer_id)
1024{
1025 struct sigevent se;
1026 timer_t ktimer_id;
1027 mm_segment_t old_fs;
1028 long ret;
1029
1030 if (se32 == NULL)
1031 return sys_timer_create(which_clock, NULL, timer_id);
1032
1033 if (get_compat_sigevent(&se, se32))
1034 return -EFAULT;
1035
1036 old_fs = get_fs();
1037 set_fs(KERNEL_DS);
1038 ret = sys_timer_create(which_clock, &se, &ktimer_id);
1039 set_fs(old_fs);
1040
1041 if (!ret)
1042 ret = put_user (ktimer_id, timer_id);
1043
1044 return ret;
1045}
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
new file mode 100644
index 000000000000..bf33dcfec7db
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.h
@@ -0,0 +1,197 @@
1#ifndef _ASM_S390X_S390_H
2#define _ASM_S390X_S390_H
3
4#include <linux/config.h>
5#include <linux/compat.h>
6#include <linux/socket.h>
7#include <linux/syscalls.h>
8#include <linux/nfs_fs.h>
9#include <linux/sunrpc/svc.h>
10#include <linux/nfsd/nfsd.h>
11#include <linux/nfsd/export.h>
12
13/* Macro that masks the high order bit of a 32 bit pointer and converts it */
14/* to a 64 bit pointer */
15#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
16#define AA(__x) \
17 ((unsigned long)(__x))
18
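
A() strips the topmost bit because 31-bit user space runs with the PSW addressing-mode bit set (PSW32_ADDR_AMODE31 further down in this header), so incoming pointers may carry bit 31 even though only the low 31 bits address memory. A worked sketch:

	/* Both inputs name the same 31-bit address (values invented). */
	unsigned long p1 = A(0x80001000U);	/* -> 0x00001000 */
	unsigned long p2 = A(0x00001000U);	/* -> 0x00001000 */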
19/* Now 32bit compatibility types */
20struct ipc_kludge_32 {
21 __u32 msgp; /* pointer */
22 __s32 msgtyp;
23};
24
25struct old_sigaction32 {
26 __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */
27 compat_old_sigset_t sa_mask; /* A 32 bit mask */
28 __u32 sa_flags;
29 __u32 sa_restorer; /* Another 32 bit pointer */
30};
31
32typedef struct compat_siginfo {
33 int si_signo;
34 int si_errno;
35 int si_code;
36
37 union {
38 int _pad[((128/sizeof(int)) - 3)];
39
40 /* kill() */
41 struct {
42 pid_t _pid; /* sender's pid */
43 uid_t _uid; /* sender's uid */
44 } _kill;
45
46 /* POSIX.1b timers */
47 struct {
48 timer_t _tid; /* timer id */
49 int _overrun; /* overrun count */
50 compat_sigval_t _sigval; /* same as below */
51 int _sys_private; /* not to be passed to user */
52 } _timer;
53
54 /* POSIX.1b signals */
55 struct {
56 pid_t _pid; /* sender's pid */
57 uid_t _uid; /* sender's uid */
58 compat_sigval_t _sigval;
59 } _rt;
60
61 /* SIGCHLD */
62 struct {
63 pid_t _pid; /* which child */
64 uid_t _uid; /* sender's uid */
65 int _status;/* exit code */
66 compat_clock_t _utime;
67 compat_clock_t _stime;
68 } _sigchld;
69
70 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
71 struct {
72 __u32 _addr; /* faulting insn/memory ref. - pointer */
73 } _sigfault;
74
75 /* SIGPOLL */
76 struct {
77 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
78 int _fd;
79 } _sigpoll;
80 } _sifields;
81} compat_siginfo_t;
82
83/*
84 * How these fields are to be accessed.
85 */
86#define si_pid _sifields._kill._pid
87#define si_uid _sifields._kill._uid
88#define si_status _sifields._sigchld._status
89#define si_utime _sifields._sigchld._utime
90#define si_stime _sifields._sigchld._stime
91#define si_value _sifields._rt._sigval
92#define si_int _sifields._rt._sigval.sival_int
93#define si_ptr _sifields._rt._sigval.sival_ptr
94#define si_addr _sifields._sigfault._addr
95#define si_band _sifields._sigpoll._band
96#define si_fd _sifields._sigpoll._fd
97#define si_tid _sifields._timer._tid
98#define si_overrun _sifields._timer._overrun
99
100/* asm/sigcontext.h */
101typedef union
102{
103 __u64 d;
104 __u32 f;
105} freg_t32;
106
107typedef struct
108{
109 unsigned int fpc;
110 freg_t32 fprs[__NUM_FPRS];
111} _s390_fp_regs32;
112
113typedef struct
114{
115 __u32 mask;
116 __u32 addr;
117} _psw_t32 __attribute__ ((aligned(8)));
118
119#define PSW32_MASK_PER 0x40000000UL
120#define PSW32_MASK_DAT 0x04000000UL
121#define PSW32_MASK_IO 0x02000000UL
122#define PSW32_MASK_EXT 0x01000000UL
123#define PSW32_MASK_KEY 0x00F00000UL
124#define PSW32_MASK_MCHECK 0x00040000UL
125#define PSW32_MASK_WAIT 0x00020000UL
126#define PSW32_MASK_PSTATE 0x00010000UL
127#define PSW32_MASK_ASC 0x0000C000UL
128#define PSW32_MASK_CC 0x00003000UL
129#define PSW32_MASK_PM 0x00000f00UL
130
131#define PSW32_ADDR_AMODE31 0x80000000UL
132#define PSW32_ADDR_INSN 0x7FFFFFFFUL
133
134#define PSW32_BASE_BITS 0x00080000UL
135
136#define PSW32_ASC_PRIMARY 0x00000000UL
137#define PSW32_ASC_ACCREG 0x00004000UL
138#define PSW32_ASC_SECONDARY 0x00008000UL
139#define PSW32_ASC_HOME 0x0000C000UL
140
141#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
142 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
143 PSW32_MASK_PSTATE)
144
145#define PSW32_MASK_MERGE(CURRENT,NEW) \
146 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
147 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
148
149
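
PSW32_MASK_MERGE keeps every bit of CURRENT except the condition code and program mask, which it adopts from NEW — used where user space may legitimately alter only cc/pm. A worked sketch:

	/* cc field is 0x00003000, pm field is 0x00000f00 (values invented). */
	__u32 merged = PSW32_MASK_MERGE(0x04000000U, 0x00001200U);
	/* == 0x04001200: DAT bit kept from the first argument,
	 * cc=1 and pm=2 adopted from the second */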
150typedef struct
151{
152 _psw_t32 psw;
153 __u32 gprs[__NUM_GPRS];
154 __u32 acrs[__NUM_ACRS];
155} _s390_regs_common32;
156
157typedef struct
158{
159 _s390_regs_common32 regs;
160 _s390_fp_regs32 fpregs;
161} _sigregs32;
162
163#define _SIGCONTEXT_NSIG32 64
164#define _SIGCONTEXT_NSIG_BPW32 32
165#define __SIGNAL_FRAMESIZE32 96
166#define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2)
167
168struct sigcontext32
169{
170 __u32 oldmask[_COMPAT_NSIG_WORDS];
171 __u32 sregs; /* pointer */
172};
173
174/* asm/signal.h */
175struct sigaction32 {
176 __u32 sa_handler; /* pointer */
177 __u32 sa_flags;
178 __u32 sa_restorer; /* pointer */
179 compat_sigset_t sa_mask; /* mask last for extensibility */
180};
181
182typedef struct {
183 __u32 ss_sp; /* pointer */
184 int ss_flags;
185 compat_size_t ss_size;
186} stack_t32;
187
188/* asm/ucontext.h */
189struct ucontext32 {
190 __u32 uc_flags;
191 __u32 uc_link; /* pointer */
192 stack_t32 uc_stack;
193 _sigregs32 uc_mcontext;
194 compat_sigset_t uc_sigmask; /* mask last for extensibility */
195};
196
197#endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
new file mode 100644
index 000000000000..419aef913ee1
--- /dev/null
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -0,0 +1,83 @@
1#ifndef _PTRACE32_H
2#define _PTRACE32_H
3
4#include "compat_linux.h" /* needed for _psw_t32 */
5
6typedef struct {
7 __u32 cr[3];
8} per_cr_words32;
9
10typedef struct {
11 __u16 perc_atmid; /* 0x096 */
12 __u32 address; /* 0x098 */
13 __u8 access_id; /* 0x0a1 */
14} per_lowcore_words32;
15
16typedef struct {
17 union {
18 per_cr_words32 words;
19 } control_regs;
20 /*
21 * Use these flags instead of setting em_instruction_fetch
22 * directly; they are used so that single stepping can be
23 * switched on & off while not affecting other tracing
24 */
25 unsigned single_step : 1;
26 unsigned instruction_fetch : 1;
27 unsigned : 30;
28 /*
29 * These addresses are copied into cr10 & cr11 if single
30 * stepping is switched off
31 */
32 __u32 starting_addr;
33 __u32 ending_addr;
34 union {
35 per_lowcore_words32 words;
36 } lowcore;
37} per_struct32;
38
39struct user_regs_struct32
40{
41 _psw_t32 psw;
42 u32 gprs[NUM_GPRS];
43 u32 acrs[NUM_ACRS];
44 u32 orig_gpr2;
45 s390_fp_regs fp_regs;
46 /*
47 * These per registers are in here so that gdb can modify them
48 * itself as there is no "official" ptrace interface for hardware
49 * watchpoints. This is the way intel does it.
50 */
51 per_struct32 per_info;
52 u32 ieee_instruction_pointer;
53 /* Used to give failing instruction back to user for ieee exceptions */
54};
55
56struct user32 {
57 /* We start with the registers, to mimic the way that "memory"
58 is returned from the ptrace(3,...) function. */
59 struct user_regs_struct32 regs; /* Where the registers are actually stored */
60 /* The rest of this junk is to help gdb figure out what goes where */
61 u32 u_tsize; /* Text segment size (pages). */
62 u32 u_dsize; /* Data segment size (pages). */
63 u32 u_ssize; /* Stack segment size (pages). */
64 u32 start_code; /* Starting virtual address of text. */
65 u32 start_stack; /* Starting virtual address of stack area.
66 This is actually the bottom of the stack,
67 the top of the stack is always found in the
68 esp register. */
69 s32 signal; /* Signal that caused the core dump. */
70 u32 u_ar0; /* Used by gdb to help find the values for */
71 /* the registers. */
72 u32 magic; /* To uniquely identify a core file */
73 char u_comm[32]; /* User command that was responsible */
74};
75
76typedef struct
77{
78 __u32 len;
79 __u32 kernel_addr;
80 __u32 process_addr;
81} ptrace_area_emu31;
82
83#endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
new file mode 100644
index 000000000000..d05d65ac9694
--- /dev/null
+++ b/arch/s390/kernel/compat_signal.c
@@ -0,0 +1,648 @@
1/*
2 * arch/s390/kernel/signal32.c
3 *
4 * S390 version
5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
7 * Gerhard Tonn (ton@de.ibm.com)
8 *
9 * Copyright (C) 1991, 1992 Linus Torvalds
10 *
11 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
12 */
13
14#include <linux/config.h>
15#include <linux/compat.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/kernel.h>
21#include <linux/signal.h>
22#include <linux/errno.h>
23#include <linux/wait.h>
24#include <linux/ptrace.h>
25#include <linux/unistd.h>
26#include <linux/stddef.h>
27#include <linux/tty.h>
28#include <linux/personality.h>
29#include <linux/binfmts.h>
30#include <asm/ucontext.h>
31#include <asm/uaccess.h>
32#include <asm/lowcore.h>
33#include "compat_linux.h"
34#include "compat_ptrace.h"
35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38typedef struct
39{
40 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
41 struct sigcontext32 sc;
42 _sigregs32 sregs;
43 int signo;
44 __u8 retcode[S390_SYSCALL_SIZE];
45} sigframe32;
46
47typedef struct
48{
49 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
50 __u8 retcode[S390_SYSCALL_SIZE];
51 compat_siginfo_t info;
52 struct ucontext32 uc;
53} rt_sigframe32;
54
55asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
56
57int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
58{
59 int err;
60
61 if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
62 return -EFAULT;
63
64 /* If you change siginfo_t structure, please be sure
65 this code is fixed accordingly.
66 It should never copy any pad contained in the structure
67 to avoid security leaks, but must copy the generic
68 3 ints plus the relevant union member.
69 This routine must convert siginfo from 64bit to 32bit as well
70 at the same time. */
71 err = __put_user(from->si_signo, &to->si_signo);
72 err |= __put_user(from->si_errno, &to->si_errno);
73 err |= __put_user((short)from->si_code, &to->si_code);
74 if (from->si_code < 0)
75 err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
76 else {
77 switch (from->si_code >> 16) {
78 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
79 case __SI_MESGQ >> 16:
80 err |= __put_user(from->si_int, &to->si_int);
81 /* fallthrough */
82 case __SI_KILL >> 16:
83 err |= __put_user(from->si_pid, &to->si_pid);
84 err |= __put_user(from->si_uid, &to->si_uid);
85 break;
86 case __SI_CHLD >> 16:
87 err |= __put_user(from->si_pid, &to->si_pid);
88 err |= __put_user(from->si_uid, &to->si_uid);
89 err |= __put_user(from->si_utime, &to->si_utime);
90 err |= __put_user(from->si_stime, &to->si_stime);
91 err |= __put_user(from->si_status, &to->si_status);
92 break;
93 case __SI_FAULT >> 16:
94 err |= __put_user((unsigned long) from->si_addr,
95 &to->si_addr);
96 break;
97 case __SI_POLL >> 16:
98 err |= __put_user(from->si_band, &to->si_band);
99 err |= __put_user(from->si_fd, &to->si_fd);
100 break;
101 case __SI_TIMER >> 16:
102 err |= __put_user(from->si_tid, &to->si_tid);
103 err |= __put_user(from->si_overrun, &to->si_overrun);
104 err |= __put_user(from->si_int, &to->si_int);
105 break;
106 default:
107 break;
108 }
109 }
110 return err;
111}
112
113int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
114{
115 int err;
116 u32 tmp;
117
118 if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
119 return -EFAULT;
120
121 err = __get_user(to->si_signo, &from->si_signo);
122 err |= __get_user(to->si_errno, &from->si_errno);
123 err |= __get_user(to->si_code, &from->si_code);
124
125 if (to->si_code < 0)
126 err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
127 else {
128 switch (to->si_code >> 16) {
129 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
130 case __SI_MESGQ >> 16:
131 err |= __get_user(to->si_int, &from->si_int);
132 /* fallthrough */
133 case __SI_KILL >> 16:
134 err |= __get_user(to->si_pid, &from->si_pid);
135 err |= __get_user(to->si_uid, &from->si_uid);
136 break;
137 case __SI_CHLD >> 16:
138 err |= __get_user(to->si_pid, &from->si_pid);
139 err |= __get_user(to->si_uid, &from->si_uid);
140 err |= __get_user(to->si_utime, &from->si_utime);
141 err |= __get_user(to->si_stime, &from->si_stime);
142 err |= __get_user(to->si_status, &from->si_status);
143 break;
144 case __SI_FAULT >> 16:
145 err |= __get_user(tmp, &from->si_addr);
146 to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
147 break;
148 case __SI_POLL >> 16:
149 err |= __get_user(to->si_band, &from->si_band);
150 err |= __get_user(to->si_fd, &from->si_fd);
151 break;
152 case __SI_TIMER >> 16:
153 err |= __get_user(to->si_tid, &from->si_tid);
154 err |= __get_user(to->si_overrun, &from->si_overrun);
155 err |= __get_user(to->si_int, &from->si_int);
156 break;
157 default:
158 break;
159 }
160 }
161 return err;
162}
163
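Both routines above dispatch on the upper halfword of si_code, which encodes the siginfo class, and copy only the three generic ints plus the union member that class selects; padding is never copied, so no stale kernel data reaches user space. A minimal user-space sketch of the narrowing direction (the struct layouts and class values below are illustrative stand-ins, not the kernel definitions):

	#include <stdint.h>

	/* Illustrative stand-ins for the 64-bit and 32-bit siginfo layouts. */
	struct siginfo64 { int si_signo, si_errno, si_code; uint64_t si_addr; int si_pid; };
	struct siginfo32 { int si_signo, si_errno, si_code; uint32_t si_addr; int si_pid; };

	enum { SI_CLASS_KILL = 0, SI_CLASS_FAULT = 3 };	/* stand-ins for __SI_* >> 16 */

	static void narrow_siginfo(struct siginfo32 *to, const struct siginfo64 *from)
	{
		to->si_signo = from->si_signo;
		to->si_errno = from->si_errno;
		to->si_code  = (short)from->si_code;		/* keep the low halfword */
		switch (from->si_code >> 16) {
		case SI_CLASS_FAULT:
			to->si_addr = (uint32_t)from->si_addr;	/* truncate the address */
			break;
		case SI_CLASS_KILL:
			to->si_pid = from->si_pid;		/* only the selected member */
			break;
		}
	}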
164/*
165 * Atomically swap in the new signal mask, and wait for a signal.
166 */
167asmlinkage int
168sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask)
169{
170 sigset_t saveset;
171
172 mask &= _BLOCKABLE;
173 spin_lock_irq(&current->sighand->siglock);
174 saveset = current->blocked;
175 siginitset(&current->blocked, mask);
176 recalc_sigpending();
177 spin_unlock_irq(&current->sighand->siglock);
178 regs->gprs[2] = -EINTR;
179
180 while (1) {
181 set_current_state(TASK_INTERRUPTIBLE);
182 schedule();
183 if (do_signal(regs, &saveset))
184 return -EINTR;
185 }
186}
187
188asmlinkage int
189sys32_rt_sigsuspend(struct pt_regs * regs, compat_sigset_t __user *unewset,
190 size_t sigsetsize)
191{
192 sigset_t saveset, newset;
193 compat_sigset_t set32;
194
195 /* XXX: Don't preclude handling different sized sigset_t's. */
196 if (sigsetsize != sizeof(sigset_t))
197 return -EINVAL;
198
199 if (copy_from_user(&set32, unewset, sizeof(set32)))
200 return -EFAULT;
201 switch (_NSIG_WORDS) {
202 case 4: newset.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
203 case 3: newset.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
204 case 2: newset.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
205 case 1: newset.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
206 }
207 sigdelsetmask(&newset, ~_BLOCKABLE);
208
209 spin_lock_irq(&current->sighand->siglock);
210 saveset = current->blocked;
211 current->blocked = newset;
212 recalc_sigpending();
213 spin_unlock_irq(&current->sighand->siglock);
214 regs->gprs[2] = -EINTR;
215
216 while (1) {
217 set_current_state(TASK_INTERRUPTIBLE);
218 schedule();
219 if (do_signal(regs, &saveset))
220 return -EINTR;
221 }
222}
223
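The _NSIG_WORDS switch above packs pairs of 32-bit compat sigset words into 64-bit kernel sigset words; sys32_rt_sigaction below does the same packing on entry and the inverse split on exit. In isolation (on 64-bit s390, _NSIG is 64, so _NSIG_WORDS is 1 and only the sig[0]/sig[1] pair is used):

	#include <stdint.h>

	/* Sketch of the compat sigset conversion: two 32-bit words per
	 * 64-bit word, low word first, as in the switch statements above. */
	static uint64_t sigset_pack32(uint32_t lo, uint32_t hi)
	{
		return (uint64_t)lo | ((uint64_t)hi << 32);
	}

	static void sigset_split64(uint64_t w, uint32_t *lo, uint32_t *hi)
	{
		*lo = (uint32_t)w;		/* becomes set32.sig[0] */
		*hi = (uint32_t)(w >> 32);	/* becomes set32.sig[1] */
	}

The source mixes + and | for the merge; the results are identical because the two halves never overlap.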
224asmlinkage long
225sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
226 struct old_sigaction32 __user *oact)
227{
228 struct k_sigaction new_ka, old_ka;
229 unsigned long sa_handler, sa_restorer;
230 int ret;
231
232 if (act) {
233 compat_old_sigset_t mask;
234 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
235 __get_user(sa_handler, &act->sa_handler) ||
236 __get_user(sa_restorer, &act->sa_restorer))
237 return -EFAULT;
238 new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
239 new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
240 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
241 __get_user(mask, &act->sa_mask);
242 siginitset(&new_ka.sa.sa_mask, mask);
243 }
244
245 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
246
247 if (!ret && oact) {
248 sa_handler = (unsigned long) old_ka.sa.sa_handler;
249 sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
250 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
251 __put_user(sa_handler, &oact->sa_handler) ||
252 __put_user(sa_restorer, &oact->sa_restorer))
253 return -EFAULT;
254 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
255 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
256 }
257
258 return ret;
259}
260
261int
262do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
263
264asmlinkage long
265sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
266 struct sigaction32 __user *oact, size_t sigsetsize)
267{
268 struct k_sigaction new_ka, old_ka;
269 unsigned long sa_handler;
270 int ret;
271 compat_sigset_t set32;
272
273 /* XXX: Don't preclude handling different sized sigset_t's. */
274 if (sigsetsize != sizeof(compat_sigset_t))
275 return -EINVAL;
276
277 if (act) {
278 ret = get_user(sa_handler, &act->sa_handler);
279 ret |= __copy_from_user(&set32, &act->sa_mask,
280 sizeof(compat_sigset_t));
281 switch (_NSIG_WORDS) {
282 case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
283 | (((long)set32.sig[7]) << 32);
284 case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
285 | (((long)set32.sig[5]) << 32);
286 case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
287 | (((long)set32.sig[3]) << 32);
288 case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
289 | (((long)set32.sig[1]) << 32);
290 }
291 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
292
293 if (ret)
294 return -EFAULT;
295 new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
296 }
297
298 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
299
300 if (!ret && oact) {
301 switch (_NSIG_WORDS) {
302 case 4:
303 set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
304 set32.sig[6] = old_ka.sa.sa_mask.sig[3];
305 case 3:
306 set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
307 set32.sig[4] = old_ka.sa.sa_mask.sig[2];
308 case 2:
309 set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
310 set32.sig[2] = old_ka.sa.sa_mask.sig[1];
311 case 1:
312 set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
313 set32.sig[0] = old_ka.sa.sa_mask.sig[0];
314 }
315 ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
316 ret |= __copy_to_user(&oact->sa_mask, &set32,
317 sizeof(compat_sigset_t));
318 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
319 }
320
321 return ret;
322}
323
324asmlinkage long
325sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
326 struct pt_regs *regs)
327{
328 stack_t kss, koss;
329 unsigned long ss_sp;
330 int ret, err = 0;
331 mm_segment_t old_fs = get_fs();
332
333 if (uss) {
334 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
335 return -EFAULT;
336 err |= __get_user(ss_sp, &uss->ss_sp);
337 err |= __get_user(kss.ss_size, &uss->ss_size);
338 err |= __get_user(kss.ss_flags, &uss->ss_flags);
339 if (err)
340 return -EFAULT;
341 kss.ss_sp = (void *) ss_sp;
342 }
343
344 set_fs (KERNEL_DS);
345 ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL),
346 (stack_t __user *) (uoss ? &koss : NULL),
347 regs->gprs[15]);
348 set_fs (old_fs);
349
350 if (!ret && uoss) {
351 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
352 return -EFAULT;
353 ss_sp = (unsigned long) koss.ss_sp;
354 err |= __put_user(ss_sp, &uoss->ss_sp);
355 err |= __put_user(koss.ss_size, &uoss->ss_size);
356 err |= __put_user(koss.ss_flags, &uoss->ss_flags);
357 if (err)
358 return -EFAULT;
359 }
360 return ret;
361}
362
363static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
364{
365 _s390_regs_common32 regs32;
366 int err, i;
367
368 regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
369 (__u32)(regs->psw.mask >> 32));
370 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
371 for (i = 0; i < NUM_GPRS; i++)
372 regs32.gprs[i] = (__u32) regs->gprs[i];
373 save_access_regs(current->thread.acrs);
374 memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
375 err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
376 if (err)
377 return err;
378 save_fp_regs(&current->thread.fp_regs);
379 /* s390_fp_regs and _s390_fp_regs32 are the same ! */
380 return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
381 sizeof(_s390_fp_regs32));
382}
383
384static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
385{
386 _s390_regs_common32 regs32;
387 int err, i;
388
389	/* Always make any pending restarted system call return -EINTR */
390 current_thread_info()->restart_block.fn = do_no_restart_syscall;
391
392 err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
393 if (err)
394 return err;
395 regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
396 (__u64)regs32.psw.mask << 32);
397 regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
398 for (i = 0; i < NUM_GPRS; i++)
399 regs->gprs[i] = (__u64) regs32.gprs[i];
400 memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
401 restore_access_regs(current->thread.acrs);
402
403 err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
404 sizeof(_s390_fp_regs32));
405 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
406 if (err)
407 return err;
408
409 restore_fp_regs(&current->thread.fp_regs);
410 regs->trap = -1; /* disable syscall checks */
411 return 0;
412}
413
414asmlinkage long sys32_sigreturn(struct pt_regs *regs)
415{
416 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
417 sigset_t set;
418
419 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
420 goto badframe;
421 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
422 goto badframe;
423
424 sigdelsetmask(&set, ~_BLOCKABLE);
425 spin_lock_irq(&current->sighand->siglock);
426 current->blocked = set;
427 recalc_sigpending();
428 spin_unlock_irq(&current->sighand->siglock);
429
430 if (restore_sigregs32(regs, &frame->sregs))
431 goto badframe;
432
433 return regs->gprs[2];
434
435badframe:
436 force_sig(SIGSEGV, current);
437 return 0;
438}
439
440asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
441{
442 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
443 sigset_t set;
444 stack_t st;
445 __u32 ss_sp;
446 int err;
447 mm_segment_t old_fs = get_fs();
448
449 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
450 goto badframe;
451 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
452 goto badframe;
453
454 sigdelsetmask(&set, ~_BLOCKABLE);
455 spin_lock_irq(&current->sighand->siglock);
456 current->blocked = set;
457 recalc_sigpending();
458 spin_unlock_irq(&current->sighand->siglock);
459
460 if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
461 goto badframe;
462
463 err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
464 st.ss_sp = (void *) A((unsigned long)ss_sp);
465 err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
466 err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
467 if (err)
468 goto badframe;
469
470 /* It is more difficult to avoid calling this function than to
471 call it and ignore errors. */
472 set_fs (KERNEL_DS);
473 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
474 set_fs (old_fs);
475
476 return regs->gprs[2];
477
478badframe:
479 force_sig(SIGSEGV, current);
480 return 0;
481}
482
483/*
484 * Set up a signal frame.
485 */
486
487
488/*
489 * Determine which stack to use.
490 */
491static inline void __user *
492get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
493{
494 unsigned long sp;
495
496 /* Default to using normal stack */
497 sp = (unsigned long) A(regs->gprs[15]);
498
499 /* This is the X/Open sanctioned signal stack switching. */
500 if (ka->sa.sa_flags & SA_ONSTACK) {
501 if (! on_sig_stack(sp))
502 sp = current->sas_ss_sp + current->sas_ss_size;
503 }
504
505 /* This is the legacy signal stack switching. */
506 else if (!user_mode(regs) &&
507 !(ka->sa.sa_flags & SA_RESTORER) &&
508 ka->sa.sa_restorer) {
509 sp = (unsigned long) ka->sa.sa_restorer;
510 }
511
512 return (void __user *)((sp - frame_size) & -8ul);
513}
514
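get_sigframe carves the frame out of the chosen stack by subtracting frame_size and rounding down to an 8-byte boundary; `& -8ul` is the usual mask trick, since -8 in two's complement is ~7. Equivalently:

	#include <assert.h>

	/* Sketch: place a frame of frame_size bytes below sp, 8-byte aligned,
	 * mirroring (sp - frame_size) & -8ul in get_sigframe above. */
	static unsigned long frame_addr(unsigned long sp, unsigned long frame_size)
	{
		unsigned long addr = (sp - frame_size) & ~7ul;	/* -8ul == ~7ul */
		assert((addr & 7) == 0);
		return addr;
	}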
515static inline int map_signal(int sig)
516{
517 if (current_thread_info()->exec_domain
518 && current_thread_info()->exec_domain->signal_invmap
519 && sig < 32)
520 return current_thread_info()->exec_domain->signal_invmap[sig];
521 else
522 return sig;
523}
524
525static void setup_frame32(int sig, struct k_sigaction *ka,
526 sigset_t *set, struct pt_regs * regs)
527{
528 sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
529 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
530 goto give_sigsegv;
531
532 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
533 goto give_sigsegv;
534
535 if (save_sigregs32(regs, &frame->sregs))
536 goto give_sigsegv;
537 if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
538 goto give_sigsegv;
539
540 /* Set up to return from userspace. If provided, use a stub
541 already in userspace. */
542 if (ka->sa.sa_flags & SA_RESTORER) {
543 regs->gprs[14] = (__u64) ka->sa.sa_restorer;
544 } else {
545 regs->gprs[14] = (__u64) frame->retcode;
546 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
547 (u16 __user *)(frame->retcode)))
548 goto give_sigsegv;
549 }
550
551 /* Set up backchain. */
552 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
553 goto give_sigsegv;
554
555 /* Set up registers for signal handler */
556 regs->gprs[15] = (__u64) frame;
557 regs->psw.addr = (__u64) ka->sa.sa_handler;
558
559 regs->gprs[2] = map_signal(sig);
560 regs->gprs[3] = (__u64) &frame->sc;
561
562 /* We forgot to include these in the sigcontext.
563 To avoid breaking binary compatibility, they are passed as args. */
564 regs->gprs[4] = current->thread.trap_no;
565 regs->gprs[5] = current->thread.prot_addr;
566
567 /* Place signal number on stack to allow backtrace from handler. */
568 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
569 goto give_sigsegv;
570 return;
571
572give_sigsegv:
573 force_sigsegv(sig, current);
574}
575
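When the handler supplies no SA_RESTORER stub, setup_frame32 points the return register %r14 at frame->retcode and stores a two-byte `svc __NR_sigreturn` instruction there, so returning from the handler re-enters the kernel through sys32_sigreturn. A sketch of the halfword composition; the opcode value and syscall number below are assumptions based on the s390 SVC encoding, not taken from this file:

	#include <stdint.h>
	#include <stdio.h>

	#define SVC_OPCODE   0x0a00u	/* assumed s390 "svc" opcode in the high byte */
	#define NR_SIGRETURN 119u	/* assumed syscall number, for illustration */

	int main(void)
	{
		/* The halfword written into frame->retcode: executes as "svc 119". */
		uint16_t insn = SVC_OPCODE | NR_SIGRETURN;
		printf("trampoline insn: 0x%04x\n", insn);
		return 0;
	}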
576static void setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
577 sigset_t *set, struct pt_regs * regs)
578{
579 int err = 0;
580 rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
581 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
582 goto give_sigsegv;
583
584 if (copy_siginfo_to_user32(&frame->info, info))
585 goto give_sigsegv;
586
587 /* Create the ucontext. */
588 err |= __put_user(0, &frame->uc.uc_flags);
589 err |= __put_user(0, &frame->uc.uc_link);
590 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
591 err |= __put_user(sas_ss_flags(regs->gprs[15]),
592 &frame->uc.uc_stack.ss_flags);
593 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
594 err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
595 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
596 if (err)
597 goto give_sigsegv;
598
599 /* Set up to return from userspace. If provided, use a stub
600 already in userspace. */
601 if (ka->sa.sa_flags & SA_RESTORER) {
602 regs->gprs[14] = (__u64) ka->sa.sa_restorer;
603 } else {
604 regs->gprs[14] = (__u64) frame->retcode;
605 err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
606 (u16 __user *)(frame->retcode));
607 }
608
609 /* Set up backchain. */
610 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
611 goto give_sigsegv;
612
613 /* Set up registers for signal handler */
614 regs->gprs[15] = (__u64) frame;
615 regs->psw.addr = (__u64) ka->sa.sa_handler;
616
617 regs->gprs[2] = map_signal(sig);
618 regs->gprs[3] = (__u64) &frame->info;
619 regs->gprs[4] = (__u64) &frame->uc;
620 return;
621
622give_sigsegv:
623 force_sigsegv(sig, current);
624}
625
626/*
627 * OK, we're invoking a handler
628 */
629
630void
631handle_signal32(unsigned long sig, struct k_sigaction *ka,
632 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
633{
634 /* Set up the stack frame */
635 if (ka->sa.sa_flags & SA_SIGINFO)
636 setup_rt_frame32(sig, ka, info, oldset, regs);
637 else
638 setup_frame32(sig, ka, oldset, regs);
639
640 if (!(ka->sa.sa_flags & SA_NODEFER)) {
641 spin_lock_irq(&current->sighand->siglock);
642 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
643 sigaddset(&current->blocked,sig);
644 recalc_sigpending();
645 spin_unlock_irq(&current->sighand->siglock);
646 }
647}
648
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
new file mode 100644
index 000000000000..7a607b1d0380
--- /dev/null
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -0,0 +1,1443 @@
1/*
2* arch/s390/kernel/compat_wrapper.S
3* wrappers for 31-bit compatible system calls.
4*
5* S390 version
6* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7* Author(s): Gerhard Tonn (ton@de.ibm.com),
8* Thomas Spatzier (tspat@de.ibm.com)
9*/
10
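Every wrapper in this file widens its 32-bit register arguments to the 64-bit calling convention before branching to the handler: lgfr sign-extends signed ints, llgfr zero-extends unsigned values, and llgtr zero-extends while clearing bit 32, which strips the 31-bit addressing-mode bit from user pointers. Roughly, in C:

	#include <stdint.h>

	/* Sketch of the three widening idioms used by the wrappers below. */
	static int64_t  widen_lgfr(uint64_t r)  { return (int64_t)(int32_t)r; }	/* signed int */
	static uint64_t widen_llgfr(uint64_t r) { return (uint32_t)r; }		/* unsigned */
	static uint64_t widen_llgtr(uint64_t r) { return r & 0x7fffffffu; }	/* 31-bit user ptr */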
11 .globl sys32_exit_wrapper
12sys32_exit_wrapper:
13 lgfr %r2,%r2 # int
14 jg sys_exit # branch to sys_exit
15
16 .globl sys32_read_wrapper
17sys32_read_wrapper:
18 llgfr %r2,%r2 # unsigned int
19 llgtr %r3,%r3 # char *
20 llgfr %r4,%r4 # size_t
21 jg sys32_read # branch to sys_read
22
23 .globl sys32_write_wrapper
24sys32_write_wrapper:
25 llgfr %r2,%r2 # unsigned int
26 llgtr %r3,%r3 # const char *
27 llgfr %r4,%r4 # size_t
28 jg sys32_write # branch to system call
29
30 .globl sys32_open_wrapper
31sys32_open_wrapper:
32 llgtr %r2,%r2 # const char *
33 lgfr %r3,%r3 # int
34 lgfr %r4,%r4 # int
35 jg sys_open # branch to system call
36
37 .globl sys32_close_wrapper
38sys32_close_wrapper:
39 llgfr %r2,%r2 # unsigned int
40 jg sys_close # branch to system call
41
42 .globl sys32_creat_wrapper
43sys32_creat_wrapper:
44 llgtr %r2,%r2 # const char *
45 lgfr %r3,%r3 # int
46 jg sys_creat # branch to system call
47
48 .globl sys32_link_wrapper
49sys32_link_wrapper:
50 llgtr %r2,%r2 # const char *
51 llgtr %r3,%r3 # const char *
52 jg sys_link # branch to system call
53
54 .globl sys32_unlink_wrapper
55sys32_unlink_wrapper:
56 llgtr %r2,%r2 # const char *
57 jg sys_unlink # branch to system call
58
59 .globl sys32_chdir_wrapper
60sys32_chdir_wrapper:
61 llgtr %r2,%r2 # const char *
62 jg sys_chdir # branch to system call
63
64 .globl sys32_time_wrapper
65sys32_time_wrapper:
66 llgtr %r2,%r2 # int *
67 jg compat_sys_time # branch to system call
68
69 .globl sys32_mknod_wrapper
70sys32_mknod_wrapper:
71 llgtr %r2,%r2 # const char *
72 lgfr %r3,%r3 # int
73 llgfr %r4,%r4 # dev
74 jg sys_mknod # branch to system call
75
76 .globl sys32_chmod_wrapper
77sys32_chmod_wrapper:
78 llgtr %r2,%r2 # const char *
79 llgfr %r3,%r3 # mode_t
80 jg sys_chmod # branch to system call
81
82 .globl sys32_lchown16_wrapper
83sys32_lchown16_wrapper:
84 llgtr %r2,%r2 # const char *
85 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
86 llgfr %r4,%r4 # __kernel_old_uid_emu31_t
87 jg sys32_lchown16 # branch to system call
88
89 .globl sys32_lseek_wrapper
90sys32_lseek_wrapper:
91 llgfr %r2,%r2 # unsigned int
92 lgfr %r3,%r3 # off_t
93 llgfr %r4,%r4 # unsigned int
94 jg sys_lseek # branch to system call
95
96#sys32_getpid_wrapper # void
97
98 .globl sys32_mount_wrapper
99sys32_mount_wrapper:
100 llgtr %r2,%r2 # char *
101 llgtr %r3,%r3 # char *
102 llgtr %r4,%r4 # char *
103 llgfr %r5,%r5 # unsigned long
104 llgtr %r6,%r6 # void *
105 jg compat_sys_mount # branch to system call
106
107 .globl sys32_oldumount_wrapper
108sys32_oldumount_wrapper:
109 llgtr %r2,%r2 # char *
110 jg sys_oldumount # branch to system call
111
112 .globl sys32_setuid16_wrapper
113sys32_setuid16_wrapper:
114 llgfr %r2,%r2 # __kernel_old_uid_emu31_t
115 jg sys32_setuid16 # branch to system call
116
117#sys32_getuid16_wrapper # void
118
119 .globl sys32_ptrace_wrapper
120sys32_ptrace_wrapper:
121 lgfr %r2,%r2 # long
122 lgfr %r3,%r3 # long
123 llgtr %r4,%r4 # long
124 llgfr %r5,%r5 # long
125 jg sys_ptrace # branch to system call
126
127 .globl sys32_alarm_wrapper
128sys32_alarm_wrapper:
129 llgfr %r2,%r2 # unsigned int
130 jg sys_alarm # branch to system call
131
132#sys32_pause_wrapper # void
133
134 .globl compat_sys_utime_wrapper
135compat_sys_utime_wrapper:
136 llgtr %r2,%r2 # char *
137 llgtr %r3,%r3 # struct compat_utimbuf *
138 jg compat_sys_utime # branch to system call
139
140 .globl sys32_access_wrapper
141sys32_access_wrapper:
142 llgtr %r2,%r2 # const char *
143 lgfr %r3,%r3 # int
144 jg sys_access # branch to system call
145
146 .globl sys32_nice_wrapper
147sys32_nice_wrapper:
148 lgfr %r2,%r2 # int
149 jg sys_nice # branch to system call
150
151#sys32_sync_wrapper # void
152
153 .globl sys32_kill_wrapper
154sys32_kill_wrapper:
155 lgfr %r2,%r2 # int
156 lgfr %r3,%r3 # int
157 jg sys_kill # branch to system call
158
159 .globl sys32_rename_wrapper
160sys32_rename_wrapper:
161 llgtr %r2,%r2 # const char *
162 llgtr %r3,%r3 # const char *
163 jg sys_rename # branch to system call
164
165 .globl sys32_mkdir_wrapper
166sys32_mkdir_wrapper:
167 llgtr %r2,%r2 # const char *
168 lgfr %r3,%r3 # int
169 jg sys_mkdir # branch to system call
170
171 .globl sys32_rmdir_wrapper
172sys32_rmdir_wrapper:
173 llgtr %r2,%r2 # const char *
174 jg sys_rmdir # branch to system call
175
176 .globl sys32_dup_wrapper
177sys32_dup_wrapper:
178 llgfr %r2,%r2 # unsigned int
179 jg sys_dup # branch to system call
180
181 .globl sys32_pipe_wrapper
182sys32_pipe_wrapper:
183 llgtr %r2,%r2 # u32 *
184 jg sys_pipe # branch to system call
185
186 .globl compat_sys_times_wrapper
187compat_sys_times_wrapper:
188 llgtr %r2,%r2 # struct compat_tms *
189 jg compat_sys_times # branch to system call
190
191 .globl sys32_brk_wrapper
192sys32_brk_wrapper:
193 llgtr %r2,%r2 # unsigned long
194 jg sys_brk # branch to system call
195
196 .globl sys32_setgid16_wrapper
197sys32_setgid16_wrapper:
198 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
199 jg sys32_setgid16 # branch to system call
200
201#sys32_getgid16_wrapper # void
202
203 .globl sys32_signal_wrapper
204sys32_signal_wrapper:
205 lgfr %r2,%r2 # int
206 llgtr %r3,%r3 # __sighandler_t
207 jg sys_signal
208
209#sys32_geteuid16_wrapper # void
210
211#sys32_getegid16_wrapper # void
212
213 .globl sys32_acct_wrapper
214sys32_acct_wrapper:
215 llgtr %r2,%r2 # char *
216 jg sys_acct # branch to system call
217
218 .globl sys32_umount_wrapper
219sys32_umount_wrapper:
220 llgtr %r2,%r2 # char *
221 lgfr %r3,%r3 # int
222 jg sys_umount # branch to system call
223
224 .globl compat_sys_ioctl_wrapper
225compat_sys_ioctl_wrapper:
226 llgfr %r2,%r2 # unsigned int
227 llgfr %r3,%r3 # unsigned int
228 llgfr %r4,%r4 # unsigned int
229 jg compat_sys_ioctl # branch to system call
230
231 .globl compat_sys_fcntl_wrapper
232compat_sys_fcntl_wrapper:
233 llgfr %r2,%r2 # unsigned int
234 llgfr %r3,%r3 # unsigned int
235 llgfr %r4,%r4 # unsigned long
236 jg compat_sys_fcntl # branch to system call
237
238 .globl sys32_setpgid_wrapper
239sys32_setpgid_wrapper:
240 lgfr %r2,%r2 # pid_t
241 lgfr %r3,%r3 # pid_t
242 jg sys_setpgid # branch to system call
243
244 .globl sys32_umask_wrapper
245sys32_umask_wrapper:
246 lgfr %r2,%r2 # int
247 jg sys_umask # branch to system call
248
249 .globl sys32_chroot_wrapper
250sys32_chroot_wrapper:
251 llgtr %r2,%r2 # char *
252 jg sys_chroot # branch to system call
253
254 .globl sys32_ustat_wrapper
255sys32_ustat_wrapper:
256 llgfr %r2,%r2 # dev_t
257 llgtr %r3,%r3 # struct ustat *
258 jg sys_ustat
259
260 .globl sys32_dup2_wrapper
261sys32_dup2_wrapper:
262 llgfr %r2,%r2 # unsigned int
263 llgfr %r3,%r3 # unsigned int
264 jg sys_dup2 # branch to system call
265
266#sys32_getppid_wrapper # void
267
268#sys32_getpgrp_wrapper # void
269
270#sys32_setsid_wrapper # void
271
272 .globl sys32_sigaction_wrapper
273sys32_sigaction_wrapper:
274 lgfr %r2,%r2 # int
275 llgtr %r3,%r3 # const struct old_sigaction *
276 llgtr %r4,%r4 # struct old_sigaction32 *
277 jg sys32_sigaction # branch to system call
278
279 .globl sys32_setreuid16_wrapper
280sys32_setreuid16_wrapper:
281 llgfr %r2,%r2 # __kernel_old_uid_emu31_t
282 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
283 jg sys32_setreuid16 # branch to system call
284
285 .globl sys32_setregid16_wrapper
286sys32_setregid16_wrapper:
287 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
288 llgfr %r3,%r3 # __kernel_old_gid_emu31_t
289 jg sys32_setregid16 # branch to system call
290
291#sys32_sigsuspend_wrapper # done in sigsuspend_glue
292
293 .globl compat_sys_sigpending_wrapper
294compat_sys_sigpending_wrapper:
295 llgtr %r2,%r2 # compat_old_sigset_t *
296 jg compat_sys_sigpending # branch to system call
297
298 .globl sys32_sethostname_wrapper
299sys32_sethostname_wrapper:
300 llgtr %r2,%r2 # char *
301 lgfr %r3,%r3 # int
302 jg sys_sethostname # branch to system call
303
304 .globl compat_sys_setrlimit_wrapper
305compat_sys_setrlimit_wrapper:
306 llgfr %r2,%r2 # unsigned int
307 llgtr %r3,%r3 # struct rlimit_emu31 *
308 jg compat_sys_setrlimit # branch to system call
309
310 .globl compat_sys_old_getrlimit_wrapper
311compat_sys_old_getrlimit_wrapper:
312 llgfr %r2,%r2 # unsigned int
313 llgtr %r3,%r3 # struct rlimit_emu31 *
314 jg compat_sys_old_getrlimit # branch to system call
315
316 .globl compat_sys_getrlimit_wrapper
317compat_sys_getrlimit_wrapper:
318 llgfr %r2,%r2 # unsigned int
319 llgtr %r3,%r3 # struct rlimit_emu31 *
320 jg compat_sys_getrlimit # branch to system call
321
322 .globl sys32_mmap2_wrapper
323sys32_mmap2_wrapper:
324 llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
325 jg sys32_mmap2 # branch to system call
326
327 .globl compat_sys_getrusage_wrapper
328compat_sys_getrusage_wrapper:
329 lgfr %r2,%r2 # int
330 llgtr %r3,%r3 # struct rusage_emu31 *
331 jg compat_sys_getrusage # branch to system call
332
333 .globl sys32_gettimeofday_wrapper
334sys32_gettimeofday_wrapper:
335 llgtr %r2,%r2 # struct timeval_emu31 *
336 llgtr %r3,%r3 # struct timezone *
337 jg sys32_gettimeofday # branch to system call
338
339 .globl sys32_settimeofday_wrapper
340sys32_settimeofday_wrapper:
341 llgtr %r2,%r2 # struct timeval_emu31 *
342 llgtr %r3,%r3 # struct timezone *
343 jg sys32_settimeofday # branch to system call
344
345 .globl sys32_getgroups16_wrapper
346sys32_getgroups16_wrapper:
347 lgfr %r2,%r2 # int
348 llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
349 jg sys32_getgroups16 # branch to system call
350
351 .globl sys32_setgroups16_wrapper
352sys32_setgroups16_wrapper:
353 lgfr %r2,%r2 # int
354 llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
355 jg sys32_setgroups16 # branch to system call
356
357 .globl sys32_symlink_wrapper
358sys32_symlink_wrapper:
359 llgtr %r2,%r2 # const char *
360 llgtr %r3,%r3 # const char *
361 jg sys_symlink # branch to system call
362
363 .globl sys32_readlink_wrapper
364sys32_readlink_wrapper:
365 llgtr %r2,%r2 # const char *
366 llgtr %r3,%r3 # char *
367 lgfr %r4,%r4 # int
368 jg sys_readlink # branch to system call
369
370 .globl sys32_uselib_wrapper
371sys32_uselib_wrapper:
372 llgtr %r2,%r2 # const char *
373 jg sys_uselib # branch to system call
374
375 .globl sys32_swapon_wrapper
376sys32_swapon_wrapper:
377 llgtr %r2,%r2 # const char *
378 lgfr %r3,%r3 # int
379 jg sys_swapon # branch to system call
380
381 .globl sys32_reboot_wrapper
382sys32_reboot_wrapper:
383 lgfr %r2,%r2 # int
384 lgfr %r3,%r3 # int
385 llgfr %r4,%r4 # unsigned int
386 llgtr %r5,%r5 # void *
387 jg sys_reboot # branch to system call
388
389 .globl old32_readdir_wrapper
390old32_readdir_wrapper:
391 llgfr %r2,%r2 # unsigned int
392 llgtr %r3,%r3 # void *
393 llgfr %r4,%r4 # unsigned int
394 jg compat_sys_old_readdir # branch to system call
395
396 .globl old32_mmap_wrapper
397old32_mmap_wrapper:
398 llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
399 jg old32_mmap # branch to system call
400
401 .globl sys32_munmap_wrapper
402sys32_munmap_wrapper:
403 llgfr %r2,%r2 # unsigned long
404 llgfr %r3,%r3 # size_t
405 jg sys_munmap # branch to system call
406
407 .globl sys32_truncate_wrapper
408sys32_truncate_wrapper:
409 llgtr %r2,%r2 # const char *
410 llgfr %r3,%r3 # unsigned long
411 jg sys_truncate # branch to system call
412
413 .globl sys32_ftruncate_wrapper
414sys32_ftruncate_wrapper:
415 llgfr %r2,%r2 # unsigned int
416 llgfr %r3,%r3 # unsigned long
417 jg sys_ftruncate # branch to system call
418
419 .globl sys32_fchmod_wrapper
420sys32_fchmod_wrapper:
421 llgfr %r2,%r2 # unsigned int
422 llgfr %r3,%r3 # mode_t
423 jg sys_fchmod # branch to system call
424
425 .globl sys32_fchown16_wrapper
426sys32_fchown16_wrapper:
427 llgfr %r2,%r2 # unsigned int
428 llgfr %r3,%r3 # compat_uid_t
429 llgfr %r4,%r4 # compat_uid_t
430 jg sys32_fchown16 # branch to system call
431
432 .globl sys32_getpriority_wrapper
433sys32_getpriority_wrapper:
434 lgfr %r2,%r2 # int
435 lgfr %r3,%r3 # int
436 jg sys_getpriority # branch to system call
437
438 .globl sys32_setpriority_wrapper
439sys32_setpriority_wrapper:
440 lgfr %r2,%r2 # int
441 lgfr %r3,%r3 # int
442 lgfr %r4,%r4 # int
443 jg sys_setpriority # branch to system call
444
445 .globl compat_sys_statfs_wrapper
446compat_sys_statfs_wrapper:
447 llgtr %r2,%r2 # char *
448 llgtr %r3,%r3 # struct compat_statfs *
449 jg compat_sys_statfs # branch to system call
450
451 .globl compat_sys_fstatfs_wrapper
452compat_sys_fstatfs_wrapper:
453 llgfr %r2,%r2 # unsigned int
454 llgtr %r3,%r3 # struct compat_statfs *
455 jg compat_sys_fstatfs # branch to system call
456
457 .globl compat_sys_socketcall_wrapper
458compat_sys_socketcall_wrapper:
459 lgfr %r2,%r2 # int
460 llgtr %r3,%r3 # u32 *
461 jg compat_sys_socketcall # branch to system call
462
463 .globl sys32_syslog_wrapper
464sys32_syslog_wrapper:
465 lgfr %r2,%r2 # int
466 llgtr %r3,%r3 # char *
467 lgfr %r4,%r4 # int
468 jg sys_syslog # branch to system call
469
470 .globl compat_sys_setitimer_wrapper
471compat_sys_setitimer_wrapper:
472 lgfr %r2,%r2 # int
473 llgtr %r3,%r3 # struct itimerval_emu31 *
474 llgtr %r4,%r4 # struct itimerval_emu31 *
475 jg compat_sys_setitimer # branch to system call
476
477 .globl compat_sys_getitimer_wrapper
478compat_sys_getitimer_wrapper:
479 lgfr %r2,%r2 # int
480 llgtr %r3,%r3 # struct itimerval_emu31 *
481 jg compat_sys_getitimer # branch to system call
482
483 .globl compat_sys_newstat_wrapper
484compat_sys_newstat_wrapper:
485 llgtr %r2,%r2 # char *
486 llgtr %r3,%r3 # struct stat_emu31 *
487 jg compat_sys_newstat # branch to system call
488
489 .globl compat_sys_newlstat_wrapper
490compat_sys_newlstat_wrapper:
491 llgtr %r2,%r2 # char *
492 llgtr %r3,%r3 # struct stat_emu31 *
493 jg compat_sys_newlstat # branch to system call
494
495 .globl compat_sys_newfstat_wrapper
496compat_sys_newfstat_wrapper:
497 llgfr %r2,%r2 # unsigned int
498 llgtr %r3,%r3 # struct stat_emu31 *
499 jg compat_sys_newfstat # branch to system call
500
501#sys32_vhangup_wrapper # void
502
503 .globl compat_sys_wait4_wrapper
504compat_sys_wait4_wrapper:
505 lgfr %r2,%r2 # pid_t
506 llgtr %r3,%r3 # unsigned int *
507 lgfr %r4,%r4 # int
508 llgtr %r5,%r5 # struct rusage *
509 jg compat_sys_wait4 # branch to system call
510
511 .globl sys32_swapoff_wrapper
512sys32_swapoff_wrapper:
513 llgtr %r2,%r2 # const char *
514 jg sys_swapoff # branch to system call
515
516 .globl sys32_sysinfo_wrapper
517sys32_sysinfo_wrapper:
518 llgtr %r2,%r2 # struct sysinfo_emu31 *
519 jg sys32_sysinfo # branch to system call
520
521 .globl sys32_ipc_wrapper
522sys32_ipc_wrapper:
523 llgfr %r2,%r2 # uint
524 lgfr %r3,%r3 # int
525 lgfr %r4,%r4 # int
526 lgfr %r5,%r5 # int
527 llgfr %r6,%r6 # u32
528 jg sys32_ipc # branch to system call
529
530 .globl sys32_fsync_wrapper
531sys32_fsync_wrapper:
532 llgfr %r2,%r2 # unsigned int
533 jg sys_fsync # branch to system call
534
535#sys32_sigreturn_wrapper # done in sigreturn_glue
536
537#sys32_clone_wrapper # done in clone_glue
538
539 .globl sys32_setdomainname_wrapper
540sys32_setdomainname_wrapper:
541 llgtr %r2,%r2 # char *
542 lgfr %r3,%r3 # int
543 jg sys_setdomainname # branch to system call
544
545 .globl sys32_newuname_wrapper
546sys32_newuname_wrapper:
547 llgtr %r2,%r2 # struct new_utsname *
548 jg s390x_newuname # branch to system call
549
550 .globl sys32_adjtimex_wrapper
551sys32_adjtimex_wrapper:
552 llgtr %r2,%r2 # struct timex_emu31 *
553 jg sys32_adjtimex # branch to system call
554
555 .globl sys32_mprotect_wrapper
556sys32_mprotect_wrapper:
557 llgtr %r2,%r2 # unsigned long (actually pointer)
558 llgfr %r3,%r3 # size_t
559 llgfr %r4,%r4 # unsigned long
560 jg sys_mprotect # branch to system call
561
562 .globl compat_sys_sigprocmask_wrapper
563compat_sys_sigprocmask_wrapper:
564 lgfr %r2,%r2 # int
565 llgtr %r3,%r3 # compat_old_sigset_t *
566 llgtr %r4,%r4 # compat_old_sigset_t *
567 jg compat_sys_sigprocmask # branch to system call
568
569 .globl sys32_init_module_wrapper
570sys32_init_module_wrapper:
571 llgtr %r2,%r2 # void *
572 llgfr %r3,%r3 # unsigned long
573 llgtr %r4,%r4 # char *
574 jg sys32_init_module # branch to system call
575
576 .globl sys32_delete_module_wrapper
577sys32_delete_module_wrapper:
578 llgtr %r2,%r2 # const char *
579 llgfr %r3,%r3 # unsigned int
580 jg sys32_delete_module # branch to system call
581
582 .globl sys32_quotactl_wrapper
583sys32_quotactl_wrapper:
584 llgfr %r2,%r2 # unsigned int
585 llgtr %r3,%r3 # const char *
586 llgfr %r4,%r4 # qid_t
587 llgtr %r5,%r5 # caddr_t
588 jg sys_quotactl # branch to system call
589
590 .globl sys32_getpgid_wrapper
591sys32_getpgid_wrapper:
592 lgfr %r2,%r2 # pid_t
593 jg sys_getpgid # branch to system call
594
595 .globl sys32_fchdir_wrapper
596sys32_fchdir_wrapper:
597 llgfr %r2,%r2 # unsigned int
598 jg sys_fchdir # branch to system call
599
600 .globl sys32_bdflush_wrapper
601sys32_bdflush_wrapper:
602 lgfr %r2,%r2 # int
603 lgfr %r3,%r3 # long
604 jg sys_bdflush # branch to system call
605
606 .globl sys32_sysfs_wrapper
607sys32_sysfs_wrapper:
608 lgfr %r2,%r2 # int
609 llgfr %r3,%r3 # unsigned long
610 llgfr %r4,%r4 # unsigned long
611 jg sys_sysfs # branch to system call
612
613 .globl sys32_personality_wrapper
614sys32_personality_wrapper:
615 llgfr %r2,%r2 # unsigned long
616 jg s390x_personality # branch to system call
617
618 .globl sys32_setfsuid16_wrapper
619sys32_setfsuid16_wrapper:
620 llgfr %r2,%r2 # __kernel_old_uid_emu31_t
621 jg sys32_setfsuid16 # branch to system call
622
623 .globl sys32_setfsgid16_wrapper
624sys32_setfsgid16_wrapper:
625 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
626 jg sys32_setfsgid16 # branch to system call
627
628 .globl sys32_llseek_wrapper
629sys32_llseek_wrapper:
630 llgfr %r2,%r2 # unsigned int
631 llgfr %r3,%r3 # unsigned long
632 llgfr %r4,%r4 # unsigned long
633 llgtr %r5,%r5 # loff_t *
634 llgfr %r6,%r6 # unsigned int
635 jg sys_llseek # branch to system call
636
637 .globl sys32_getdents_wrapper
638sys32_getdents_wrapper:
639 llgfr %r2,%r2 # unsigned int
640 llgtr %r3,%r3 # void *
641 llgfr %r4,%r4 # unsigned int
642 jg compat_sys_getdents # branch to system call
643
644 .globl compat_sys_select_wrapper
645compat_sys_select_wrapper:
646 lgfr %r2,%r2 # int
647 llgtr %r3,%r3 # compat_fd_set *
648 llgtr %r4,%r4 # compat_fd_set *
649 llgtr %r5,%r5 # compat_fd_set *
650 llgtr %r6,%r6 # struct compat_timeval *
651 jg compat_sys_select # branch to system call
652
653 .globl sys32_flock_wrapper
654sys32_flock_wrapper:
655 llgfr %r2,%r2 # unsigned int
656 llgfr %r3,%r3 # unsigned int
657 jg sys_flock # branch to system call
658
659 .globl sys32_msync_wrapper
660sys32_msync_wrapper:
661 llgfr %r2,%r2 # unsigned long
662 llgfr %r3,%r3 # size_t
663 lgfr %r4,%r4 # int
664 jg sys_msync # branch to system call
665
666 .globl compat_sys_readv_wrapper
667compat_sys_readv_wrapper:
668 lgfr %r2,%r2 # int
669 llgtr %r3,%r3 # const struct compat_iovec *
670 llgfr %r4,%r4 # unsigned long
671 jg compat_sys_readv # branch to system call
672
673 .globl compat_sys_writev_wrapper
674compat_sys_writev_wrapper:
675 lgfr %r2,%r2 # int
676 llgtr %r3,%r3 # const struct compat_iovec *
677 llgfr %r4,%r4 # unsigned long
678 jg compat_sys_writev # branch to system call
679
680 .globl sys32_getsid_wrapper
681sys32_getsid_wrapper:
682 lgfr %r2,%r2 # pid_t
683 jg sys_getsid # branch to system call
684
685 .globl sys32_fdatasync_wrapper
686sys32_fdatasync_wrapper:
687 llgfr %r2,%r2 # unsigned int
688 jg sys_fdatasync # branch to system call
689
690#sys32_sysctl_wrapper # tbd
691
692 .globl sys32_mlock_wrapper
693sys32_mlock_wrapper:
694 llgfr %r2,%r2 # unsigned long
695 llgfr %r3,%r3 # size_t
696 jg sys_mlock # branch to system call
697
698 .globl sys32_munlock_wrapper
699sys32_munlock_wrapper:
700 llgfr %r2,%r2 # unsigned long
701 llgfr %r3,%r3 # size_t
702 jg sys_munlock # branch to system call
703
704 .globl sys32_mlockall_wrapper
705sys32_mlockall_wrapper:
706 lgfr %r2,%r2 # int
707 jg sys_mlockall # branch to system call
708
709#sys32_munlockall_wrapper # void
710
711 .globl sys32_sched_setparam_wrapper
712sys32_sched_setparam_wrapper:
713 lgfr %r2,%r2 # pid_t
714 llgtr %r3,%r3 # struct sched_param *
715 jg sys_sched_setparam # branch to system call
716
717 .globl sys32_sched_getparam_wrapper
718sys32_sched_getparam_wrapper:
719 lgfr %r2,%r2 # pid_t
720 llgtr %r3,%r3 # struct sched_param *
721 jg sys_sched_getparam # branch to system call
722
723 .globl sys32_sched_setscheduler_wrapper
724sys32_sched_setscheduler_wrapper:
725 lgfr %r2,%r2 # pid_t
726 lgfr %r3,%r3 # int
727 llgtr %r4,%r4 # struct sched_param *
728 jg sys_sched_setscheduler # branch to system call
729
730 .globl sys32_sched_getscheduler_wrapper
731sys32_sched_getscheduler_wrapper:
732 lgfr %r2,%r2 # pid_t
733 jg sys_sched_getscheduler # branch to system call
734
735#sys32_sched_yield_wrapper # void
736
737 .globl sys32_sched_get_priority_max_wrapper
738sys32_sched_get_priority_max_wrapper:
739 lgfr %r2,%r2 # int
740 jg sys_sched_get_priority_max # branch to system call
741
742 .globl sys32_sched_get_priority_min_wrapper
743sys32_sched_get_priority_min_wrapper:
744 lgfr %r2,%r2 # int
745 jg sys_sched_get_priority_min # branch to system call
746
747 .globl sys32_sched_rr_get_interval_wrapper
748sys32_sched_rr_get_interval_wrapper:
749 lgfr %r2,%r2 # pid_t
750 llgtr %r3,%r3 # struct compat_timespec *
751 jg sys32_sched_rr_get_interval # branch to system call
752
753 .globl compat_sys_nanosleep_wrapper
754compat_sys_nanosleep_wrapper:
755 llgtr %r2,%r2 # struct compat_timespec *
756 llgtr %r3,%r3 # struct compat_timespec *
757 jg compat_sys_nanosleep # branch to system call
758
759 .globl sys32_mremap_wrapper
760sys32_mremap_wrapper:
761 llgfr %r2,%r2 # unsigned long
762 llgfr %r3,%r3 # unsigned long
763 llgfr %r4,%r4 # unsigned long
764 llgfr %r5,%r5 # unsigned long
765 llgfr %r6,%r6 # unsigned long
766 jg sys_mremap # branch to system call
767
768 .globl sys32_setresuid16_wrapper
769sys32_setresuid16_wrapper:
770 llgfr %r2,%r2 # __kernel_old_uid_emu31_t
771 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
772 llgfr %r4,%r4 # __kernel_old_uid_emu31_t
773 jg sys32_setresuid16 # branch to system call
774
775 .globl sys32_getresuid16_wrapper
776sys32_getresuid16_wrapper:
777 llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
778 llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
779 llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
780 jg sys32_getresuid16 # branch to system call
781
782 .globl sys32_poll_wrapper
783sys32_poll_wrapper:
784 llgtr %r2,%r2 # struct pollfd *
785 llgfr %r3,%r3 # unsigned int
786 lgfr %r4,%r4 # long
787 jg sys_poll # branch to system call
788
789 .globl compat_sys_nfsservctl_wrapper
790compat_sys_nfsservctl_wrapper:
791 lgfr %r2,%r2 # int
792 llgtr %r3,%r3 # struct compat_nfsctl_arg*
793 llgtr %r4,%r4 # union compat_nfsctl_res*
794 jg compat_sys_nfsservctl # branch to system call
795
796 .globl sys32_setresgid16_wrapper
797sys32_setresgid16_wrapper:
798 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
799 llgfr %r3,%r3 # __kernel_old_gid_emu31_t
800 llgfr %r4,%r4 # __kernel_old_gid_emu31_t
801 jg sys32_setresgid16 # branch to system call
802
803 .globl sys32_getresgid16_wrapper
804sys32_getresgid16_wrapper:
805 llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
806 llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
807 llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
808 jg sys32_getresgid16 # branch to system call
809
810 .globl sys32_prctl_wrapper
811sys32_prctl_wrapper:
812 lgfr %r2,%r2 # int
813 llgfr %r3,%r3 # unsigned long
814 llgfr %r4,%r4 # unsigned long
815 llgfr %r5,%r5 # unsigned long
816 llgfr %r6,%r6 # unsigned long
817 jg sys_prctl # branch to system call
818
819#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
820
821 .globl sys32_rt_sigaction_wrapper
822sys32_rt_sigaction_wrapper:
823 lgfr %r2,%r2 # int
824 llgtr %r3,%r3 # const struct sigaction_emu31 *
825 llgtr %r4,%r4 # const struct sigaction_emu31 *
826 llgfr %r5,%r5 # size_t
827 jg sys32_rt_sigaction # branch to system call
828
829 .globl sys32_rt_sigprocmask_wrapper
830sys32_rt_sigprocmask_wrapper:
831 lgfr %r2,%r2 # int
832 llgtr %r3,%r3 # old_sigset_emu31 *
833 llgtr %r4,%r4 # old_sigset_emu31 *
834 llgfr %r5,%r5 # size_t
835 jg sys32_rt_sigprocmask # branch to system call
836
837 .globl sys32_rt_sigpending_wrapper
838sys32_rt_sigpending_wrapper:
839 llgtr %r2,%r2 # sigset_emu31 *
840 llgfr %r3,%r3 # size_t
841 jg sys32_rt_sigpending # branch to system call
842
843 .globl compat_sys_rt_sigtimedwait_wrapper
844compat_sys_rt_sigtimedwait_wrapper:
845 llgtr %r2,%r2 # const sigset_emu31_t *
846 llgtr %r3,%r3 # siginfo_emu31_t *
847 llgtr %r4,%r4 # const struct compat_timespec *
848 llgfr %r5,%r5 # size_t
849 jg compat_sys_rt_sigtimedwait # branch to system call
850
851 .globl sys32_rt_sigqueueinfo_wrapper
852sys32_rt_sigqueueinfo_wrapper:
853 lgfr %r2,%r2 # int
854 lgfr %r3,%r3 # int
855 llgtr %r4,%r4 # siginfo_emu31_t *
856 jg sys32_rt_sigqueueinfo # branch to system call
857
858#sys32_rt_sigsuspend_wrapper # done in rt_sigsuspend_glue
859
860 .globl sys32_pread64_wrapper
861sys32_pread64_wrapper:
862 llgfr %r2,%r2 # unsigned int
863 llgtr %r3,%r3 # char *
864 llgfr %r4,%r4 # size_t
865 llgfr %r5,%r5 # u32
866 llgfr %r6,%r6 # u32
867 jg sys32_pread64 # branch to system call
868
869 .globl sys32_pwrite64_wrapper
870sys32_pwrite64_wrapper:
871 llgfr %r2,%r2 # unsigned int
872 llgtr %r3,%r3 # const char *
873 llgfr %r4,%r4 # size_t
874 llgfr %r5,%r5 # u32
875 llgfr %r6,%r6 # u32
876 jg sys32_pwrite64 # branch to system call
877
878 .globl sys32_chown16_wrapper
879sys32_chown16_wrapper:
880 llgtr %r2,%r2 # const char *
881 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
882 llgfr %r4,%r4 # __kernel_old_gid_emu31_t
883 jg sys32_chown16 # branch to system call
884
885 .globl sys32_getcwd_wrapper
886sys32_getcwd_wrapper:
887 llgtr %r2,%r2 # char *
888 llgfr %r3,%r3 # unsigned long
889 jg sys_getcwd # branch to system call
890
891 .globl sys32_capget_wrapper
892sys32_capget_wrapper:
893 llgtr %r2,%r2 # cap_user_header_t
894 llgtr %r3,%r3 # cap_user_data_t
895 jg sys_capget # branch to system call
896
897 .globl sys32_capset_wrapper
898sys32_capset_wrapper:
899 llgtr %r2,%r2 # cap_user_header_t
900 llgtr %r3,%r3 # const cap_user_data_t
901 jg sys_capset # branch to system call
902
903 .globl sys32_sigaltstack_wrapper
904sys32_sigaltstack_wrapper:
905 llgtr %r2,%r2 # const stack_emu31_t *
906 llgtr %r3,%r3 # stack_emu31_t *
907 jg sys32_sigaltstack
908
909 .globl sys32_sendfile_wrapper
910sys32_sendfile_wrapper:
911 lgfr %r2,%r2 # int
912 lgfr %r3,%r3 # int
913 llgtr %r4,%r4 # __kernel_off_emu31_t *
914 llgfr %r5,%r5 # size_t
915 jg sys32_sendfile # branch to system call
916
917#sys32_vfork_wrapper # done in vfork_glue
918
919 .globl sys32_truncate64_wrapper
920sys32_truncate64_wrapper:
921 llgtr %r2,%r2 # const char *
922 llgfr %r3,%r3 # unsigned long
923 llgfr %r4,%r4 # unsigned long
924 jg sys32_truncate64 # branch to system call
925
926 .globl sys32_ftruncate64_wrapper
927sys32_ftruncate64_wrapper:
928 llgfr %r2,%r2 # unsigned int
929 llgfr %r3,%r3 # unsigned long
930 llgfr %r4,%r4 # unsigned long
931 jg sys32_ftruncate64 # branch to system call
932
933 .globl sys32_lchown_wrapper
934sys32_lchown_wrapper:
935 llgtr %r2,%r2 # const char *
936 llgfr %r3,%r3 # uid_t
937 llgfr %r4,%r4 # gid_t
938 jg sys_lchown # branch to system call
939
940#sys32_getuid_wrapper # void
941#sys32_getgid_wrapper # void
942#sys32_geteuid_wrapper # void
943#sys32_getegid_wrapper # void
944
945 .globl sys32_setreuid_wrapper
946sys32_setreuid_wrapper:
947 llgfr %r2,%r2 # uid_t
948 llgfr %r3,%r3 # uid_t
949 jg sys_setreuid # branch to system call
950
951 .globl sys32_setregid_wrapper
952sys32_setregid_wrapper:
953 llgfr %r2,%r2 # gid_t
954 llgfr %r3,%r3 # gid_t
955 jg sys_setregid # branch to system call
956
957 .globl sys32_getgroups_wrapper
958sys32_getgroups_wrapper:
959 lgfr %r2,%r2 # int
960 llgtr %r3,%r3 # gid_t *
961 jg sys_getgroups # branch to system call
962
963 .globl sys32_setgroups_wrapper
964sys32_setgroups_wrapper:
965 lgfr %r2,%r2 # int
966 llgtr %r3,%r3 # gid_t *
967 jg sys_setgroups # branch to system call
968
969 .globl sys32_fchown_wrapper
970sys32_fchown_wrapper:
971 llgfr %r2,%r2 # unsigned int
972 llgfr %r3,%r3 # uid_t
973 llgfr %r4,%r4 # gid_t
974 jg sys_fchown # branch to system call
975
976 .globl sys32_setresuid_wrapper
977sys32_setresuid_wrapper:
978 llgfr %r2,%r2 # uid_t
979 llgfr %r3,%r3 # uid_t
980 llgfr %r4,%r4 # uid_t
981 jg sys_setresuid # branch to system call
982
983 .globl sys32_getresuid_wrapper
984sys32_getresuid_wrapper:
985 llgtr %r2,%r2 # uid_t *
986 llgtr %r3,%r3 # uid_t *
987 llgtr %r4,%r4 # uid_t *
988 jg sys_getresuid # branch to system call
989
990 .globl sys32_setresgid_wrapper
991sys32_setresgid_wrapper:
992 llgfr %r2,%r2 # gid_t
993 llgfr %r3,%r3 # gid_t
994 llgfr %r4,%r4 # gid_t
995 jg sys_setresgid # branch to system call
996
997 .globl sys32_getresgid_wrapper
998sys32_getresgid_wrapper:
999 llgtr %r2,%r2 # gid_t *
1000 llgtr %r3,%r3 # gid_t *
1001 llgtr %r4,%r4 # gid_t *
1002 jg sys_getresgid # branch to system call
1003
1004 .globl sys32_chown_wrapper
1005sys32_chown_wrapper:
1006 llgtr %r2,%r2 # const char *
1007 llgfr %r3,%r3 # uid_t
1008 llgfr %r4,%r4 # gid_t
1009 jg sys_chown # branch to system call
1010
1011 .globl sys32_setuid_wrapper
1012sys32_setuid_wrapper:
1013 llgfr %r2,%r2 # uid_t
1014 jg sys_setuid # branch to system call
1015
1016 .globl sys32_setgid_wrapper
1017sys32_setgid_wrapper:
1018 llgfr %r2,%r2 # gid_t
1019 jg sys_setgid # branch to system call
1020
1021 .globl sys32_setfsuid_wrapper
1022sys32_setfsuid_wrapper:
1023 llgfr %r2,%r2 # uid_t
1024 jg sys_setfsuid # branch to system call
1025
1026 .globl sys32_setfsgid_wrapper
1027sys32_setfsgid_wrapper:
1028 llgfr %r2,%r2 # gid_t
1029 jg sys_setfsgid # branch to system call
1030
1031 .globl sys32_pivot_root_wrapper
1032sys32_pivot_root_wrapper:
1033 llgtr %r2,%r2 # const char *
1034 llgtr %r3,%r3 # const char *
1035 jg sys_pivot_root # branch to system call
1036
1037 .globl sys32_mincore_wrapper
1038sys32_mincore_wrapper:
1039 llgfr %r2,%r2 # unsigned long
1040 llgfr %r3,%r3 # size_t
1041 llgtr %r4,%r4 # unsigned char *
1042 jg sys_mincore # branch to system call
1043
1044 .globl sys32_madvise_wrapper
1045sys32_madvise_wrapper:
1046 llgfr %r2,%r2 # unsigned long
1047 llgfr %r3,%r3 # size_t
1048 lgfr %r4,%r4 # int
1049 jg sys_madvise # branch to system call
1050
1051 .globl sys32_getdents64_wrapper
1052sys32_getdents64_wrapper:
1053 llgfr %r2,%r2 # unsigned int
1054 llgtr %r3,%r3 # void *
1055 llgfr %r4,%r4 # unsigned int
1056 jg sys_getdents64 # branch to system call
1057
1058 .globl compat_sys_fcntl64_wrapper
1059compat_sys_fcntl64_wrapper:
1060 llgfr %r2,%r2 # unsigned int
1061 llgfr %r3,%r3 # unsigned int
1062 llgfr %r4,%r4 # unsigned long
1063 jg compat_sys_fcntl64 # branch to system call
1064
1065 .globl sys32_stat64_wrapper
1066sys32_stat64_wrapper:
1067 llgtr %r2,%r2 # char *
1068 llgtr %r3,%r3 # struct stat64 *
1069 jg sys32_stat64 # branch to system call
1070
1071 .globl sys32_lstat64_wrapper
1072sys32_lstat64_wrapper:
1073 llgtr %r2,%r2 # char *
1074 llgtr %r3,%r3 # struct stat64 *
1075 jg sys32_lstat64 # branch to system call
1076
1077 .globl sys32_stime_wrapper
1078sys32_stime_wrapper:
1079 llgtr %r2,%r2 # long *
1080 jg compat_sys_stime # branch to system call
1081
1082 .globl sys32_sysctl_wrapper
1083sys32_sysctl_wrapper:
1084 llgtr %r2,%r2 # struct __sysctl_args32 *
1085 jg sys32_sysctl
1086
1087 .globl sys32_fstat64_wrapper
1088sys32_fstat64_wrapper:
1089 llgfr %r2,%r2 # unsigned long
1090 llgtr %r3,%r3 # struct stat64 *
1091 jg sys32_fstat64 # branch to system call
1092
1093 .globl compat_sys_futex_wrapper
1094compat_sys_futex_wrapper:
1095 llgtr %r2,%r2 # u32 *
1096 lgfr %r3,%r3 # int
1097 lgfr %r4,%r4 # int
1098 llgtr %r5,%r5 # struct compat_timespec *
1099 llgtr %r6,%r6 # u32 *
1100 lgf %r0,164(%r15) # int
1101 stg %r0,160(%r15)
1102 jg compat_sys_futex # branch to system call
1103
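compat_sys_futex takes six arguments, one more than fit in registers %r2-%r6, so the lgf/stg pair above widens the stack-passed argument in place: assuming the 31-bit entry path left a 32-bit word in the low half (bytes 164-167) of the big-endian 8-byte overflow slot at 160(%r15), lgf sign-extends it from offset 164 and stg rewrites the full 64-bit slot where the 64-bit ABI expects it. The same widening, in C:

	#include <stdint.h>

	/* Sketch of the lgf semantics used for the sixth futex argument:
	 * load a 32-bit word and sign-extend it to 64 bits. */
	static int64_t widen_stack_arg(uint32_t word)
	{
		return (int64_t)(int32_t)word;
	}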
1104 .globl sys32_setxattr_wrapper
1105sys32_setxattr_wrapper:
1106 llgtr %r2,%r2 # char *
1107 llgtr %r3,%r3 # char *
1108 llgtr %r4,%r4 # void *
1109 llgfr %r5,%r5 # size_t
1110 lgfr %r6,%r6 # int
1111 jg sys_setxattr
1112
1113 .globl sys32_lsetxattr_wrapper
1114sys32_lsetxattr_wrapper:
1115 llgtr %r2,%r2 # char *
1116 llgtr %r3,%r3 # char *
1117 llgtr %r4,%r4 # void *
1118 llgfr %r5,%r5 # size_t
1119 lgfr %r6,%r6 # int
1120 jg sys_lsetxattr
1121
1122 .globl sys32_fsetxattr_wrapper
1123sys32_fsetxattr_wrapper:
1124 lgfr %r2,%r2 # int
1125 llgtr %r3,%r3 # char *
1126 llgtr %r4,%r4 # void *
1127 llgfr %r5,%r5 # size_t
1128 lgfr %r6,%r6 # int
1129 jg sys_fsetxattr
1130
1131 .globl sys32_getxattr_wrapper
1132sys32_getxattr_wrapper:
1133 llgtr %r2,%r2 # char *
1134 llgtr %r3,%r3 # char *
1135 llgtr %r4,%r4 # void *
1136 llgfr %r5,%r5 # size_t
1137 jg sys_getxattr
1138
1139 .globl sys32_lgetxattr_wrapper
1140sys32_lgetxattr_wrapper:
1141 llgtr %r2,%r2 # char *
1142 llgtr %r3,%r3 # char *
1143 llgtr %r4,%r4 # void *
1144 llgfr %r5,%r5 # size_t
1145 jg sys_lgetxattr
1146
1147 .globl sys32_fgetxattr_wrapper
1148sys32_fgetxattr_wrapper:
1149 lgfr %r2,%r2 # int
1150 llgtr %r3,%r3 # char *
1151 llgtr %r4,%r4 # void *
1152 llgfr %r5,%r5 # size_t
1153 jg sys_fgetxattr
1154
1155 .globl sys32_listxattr_wrapper
1156sys32_listxattr_wrapper:
1157 llgtr %r2,%r2 # char *
1158 llgtr %r3,%r3 # char *
1159 llgfr %r4,%r4 # size_t
1160 jg sys_listxattr
1161
1162 .globl sys32_llistxattr_wrapper
1163sys32_llistxattr_wrapper:
1164 llgtr %r2,%r2 # char *
1165 llgtr %r3,%r3 # char *
1166 llgfr %r4,%r4 # size_t
1167 jg sys_llistxattr
1168
1169 .globl sys32_flistxattr_wrapper
1170sys32_flistxattr_wrapper:
1171 lgfr %r2,%r2 # int
1172 llgtr %r3,%r3 # char *
1173 llgfr %r4,%r4 # size_t
1174 jg sys_flistxattr
1175
1176 .globl sys32_removexattr_wrapper
1177sys32_removexattr_wrapper:
1178 llgtr %r2,%r2 # char *
1179 llgtr %r3,%r3 # char *
1180 jg sys_removexattr
1181
1182 .globl sys32_lremovexattr_wrapper
1183sys32_lremovexattr_wrapper:
1184 llgtr %r2,%r2 # char *
1185 llgtr %r3,%r3 # char *
1186 jg sys_lremovexattr
1187
1188 .globl sys32_fremovexattr_wrapper
1189sys32_fremovexattr_wrapper:
1190 lgfr %r2,%r2 # int
1191 llgtr %r3,%r3 # char *
1192 jg sys_fremovexattr
1193
1194 .globl sys32_sched_setaffinity_wrapper
1195sys32_sched_setaffinity_wrapper:
1196 lgfr %r2,%r2 # int
1197 llgfr %r3,%r3 # unsigned int
1198 llgtr %r4,%r4 # unsigned long *
1199 jg compat_sys_sched_setaffinity
1200
1201 .globl sys32_sched_getaffinity_wrapper
1202sys32_sched_getaffinity_wrapper:
1203 lgfr %r2,%r2 # int
1204 llgfr %r3,%r3 # unsigned int
1205 llgtr %r4,%r4 # unsigned long *
1206 jg compat_sys_sched_getaffinity
1207
1208 .globl sys32_exit_group_wrapper
1209sys32_exit_group_wrapper:
1210 lgfr %r2,%r2 # int
1211 jg sys_exit_group # branch to system call
1212
1213 .globl sys32_set_tid_address_wrapper
1214sys32_set_tid_address_wrapper:
1215 llgtr %r2,%r2 # int *
1216 jg sys_set_tid_address # branch to system call
1217
1218 .globl sys_epoll_create_wrapper
1219sys_epoll_create_wrapper:
1220 lgfr %r2,%r2 # int
1221 jg sys_epoll_create # branch to system call
1222
1223 .globl sys_epoll_ctl_wrapper
1224sys_epoll_ctl_wrapper:
1225 lgfr %r2,%r2 # int
1226 lgfr %r3,%r3 # int
1227 lgfr %r4,%r4 # int
1228 llgtr %r5,%r5 # struct epoll_event *
1229 jg sys_epoll_ctl # branch to system call
1230
1231 .globl sys_epoll_wait_wrapper
1232sys_epoll_wait_wrapper:
1233 lgfr %r2,%r2 # int
1234 llgtr %r3,%r3 # struct epoll_event *
1235 lgfr %r4,%r4 # int
1236 lgfr %r5,%r5 # int
1237 jg sys_epoll_wait # branch to system call
1238
1239 .globl sys32_lookup_dcookie_wrapper
1240sys32_lookup_dcookie_wrapper:
1241 sllg %r2,%r2,32 # get high word of 64bit dcookie
1242 or %r2,%r3 # get low word of 64bit dcookie
1243 llgtr %r3,%r4 # char *
1244 llgfr %r4,%r5 # size_t
1245 jg sys_lookup_dcookie
1246
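sys32_lookup_dcookie above (and sys32_fadvise64 just below) reassemble a 64-bit argument that user space passed as two 32-bit registers: sllg shifts the high word into the upper half and or merges in the low word. Equivalent C:

	#include <stdint.h>

	/* Sketch: merge a 64-bit value split across two 32-bit registers,
	 * mirroring the sllg/or pairs in the wrappers. */
	static uint64_t merge_hi_lo(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | lo;
	}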
1247 .globl sys32_fadvise64_wrapper
1248sys32_fadvise64_wrapper:
1249 lgfr %r2,%r2 # int
1250 sllg %r3,%r3,32 # get high word of 64bit loff_t
1251 or %r3,%r4 # get low word of 64bit loff_t
1252 llgfr %r4,%r5 # size_t (unsigned long)
1253 lgfr %r5,%r6 # int
1254 jg sys_fadvise64
1255
1256 .globl sys32_fadvise64_64_wrapper
1257sys32_fadvise64_64_wrapper:
1258 llgtr %r2,%r2 # struct fadvise64_64_args *
1259 jg s390_fadvise64_64
1260
1261 .globl sys32_clock_settime_wrapper
1262sys32_clock_settime_wrapper:
1263 lgfr %r2,%r2 # clockid_t (int)
1264 llgtr %r3,%r3 # struct compat_timespec *
1265 jg compat_sys_clock_settime
1266
1267 .globl sys32_clock_gettime_wrapper
1268sys32_clock_gettime_wrapper:
1269 lgfr %r2,%r2 # clockid_t (int)
1270 llgtr %r3,%r3 # struct compat_timespec *
1271 jg compat_sys_clock_gettime
1272
1273 .globl sys32_clock_getres_wrapper
1274sys32_clock_getres_wrapper:
1275 lgfr %r2,%r2 # clockid_t (int)
1276 llgtr %r3,%r3 # struct compat_timespec *
1277 jg compat_sys_clock_getres
1278
1279 .globl sys32_clock_nanosleep_wrapper
1280sys32_clock_nanosleep_wrapper:
1281 lgfr %r2,%r2 # clockid_t (int)
1282 lgfr %r3,%r3 # int
1283 llgtr %r4,%r4 # struct compat_timespec *
1284 llgtr %r5,%r5 # struct compat_timespec *
1285 jg compat_sys_clock_nanosleep
1286
1287 .globl sys32_timer_create_wrapper
1288sys32_timer_create_wrapper:
1289 lgfr %r2,%r2 # timer_t (int)
1290 llgtr %r3,%r3 # struct compat_sigevent *
1291 llgtr %r4,%r4 # timer_t *
1292 jg sys32_timer_create
1293
1294 .globl sys32_timer_settime_wrapper
1295sys32_timer_settime_wrapper:
1296 lgfr %r2,%r2 # timer_t (int)
1297 lgfr %r3,%r3 # int
1298 llgtr %r4,%r4 # struct compat_itimerspec *
1299 llgtr %r5,%r5 # struct compat_itimerspec *
1300 jg compat_sys_timer_settime
1301
1302 .globl sys32_timer_gettime_wrapper
1303sys32_timer_gettime_wrapper:
1304 lgfr %r2,%r2 # timer_t (int)
1305 llgtr %r3,%r3 # struct compat_itimerspec *
1306 jg compat_sys_timer_gettime
1307
1308 .globl sys32_timer_getoverrun_wrapper
1309sys32_timer_getoverrun_wrapper:
1310 lgfr %r2,%r2 # timer_t (int)
1311 jg sys_timer_getoverrun
1312
1313 .globl sys32_timer_delete_wrapper
1314sys32_timer_delete_wrapper:
1315 lgfr %r2,%r2 # timer_t (int)
1316 jg sys_timer_delete
1317
1318 .globl sys32_io_setup_wrapper
1319sys32_io_setup_wrapper:
1320 llgfr %r2,%r2 # unsigned int
1321 llgtr %r3,%r3 # u32 *
1322 jg compat_sys_io_setup
1323
1324 .globl sys32_io_destroy_wrapper
1325sys32_io_destroy_wrapper:
1326 llgfr %r2,%r2 # (aio_context_t) u32
1327 jg sys_io_destroy
1328
1329 .globl sys32_io_getevents_wrapper
1330sys32_io_getevents_wrapper:
1331 llgfr %r2,%r2 # (aio_context_t) u32
1332 lgfr %r3,%r3 # long
1333 lgfr %r4,%r4 # long
1334 llgtr %r5,%r5 # struct io_event *
1335 llgtr %r6,%r6 # struct compat_timespec *
1336 jg compat_sys_io_getevents
1337
1338 .globl sys32_io_submit_wrapper
1339sys32_io_submit_wrapper:
1340 llgfr %r2,%r2 # (aio_context_t) u32
1341 lgfr %r3,%r3 # long
1342 llgtr %r4,%r4 # struct iocb **
1343 jg compat_sys_io_submit
1344
1345 .globl sys32_io_cancel_wrapper
1346sys32_io_cancel_wrapper:
1347 llgfr %r2,%r2 # (aio_context_t) u32
1348 llgtr %r3,%r3 # struct iocb *
1349 llgtr %r4,%r4 # struct io_event *
1350 jg sys_io_cancel
1351
1352 .globl compat_sys_statfs64_wrapper
1353compat_sys_statfs64_wrapper:
1354 llgtr %r2,%r2 # const char *
1355 llgfr %r3,%r3 # compat_size_t
1356 llgtr %r4,%r4 # struct compat_statfs64 *
1357 jg compat_sys_statfs64
1358
1359 .globl compat_sys_fstatfs64_wrapper
1360compat_sys_fstatfs64_wrapper:
1361 llgfr %r2,%r2 # unsigned int fd
1362 llgfr %r3,%r3 # compat_size_t
1363 llgtr %r4,%r4 # struct compat_statfs64 *
1364 jg compat_sys_fstatfs64
1365
1366 .globl compat_sys_mq_open_wrapper
1367compat_sys_mq_open_wrapper:
1368 llgtr %r2,%r2 # const char *
1369 lgfr %r3,%r3 # int
1370 llgfr %r4,%r4 # mode_t
1371 llgtr %r5,%r5 # struct compat_mq_attr *
1372 jg compat_sys_mq_open
1373
1374 .globl sys32_mq_unlink_wrapper
1375sys32_mq_unlink_wrapper:
1376 llgtr %r2,%r2 # const char *
1377 jg sys_mq_unlink
1378
1379 .globl compat_sys_mq_timedsend_wrapper
1380compat_sys_mq_timedsend_wrapper:
1381 lgfr %r2,%r2 # mqd_t
1382 llgtr %r3,%r3 # const char *
1383 llgfr %r4,%r4 # size_t
1384 llgfr %r5,%r5 # unsigned int
1385 llgtr %r6,%r6 # const struct compat_timespec *
1386 jg compat_sys_mq_timedsend
1387
1388 .globl compat_sys_mq_timedreceive_wrapper
1389compat_sys_mq_timedreceive_wrapper:
1390 lgfr %r2,%r2 # mqd_t
1391 llgtr %r3,%r3 # char *
1392 llgfr %r4,%r4 # size_t
1393 llgtr %r5,%r5 # unsigned int *
1394 llgtr %r6,%r6 # const struct compat_timespec *
1395 jg compat_sys_mq_timedreceive
1396
1397 .globl compat_sys_mq_notify_wrapper
1398compat_sys_mq_notify_wrapper:
1399 lgfr %r2,%r2 # mqd_t
1400 llgtr %r3,%r3 # struct compat_sigevent *
1401 jg compat_sys_mq_notify
1402
1403 .globl compat_sys_mq_getsetattr_wrapper
1404compat_sys_mq_getsetattr_wrapper:
1405 lgfr %r2,%r2 # mqd_t
1406 llgtr %r3,%r3 # struct compat_mq_attr *
1407 llgtr %r4,%r4 # struct compat_mq_attr *
1408 jg compat_sys_mq_getsetattr
1409
1410 .globl compat_sys_add_key_wrapper
1411compat_sys_add_key_wrapper:
1412 llgtr %r2,%r2 # const char *
1413 llgtr %r3,%r3 # const char *
1414 llgtr %r4,%r4 # const void *
1415 llgfr %r5,%r5 # size_t
1416 llgfr %r6,%r6 # (key_serial_t) u32
1417 jg sys_add_key
1418
1419 .globl compat_sys_request_key_wrapper
1420compat_sys_request_key_wrapper:
1421 llgtr %r2,%r2 # const char *
1422 llgtr %r3,%r3 # const char *
1423 llgtr %r4,%r4 # const void *
1424 llgfr %r5,%r5 # (key_serial_t) u32
1425 jg sys_request_key
1426
1427 .globl sys32_remap_file_pages_wrapper
1428sys32_remap_file_pages_wrapper:
1429 llgfr %r2,%r2 # unsigned long
1430 llgfr %r3,%r3 # unsigned long
1431 llgfr %r4,%r4 # unsigned long
1432 llgfr %r5,%r5 # unsigned long
1433 llgfr %r6,%r6 # unsigned long
1434 jg sys_remap_file_pages
1435
1436 .globl compat_sys_waitid_wrapper
1437compat_sys_waitid_wrapper:
1438 lgfr %r2,%r2 # int
1439 lgfr %r3,%r3 # pid_t
1440 llgtr %r4,%r4 # siginfo_emu31_t *
1441 lgfr %r5,%r5 # int
1442 llgtr %r6,%r6 # struct rusage_emu31 *
1443 jg compat_sys_waitid
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
new file mode 100644
index 000000000000..44df8dc07c59
--- /dev/null
+++ b/arch/s390/kernel/cpcmd.c
@@ -0,0 +1,111 @@
1/*
2 * arch/s390/kernel/cpcmd.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Christian Borntraeger (cborntra@de.ibm.com),
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/stddef.h>
15#include <linux/string.h>
16#include <asm/ebcdic.h>
17#include <asm/cpcmd.h>
18#include <asm/system.h>
19
20static DEFINE_SPINLOCK(cpcmd_lock);
21static char cpcmd_buf[240];
22
23/*
24 * the caller of __cpcmd has to ensure that the response buffer is below 2 GB
25 */
26void __cpcmd(char *cmd, char *response, int rlen)
27{
28 const int mask = 0x40000000L;
29 unsigned long flags;
30 int cmdlen;
31
32 spin_lock_irqsave(&cpcmd_lock, flags);
33 cmdlen = strlen(cmd);
34 BUG_ON(cmdlen > 240);
35 strcpy(cpcmd_buf, cmd);
36 ASCEBC(cpcmd_buf, cmdlen);
37
38 if (response != NULL && rlen > 0) {
39 memset(response, 0, rlen);
40#ifndef CONFIG_ARCH_S390X
41 asm volatile ("LRA 2,0(%0)\n\t"
42 "LR 4,%1\n\t"
43 "O 4,%4\n\t"
44 "LRA 3,0(%2)\n\t"
45 "LR 5,%3\n\t"
46 ".long 0x83240008 # Diagnose X'08'\n\t"
47 : /* no output */
48 : "a" (cpcmd_buf), "d" (cmdlen),
49 "a" (response), "d" (rlen), "m" (mask)
50 : "cc", "2", "3", "4", "5" );
51#else /* CONFIG_ARCH_S390X */
52 asm volatile (" lrag 2,0(%0)\n"
53 " lgr 4,%1\n"
54 " o 4,%4\n"
55 " lrag 3,0(%2)\n"
56 " lgr 5,%3\n"
57 " sam31\n"
58 " .long 0x83240008 # Diagnose X'08'\n"
59 " sam64"
60 : /* no output */
61 : "a" (cpcmd_buf), "d" (cmdlen),
62 "a" (response), "d" (rlen), "m" (mask)
63 : "cc", "2", "3", "4", "5" );
64#endif /* CONFIG_ARCH_S390X */
65 EBCASC(response, rlen);
66 } else {
67#ifndef CONFIG_ARCH_S390X
68 asm volatile ("LRA 2,0(%0)\n\t"
69 "LR 3,%1\n\t"
70 ".long 0x83230008 # Diagnose X'08'\n\t"
71 : /* no output */
72 : "a" (cpcmd_buf), "d" (cmdlen)
73 : "2", "3" );
74#else /* CONFIG_ARCH_S390X */
75 asm volatile (" lrag 2,0(%0)\n"
76 " lgr 3,%1\n"
77 " sam31\n"
78 " .long 0x83230008 # Diagnose X'08'\n"
79 " sam64"
80 : /* no output */
81 : "a" (cpcmd_buf), "d" (cmdlen)
82 : "2", "3" );
83#endif /* CONFIG_ARCH_S390X */
84 }
85 spin_unlock_irqrestore(&cpcmd_lock, flags);
86}
87
88EXPORT_SYMBOL(__cpcmd);
89
90#ifdef CONFIG_ARCH_S390X
91void cpcmd(char *cmd, char *response, int rlen)
92{
93 char *lowbuf;
94 if ((rlen == 0) || (response == NULL)
95 || !((unsigned long)response >> 31))
96 __cpcmd(cmd, response, rlen);
97 else {
98 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
99 if (!lowbuf) {
100 printk(KERN_WARNING
101 "cpcmd: could not allocate response buffer\n");
102 return;
103 }
104 __cpcmd(cmd, lowbuf, rlen);
105 memcpy(response, lowbuf, rlen);
106 kfree(lowbuf);
107 }
108}
109
110EXPORT_SYMBOL(cpcmd);
111#endif /* CONFIG_ARCH_S390X */
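A typical caller hands cpcmd() an ASCII CP command and a response buffer; the routine converts the command to EBCDIC, issues diagnose X'08', and converts the response back to ASCII. A hedged usage sketch (the command and buffer size are illustrative; on 31-bit kernels the header presumably maps cpcmd straight to __cpcmd, since every buffer there is already below 2 GB):

	/* Editorial sketch: querying the guest's user ID under z/VM.
	 * "QUERY USERID" and the 240-byte buffer are illustrative only. */
	char response[240];

	cpcmd("QUERY USERID", response, sizeof(response));
	printk(KERN_INFO "CP replied: %s\n", response);
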
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
new file mode 100644
index 000000000000..91f8ce5543d3
--- /dev/null
+++ b/arch/s390/kernel/debug.c
@@ -0,0 +1,1286 @@
1/*
2 * arch/s390/kernel/debug.c
3 * S/390 debug facility
4 *
5 * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
7 * Author(s): Michael Holzheu (holzheu@de.ibm.com),
8 * Holger Smolinski (Holger.Smolinski@de.ibm.com)
9 *
10 * Bugreports to: <Linux390@de.ibm.com>
11 */
12
13#include <linux/config.h>
14#include <linux/stddef.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/ctype.h>
19#include <linux/sysctl.h>
20#include <asm/uaccess.h>
21#include <asm/semaphore.h>
22
23#include <linux/module.h>
24#include <linux/init.h>
25
26#include <asm/debug.h>
27
28#define DEBUG_PROLOG_ENTRY -1
29
30/* typedefs */
31
32typedef struct file_private_info {
33 loff_t offset; /* offset of last read in file */
 34 int act_area; /* number of last formatted area */
 35 int act_entry; /* last formatted entry (offset */
 36 /* relative to beginning of last */
 37 /* formatted area) */
 38 size_t act_entry_offset; /* up to this offset we copied */
 39 /* the last formatted entry to */
 40 /* userland in the last read */
41 char temp_buf[2048]; /* buffer for output */
42 debug_info_t *debug_info_org; /* original debug information */
43 debug_info_t *debug_info_snap; /* snapshot of debug information */
44 struct debug_view *view; /* used view of debug info */
45} file_private_info_t;
46
47typedef struct
48{
49 char *string;
50 /*
 51 * This assumes that all args are converted into longs;
 52 * on S/390 this is the case for all parameter types
 53 * except floats and long long (32 bit)
54 *
55 */
56 long args[0];
57} debug_sprintf_entry_t;
58
59
60extern void tod_to_timeval(uint64_t todval, struct timeval *xtime);
61
 62/* internal function prototypes */
63
64static int debug_init(void);
65static ssize_t debug_output(struct file *file, char __user *user_buf,
66 size_t user_len, loff_t * offset);
67static ssize_t debug_input(struct file *file, const char __user *user_buf,
68 size_t user_len, loff_t * offset);
69static int debug_open(struct inode *inode, struct file *file);
70static int debug_close(struct inode *inode, struct file *file);
71static debug_info_t* debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
72static void debug_info_get(debug_info_t *);
73static void debug_info_put(debug_info_t *);
74static int debug_prolog_level_fn(debug_info_t * id,
75 struct debug_view *view, char *out_buf);
76static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
77 struct file *file, const char __user *user_buf,
78 size_t user_buf_size, loff_t * offset);
79static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
80 struct file *file, const char __user *user_buf,
81 size_t user_buf_size, loff_t * offset);
82static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
83 char *out_buf, const char *in_buf);
84static int debug_raw_format_fn(debug_info_t * id,
85 struct debug_view *view, char *out_buf,
86 const char *in_buf);
87static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
88 int area, debug_entry_t * entry, char *out_buf);
89
90static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
91 char *out_buf, debug_sprintf_entry_t *curr_event);
92
93/* globals */
94
95struct debug_view debug_raw_view = {
96 "raw",
97 NULL,
98 &debug_raw_header_fn,
99 &debug_raw_format_fn,
100 NULL,
101 NULL
102};
103
104struct debug_view debug_hex_ascii_view = {
105 "hex_ascii",
106 NULL,
107 &debug_dflt_header_fn,
108 &debug_hex_ascii_format_fn,
109 NULL,
110 NULL
111};
112
113struct debug_view debug_level_view = {
114 "level",
115 &debug_prolog_level_fn,
116 NULL,
117 NULL,
118 &debug_input_level_fn,
119 NULL
120};
121
122struct debug_view debug_flush_view = {
123 "flush",
124 NULL,
125 NULL,
126 NULL,
127 &debug_input_flush_fn,
128 NULL
129};
130
131struct debug_view debug_sprintf_view = {
132 "sprintf",
133 NULL,
134 &debug_dflt_header_fn,
135 (debug_format_proc_t*)&debug_sprintf_format_fn,
136 NULL,
137 NULL
138};
139
140
141unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
142
143/* static globals */
144
145static debug_info_t *debug_area_first = NULL;
146static debug_info_t *debug_area_last = NULL;
147DECLARE_MUTEX(debug_lock);
148
149static int initialized;
150
151static struct file_operations debug_file_ops = {
152 .owner = THIS_MODULE,
153 .read = debug_output,
154 .write = debug_input,
155 .open = debug_open,
156 .release = debug_close,
157};
158
159static struct proc_dir_entry *debug_proc_root_entry;
160
161/* functions */
162
163/*
164 * debug_info_alloc
165 * - alloc new debug-info
166 */
167
168static debug_info_t* debug_info_alloc(char *name, int page_order,
169 int nr_areas, int buf_size)
170{
171 debug_info_t* rc;
172 int i;
173
174 /* alloc everything */
175
176 rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_ATOMIC);
177 if(!rc)
178 goto fail_malloc_rc;
179 rc->active_entry = (int*)kmalloc(nr_areas * sizeof(int), GFP_ATOMIC);
180 if(!rc->active_entry)
181 goto fail_malloc_active_entry;
182 memset(rc->active_entry, 0, nr_areas * sizeof(int));
183 rc->areas = (debug_entry_t **) kmalloc(nr_areas *
184 sizeof(debug_entry_t *),
185 GFP_ATOMIC);
186 if (!rc->areas)
187 goto fail_malloc_areas;
188 for (i = 0; i < nr_areas; i++) {
189 rc->areas[i] = (debug_entry_t *) __get_free_pages(GFP_ATOMIC,
190 page_order);
191 if (!rc->areas[i]) {
192 for (i--; i >= 0; i--) {
193 free_pages((unsigned long) rc->areas[i],
194 page_order);
195 }
196 goto fail_malloc_areas2;
197 } else {
198 memset(rc->areas[i], 0, PAGE_SIZE << page_order);
199 }
200 }
201
202 /* initialize members */
203
204 spin_lock_init(&rc->lock);
205 rc->page_order = page_order;
206 rc->nr_areas = nr_areas;
207 rc->active_area = 0;
208 rc->level = DEBUG_DEFAULT_LEVEL;
209 rc->buf_size = buf_size;
210 rc->entry_size = sizeof(debug_entry_t) + buf_size;
211 strlcpy(rc->name, name, sizeof(rc->name));
212 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
213#ifdef CONFIG_PROC_FS
214 memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
215 sizeof(struct proc_dir_entry*));
216#endif /* CONFIG_PROC_FS */
217 atomic_set(&(rc->ref_count), 0);
218
219 return rc;
220
221fail_malloc_areas2:
222 kfree(rc->areas);
223fail_malloc_areas:
224 kfree(rc->active_entry);
225fail_malloc_active_entry:
226 kfree(rc);
227fail_malloc_rc:
228 return NULL;
229}
230
231/*
232 * debug_info_free
 233 * - free the memory used by a debug-info
234 */
235
236static void debug_info_free(debug_info_t* db_info){
237 int i;
238 for (i = 0; i < db_info->nr_areas; i++) {
239 free_pages((unsigned long) db_info->areas[i],
240 db_info->page_order);
241 }
242 kfree(db_info->areas);
243 kfree(db_info->active_entry);
244 kfree(db_info);
245}
246
247/*
248 * debug_info_create
249 * - create new debug-info
250 */
251
252static debug_info_t* debug_info_create(char *name, int page_order,
253 int nr_areas, int buf_size)
254{
255 debug_info_t* rc;
256
257 rc = debug_info_alloc(name, page_order, nr_areas, buf_size);
258 if(!rc)
259 goto out;
260
261
 262 /* create proc root directory */
263 rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry);
264
265 /* append new element to linked list */
266 if (debug_area_first == NULL) {
267 /* first element in list */
268 debug_area_first = rc;
269 rc->prev = NULL;
270 } else {
271 /* append element to end of list */
272 debug_area_last->next = rc;
273 rc->prev = debug_area_last;
274 }
275 debug_area_last = rc;
276 rc->next = NULL;
277
278 debug_info_get(rc);
279out:
280 return rc;
281}
282
283/*
284 * debug_info_copy
285 * - copy debug-info
286 */
287
288static debug_info_t* debug_info_copy(debug_info_t* in)
289{
290 int i;
291 debug_info_t* rc;
292 rc = debug_info_alloc(in->name, in->page_order,
293 in->nr_areas, in->buf_size);
294 if(!rc)
295 goto out;
296
297 for(i = 0; i < in->nr_areas; i++){
298 memcpy(rc->areas[i],in->areas[i], PAGE_SIZE << in->page_order);
299 }
300out:
301 return rc;
302}
303
304/*
305 * debug_info_get
306 * - increments reference count for debug-info
307 */
308
309static void debug_info_get(debug_info_t * db_info)
310{
311 if (db_info)
312 atomic_inc(&db_info->ref_count);
313}
314
315/*
316 * debug_info_put:
317 * - decreases reference count for debug-info and frees it if necessary
318 */
319
320static void debug_info_put(debug_info_t *db_info)
321{
322 int i;
323
324 if (!db_info)
325 return;
326 if (atomic_dec_and_test(&db_info->ref_count)) {
327#ifdef DEBUG
328 printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
329 db_info, db_info->name);
330#endif
331 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
332 if (db_info->views[i] == NULL)
333 continue;
334#ifdef CONFIG_PROC_FS
335 remove_proc_entry(db_info->proc_entries[i]->name,
336 db_info->proc_root_entry);
337#endif
338 }
339#ifdef CONFIG_PROC_FS
340 remove_proc_entry(db_info->proc_root_entry->name,
341 debug_proc_root_entry);
342#endif
343 if(db_info == debug_area_first)
344 debug_area_first = db_info->next;
345 if(db_info == debug_area_last)
346 debug_area_last = db_info->prev;
347 if(db_info->prev) db_info->prev->next = db_info->next;
348 if(db_info->next) db_info->next->prev = db_info->prev;
349 debug_info_free(db_info);
350 }
351}
352
353/*
354 * debug_format_entry:
 355 * - format one debug entry and return size of formatted data
356 */
357
358static int debug_format_entry(file_private_info_t *p_info)
359{
360 debug_info_t *id_org = p_info->debug_info_org;
361 debug_info_t *id_snap = p_info->debug_info_snap;
362 struct debug_view *view = p_info->view;
363 debug_entry_t *act_entry;
364 size_t len = 0;
365 if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
366 /* print prolog */
367 if (view->prolog_proc)
368 len += view->prolog_proc(id_org, view,p_info->temp_buf);
369 goto out;
370 }
371
372 act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area] +
373 p_info->act_entry);
374
375 if (act_entry->id.stck == 0LL)
376 goto out; /* empty entry */
377 if (view->header_proc)
378 len += view->header_proc(id_org, view, p_info->act_area,
379 act_entry, p_info->temp_buf + len);
380 if (view->format_proc)
381 len += view->format_proc(id_org, view, p_info->temp_buf + len,
382 DEBUG_DATA(act_entry));
383 out:
384 return len;
385}
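debug_format_entry() drives a view's output callbacks: prolog_proc once per open (the DEBUG_PROLOG_ENTRY state), then header_proc and format_proc for every non-empty entry. A minimal read-only view built on that contract could look like this (editorial sketch; my_format_fn and my_view are hypothetical names, the field order follows the initializers above):

	/* Editorial sketch: a read-only custom view; my_format_fn just
	 * renders the first long of each entry as hex. */
	static int my_format_fn(debug_info_t *id, struct debug_view *view,
				char *out_buf, const char *in_buf)
	{
		return sprintf(out_buf, "%016lx\n",
			       *(const unsigned long *) in_buf);
	}

	static struct debug_view my_view = {
		"myview",		/* name of the proc file */
		NULL,			/* prolog_proc: no prolog line */
		&debug_dflt_header_fn,	/* header_proc: default header */
		&my_format_fn,		/* format_proc */
		NULL,			/* input_proc: read-only */
		NULL			/* private_data */
	};
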
386
387/*
388 * debug_next_entry:
 389 * - advance p_info to the next entry
390 */
391
392extern inline int debug_next_entry(file_private_info_t *p_info)
393{
394 debug_info_t *id = p_info->debug_info_snap;
395 if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
396 p_info->act_entry = 0;
397 goto out;
398 }
399 if ((p_info->act_entry += id->entry_size)
400 > ((PAGE_SIZE << (id->page_order))
401 - id->entry_size)){
402
403 /* next area */
404 p_info->act_entry = 0;
405 p_info->act_area++;
406 if(p_info->act_area >= id->nr_areas)
407 return 1;
408 }
409out:
410 return 0;
411}
412
413/*
414 * debug_output:
415 * - called for user read()
 416 * - copies formatted debug entries to the user buffer
417 */
418
419static ssize_t debug_output(struct file *file, /* file descriptor */
420 char __user *user_buf, /* user buffer */
421 size_t len, /* length of buffer */
422 loff_t *offset) /* offset in the file */
423{
424 size_t count = 0;
425 size_t entry_offset, size = 0;
426 file_private_info_t *p_info;
427
428 p_info = ((file_private_info_t *) file->private_data);
429 if (*offset != p_info->offset)
430 return -EPIPE;
431 if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
432 return 0;
433
434 entry_offset = p_info->act_entry_offset;
435
436 while(count < len){
437 size = debug_format_entry(p_info);
438 size = min((len - count), (size - entry_offset));
439
440 if(size){
441 if (copy_to_user(user_buf + count,
442 p_info->temp_buf + entry_offset, size))
443 return -EFAULT;
444 }
445 count += size;
446 entry_offset = 0;
447 if(count != len)
448 if(debug_next_entry(p_info))
449 goto out;
450 }
451out:
452 p_info->offset = *offset + count;
453 p_info->act_entry_offset = size;
454 *offset = p_info->offset;
455 return count;
456}
457
458/*
459 * debug_input:
460 * - called for user write()
461 * - calls input function of view
462 */
463
464static ssize_t debug_input(struct file *file,
465 const char __user *user_buf, size_t length,
466 loff_t *offset)
467{
468 int rc = 0;
469 file_private_info_t *p_info;
470
471 down(&debug_lock);
472 p_info = ((file_private_info_t *) file->private_data);
473 if (p_info->view->input_proc)
474 rc = p_info->view->input_proc(p_info->debug_info_org,
475 p_info->view, file, user_buf,
476 length, offset);
477 else
478 rc = -EPERM;
479 up(&debug_lock);
480 return rc; /* number of input characters */
481}
482
483/*
484 * debug_open:
485 * - called for user open()
 486 * - takes a consistent snapshot of the debug areas and stores it
 487 * in the private_data area of the file handle
488 */
489
490static int debug_open(struct inode *inode, struct file *file)
491{
492 int i = 0, rc = 0;
493 file_private_info_t *p_info;
494 debug_info_t *debug_info, *debug_info_snapshot;
495
496#ifdef DEBUG
497 printk("debug_open\n");
498#endif
499 down(&debug_lock);
500
501 /* find debug log and view */
502
503 debug_info = debug_area_first;
504 while(debug_info != NULL){
505 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
506 if (debug_info->views[i] == NULL)
507 continue;
508 else if (debug_info->proc_entries[i] ==
509 PDE(file->f_dentry->d_inode)) {
510 goto found; /* found view ! */
511 }
512 }
513 debug_info = debug_info->next;
514 }
515 /* no entry found */
516 rc = -EINVAL;
517 goto out;
518
519 found:
520
521 /* make snapshot of current debug areas to get it consistent */
522
523 debug_info_snapshot = debug_info_copy(debug_info);
524
525 if(!debug_info_snapshot){
526#ifdef DEBUG
527 printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n");
528#endif
529 rc = -ENOMEM;
530 goto out;
531 }
532
533 if ((file->private_data =
534 kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) {
535#ifdef DEBUG
536 printk(KERN_ERR "debug_open: kmalloc failed\n");
537#endif
538 debug_info_free(debug_info_snapshot);
539 rc = -ENOMEM;
540 goto out;
541 }
542 p_info = (file_private_info_t *) file->private_data;
543 p_info->offset = 0;
544 p_info->debug_info_snap = debug_info_snapshot;
545 p_info->debug_info_org = debug_info;
546 p_info->view = debug_info->views[i];
547 p_info->act_area = 0;
548 p_info->act_entry = DEBUG_PROLOG_ENTRY;
549 p_info->act_entry_offset = 0;
550
551 debug_info_get(debug_info);
552
553 out:
554 up(&debug_lock);
555 return rc;
556}
557
558/*
559 * debug_close:
560 * - called for user close()
561 * - deletes private_data area of the file handle
562 */
563
564static int debug_close(struct inode *inode, struct file *file)
565{
566 file_private_info_t *p_info;
567#ifdef DEBUG
568 printk("debug_close\n");
569#endif
570 p_info = (file_private_info_t *) file->private_data;
571 debug_info_free(p_info->debug_info_snap);
572 debug_info_put(p_info->debug_info_org);
573 kfree(file->private_data);
574 return 0; /* success */
575}
576
577/*
578 * debug_register:
579 * - creates and initializes debug area for the caller
580 * - returns handle for debug area
581 */
582
583debug_info_t *debug_register
584 (char *name, int page_order, int nr_areas, int buf_size)
585{
586 debug_info_t *rc = NULL;
587
588 if (!initialized)
589 BUG();
590 down(&debug_lock);
591
592 /* create new debug_info */
593
594 rc = debug_info_create(name, page_order, nr_areas, buf_size);
595 if(!rc)
596 goto out;
597 debug_register_view(rc, &debug_level_view);
598 debug_register_view(rc, &debug_flush_view);
599#ifdef DEBUG
600 printk(KERN_INFO
601 "debug: reserved %d areas of %d pages for debugging %s\n",
602 nr_areas, 1 << page_order, rc->name);
603#endif
604 out:
605 if (rc == NULL){
606 printk(KERN_ERR "debug: debug_register failed for %s\n",name);
607 }
608 up(&debug_lock);
609 return rc;
610}
611
612/*
613 * debug_unregister:
614 * - give back debug area
615 */
616
617void debug_unregister(debug_info_t * id)
618{
619 if (!id)
620 goto out;
621 down(&debug_lock);
622#ifdef DEBUG
623 printk(KERN_INFO "debug: unregistering %s\n", id->name);
624#endif
625 debug_info_put(id);
626 up(&debug_lock);
627
628 out:
629 return;
630}
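Putting debug_register(), debug_register_view(), debug_set_level() and the event calls together, a driver typically sets up its trace area once at init time and tears it down on exit. A hedged usage sketch ("mydrv" and all sizes/levels are illustrative; debug_text_event is the convenience macro from asm/debug.h):

	/* Editorial sketch: typical driver usage of the debug feature. */
	static debug_info_t *mydrv_dbf;

	static int __init mydrv_debug_setup(void)
	{
		/* 2 areas of 1 page each, 16 data bytes per entry */
		mydrv_dbf = debug_register("mydrv", 0, 2, 16);
		if (!mydrv_dbf)
			return -ENOMEM;
		debug_register_view(mydrv_dbf, &debug_hex_ascii_view);
		debug_set_level(mydrv_dbf, 3);
		debug_text_event(mydrv_dbf, 1, "init ok");
		return 0;
	}

	static void __exit mydrv_debug_cleanup(void)
	{
		debug_unregister(mydrv_dbf);
	}
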
631
632/*
633 * debug_set_level:
 634 * - set the current debug level
635 */
636
637void debug_set_level(debug_info_t* id, int new_level)
638{
639 unsigned long flags;
640 if(!id)
641 return;
642 spin_lock_irqsave(&id->lock,flags);
643 if(new_level == DEBUG_OFF_LEVEL){
644 id->level = DEBUG_OFF_LEVEL;
645 printk(KERN_INFO "debug: %s: switched off\n",id->name);
646 } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
647 printk(KERN_INFO
648 "debug: %s: level %i is out of range (%i - %i)\n",
649 id->name, new_level, 0, DEBUG_MAX_LEVEL);
650 } else {
651 id->level = new_level;
652#ifdef DEBUG
653 printk(KERN_INFO
654 "debug: %s: new level %i\n",id->name,id->level);
655#endif
656 }
657 spin_unlock_irqrestore(&id->lock,flags);
658}
659
660
661/*
662 * proceed_active_entry:
663 * - set active entry to next in the ring buffer
664 */
665
666extern inline void proceed_active_entry(debug_info_t * id)
667{
668 if ((id->active_entry[id->active_area] += id->entry_size)
669 > ((PAGE_SIZE << (id->page_order)) - id->entry_size))
670 id->active_entry[id->active_area] = 0;
671}
672
673/*
674 * proceed_active_area:
675 * - set active area to next in the ring buffer
676 */
677
678extern inline void proceed_active_area(debug_info_t * id)
679{
680 id->active_area++;
681 id->active_area = id->active_area % id->nr_areas;
682}
683
684/*
685 * get_active_entry:
686 */
687
688extern inline debug_entry_t *get_active_entry(debug_info_t * id)
689{
690 return (debug_entry_t *) ((char *) id->areas[id->active_area] +
691 id->active_entry[id->active_area]);
692}
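The three helpers above implement the write side of the ring buffer: the entry offset advances within an area until the next fixed-size entry would no longer fit, then wraps to offset 0; exceptions additionally rotate to the next area. The wrap logic of proceed_active_entry() as a C sketch (editorial illustration):

	/* Editorial sketch: one area holds (PAGE_SIZE << page_order)
	 * bytes of fixed-size entries; wrap when the next entry would
	 * overrun the area, overwriting the oldest data. */
	static void advance_entry(int *offset, int entry_size, int area_size)
	{
		*offset += entry_size;
		if (*offset > area_size - entry_size)
			*offset = 0;
	}
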
693
694/*
695 * debug_finish_entry:
696 * - set timestamp, caller address, cpu number etc.
697 */
698
699extern inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active,
700 int level, int exception)
701{
702 STCK(active->id.stck);
703 active->id.fields.cpuid = smp_processor_id();
704 active->caller = __builtin_return_address(0);
705 active->id.fields.exception = exception;
706 active->id.fields.level = level;
707 proceed_active_entry(id);
708 if(exception)
709 proceed_active_area(id);
710}
711
712static int debug_stoppable=1;
713static int debug_active=1;
714
715#define CTL_S390DBF 5677
716#define CTL_S390DBF_STOPPABLE 5678
717#define CTL_S390DBF_ACTIVE 5679
718
719/*
 720 * proc handler for the debug_active sysctl
721 * always allow read, allow write only if debug_stoppable is set or
722 * if debug_active is already off
723 */
724static int s390dbf_procactive(ctl_table *table, int write, struct file *filp,
725 void __user *buffer, size_t *lenp, loff_t *ppos)
726{
727 if (!write || debug_stoppable || !debug_active)
728 return proc_dointvec(table, write, filp, buffer, lenp, ppos);
729 else
730 return 0;
731}
732
733
734static struct ctl_table s390dbf_table[] = {
735 {
736 .ctl_name = CTL_S390DBF_STOPPABLE,
737 .procname = "debug_stoppable",
738 .data = &debug_stoppable,
739 .maxlen = sizeof(int),
740 .mode = S_IRUGO | S_IWUSR,
741 .proc_handler = &proc_dointvec,
742 .strategy = &sysctl_intvec,
743 },
744 {
745 .ctl_name = CTL_S390DBF_ACTIVE,
746 .procname = "debug_active",
747 .data = &debug_active,
748 .maxlen = sizeof(int),
749 .mode = S_IRUGO | S_IWUSR,
750 .proc_handler = &s390dbf_procactive,
751 .strategy = &sysctl_intvec,
752 },
753 { .ctl_name = 0 }
754};
755
756static struct ctl_table s390dbf_dir_table[] = {
757 {
758 .ctl_name = CTL_S390DBF,
759 .procname = "s390dbf",
760 .maxlen = 0,
761 .mode = S_IRUGO | S_IXUGO,
762 .child = s390dbf_table,
763 },
764 { .ctl_name = 0 }
765};
766
767struct ctl_table_header *s390dbf_sysctl_header;
768
769void debug_stop_all(void)
770{
771 if (debug_stoppable)
772 debug_active = 0;
773}
774
775
776/*
777 * debug_event_common:
778 * - write debug entry with given size
779 */
780
781debug_entry_t *debug_event_common(debug_info_t * id, int level, const void *buf,
782 int len)
783{
784 unsigned long flags;
785 debug_entry_t *active;
786
787 if (!debug_active)
788 return NULL;
789 spin_lock_irqsave(&id->lock, flags);
790 active = get_active_entry(id);
791 memset(DEBUG_DATA(active), 0, id->buf_size);
792 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
793 debug_finish_entry(id, active, level, 0);
794 spin_unlock_irqrestore(&id->lock, flags);
795
796 return active;
797}
798
799/*
800 * debug_exception_common:
801 * - write debug entry with given size and switch to next debug area
802 */
803
804debug_entry_t *debug_exception_common(debug_info_t * id, int level,
805 const void *buf, int len)
806{
807 unsigned long flags;
808 debug_entry_t *active;
809
810 if (!debug_active)
811 return NULL;
812 spin_lock_irqsave(&id->lock, flags);
813 active = get_active_entry(id);
814 memset(DEBUG_DATA(active), 0, id->buf_size);
815 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
816 debug_finish_entry(id, active, level, 1);
817 spin_unlock_irqrestore(&id->lock, flags);
818
819 return active;
820}
821
822/*
823 * counts arguments in format string for sprintf view
824 */
825
826extern inline int debug_count_numargs(char *string)
827{
828 int numargs=0;
829
830 while(*string) {
831 if(*string++=='%')
832 numargs++;
833 }
834 return(numargs);
835}
836
837/*
838 * debug_sprintf_event:
839 */
840
841debug_entry_t *debug_sprintf_event(debug_info_t* id,
842 int level,char *string,...)
843{
844 va_list ap;
845 int numargs,idx;
846 unsigned long flags;
847 debug_sprintf_entry_t *curr_event;
848 debug_entry_t *active;
849
850 if((!id) || (level > id->level))
851 return NULL;
852 if (!debug_active)
853 return NULL;
854 numargs=debug_count_numargs(string);
855
856 spin_lock_irqsave(&id->lock, flags);
857 active = get_active_entry(id);
858 curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
859 va_start(ap,string);
860 curr_event->string=string;
861 for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
862 curr_event->args[idx]=va_arg(ap,long);
863 va_end(ap);
864 debug_finish_entry(id, active, level, 0);
865 spin_unlock_irqrestore(&id->lock, flags);
866
867 return active;
868}
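Note that debug_sprintf_event() stores only the format-string pointer and the raw long arguments; the sprintf view formats them much later, at read time. Format strings therefore have to stay valid (normally string literals), and %s arguments pointing at transient buffers are unsafe. A short sketch of the distinction (editorial; assumes a registered id):

	/* Editorial sketch: safe vs. unsafe debug_sprintf_event() use. */
	debug_sprintf_event(id, 2, "rc=%d flags=%lx", rc, flags); /* fine */

	/*
	 * Unsafe: only the pointer to tmp is recorded, so tmp's contents
	 * may be gone by the time the sprintf view formats the entry:
	 *
	 *	debug_sprintf_event(id, 2, "name=%s", tmp);
	 */
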
869
870/*
871 * debug_sprintf_exception:
872 */
873
874debug_entry_t *debug_sprintf_exception(debug_info_t* id,
875 int level,char *string,...)
876{
877 va_list ap;
878 int numargs,idx;
879 unsigned long flags;
880 debug_sprintf_entry_t *curr_event;
881 debug_entry_t *active;
882
883 if((!id) || (level > id->level))
884 return NULL;
885 if (!debug_active)
886 return NULL;
887
888 numargs=debug_count_numargs(string);
889
890 spin_lock_irqsave(&id->lock, flags);
891 active = get_active_entry(id);
892 curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
893 va_start(ap,string);
894 curr_event->string=string;
895 for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
896 curr_event->args[idx]=va_arg(ap,long);
897 va_end(ap);
898 debug_finish_entry(id, active, level, 1);
899 spin_unlock_irqrestore(&id->lock, flags);
900
901 return active;
902}
903
904/*
905 * debug_init:
906 * - is called exactly once to initialize the debug feature
907 */
908
909static int __init debug_init(void)
910{
911 int rc = 0;
912
913 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table, 1);
914 down(&debug_lock);
915#ifdef CONFIG_PROC_FS
916 debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL);
917#endif /* CONFIG_PROC_FS */
918 printk(KERN_INFO "debug: Initialization complete\n");
919 initialized = 1;
920 up(&debug_lock);
921
922 return rc;
923}
924
925/*
926 * debug_register_view:
927 */
928
929int debug_register_view(debug_info_t * id, struct debug_view *view)
930{
931 int rc = 0;
932 int i;
933 unsigned long flags;
934 mode_t mode = S_IFREG;
935 struct proc_dir_entry *pde;
936
937 if (!id)
938 goto out;
939 if (view->prolog_proc || view->format_proc || view->header_proc)
940 mode |= S_IRUSR;
941 if (view->input_proc)
942 mode |= S_IWUSR;
943 pde = create_proc_entry(view->name, mode, id->proc_root_entry);
944 if (!pde){
945 printk(KERN_WARNING "debug: create_proc_entry() failed! Cannot register view %s/%s\n", id->name,view->name);
946 rc = -1;
947 goto out;
948 }
949
950 spin_lock_irqsave(&id->lock, flags);
951 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
952 if (id->views[i] == NULL)
953 break;
954 }
955 if (i == DEBUG_MAX_VIEWS) {
956 printk(KERN_WARNING "debug: cannot register view %s/%s\n",
957 id->name,view->name);
958 printk(KERN_WARNING
959 "debug: maximum number of views reached (%i)!\n", i);
960 remove_proc_entry(pde->name, id->proc_root_entry);
961 rc = -1;
962 }
963 else {
964 id->views[i] = view;
965 pde->proc_fops = &debug_file_ops;
966 id->proc_entries[i] = pde;
967 }
968 spin_unlock_irqrestore(&id->lock, flags);
969 out:
970 return rc;
971}
972
973/*
974 * debug_unregister_view:
975 */
976
977int debug_unregister_view(debug_info_t * id, struct debug_view *view)
978{
979 int rc = 0;
980 int i;
981 unsigned long flags;
982
983 if (!id)
984 goto out;
985 spin_lock_irqsave(&id->lock, flags);
986 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
987 if (id->views[i] == view)
988 break;
989 }
990 if (i == DEBUG_MAX_VIEWS)
991 rc = -1;
992 else {
993#ifdef CONFIG_PROC_FS
994 remove_proc_entry(id->proc_entries[i]->name,
995 id->proc_root_entry);
996#endif
997 id->views[i] = NULL;
998 rc = 0;
999 }
1000 spin_unlock_irqrestore(&id->lock, flags);
1001 out:
1002 return rc;
1003}
1004
1005/*
1006 * functions for debug-views
 1007 ***********************************
 1008 */
1009
1010/*
 1011 * prints out the current debug level
1012 */
1013
1014static int debug_prolog_level_fn(debug_info_t * id,
1015 struct debug_view *view, char *out_buf)
1016{
1017 int rc = 0;
1018
1019 if(id->level == -1) rc = sprintf(out_buf,"-\n");
1020 else rc = sprintf(out_buf, "%i\n", id->level);
1021 return rc;
1022}
1023
1024/*
1025 * reads new debug level
1026 */
1027
1028static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
1029 struct file *file, const char __user *user_buf,
1030 size_t in_buf_size, loff_t * offset)
1031{
1032 char input_buf[1];
1033 int rc = in_buf_size;
1034
1035 if (*offset != 0)
1036 goto out;
1037 if (copy_from_user(input_buf, user_buf, 1)){
1038 rc = -EFAULT;
1039 goto out;
1040 }
1041 if (isdigit(input_buf[0])) {
1042 int new_level = ((int) input_buf[0] - (int) '0');
1043 debug_set_level(id, new_level);
1044 } else if(input_buf[0] == '-') {
1045 debug_set_level(id, DEBUG_OFF_LEVEL);
1046 } else {
1047 printk(KERN_INFO "debug: level `%c` is not valid\n",
1048 input_buf[0]);
1049 }
1050 out:
1051 *offset += in_buf_size;
1052 return rc; /* number of input characters */
1053}
1054
1055
1056/*
1057 * flushes debug areas
1058 */
1059
1060void debug_flush(debug_info_t* id, int area)
1061{
1062 unsigned long flags;
1063 int i;
1064
1065 if(!id)
1066 return;
1067 spin_lock_irqsave(&id->lock,flags);
1068 if(area == DEBUG_FLUSH_ALL){
1069 id->active_area = 0;
1070 memset(id->active_entry, 0, id->nr_areas * sizeof(int));
1071 for (i = 0; i < id->nr_areas; i++)
1072 memset(id->areas[i], 0, PAGE_SIZE << id->page_order);
1073 printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
1074 } else if(area >= 0 && area < id->nr_areas) {
1075 id->active_entry[area] = 0;
1076 memset(id->areas[area], 0, PAGE_SIZE << id->page_order);
1077 printk(KERN_INFO
1078 "debug: %s: area %i has been flushed\n",
1079 id->name, area);
1080 } else {
1081 printk(KERN_INFO
1082 "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
1083 id->name, area, 0, id->nr_areas-1);
1084 }
1085 spin_unlock_irqrestore(&id->lock,flags);
1086}
1087
1088/*
1089 * view function: flushes debug areas
1090 */
1091
1092static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
1093 struct file *file, const char __user *user_buf,
1094 size_t in_buf_size, loff_t * offset)
1095{
1096 char input_buf[1];
1097 int rc = in_buf_size;
1098
1099 if (*offset != 0)
1100 goto out;
1101 if (copy_from_user(input_buf, user_buf, 1)){
1102 rc = -EFAULT;
1103 goto out;
1104 }
1105 if(input_buf[0] == '-') {
1106 debug_flush(id, DEBUG_FLUSH_ALL);
1107 goto out;
1108 }
1109 if (isdigit(input_buf[0])) {
1110 int area = ((int) input_buf[0] - (int) '0');
1111 debug_flush(id, area);
1112 goto out;
1113 }
1114
1115 printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]);
1116
1117 out:
1118 *offset += in_buf_size;
1119 return rc; /* number of input characters */
1120}
1121
1122/*
1123 * prints debug header in raw format
1124 */
1125
1126int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
1127 int area, debug_entry_t * entry, char *out_buf)
1128{
1129 int rc;
1130
1131 rc = sizeof(debug_entry_t);
1132 memcpy(out_buf,entry,sizeof(debug_entry_t));
1133 return rc;
1134}
1135
1136/*
1137 * prints debug data in raw format
1138 */
1139
1140static int debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
1141 char *out_buf, const char *in_buf)
1142{
1143 int rc;
1144
1145 rc = id->buf_size;
1146 memcpy(out_buf, in_buf, id->buf_size);
1147 return rc;
1148}
1149
1150/*
1151 * prints debug data in hex/ascii format
1152 */
1153
1154static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
1155 char *out_buf, const char *in_buf)
1156{
1157 int i, rc = 0;
1158
1159 for (i = 0; i < id->buf_size; i++) {
1160 rc += sprintf(out_buf + rc, "%02x ",
1161 ((unsigned char *) in_buf)[i]);
1162 }
1163 rc += sprintf(out_buf + rc, "| ");
1164 for (i = 0; i < id->buf_size; i++) {
1165 unsigned char c = in_buf[i];
1166 if (!isprint(c))
1167 rc += sprintf(out_buf + rc, ".");
1168 else
1169 rc += sprintf(out_buf + rc, "%c", c);
1170 }
1171 rc += sprintf(out_buf + rc, "\n");
1172 return rc;
1173}
1174
1175/*
1176 * prints header for debug entry
1177 */
1178
1179int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
1180 int area, debug_entry_t * entry, char *out_buf)
1181{
1182 struct timeval time_val;
1183 unsigned long long time;
1184 char *except_str;
1185 unsigned long caller;
1186 int rc = 0;
1187 unsigned int level;
1188
1189 level = entry->id.fields.level;
1190 time = entry->id.stck;
 1191 /* adjust TOD clock to the Unix epoch (1970) */
1192 time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
1193 tod_to_timeval(time, &time_val);
1194
1195 if (entry->id.fields.exception)
1196 except_str = "*";
1197 else
1198 except_str = "-";
1199 caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
1200 rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p ",
1201 area, time_val.tv_sec, time_val.tv_usec, level,
1202 except_str, entry->id.fields.cpuid, (void *) caller);
1203 return rc;
1204}
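Per the comment above, the net constant subtracted from the timestamp is the TOD clock value at 1970-01-01, so the remainder counts from the Unix epoch; TOD bit 51 ticks once per microsecond. What tod_to_timeval() (implemented in time.c) then has to do, as an editorial sketch:

	/* Editorial sketch of tod_to_timeval(): drop the sub-microsecond
	 * bits (the low 12) and split microseconds into sec/usec; the
	 * real helper in time.c is presumably an optimized equivalent. */
	static void tod_to_timeval_sketch(uint64_t todval, struct timeval *tv)
	{
		uint64_t usecs = todval >> 12;	/* bit 51 == 1 microsecond */

		tv->tv_sec  = usecs / 1000000;
		tv->tv_usec = usecs % 1000000;
	}
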
1205
1206/*
 1207 * prints debug data sprintf-formatted:
 1208 * debug_sprintf_event/exception calls must be used together with this view
1209 */
1210
1211#define DEBUG_SPRINTF_MAX_ARGS 10
1212
1213int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
1214 char *out_buf, debug_sprintf_entry_t *curr_event)
1215{
1216 int num_longs, num_used_args = 0,i, rc = 0;
1217 int index[DEBUG_SPRINTF_MAX_ARGS];
1218
 1219 /* number of longs that fit into one entry */
1220 num_longs = id->buf_size / sizeof(long);
1221
1222 if(num_longs < 1)
1223 goto out; /* bufsize of entry too small */
1224 if(num_longs == 1) {
1225 /* no args, we use only the string */
1226 strcpy(out_buf, curr_event->string);
1227 rc = strlen(curr_event->string);
1228 goto out;
1229 }
1230
1231 /* number of arguments used for sprintf (without the format string) */
1232 num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
1233
1234 memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
1235
1236 for(i = 0; i < num_used_args; i++)
1237 index[i] = i;
1238
1239 rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
1240 curr_event->args[index[1]], curr_event->args[index[2]],
1241 curr_event->args[index[3]], curr_event->args[index[4]],
1242 curr_event->args[index[5]], curr_event->args[index[6]],
1243 curr_event->args[index[7]], curr_event->args[index[8]],
1244 curr_event->args[index[9]]);
1245
1246out:
1247
1248 return rc;
1249}
1250
1251/*
1252 * clean up module
1253 */
1254void __exit debug_exit(void)
1255{
1256#ifdef DEBUG
1257 printk("debug_cleanup_module: \n");
1258#endif
1259#ifdef CONFIG_PROC_FS
1260 remove_proc_entry(debug_proc_root_entry->name, NULL);
1261#endif /* CONFIG_PROC_FS */
1262 unregister_sysctl_table(s390dbf_sysctl_header);
1263 return;
1264}
1265
1266/*
1267 * module definitions
1268 */
1269core_initcall(debug_init);
1270module_exit(debug_exit);
1271MODULE_LICENSE("GPL");
1272
1273EXPORT_SYMBOL(debug_register);
1274EXPORT_SYMBOL(debug_unregister);
1275EXPORT_SYMBOL(debug_set_level);
1276EXPORT_SYMBOL(debug_stop_all);
1277EXPORT_SYMBOL(debug_register_view);
1278EXPORT_SYMBOL(debug_unregister_view);
1279EXPORT_SYMBOL(debug_event_common);
1280EXPORT_SYMBOL(debug_exception_common);
1281EXPORT_SYMBOL(debug_hex_ascii_view);
1282EXPORT_SYMBOL(debug_raw_view);
1283EXPORT_SYMBOL(debug_dflt_header_fn);
1284EXPORT_SYMBOL(debug_sprintf_view);
1285EXPORT_SYMBOL(debug_sprintf_exception);
1286EXPORT_SYMBOL(debug_sprintf_event);
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
new file mode 100644
index 000000000000..bb0f973137f0
--- /dev/null
+++ b/arch/s390/kernel/ebcdic.c
@@ -0,0 +1,400 @@
1/*
2 * arch/s390/kernel/ebcdic.c
 3 * EBCDIC -> ASCII, ASCII -> EBCDIC,
4 * upper to lower case (EBCDIC) conversion tables.
5 *
6 * S390 version
7 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
9 * Martin Peschke <peschke@fh-brandenburg.de>
10 */
11
12#include <linux/module.h>
13#include <asm/types.h>
14
15/*
16 * ASCII (IBM PC 437) -> EBCDIC 037
17 */
18__u8 _ascebc[256] =
19{
20 /*00 NUL SOH STX ETX EOT ENQ ACK BEL */
21 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
22 /*08 BS HT LF VT FF CR SO SI */
23 /* ->NL */
24 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
25 /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
26 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
27 /*18 CAN EM SUB ESC FS GS RS US */
28 /* ->IGS ->IRS ->IUS */
29 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
30 /*20 SP ! " # $ % & ' */
31 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
32 /*28 ( ) * + , - . / */
33 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
34 /*30 0 1 2 3 4 5 6 7 */
35 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
36 /*38 8 9 : ; < = > ? */
37 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
38 /*40 @ A B C D E F G */
39 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
40 /*48 H I J K L M N O */
41 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
42 /*50 P Q R S T U V W */
43 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
44 /*58 X Y Z [ \ ] ^ _ */
45 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
46 /*60 ` a b c d e f g */
47 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
48 /*68 h i j k l m n o */
49 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
50 /*70 p q r s t u v w */
51 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
52 /*78 x y z { | } ~ DL */
53 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
54 /*80*/
55 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
56 /*88*/
57 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
58 /*90*/
59 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
60 /*98*/
61 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
62 /*A0*/
63 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
64 /*A8*/
65 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
66 /*B0*/
67 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
68 /*B8*/
69 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
70 /*C0*/
71 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
72 /*C8*/
73 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
74 /*D0*/
75 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
76 /*D8*/
77 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
78 /*E0 sz */
79 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
80 /*E8*/
81 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
82 /*F0*/
83 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
84 /*F8*/
85 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
86};
87
88/*
89 * EBCDIC 037 -> ASCII (IBM PC 437)
90 */
91__u8 _ebcasc[256] =
92{
93 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
94 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
95 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
96 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
97 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
98 -ENP ->LF */
99 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
100 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
101 -IUS */
102 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
103 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
104 -INP */
105 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
106 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
107 -SW */
108 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
109 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
110 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
111 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
112 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
113 /* 0x40 SP RSP ä ---- */
114 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
115 /* 0x48 . < ( + | */
116 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
117 /* 0x50 & ---- */
118 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
119 /* 0x58 ß ! $ * ) ; */
120 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
121 /* 0x60 - / ---- Ä ---- ---- ---- */
122 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
123 /* 0x68 ---- , % _ > ? */
124 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
125 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
126 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
127 /* 0x78 * ` : # @ ' = " */
128 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
129 /* 0x80 * a b c d e f g */
130 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
131 /* 0x88 h i ---- ---- ---- */
132 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
133 /* 0x90 ° j k l m n o p */
134 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
135 /* 0x98 q r ---- ---- */
136 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
137 /* 0xA0 ~ s t u v w x */
138 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
139 /* 0xA8 y z ---- ---- ---- ---- */
140 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
141 /* 0xB0 ^ ---- § ---- */
142 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
143 /* 0xB8 ---- [ ] ---- ---- ---- ---- */
144 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
145 /* 0xC0 { A B C D E F G */
146 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
147 /* 0xC8 H I ---- ö ---- */
148 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
149 /* 0xD0 } J K L M N O P */
150 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
151 /* 0xD8 Q R ---- ü */
152 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
153 /* 0xE0 \ S T U V W X */
154 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
155 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
156 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
157 /* 0xF0 0 1 2 3 4 5 6 7 */
158 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
159 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
160 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
161};
162
163
164/*
165 * ASCII (IBM PC 437) -> EBCDIC 500
166 */
167__u8 _ascebc_500[256] =
168{
169 /*00 NUL SOH STX ETX EOT ENQ ACK BEL */
170 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
171 /*08 BS HT LF VT FF CR SO SI */
172 /* ->NL */
173 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
174 /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
175 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
176 /*18 CAN EM SUB ESC FS GS RS US */
177 /* ->IGS ->IRS ->IUS */
178 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
179 /*20 SP ! " # $ % & ' */
180 0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
181 /*28 ( ) * + , - . / */
182 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
183 /*30 0 1 2 3 4 5 6 7 */
184 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
185 /*38 8 9 : ; < = > ? */
186 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
187 /*40 @ A B C D E F G */
188 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
189 /*48 H I J K L M N O */
190 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
191 /*50 P Q R S T U V W */
192 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
193 /*58 X Y Z [ \ ] ^ _ */
194 0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D,
195 /*60 ` a b c d e f g */
196 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
197 /*68 h i j k l m n o */
198 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
199 /*70 p q r s t u v w */
200 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
201 /*78 x y z { | } ~ DL */
202 0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07,
203 /*80*/
204 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
205 /*88*/
206 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
207 /*90*/
208 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
209 /*98*/
210 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
211 /*A0*/
212 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
213 /*A8*/
214 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
215 /*B0*/
216 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
217 /*B8*/
218 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
219 /*C0*/
220 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
221 /*C8*/
222 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
223 /*D0*/
224 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
225 /*D8*/
226 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
227 /*E0 sz */
228 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
229 /*E8*/
230 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
231 /*F0*/
232 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
233 /*F8*/
234 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
235};
236
237/*
238 * EBCDIC 500 -> ASCII (IBM PC 437)
239 */
240__u8 _ebcasc_500[256] =
241{
242 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
243 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
244 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
245 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
246 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
247 -ENP ->LF */
248 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
249 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
250 -IUS */
251 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
252 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
253 -INP */
254 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
255 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
256 -SW */
257 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
258 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
259 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
260 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
261 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
262 /* 0x40 SP RSP ä ---- */
263 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
264 /* 0x48 [ . < ( + ! */
265 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
266 /* 0x50 & ---- */
267 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
268 /* 0x58 ß ] $ * ) ; ^ */
269 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
270 /* 0x60 - / ---- Ä ---- ---- ---- */
271 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
272 /* 0x68 ---- , % _ > ? */
273 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
274 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
275 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
276 /* 0x78 * ` : # @ ' = " */
277 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
278 /* 0x80 * a b c d e f g */
279 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
280 /* 0x88 h i ---- ---- ---- */
281 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
282 /* 0x90 ° j k l m n o p */
283 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
284 /* 0x98 q r ---- ---- */
285 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
286 /* 0xA0 ~ s t u v w x */
287 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
288 /* 0xA8 y z ---- ---- ---- ---- */
289 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
290 /* 0xB0 ---- § ---- */
291 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
292 /* 0xB8 ---- | ---- ---- ---- ---- */
293 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
294 /* 0xC0 { A B C D E F G */
295 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
296 /* 0xC8 H I ---- ö ---- */
297 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
298 /* 0xD0 } J K L M N O P */
299 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
300 /* 0xD8 Q R ---- ü */
301 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
302 /* 0xE0 \ S T U V W X */
303 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
304 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
305 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
306 /* 0xF0 0 1 2 3 4 5 6 7 */
307 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
308 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
309 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
310};
311
312
313/*
314 * EBCDIC 037/500 conversion table:
315 * from upper to lower case
316 */
317__u8 _ebc_tolower[256] =
318{
319 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
320 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
321 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
322 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
323 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
324 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
325 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
326 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
327 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
328 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
329 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
330 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
331 0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
332 0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
333 0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
334 0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
335 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
336 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
337 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
338 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F,
339 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
340 0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF,
341 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
342 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
343 0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
344 0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
345 0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
346 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
347 0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
348 0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
349 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
350 0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF
351};
352
353
354/*
355 * EBCDIC 037/500 conversion table:
356 * from lower to upper case
357 */
358__u8 _ebc_toupper[256] =
359{
360 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
361 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
362 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
363 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
364 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
365 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
366 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
367 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
368 0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
369 0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
370 0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
371 0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
372 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
373 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
374 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
375 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
376 0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
377 0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F,
378 0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
379 0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F,
380 0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
381 0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
382 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
383 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
384 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
385 0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
386 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
387 0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
388 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
389 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
390 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
391 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
392};
393
394EXPORT_SYMBOL(_ascebc_500);
395EXPORT_SYMBOL(_ebcasc_500);
396EXPORT_SYMBOL(_ascebc);
397EXPORT_SYMBOL(_ebcasc);
398EXPORT_SYMBOL(_ebc_tolower);
399EXPORT_SYMBOL(_ebc_toupper);
400
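All six arrays are 256-byte translation tables designed for the hardware TR (translate) instruction; the ASCEBC()/EBCASC() helpers in asm/ebcdic.h apply them in place over a buffer, as cpcmd.c does with the command and response text. The C equivalent of one such translation (editorial sketch):

	/* Editorial sketch: byte-wise in-place translation, i.e. what
	 * the TR instruction does when driven by one of these tables. */
	static void translate_buf(unsigned char *buf, size_t len,
				  const __u8 table[256])
	{
		size_t i;

		for (i = 0; i < len; i++)
			buf[i] = table[buf[i]];
	}
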
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
new file mode 100644
index 000000000000..c0e09b33febe
--- /dev/null
+++ b/arch/s390/kernel/entry.S
@@ -0,0 +1,868 @@
1/*
2 * arch/s390/kernel/entry.S
3 * S390 low-level entry points.
4 *
5 * S390 version
6 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Hartmut Penner (hp@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <linux/config.h>
15#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20#include <asm/offsets.h>
21#include <asm/unistd.h>
22#include <asm/page.h>
23
24/*
25 * Stack layout for the system_call stack entry.
26 * The first few entries are identical to the user_regs_struct.
27 */
28SP_PTREGS = STACK_FRAME_OVERHEAD
29SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
30SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
31SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
32SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
33SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
34SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
35SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
36SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
37SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
38SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
39SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
40SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
41SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
42SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
43SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
44SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
45SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
46SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
47SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
48SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
49SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
50SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
51
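The SP_* constants address a struct pt_regs stored STACK_FRAME_OVERHEAD bytes into the kernel stack frame. In C terms, the save area they describe looks roughly like this (editorial sketch after asm/ptrace.h; 31-bit, so gprs are 4 bytes each):

	/* Editorial sketch of the register save area behind the SP_*
	 * offsets (field order as in struct pt_regs). */
	struct pt_regs_sketch {
		unsigned long args[1];		/* SP_ARGS */
		psw_t psw;			/* SP_PSW, 8 bytes */
		unsigned long gprs[16];		/* SP_R0 .. SP_R15 */
		unsigned long orig_gpr2;	/* SP_ORIG_R2 */
		unsigned short ilc;		/* SP_ILC */
		unsigned short trap;		/* SP_TRAP */
	};
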
52_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
53 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
54_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
55
56STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
57STACK_SIZE = 1 << STACK_SHIFT
58
59#define BASED(name) name-system_call(%r13)
60
61/*
62 * Register usage in interrupt handlers:
63 * R9 - pointer to current task structure
64 * R13 - pointer to literal pool
65 * R14 - return register for function calls
66 * R15 - kernel stack pointer
67 */
68
69 .macro STORE_TIMER lc_offset
70#ifdef CONFIG_VIRT_CPU_ACCOUNTING
71 stpt \lc_offset
72#endif
73 .endm
74
75#ifdef CONFIG_VIRT_CPU_ACCOUNTING
76 .macro UPDATE_VTIME lc_from,lc_to,lc_sum
77 lm %r10,%r11,\lc_from
78 sl %r10,\lc_to
79 sl %r11,\lc_to+4
80 bc 3,BASED(0f)
81 sl %r10,BASED(.Lc_1)
820: al %r10,\lc_sum
83 al %r11,\lc_sum+4
84 bc 12,BASED(1f)
85 al %r10,BASED(.Lc_1)
861: stm %r10,%r11,\lc_sum
87 .endm
88#endif
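#
# Note on UPDATE_VTIME: the timer values are 64 bit, kept in the
# even/odd pair %r10/%r11, so the two conditional branches implement
# the borrow/carry between the 32 bit halves. After "sl %r11,\lc_to+4"
# condition mask 3 (no borrow) skips the extra "sl %r10,.Lc_1"; after
# "al %r11,\lc_sum+4" condition mask 12 (no carry) skips the extra
# "al %r10,.Lc_1". Net effect: lc_sum += lc_from - lc_to.
#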
89
90 .macro SAVE_ALL_BASE savearea
91 stm %r12,%r15,\savearea
92 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
93 .endm
94
95 .macro SAVE_ALL psworg,savearea,sync
96 la %r12,\psworg
97 .if \sync
98 tm \psworg+1,0x01 # test problem state bit
99 bz BASED(2f) # skip stack setup save
100 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
101 .else
102 tm \psworg+1,0x01 # test problem state bit
103 bnz BASED(1f) # from user -> load async stack
104 clc \psworg+4(4),BASED(.Lcritical_end)
105 bhe BASED(0f)
106 clc \psworg+4(4),BASED(.Lcritical_start)
107 bl BASED(0f)
108 l %r14,BASED(.Lcleanup_critical)
109 basr %r14,%r14
110 tm 0(%r12),0x01 # retest problem state after cleanup
111 bnz BASED(1f)
1120: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
113 slr %r14,%r15
114 sra %r14,STACK_SHIFT
115 be BASED(2f)
1161: l %r15,__LC_ASYNC_STACK
117 .endif
118#ifdef CONFIG_CHECK_STACK
119 b BASED(3f)
1202: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
121 bz BASED(stack_overflow)
1223:
123#endif
1242: s %r15,BASED(.Lc_spsize) # make room for registers & psw
125 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
126 la %r12,\psworg
127 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
128 icm %r12,12,__LC_SVC_ILC
129 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
130 st %r12,SP_ILC(%r15)
131 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
132 la %r12,0
133 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
134 .endm
135
136 .macro RESTORE_ALL sync
137 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
138 .if !\sync
139 ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
140 .endif
141 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
142 STORE_TIMER __LC_EXIT_TIMER
143 lpsw __LC_RETURN_PSW # back to caller
144 .endm
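#
# Note: RESTORE_ALL stages the saved PSW in the lowcore first because
# the final lpsw needs a memory operand; by the time it executes, all
# 16 gprs, including the stack pointer %r15, already hold user values.
#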
145
146/*
147 * Scheduler resume function, called by switch_to
148 * gpr2 = (task_struct *) prev
149 * gpr3 = (task_struct *) next
150 * Returns:
151 * gpr2 = prev
152 */
153 .globl __switch_to
154__switch_to:
155 basr %r1,0
156__switch_to_base:
157 tm __THREAD_per(%r3),0xe8 # new process is using per ?
158 bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
159 stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
160 clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
161 be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
162 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
163__switch_to_noper:
164 stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
165 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
166 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
167 lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
168 st %r3,__LC_CURRENT # __LC_CURRENT = current task struct
169 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
170 l %r3,__THREAD_info(%r3) # load thread_info from task struct
171 st %r3,__LC_THREAD_INFO
172 ahi %r3,STACK_SIZE
173 st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
174 br %r14
175
176__critical_start:
177/*
178 * SVC interrupt handler routine. System calls are synchronous events and
179 * are executed with interrupts enabled.
180 */
181
182 .globl system_call
183system_call:
184 STORE_TIMER __LC_SYNC_ENTER_TIMER
185sysc_saveall:
186 SAVE_ALL_BASE __LC_SAVE_AREA
187 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
188 lh %r7,0x8a # get svc number from lowcore
189#ifdef CONFIG_VIRT_CPU_ACCOUNTING
190sysc_vtime:
191 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
192 bz BASED(sysc_do_svc)
193 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
194sysc_stime:
195 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
196sysc_update:
197 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
198#endif
199sysc_do_svc:
200 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
201 sla %r7,2 # *4 and test for svc 0
202 bnz BASED(sysc_nr_ok) # svc number > 0
203 # svc 0: system call number in %r1
204 cl %r1,BASED(.Lnr_syscalls)
205 bnl BASED(sysc_nr_ok)
206 lr %r7,%r1 # copy svc number to %r7
207 sla %r7,2 # *4
208sysc_nr_ok:
209 mvc SP_ARGS(4,%r15),SP_R7(%r15)
210sysc_do_restart:
211 tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
212 l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
213 bnz BASED(sysc_tracesys)
214 basr %r14,%r8 # call sys_xxxx
215 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
216 # ATTENTION: check sys_execve_glue before
217 # changing anything here !!
218
219sysc_return:
220 tm SP_PSW+1(%r15),0x01 # returning to user ?
221 bno BASED(sysc_leave)
222 tm __TI_flags+3(%r9),_TIF_WORK_SVC
223 bnz BASED(sysc_work) # there is work to do (signals etc.)
224sysc_leave:
225 RESTORE_ALL 1
226
227#
228# recheck if there is more work to do
229#
230sysc_work_loop:
231 tm __TI_flags+3(%r9),_TIF_WORK_SVC
232 bz BASED(sysc_leave) # there is no work to do
233#
234# One of the work bits is on. Find out which one.
235#
236sysc_work:
237 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
238 bo BASED(sysc_reschedule)
239 tm __TI_flags+3(%r9),_TIF_SIGPENDING
240 bo BASED(sysc_sigpending)
241 tm __TI_flags+3(%r9),_TIF_RESTART_SVC
242 bo BASED(sysc_restart)
243 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
244 bo BASED(sysc_singlestep)
245 b BASED(sysc_leave)
246
247#
248# _TIF_NEED_RESCHED is set, call schedule
249#
250sysc_reschedule:
251 l %r1,BASED(.Lschedule)
252 la %r14,BASED(sysc_work_loop)
253 br %r1 # call scheduler
254
255#
256# _TIF_SIGPENDING is set, call do_signal
257#
258sysc_sigpending:
259 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
260 la %r2,SP_PTREGS(%r15) # load pt_regs
261 sr %r3,%r3 # clear *oldset
262 l %r1,BASED(.Ldo_signal)
263 basr %r14,%r1 # call do_signal
264 tm __TI_flags+3(%r9),_TIF_RESTART_SVC
265 bo BASED(sysc_restart)
266 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
267 bo BASED(sysc_singlestep)
268 b BASED(sysc_leave) # out of here, do NOT recheck
269
270#
271# _TIF_RESTART_SVC is set, set up registers and restart svc
272#
273sysc_restart:
274 ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
275 l %r7,SP_R2(%r15) # load new svc number
276 sla %r7,2
277 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
278 lm %r2,%r6,SP_R2(%r15) # load svc arguments
279 b BASED(sysc_do_restart) # restart svc
280
281#
282# _TIF_SINGLE_STEP is set, call do_single_step
283#
284sysc_singlestep:
285 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
286 mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
287 la %r2,SP_PTREGS(%r15) # address of register-save area
288 l %r1,BASED(.Lhandle_per) # load adr. of per handler
289 la %r14,BASED(sysc_return) # load adr. of system return
290 br %r1 # branch to do_single_step
291
292__critical_end:
293
294#
295# call trace before and after sys_call
296#
297sysc_tracesys:
298 l %r1,BASED(.Ltrace)
299 la %r2,SP_PTREGS(%r15) # load pt_regs
300 la %r3,0
301 srl %r7,2
302 st %r7,SP_R2(%r15)
303 basr %r14,%r1
304 clc SP_R2(4,%r15),BASED(.Lnr_syscalls)
305 bnl BASED(sysc_tracenogo)
306 l %r7,SP_R2(%r15) # strace might have changed the
307 sll %r7,2 # system call
308 l %r8,sys_call_table-system_call(%r7,%r13)
309sysc_tracego:
310 lm %r3,%r6,SP_R3(%r15)
311 l %r2,SP_ORIG_R2(%r15)
312 basr %r14,%r8 # call sys_xxx
313 st %r2,SP_R2(%r15) # store return value
314sysc_tracenogo:
315 tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
316 bz BASED(sysc_return)
317 l %r1,BASED(.Ltrace)
318 la %r2,SP_PTREGS(%r15) # load pt_regs
319 la %r3,1
320 la %r14,BASED(sysc_return)
321 br %r1
322
323#
324# a new process exits the kernel with ret_from_fork
325#
326 .globl ret_from_fork
327ret_from_fork:
328 l %r13,__LC_SVC_NEW_PSW+4
329 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
330 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
331 bo BASED(0f)
332 st %r15,SP_R15(%r15) # store stack pointer for new kthread
3330: l %r1,BASED(.Lschedtail)
334 basr %r14,%r1
335 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
336 b BASED(sysc_return)
337
338#
339# clone, fork, vfork, exec and sigreturn need glue,
340# because they all expect pt_regs as a parameter,
341# but are called with different parameters.
342# the return address is set up above
343#
344sys_clone_glue:
345 la %r2,SP_PTREGS(%r15) # load pt_regs
346 l %r1,BASED(.Lclone)
347 br %r1 # branch to sys_clone
348
349sys_fork_glue:
350 la %r2,SP_PTREGS(%r15) # load pt_regs
351 l %r1,BASED(.Lfork)
352 br %r1 # branch to sys_fork
353
354sys_vfork_glue:
355 la %r2,SP_PTREGS(%r15) # load pt_regs
356 l %r1,BASED(.Lvfork)
357 br %r1 # branch to sys_vfork
358
359sys_execve_glue:
360 la %r2,SP_PTREGS(%r15) # load pt_regs
361 l %r1,BASED(.Lexecve)
362 lr %r12,%r14 # save return address
363 basr %r14,%r1 # call sys_execve
364 ltr %r2,%r2 # check if execve failed
365 bnz 0(%r12) # it did fail -> store result in gpr2
366 b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8
367 # in system_call/sysc_tracesys
368
369sys_sigreturn_glue:
370 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
371 l %r1,BASED(.Lsigreturn)
372 br %r1 # branch to sys_sigreturn
373
374sys_rt_sigreturn_glue:
375 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
376 l %r1,BASED(.Lrt_sigreturn)
377 br %r1 # branch to sys_rt_sigreturn
378
379#
380# sigsuspend and rt_sigsuspend need pt_regs as an additional
381# parameter and they have to skip the store of %r2 into the
382# user register %r2 because the return value was set in
383# sigsuspend and rt_sigsuspend already and must not be overwritten!
384#
385
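# Note: the glue below works because the instruction being skipped,
# the "st %r2,SP_R2(%r15)" after the basr in the svc dispatch, is a
# 4 byte RX-format instruction; adding 4 to %r14 makes the system call
# return directly behind the store. The 64 bit entry64.S uses 6
# instead, the length of the STG instruction.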
386sys_sigsuspend_glue:
387 lr %r5,%r4 # move mask back
388 lr %r4,%r3 # move history1 parameter
389 lr %r3,%r2 # move history0 parameter
390 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
391 l %r1,BASED(.Lsigsuspend)
392 la %r14,4(%r14) # skip store of return value
393 br %r1 # branch to sys_sigsuspend
394
395sys_rt_sigsuspend_glue:
396 lr %r4,%r3 # move sigsetsize parameter
397 lr %r3,%r2 # move unewset parameter
398 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
399 l %r1,BASED(.Lrt_sigsuspend)
400 la %r14,4(%r14) # skip store of return value
401 br %r1 # branch to sys_rt_sigsuspend
402
403sys_sigaltstack_glue:
404 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
405 l %r1,BASED(.Lsigaltstack)
406 br %r1 # branch to sys_sigaltstack
407
408
409/*
410 * Program check handler routine
411 */
412
413 .globl pgm_check_handler
414pgm_check_handler:
415/*
416 * First we need to check for a special case:
417 * Single stepping an instruction that disables the PER event mask will
418 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
419 * For a single stepped SVC the program check handler gets control after
420 * the SVC new PSW has been loaded. But we want to execute the SVC first and
421 * then handle the PER event. Therefore we update the SVC old PSW to point
422 * to the pgm_check_handler and branch to the SVC handler after we checked
423 * if we have to load the kernel stack register.
424 * For every other possible cause of a PER event without the PER mask set
425 * we just ignore the PER event (FIXME: is there anything we have to do
426 * for LPSW?).
427 */
428 STORE_TIMER __LC_SYNC_ENTER_TIMER
429 SAVE_ALL_BASE __LC_SAVE_AREA
430 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
431 bnz BASED(pgm_per) # got per exception -> special case
432 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
433#ifdef CONFIG_VIRT_CPU_ACCOUNTING
434 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
435 bz BASED(pgm_no_vtime)
436 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
437 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
438 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
439pgm_no_vtime:
440#endif
441 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
442 l %r3,__LC_PGM_ILC # load program interruption code
443 la %r8,0x7f
444 nr %r8,%r3
445pgm_do_call:
446 l %r7,BASED(.Ljump_table)
447 sll %r8,2
448 l %r7,0(%r8,%r7) # load address of handler routine
449 la %r2,SP_PTREGS(%r15) # address of register-save area
450 la %r14,BASED(sysc_return)
451 br %r7 # branch to interrupt-handler
452
453#
454# handle per exception
455#
456pgm_per:
457 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
458 bnz BASED(pgm_per_std) # ok, normal per event from user space
459# ok, it's one of the special cases, now we need to find out which one
460 clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
461 be BASED(pgm_svcper)
462# no interesting special case, ignore PER event
463 lm %r12,%r15,__LC_SAVE_AREA
464 lpsw 0x28
465
466#
467# Normal per exception
468#
469pgm_per_std:
470 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
471#ifdef CONFIG_VIRT_CPU_ACCOUNTING
472 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
473 bz BASED(pgm_no_vtime2)
474 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
475 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
476 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
477pgm_no_vtime2:
478#endif
479 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
480 l %r1,__TI_task(%r9)
481 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
482 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
483 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
484 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
485 l %r3,__LC_PGM_ILC # load program interruption code
486 la %r8,0x7f
487 nr %r8,%r3 # clear per-event-bit and ilc
488 be BASED(sysc_return) # only per or per+check ?
489 b BASED(pgm_do_call)
490
491#
492# it was a single stepped SVC that is causing all the trouble
493#
494pgm_svcper:
495 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
496#ifdef CONFIG_VIRT_CPU_ACCOUNTING
497 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
498 bz BASED(pgm_no_vtime3)
499 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
500 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
501 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
502pgm_no_vtime3:
503#endif
504 lh %r7,0x8a # get svc number from lowcore
505 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
506 l %r1,__TI_task(%r9)
507 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
508 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
509 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
510 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
511 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
512 b BASED(sysc_do_svc)
513
514/*
515 * IO interrupt handler routine
516 */
517
518 .globl io_int_handler
519io_int_handler:
520 STORE_TIMER __LC_ASYNC_ENTER_TIMER
521 stck __LC_INT_CLOCK
522 SAVE_ALL_BASE __LC_SAVE_AREA+16
523 SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
524#ifdef CONFIG_VIRT_CPU_ACCOUNTING
525 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
526 bz BASED(io_no_vtime)
527 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
528 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
529 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
530io_no_vtime:
531#endif
532 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
533 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
534 la %r2,SP_PTREGS(%r15) # address of register-save area
535 basr %r14,%r1 # branch to standard irq handler
536
537io_return:
538 tm SP_PSW+1(%r15),0x01 # returning to user ?
539#ifdef CONFIG_PREEMPT
540 bno BASED(io_preempt) # no -> check for preemptive scheduling
541#else
542 bno BASED(io_leave) # no-> skip resched & signal
543#endif
544 tm __TI_flags+3(%r9),_TIF_WORK_INT
545 bnz BASED(io_work) # there is work to do (signals etc.)
546io_leave:
547 RESTORE_ALL 0
548
549#ifdef CONFIG_PREEMPT
550io_preempt:
551 icm %r0,15,__TI_precount(%r9)
552 bnz BASED(io_leave)
553 l %r1,SP_R15(%r15)
554 s %r1,BASED(.Lc_spsize)
555 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
556 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
557 lr %r15,%r1
558io_resume_loop:
559 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
560 bno BASED(io_leave)
561 mvc __TI_precount(4,%r9),BASED(.Lc_pactive)
562 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
563 l %r1,BASED(.Lschedule)
564 basr %r14,%r1 # call schedule
565 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
566 xc __TI_precount(4,%r9),__TI_precount(%r9)
567 b BASED(io_resume_loop)
568#endif
569
570#
571# switch to kernel stack, then check the TIF bits
572#
573io_work:
574 l %r1,__LC_KERNEL_STACK
575 s %r1,BASED(.Lc_spsize)
576 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
577 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
578 lr %r15,%r1
579#
580# One of the work bits is on. Find out which one.
581# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
582#
583io_work_loop:
584 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
585 bo BASED(io_reschedule)
586 tm __TI_flags+3(%r9),_TIF_SIGPENDING
587 bo BASED(io_sigpending)
588 b BASED(io_leave)
589
590#
591# _TIF_NEED_RESCHED is set, call schedule
592#
593io_reschedule:
594 l %r1,BASED(.Lschedule)
595 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
596 basr %r14,%r1 # call scheduler
597 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
598 tm __TI_flags+3(%r9),_TIF_WORK_INT
599 bz BASED(io_leave) # there is no work to do
600 b BASED(io_work_loop)
601
602#
603# _TIF_SIGPENDING is set, call do_signal
604#
605io_sigpending:
606 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
607 la %r2,SP_PTREGS(%r15) # load pt_regs
608 sr %r3,%r3 # clear *oldset
609 l %r1,BASED(.Ldo_signal)
610 basr %r14,%r1 # call do_signal
611 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
612 b BASED(io_leave) # out of here, do NOT recheck
613
614/*
615 * External interrupt handler routine
616 */
617
618 .globl ext_int_handler
619ext_int_handler:
620 STORE_TIMER __LC_ASYNC_ENTER_TIMER
621 stck __LC_INT_CLOCK
622 SAVE_ALL_BASE __LC_SAVE_AREA+16
623 SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
624#ifdef CONFIG_VIRT_CPU_ACCOUNTING
625 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
626 bz BASED(ext_no_vtime)
627 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
628 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
629 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
630ext_no_vtime:
631#endif
632 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
633 la %r2,SP_PTREGS(%r15) # address of register-save area
634 lh %r3,__LC_EXT_INT_CODE # get interruption code
635 l %r1,BASED(.Ldo_extint)
636 basr %r14,%r1
637 b BASED(io_return)
638
639/*
640 * Machine check handler routines
641 */
642
643 .globl mcck_int_handler
644mcck_int_handler:
645 STORE_TIMER __LC_ASYNC_ENTER_TIMER
646 SAVE_ALL_BASE __LC_SAVE_AREA+32
647 SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
648#ifdef CONFIG_VIRT_CPU_ACCOUNTING
649 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
650 bz BASED(mcck_no_vtime)
651 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
652 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
653 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
654mcck_no_vtime:
655#endif
656 l %r1,BASED(.Ls390_mcck)
657 basr %r14,%r1 # call machine check handler
658mcck_return:
659 RESTORE_ALL 0
660
661#ifdef CONFIG_SMP
662/*
663 * Restart interruption handler, kick starter for additional CPUs
664 */
665 .globl restart_int_handler
666restart_int_handler:
667 l %r15,__LC_SAVE_AREA+60 # load ksp
668 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
669 lam %a0,%a15,__LC_AREGS_SAVE_AREA
670 lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
671 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
672 basr %r14,0
673 l %r14,restart_addr-.(%r14)
674 br %r14 # branch to start_secondary
675restart_addr:
676 .long start_secondary
677#else
678/*
679 * If we do not run with SMP enabled, let the new CPU crash ...
680 */
681 .globl restart_int_handler
682restart_int_handler:
683 basr %r1,0
684restart_base:
685 lpsw restart_crash-restart_base(%r1)
686 .align 8
687restart_crash:
688 .long 0x000a0000,0x00000000
689restart_go:
690#endif
691
692#ifdef CONFIG_CHECK_STACK
693/*
694 * The synchronous or the asynchronous stack overflowed. We are dead.
695 * No need to properly save the registers, we are going to panic anyway.
696 * Setup a pt_regs so that show_trace can provide a good call trace.
697 */
698stack_overflow:
699 l %r15,__LC_PANIC_STACK # change to panic stack
700 sl %r15,BASED(.Lc_spsize)
701 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
702 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
703 la %r1,__LC_SAVE_AREA
704 ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ?
705 be BASED(0f)
706 ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ?
707 be BASED(0f)
708 la %r1,__LC_SAVE_AREA+16
7090: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
710 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
711 l %r1,BASED(1f) # branch to kernel_stack_overflow
712 la %r2,SP_PTREGS(%r15) # load pt_regs
713 br %r1
7141: .long kernel_stack_overflow
715#endif
716
717cleanup_table_system_call:
718 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
719cleanup_table_sysc_return:
720 .long sysc_return + 0x80000000, sysc_leave + 0x80000000
721cleanup_table_sysc_leave:
722 .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
723cleanup_table_sysc_work_loop:
724 .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
725
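# Note: each cleanup table entry is a start/end pair of code addresses.
# The + 0x80000000 sets bit 0, the 31-bit addressing mode bit, so the
# values compare equal to PSW instruction addresses, which carry that
# bit when running in 31-bit mode. The tables in entry64.S use plain
# .quad addresses, since 64-bit PSW addresses have no such mode bit.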
726cleanup_critical:
727 clc 4(4,%r12),BASED(cleanup_table_system_call)
728 bl BASED(0f)
729 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
730 bl BASED(cleanup_system_call)
7310:
732 clc 4(4,%r12),BASED(cleanup_table_sysc_return)
733 bl BASED(0f)
734 clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
735 bl BASED(cleanup_sysc_return)
7360:
737 clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
738 bl BASED(0f)
739 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
740 bl BASED(cleanup_sysc_leave)
7410:
742 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
743 bl BASED(0f)
744 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
745 bl BASED(cleanup_sysc_leave)
7460:
747 br %r14
748
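# Note: cleanup_critical compares the interrupted instruction address
# (the word at 4(%r12), i.e. the address half of the old PSW) against
# the ranges above. If the interrupt hit inside one of them, the
# matching fixup routine below completes or replays the partially
# executed sequence and points __LC_RETURN_PSW at a safe resume
# address, so the asynchronous handler sees a consistent state.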
749cleanup_system_call:
750 mvc __LC_RETURN_PSW(8),0(%r12)
751#ifdef CONFIG_VIRT_CPU_ACCOUNTING
752 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
753 bh BASED(0f)
754 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
7550: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
756 bhe BASED(cleanup_vtime)
757#endif
758 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
759 bh BASED(0f)
760 mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
7610: st %r13,__LC_SAVE_AREA+20
762 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
763 st %r15,__LC_SAVE_AREA+28
764 lh %r7,0x8a
765#ifdef CONFIG_VIRT_CPU_ACCOUNTING
766cleanup_vtime:
767 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
768 bhe BASED(cleanup_stime)
769 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
770 bz BASED(cleanup_novtime)
771 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
772cleanup_stime:
773 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
774 bh BASED(cleanup_update)
775 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
776cleanup_update:
777 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
778cleanup_novtime:
779#endif
780 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
781 la %r12,__LC_RETURN_PSW
782 br %r14
783cleanup_system_call_insn:
784 .long sysc_saveall + 0x80000000
785#ifdef CONFIG_VIRT_CPU_ACCOUNTING
786 .long system_call + 0x80000000
787 .long sysc_vtime + 0x80000000
788 .long sysc_stime + 0x80000000
789 .long sysc_update + 0x80000000
790#endif
791
792cleanup_sysc_return:
793 mvc __LC_RETURN_PSW(4),0(%r12)
794 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
795 la %r12,__LC_RETURN_PSW
796 br %r14
797
798cleanup_sysc_leave:
799 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
800 be BASED(0f)
801#ifdef CONFIG_VIRT_CPU_ACCOUNTING
802 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
803 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
804 be BASED(0f)
805#endif
806 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
807 mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
808 lm %r0,%r11,SP_R0(%r15)
809 l %r15,SP_R15(%r15)
8100: la %r12,__LC_RETURN_PSW
811 br %r14
812cleanup_sysc_leave_insn:
813#ifdef CONFIG_VIRT_CPU_ACCOUNTING
814 .long sysc_leave + 14 + 0x80000000
815#endif
816 .long sysc_leave + 10 + 0x80000000
817
818/*
819 * Integer constants
820 */
821 .align 4
822.Lc_spsize: .long SP_SIZE
823.Lc_overhead: .long STACK_FRAME_OVERHEAD
824.Lc_pactive: .long PREEMPT_ACTIVE
825.Lnr_syscalls: .long NR_syscalls
826.L0x018: .short 0x018
827.L0x020: .short 0x020
828.L0x028: .short 0x028
829.L0x030: .short 0x030
830.L0x038: .short 0x038
831.Lc_1: .long 1
832
833/*
834 * Symbol constants
835 */
836.Ls390_mcck: .long s390_do_machine_check
837.Ldo_IRQ: .long do_IRQ
838.Ldo_extint: .long do_extint
839.Ldo_signal: .long do_signal
840.Lhandle_per: .long do_single_step
841.Ljump_table: .long pgm_check_table
842.Lschedule: .long schedule
843.Lclone: .long sys_clone
844.Lexecve: .long sys_execve
845.Lfork: .long sys_fork
846.Lrt_sigreturn:.long sys_rt_sigreturn
847.Lrt_sigsuspend:
848 .long sys_rt_sigsuspend
849.Lsigreturn: .long sys_sigreturn
850.Lsigsuspend: .long sys_sigsuspend
851.Lsigaltstack: .long sys_sigaltstack
852.Ltrace: .long syscall_trace
853.Lvfork: .long sys_vfork
854.Lschedtail: .long schedule_tail
855
856.Lcritical_start:
857 .long __critical_start + 0x80000000
858.Lcritical_end:
859 .long __critical_end + 0x80000000
860.Lcleanup_critical:
861 .long cleanup_critical
862
863#define SYSCALL(esa,esame,emu) .long esa
864 .globl sys_call_table
865sys_call_table:
866#include "syscalls.S"
867#undef SYSCALL
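# Note: syscalls.S contains one SYSCALL(esa,esame,emu) line per system
# call; defining SYSCALL to expand to its first argument selects the
# 31-bit entry points here. entry64.S redefines it to pick the esame
# column (and, under CONFIG_S390_SUPPORT, the emu column for the
# 31-bit emulation table sys_call_table_emu).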
868
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
new file mode 100644
index 000000000000..51527ab8c8f9
--- /dev/null
+++ b/arch/s390/kernel/entry64.S
@@ -0,0 +1,881 @@
1/*
2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points.
4 *
5 * S390 version
6 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Hartmut Penner (hp@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <linux/config.h>
15#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20#include <asm/offsets.h>
21#include <asm/unistd.h>
22#include <asm/page.h>
23
24/*
25 * Stack layout for the system_call stack entry.
26 * The first few entries are identical to the user_regs_struct.
27 */
28SP_PTREGS = STACK_FRAME_OVERHEAD
29SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
30SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
31SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
32SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
33SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
34SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
35SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
36SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
37SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
38SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
39SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
40SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
41SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
42SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
43SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
44SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
45SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
46SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
47SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
48SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
49SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
50SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
51
52STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
53STACK_SIZE = 1 << STACK_SHIFT
54
55_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
56 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
57_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
58
59#define BASED(name) name-system_call(%r13)
60
61 .macro STORE_TIMER lc_offset
62#ifdef CONFIG_VIRT_CPU_ACCOUNTING
63 stpt \lc_offset
64#endif
65 .endm
66
67#ifdef CONFIG_VIRT_CPU_ACCOUNTING
68 .macro UPDATE_VTIME lc_from,lc_to,lc_sum
69 lg %r10,\lc_from
70 slg %r10,\lc_to
71 alg %r10,\lc_sum
72 stg %r10,\lc_sum
73 .endm
74#endif
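#
# Note: unlike the 31-bit UPDATE_VTIME in entry.S, no explicit borrow
# or carry handling is needed here; lg/slg/alg/stg operate on full
# 64-bit registers, so lc_sum += lc_from - lc_to is computed directly.
#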
75
76/*
77 * Register usage in interrupt handlers:
78 * R9 - pointer to current task structure
79 * R13 - pointer to literal pool
80 * R14 - return register for function calls
81 * R15 - kernel stack pointer
82 */
83
84 .macro SAVE_ALL_BASE savearea
85 stmg %r12,%r15,\savearea
86 larl %r13,system_call
87 .endm
88
89 .macro SAVE_ALL psworg,savearea,sync
90 la %r12,\psworg
91 .if \sync
92 tm \psworg+1,0x01 # test problem state bit
93 jz 2f # skip stack setup save
94 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
95 .else
96 tm \psworg+1,0x01 # test problem state bit
97 jnz 1f # from user -> load async stack
98 clc \psworg+8(8),BASED(.Lcritical_end)
99 jhe 0f
100 clc \psworg+8(8),BASED(.Lcritical_start)
101 jl 0f
102 brasl %r14,cleanup_critical
103 tm 0(%r12),0x01 # retest problem state after cleanup
104 jnz 1f
1050: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
106 slgr %r14,%r15
107 srag %r14,%r14,STACK_SHIFT
108 jz 2f
1091: lg %r15,__LC_ASYNC_STACK # load async stack
110 .endif
111#ifdef CONFIG_CHECK_STACK
112 j 3f
1132: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
114 jz stack_overflow
1153:
116#endif
1172: aghi %r15,-SP_SIZE # make room for registers & psw
118 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
119 la %r12,\psworg
120 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
121 icm %r12,12,__LC_SVC_ILC
122 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
123 st %r12,SP_ILC(%r15)
124 mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
125 la %r12,0
126 stg %r12,__SF_BACKCHAIN(%r15)
127 .endm
128
129 .macro RESTORE_ALL sync
130 mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
131 .if !\sync
132 ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
133 .endif
134 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
135 STORE_TIMER __LC_EXIT_TIMER
136 lpswe __LC_RETURN_PSW # back to caller
137 .endm
138
139/*
140 * Scheduler resume function, called by switch_to
141 * gpr2 = (task_struct *) prev
142 * gpr3 = (task_struct *) next
143 * Returns:
144 * gpr2 = prev
145 */
146 .globl __switch_to
147__switch_to:
148 tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
149 jz __switch_to_noper # if not we're fine
150 stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
151 clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
152 je __switch_to_noper # we got away without bashing TLB's
153 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
154__switch_to_noper:
155 stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
156 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
157 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
158 lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
159 stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct
160 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
161 lg %r3,__THREAD_info(%r3) # load thread_info from task struct
162 stg %r3,__LC_THREAD_INFO
163 aghi %r3,STACK_SIZE
164 stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
165 br %r14
166
167__critical_start:
168/*
169 * SVC interrupt handler routine. System calls are synchronous events and
170 * are executed with interrupts enabled.
171 */
172
173 .globl system_call
174system_call:
175 STORE_TIMER __LC_SYNC_ENTER_TIMER
176sysc_saveall:
177 SAVE_ALL_BASE __LC_SAVE_AREA
178 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
179 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
180#ifdef CONFIG_VIRT_CPU_ACCOUNTING
181sysc_vtime:
182 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
183 jz sysc_do_svc
184 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
185sysc_stime:
186 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
187sysc_update:
188 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
189#endif
190sysc_do_svc:
191 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
192 slag %r7,%r7,2 # *4 and test for svc 0
193 jnz sysc_nr_ok
194 # svc 0: system call number in %r1
195 cl %r1,BASED(.Lnr_syscalls)
196 jnl sysc_nr_ok
197 lgfr %r7,%r1 # clear high word in r1
198 slag %r7,%r7,2 # svc 0: system call number in %r1
199sysc_nr_ok:
200 mvc SP_ARGS(8,%r15),SP_R7(%r15)
201sysc_do_restart:
202 larl %r10,sys_call_table
203#ifdef CONFIG_S390_SUPPORT
204 tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ?
205 jo sysc_noemu
206 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
207sysc_noemu:
208#endif
209 tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
210 lgf %r8,0(%r7,%r10) # load address of system call routine
211 jnz sysc_tracesys
212 basr %r14,%r8 # call sys_xxxx
213 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
214 # ATTENTION: check sys_execve_glue before
215 # changing anything here !!
216
217sysc_return:
218 tm SP_PSW+1(%r15),0x01 # returning to user ?
219 jno sysc_leave
220 tm __TI_flags+7(%r9),_TIF_WORK_SVC
221 jnz sysc_work # there is work to do (signals etc.)
222sysc_leave:
223 RESTORE_ALL 1
224
225#
226# recheck if there is more work to do
227#
228sysc_work_loop:
229 tm __TI_flags+7(%r9),_TIF_WORK_SVC
230 jz sysc_leave # there is no work to do
231#
232# One of the work bits is on. Find out which one.
233#
234sysc_work:
235 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
236 jo sysc_reschedule
237 tm __TI_flags+7(%r9),_TIF_SIGPENDING
238 jo sysc_sigpending
239 tm __TI_flags+7(%r9),_TIF_RESTART_SVC
240 jo sysc_restart
241 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
242 jo sysc_singlestep
243 j sysc_leave
244
245#
246# _TIF_NEED_RESCHED is set, call schedule
247#
248sysc_reschedule:
249 larl %r14,sysc_work_loop
250 jg schedule # return point is sysc_work_loop
251
252#
253# _TIF_SIGPENDING is set, call do_signal
254#
255sysc_sigpending:
256 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
257 la %r2,SP_PTREGS(%r15) # load pt_regs
258 sgr %r3,%r3 # clear *oldset
259 brasl %r14,do_signal # call do_signal
260 tm __TI_flags+7(%r9),_TIF_RESTART_SVC
261 jo sysc_restart
262 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
263 jo sysc_singlestep
264 j sysc_leave # out of here, do NOT recheck
265
266#
267# _TIF_RESTART_SVC is set, set up registers and restart svc
268#
269sysc_restart:
270 ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
271 lg %r7,SP_R2(%r15) # load new svc number
272 slag %r7,%r7,2 # *4
273 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
274 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
275 j sysc_do_restart # restart svc
276
277#
278# _TIF_SINGLE_STEP is set, call do_single_step
279#
280sysc_singlestep:
281 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
282 lhi %r0,__LC_PGM_OLD_PSW
283 sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
284 la %r2,SP_PTREGS(%r15) # address of register-save area
285 larl %r14,sysc_return # load adr. of system return
286 jg do_single_step # branch to do_single_step
287
288
289__critical_end:
290
291#
292# call syscall_trace before and after system call
293# special linkage: %r12 contains the return address for trace_svc
294#
295sysc_tracesys:
296 la %r2,SP_PTREGS(%r15) # load pt_regs
297 la %r3,0
298 srl %r7,2
299 stg %r7,SP_R2(%r15)
300 brasl %r14,syscall_trace
301 lghi %r0,NR_syscalls
302 clg %r0,SP_R2(%r15)
303 jnh sysc_tracenogo
304 lg %r7,SP_R2(%r15) # strace might have changed the
305 sll %r7,2 # system call
306 lgf %r8,0(%r7,%r10)
307sysc_tracego:
308 lmg %r3,%r6,SP_R3(%r15)
309 lg %r2,SP_ORIG_R2(%r15)
310 basr %r14,%r8 # call sys_xxx
311 stg %r2,SP_R2(%r15) # store return value
312sysc_tracenogo:
313 tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
314 jz sysc_return
315 la %r2,SP_PTREGS(%r15) # load pt_regs
316 la %r3,1
317 larl %r14,sysc_return # return point is sysc_return
318 jg syscall_trace
319
320#
321# a new process exits the kernel with ret_from_fork
322#
323 .globl ret_from_fork
324ret_from_fork:
325 lg %r13,__LC_SVC_NEW_PSW+8
326 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
327 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
328 jo 0f
329 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
3300: brasl %r14,schedule_tail
331 stosm 24(%r15),0x03 # reenable interrupts
332 j sysc_return
333
334#
335# clone, fork, vfork, exec and sigreturn need glue,
336# because they all expect pt_regs as a parameter,
337# but are called with different parameters.
338# the return address is set up above
339#
340sys_clone_glue:
341 la %r2,SP_PTREGS(%r15) # load pt_regs
342 jg sys_clone # branch to sys_clone
343
344#ifdef CONFIG_S390_SUPPORT
345sys32_clone_glue:
346 la %r2,SP_PTREGS(%r15) # load pt_regs
347 jg sys32_clone # branch to sys32_clone
348#endif
349
350sys_fork_glue:
351 la %r2,SP_PTREGS(%r15) # load pt_regs
352 jg sys_fork # branch to sys_fork
353
354sys_vfork_glue:
355 la %r2,SP_PTREGS(%r15) # load pt_regs
356 jg sys_vfork # branch to sys_vfork
357
358sys_execve_glue:
359 la %r2,SP_PTREGS(%r15) # load pt_regs
360 lgr %r12,%r14 # save return address
361 brasl %r14,sys_execve # call sys_execve
362 ltgr %r2,%r2 # check if execve failed
363 bnz 0(%r12) # it did fail -> store result in gpr2
364 b 6(%r12) # SKIP STG 2,SP_R2(15) in
365 # system_call/sysc_tracesys
366#ifdef CONFIG_S390_SUPPORT
367sys32_execve_glue:
368 la %r2,SP_PTREGS(%r15) # load pt_regs
369 lgr %r12,%r14 # save return address
370 brasl %r14,sys32_execve # call sys32_execve
371 ltgr %r2,%r2 # check if execve failed
372 bnz 0(%r12) # it did fail -> store result in gpr2
373 b 6(%r12) # SKIP STG 2,SP_R2(15) in
374 # system_call/sysc_tracesys
375#endif
376
377sys_sigreturn_glue:
378 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
379 jg sys_sigreturn # branch to sys_sigreturn
380
381#ifdef CONFIG_S390_SUPPORT
382sys32_sigreturn_glue:
383 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
384 jg sys32_sigreturn # branch to sys32_sigreturn
385#endif
386
387sys_rt_sigreturn_glue:
388 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
389 jg sys_rt_sigreturn # branch to sys_rt_sigreturn
390
391#ifdef CONFIG_S390_SUPPORT
392sys32_rt_sigreturn_glue:
393 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
394 jg sys32_rt_sigreturn # branch to sys32_rt_sigreturn
395#endif
396
397#
398# sigsuspend and rt_sigsuspend need pt_regs as an additional
399# parameter and they have to skip the store of %r2 into the
400# user register %r2 because the return value was set in
401# sigsuspend and rt_sigsuspend already and must not be overwritten!
402#
403
404sys_sigsuspend_glue:
405 lgr %r5,%r4 # move mask back
406 lgr %r4,%r3 # move history1 parameter
407 lgr %r3,%r2 # move history0 parameter
408 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
409 la %r14,6(%r14) # skip store of return value
410 jg sys_sigsuspend # branch to sys_sigsuspend
411
412#ifdef CONFIG_S390_SUPPORT
413sys32_sigsuspend_glue:
414 llgfr %r4,%r4 # unsigned long
415 lgr %r5,%r4 # move mask back
416 lgfr %r3,%r3 # int
417 lgr %r4,%r3 # move history1 parameter
418 lgfr %r2,%r2 # int
419 lgr %r3,%r2 # move history0 parameter
420 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
421 la %r14,6(%r14) # skip store of return value
422 jg sys32_sigsuspend # branch to sys32_sigsuspend
423#endif
424
425sys_rt_sigsuspend_glue:
426 lgr %r4,%r3 # move sigsetsize parameter
427 lgr %r3,%r2 # move unewset parameter
428 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
429 la %r14,6(%r14) # skip store of return value
430 jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend
431
432#ifdef CONFIG_S390_SUPPORT
433sys32_rt_sigsuspend_glue:
434 llgfr %r3,%r3 # size_t
435 lgr %r4,%r3 # move sigsetsize parameter
436 llgtr %r2,%r2 # sigset_emu31_t *
437 lgr %r3,%r2 # move unewset parameter
438 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
439 la %r14,6(%r14) # skip store of return value
440 jg sys32_rt_sigsuspend # branch to sys32_rt_sigsuspend
441#endif
442
443sys_sigaltstack_glue:
444 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
445 jg sys_sigaltstack # branch to sys_sigaltstack
446
447#ifdef CONFIG_S390_SUPPORT
448sys32_sigaltstack_glue:
449 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
450 jg sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper
451#endif
452
453/*
454 * Program check handler routine
455 */
456
457 .globl pgm_check_handler
458pgm_check_handler:
459/*
460 * First we need to check for a special case:
461 * Single stepping an instruction that disables the PER event mask will
462 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
463 * For a single stepped SVC the program check handler gets control after
464 * the SVC new PSW has been loaded. But we want to execute the SVC first and
465 * then handle the PER event. Therefore we update the SVC old PSW to point
466 * to the pgm_check_handler and branch to the SVC handler after we checked
467 * if we have to load the kernel stack register.
468 * For every other possible cause of a PER event without the PER mask set
469 * we just ignore the PER event (FIXME: is there anything we have to do
470 * for LPSW?).
471 */
472 STORE_TIMER __LC_SYNC_ENTER_TIMER
473 SAVE_ALL_BASE __LC_SAVE_AREA
474 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
475 jnz pgm_per # got per exception -> special case
476 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
477#ifdef CONFIG_VIRT_CPU_ACCOUNTING
478 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
479 jz pgm_no_vtime
480 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
481 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
482 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
483pgm_no_vtime:
484#endif
485 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
486 lgf %r3,__LC_PGM_ILC # load program interruption code
487 lghi %r8,0x7f
488 ngr %r8,%r3
489pgm_do_call:
490 sll %r8,3
491 larl %r1,pgm_check_table
492 lg %r1,0(%r8,%r1) # load address of handler routine
493 la %r2,SP_PTREGS(%r15) # address of register-save area
494 larl %r14,sysc_return
495 br %r1 # branch to interrupt-handler
496
497#
498# handle per exception
499#
500pgm_per:
501 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
502 jnz pgm_per_std # ok, normal per event from user space
503# ok, it's one of the special cases, now we need to find out which one
504 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
505 je pgm_svcper
506# no interesting special case, ignore PER event
507 lmg %r12,%r15,__LC_SAVE_AREA
508 lpswe __LC_PGM_OLD_PSW
509
510#
511# Normal per exception
512#
513pgm_per_std:
514 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
515#ifdef CONFIG_VIRT_CPU_ACCOUNTING
516 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
517 jz pgm_no_vtime2
518 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
519 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
520 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
521pgm_no_vtime2:
522#endif
523 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
524 lg %r1,__TI_task(%r9)
525 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
526 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
527 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
528 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
529 lgf %r3,__LC_PGM_ILC # load program interruption code
530 lghi %r8,0x7f
531 ngr %r8,%r3 # clear per-event-bit and ilc
532 je sysc_return
533 j pgm_do_call
534
535#
536# it was a single stepped SVC that is causing all the trouble
537#
538pgm_svcper:
539 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
540#ifdef CONFIG_VIRT_CPU_ACCOUNTING
541 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
542 jz pgm_no_vtime3
543 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
544 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
545 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
546pgm_no_vtime3:
547#endif
548 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
549 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
550 lg %r1,__TI_task(%r9)
551 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
552 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
553 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
554 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
556 j sysc_do_svc
557
558/*
559 * IO interrupt handler routine
560 */
561 .globl io_int_handler
562io_int_handler:
563 STORE_TIMER __LC_ASYNC_ENTER_TIMER
564 stck __LC_INT_CLOCK
565 SAVE_ALL_BASE __LC_SAVE_AREA+32
566 SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
567#ifdef CONFIG_VIRT_CPU_ACCOUNTING
568 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
569 jz io_no_vtime
570 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
571 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
572 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
573io_no_vtime:
574#endif
575 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
576 la %r2,SP_PTREGS(%r15) # address of register-save area
577 brasl %r14,do_IRQ # call standard irq handler
578
579io_return:
580 tm SP_PSW+1(%r15),0x01 # returning to user ?
581#ifdef CONFIG_PREEMPT
582 jno io_preempt # no -> check for preemptive scheduling
583#else
584 jno io_leave # no-> skip resched & signal
585#endif
586 tm __TI_flags+7(%r9),_TIF_WORK_INT
587 jnz io_work # there is work to do (signals etc.)
588io_leave:
589 RESTORE_ALL 0
590
591#ifdef CONFIG_PREEMPT
592io_preempt:
593 icm %r0,15,__TI_precount(%r9)
594 jnz io_leave
595 # switch to kernel stack
596 lg %r1,SP_R15(%r15)
597 aghi %r1,-SP_SIZE
598 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
599 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
600 lgr %r15,%r1
601io_resume_loop:
602 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
603 jno io_leave
604 larl %r1,.Lc_pactive
605 mvc __TI_precount(4,%r9),0(%r1)
606 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
607 brasl %r14,schedule # call schedule
608 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
609 xc __TI_precount(4,%r9),__TI_precount(%r9)
610 j io_resume_loop
611#endif
612
613#
614# switch to kernel stack, then check TIF bits
615#
616io_work:
617 lg %r1,__LC_KERNEL_STACK
618 aghi %r1,-SP_SIZE
619 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
620 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
621 lgr %r15,%r1
622#
623# One of the work bits is on. Find out which one.
624# Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED
625#
626io_work_loop:
627 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
628 jo io_reschedule
629 tm __TI_flags+7(%r9),_TIF_SIGPENDING
630 jo io_sigpending
631 j io_leave
632
633#
634# _TIF_NEED_RESCHED is set, call schedule
635#
636io_reschedule:
637 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
638 brasl %r14,schedule # call scheduler
639 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
640 tm __TI_flags+7(%r9),_TIF_WORK_INT
641 jz io_leave # there is no work to do
642 j io_work_loop
643
644#
645# _TIF_SIGPENDING is set, call do_signal
646#
647io_sigpending:
648 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
649 la %r2,SP_PTREGS(%r15) # load pt_regs
650 slgr %r3,%r3 # clear *oldset
651 brasl %r14,do_signal # call do_signal
652 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
653 j io_leave # out of here, do NOT recheck
654
655/*
656 * External interrupt handler routine
657 */
658 .globl ext_int_handler
659ext_int_handler:
660 STORE_TIMER __LC_ASYNC_ENTER_TIMER
661 stck __LC_INT_CLOCK
662 SAVE_ALL_BASE __LC_SAVE_AREA+32
663 SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
664#ifdef CONFIG_VIRT_CPU_ACCOUNTING
665 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
666 jz ext_no_vtime
667 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
668 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
669 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
670ext_no_vtime:
671#endif
672 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
673 la %r2,SP_PTREGS(%r15) # address of register-save area
674 llgh %r3,__LC_EXT_INT_CODE # get interruption code
675 brasl %r14,do_extint
676 j io_return
677
678/*
679 * Machine check handler routines
680 */
681 .globl mcck_int_handler
682mcck_int_handler:
683 STORE_TIMER __LC_ASYNC_ENTER_TIMER
684 SAVE_ALL_BASE __LC_SAVE_AREA+64
685 SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
686#ifdef CONFIG_VIRT_CPU_ACCOUNTING
687 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
688 jz mcck_no_vtime
689 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
690 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
691 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
692mcck_no_vtime:
693#endif
694 brasl %r14,s390_do_machine_check
695mcck_return:
696 RESTORE_ALL 0
697
698#ifdef CONFIG_SMP
699/*
700 * Restart interruption handler, kick starter for additional CPUs
701 */
702 .globl restart_int_handler
703restart_int_handler:
704 lg %r15,__LC_SAVE_AREA+120 # load ksp
705 lghi %r10,__LC_CREGS_SAVE_AREA
706 lctlg %c0,%c15,0(%r10) # get new ctl regs
707 lghi %r10,__LC_AREGS_SAVE_AREA
708 lam %a0,%a15,0(%r10)
709 lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
710 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
711 jg start_secondary
712#else
713/*
714 * If we do not run with SMP enabled, let the new CPU crash ...
715 */
716 .globl restart_int_handler
717restart_int_handler:
718 basr %r1,0
719restart_base:
720 lpswe restart_crash-restart_base(%r1)
721 .align 8
722restart_crash:
723 .long 0x000a0000,0x00000000,0x00000000,0x00000000
724restart_go:
725#endif
726
727#ifdef CONFIG_CHECK_STACK
728/*
729 * The synchronous or the asynchronous stack overflowed. We are dead.
730 * No need to properly save the registers, we are going to panic anyway.
731 * Setup a pt_regs so that show_trace can provide a good call trace.
732 */
733stack_overflow:
734 lg %r15,__LC_PANIC_STACK # change to panic stack
735 aghi %r15,-SP_SIZE # make room for registers & psw
736 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
737 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
738 la %r1,__LC_SAVE_AREA
739 chi %r12,__LC_SVC_OLD_PSW
740 je 0f
741 chi %r12,__LC_PGM_OLD_PSW
742 je 0f
743 la %r1,__LC_SAVE_AREA+16
7440: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
745 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
746 la %r2,SP_PTREGS(%r15) # load pt_regs
747 jg kernel_stack_overflow
748#endif
749
750cleanup_table_system_call:
751 .quad system_call, sysc_do_svc
752cleanup_table_sysc_return:
753 .quad sysc_return, sysc_leave
754cleanup_table_sysc_leave:
755 .quad sysc_leave, sysc_work_loop
756cleanup_table_sysc_work_loop:
757 .quad sysc_work_loop, sysc_reschedule
758
759cleanup_critical:
760 clc 8(8,%r12),BASED(cleanup_table_system_call)
761 jl 0f
762 clc 8(8,%r12),BASED(cleanup_table_system_call+8)
763 jl cleanup_system_call
7640:
765 clc 8(8,%r12),BASED(cleanup_table_sysc_return)
766 jl 0f
767 clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
768 jl cleanup_sysc_return
7690:
770 clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
771 jl 0f
772 clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
773 jl cleanup_sysc_leave
7740:
775 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
776 jl 0f
777 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
778 jl cleanup_sysc_leave
7790:
780 br %r14
781
782cleanup_system_call:
783 mvc __LC_RETURN_PSW(16),0(%r12)
784#ifdef CONFIG_VIRT_CPU_ACCOUNTING
785 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
786 jh 0f
787 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
7880: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
789 jhe cleanup_vtime
790#endif
791 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
792 jh 0f
793 mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
7940: stg %r13,__LC_SAVE_AREA+40
795 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
796 stg %r15,__LC_SAVE_AREA+56
797 llgh %r7,__LC_SVC_INT_CODE
798#ifdef CONFIG_VIRT_CPU_ACCOUNTING
799cleanup_vtime:
800 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
801 jhe cleanup_stime
802 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
803 jz cleanup_novtime
804 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
805cleanup_stime:
806 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
807 jh cleanup_update
808 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
809cleanup_update:
810 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
811cleanup_novtime:
812#endif
813 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
814 la %r12,__LC_RETURN_PSW
815 br %r14
816cleanup_system_call_insn:
817 .quad sysc_saveall
818#ifdef CONFIG_VIRT_CPU_ACCOUNTING
819 .quad system_call
820 .quad sysc_vtime
821 .quad sysc_stime
822 .quad sysc_update
823#endif
824
825cleanup_sysc_return:
826 mvc __LC_RETURN_PSW(8),0(%r12)
827 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
828 la %r12,__LC_RETURN_PSW
829 br %r14
830
831cleanup_sysc_leave:
832 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
833 je 0f
834#ifdef CONFIG_VIRT_CPU_ACCOUNTING
835 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
836 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
837 je 0f
838#endif
839 mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
840 mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
841 lmg %r0,%r11,SP_R0(%r15)
842 lg %r15,SP_R15(%r15)
8430: la %r12,__LC_RETURN_PSW
844 br %r14
845cleanup_sysc_leave_insn:
846#ifdef CONFIG_VIRT_CPU_ACCOUNTING
847 .quad sysc_leave + 16
848#endif
849 .quad sysc_leave + 12
850
851/*
852 * Integer constants
853 */
854 .align 4
855.Lconst:
856.Lc_pactive: .long PREEMPT_ACTIVE
857.Lnr_syscalls: .long NR_syscalls
858.L0x0130: .short 0x130
859.L0x0140: .short 0x140
860.L0x0150: .short 0x150
861.L0x0160: .short 0x160
862.L0x0170: .short 0x170
863.Lcritical_start:
864 .quad __critical_start
865.Lcritical_end:
866 .quad __critical_end
867
868#define SYSCALL(esa,esame,emu) .long esame
869 .globl sys_call_table
870sys_call_table:
871#include "syscalls.S"
872#undef SYSCALL
873
874#ifdef CONFIG_S390_SUPPORT
875
876#define SYSCALL(esa,esame,emu) .long emu
877 .globl sys_call_table_emu
878sys_call_table_emu:
879#include "syscalls.S"
880#undef SYSCALL
881#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
new file mode 100644
index 000000000000..b804c55bd919
--- /dev/null
+++ b/arch/s390/kernel/head.S
@@ -0,0 +1,772 @@
1/*
2 * arch/s390/kernel/head.S
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Rob van der Heij (rvdhei@iae.nl)
9 *
10 * There are 5 different IPL methods
11 * 1) load the image directly into ram at address 0 and do a PSW restart
12 * 2) linload will load the image from address 0x10000 to memory 0x10000
13 * and start the code through LPSW 0x0008000080010000 (VM only, deprecated)
14 * 3) generate the tape ipl header, store the generated image on a tape
15 * and ipl from it
16 * In case of an SL tape you need to IPL 5 times to get past VOL1 etc.
17 * 4) generate the vm reader ipl header, move the generated image to the
18 * VM reader (use option NOH!) and do an ipl from the reader (VM only)
19 * 5) direct call of start by the SALIPL loader
20 * We use the cpuid to distinguish between VM and native ipl
21 * params for kernel are pushed to 0x10400 (see setup.h)
22
23 * Changes:
24 * Oct 25 2000 <rvdheij@iae.nl>
25 * added code to skip HDR and EOF to allow SL tape IPL (5 retries)
26 * changed first CCW from rewind to backspace block
27
28 */
29
30#include <linux/config.h>
31#include <asm/setup.h>
32#include <asm/lowcore.h>
33#include <asm/offsets.h>
34#include <asm/thread_info.h>
35#include <asm/page.h>
36
37#ifndef CONFIG_IPL
38 .org 0
39 .long 0x00080000,0x80000000+startup # Just a restart PSW
40#else
41#ifdef CONFIG_IPL_TAPE
42#define IPL_BS 1024
43 .org 0
44 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
45 .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
46 .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
47 .long 0x00000000,0x00000000 # external old psw
48 .long 0x00000000,0x00000000 # svc old psw
49 .long 0x00000000,0x00000000 # program check old psw
50 .long 0x00000000,0x00000000 # machine check old psw
51 .long 0x00000000,0x00000000 # io old psw
52 .long 0x00000000,0x00000000
53 .long 0x00000000,0x00000000
54 .long 0x00000000,0x00000000
55 .long 0x000a0000,0x00000058 # external new psw
56 .long 0x000a0000,0x00000060 # svc new psw
57 .long 0x000a0000,0x00000068 # program check new psw
58 .long 0x000a0000,0x00000070 # machine check new psw
59 .long 0x00080000,0x80000000+.Lioint # io new psw
60
61 .org 0x100
62#
63# subroutine for loading from tape
64# Parameters:
65# R1 = device number
66# R2 = load address
67.Lloader:
68 st %r14,.Lldret
69 la %r3,.Lorbread # r3 = address of orb
70 la %r5,.Lirb # r5 = address of irb
71 st %r2,.Lccwread+4 # initialize CCW data addresses
72 lctl %c6,%c6,.Lcr6
73 slr %r2,%r2
74.Lldlp:
75 la %r6,3 # 3 retries
76.Lssch:
77 ssch 0(%r3) # load chunk of IPL_BS bytes
78 bnz .Llderr
79.Lw4end:
80 bas %r14,.Lwait4io
81 tm 8(%r5),0x82 # do we have a problem ?
82 bnz .Lrecov
83 slr %r7,%r7
84 icm %r7,3,10(%r5) # get residual count
85 lcr %r7,%r7
86 la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
87 ar %r2,%r7 # add to total size
88 tm 8(%r5),0x01 # found a tape mark ?
89 bnz .Ldone
90 l %r0,.Lccwread+4 # update CCW data addresses
91 ar %r0,%r7
92 st %r0,.Lccwread+4
93 b .Lldlp
94.Ldone:
95 l %r14,.Lldret
96 br %r14 # r2 contains the total size
97.Lrecov:
98 bas %r14,.Lsense # do the sensing
99 bct %r6,.Lssch # dec. retry count & branch
100 b .Llderr
101#
102# Sense subroutine
103#
104.Lsense:
105 st %r14,.Lsnsret
106 la %r7,.Lorbsense
107 ssch 0(%r7) # start sense command
108 bnz .Llderr
109 bas %r14,.Lwait4io
110 l %r14,.Lsnsret
111 tm 8(%r5),0x82 # do we have a problem ?
112 bnz .Llderr
113 br %r14
114#
115# Wait for interrupt subroutine
116#
117.Lwait4io:
118 lpsw .Lwaitpsw
119.Lioint:
120 c %r1,0xb8 # compare subchannel number
121 bne .Lwait4io
122 tsch 0(%r5)
123 slr %r0,%r0
124 tm 8(%r5),0x82 # do we have a problem ?
125 bnz .Lwtexit
126 tm 8(%r5),0x04 # got device end ?
127 bz .Lwait4io
128.Lwtexit:
129 br %r14
130.Llderr:
131 lpsw .Lcrash
132
133 .align 8
134.Lorbread:
135 .long 0x00000000,0x0080ff00,.Lccwread
136 .align 8
137.Lorbsense:
138 .long 0x00000000,0x0080ff00,.Lccwsense
139 .align 8
140.Lccwread:
141 .long 0x02200000+IPL_BS,0x00000000
142.Lccwsense:
143 .long 0x04200001,0x00000000
144.Lwaitpsw:
145 .long 0x020a0000,0x80000000+.Lioint
146
147.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
148.Lcr6: .long 0xff000000
149 .align 8
150.Lcrash:.long 0x000a0000,0x00000000
151.Lldret:.long 0
152.Lsnsret: .long 0
153#endif /* CONFIG_IPL_TAPE */
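
The tape loader above accumulates the total image size from the residual count: each channel program reads up to IPL_BS bytes, the IRB reports how many bytes were not transferred, and IPL_BS minus that residual is added to the running total until a tape mark is seen. A standalone C sketch of that accounting (the status fields are modeled here, not read from real channel hardware):

    #include <stdio.h>

    #define IPL_BS 1024

    /* modeled I/O result: residual byte count and a tape-mark flag */
    struct io_result { int residual; int tape_mark; };

    /* hypothetical stand-in for one channel-program execution */
    static struct io_result read_chunk(int i)
    {
            /* pretend the third read hits a tape mark after 100 bytes */
            if (i == 2)
                    return (struct io_result){ IPL_BS - 100, 1 };
            return (struct io_result){ 0, 0 };
    }

    int main(void)
    {
            int total = 0;
            for (int i = 0; ; i++) {
                    struct io_result r = read_chunk(i);
                    total += IPL_BS - r.residual; /* IPL_BS-residual = #bytes read */
                    if (r.tape_mark)
                            break;
            }
            printf("loaded %d bytes\n", total); /* 2148 */
            return 0;
    }
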
154
155#ifdef CONFIG_IPL_VM
156#define IPL_BS 0x730
157 .org 0
158 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
159 .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
160 .long 0x02000068,0x60000050 # (a PSW and two CCWs).
161 .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
162 .long 0x020000f0,0x60000050 # The next 160 byte are loaded
163 .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
164 .long 0x02000190,0x60000050 # They form the continuation
165 .long 0x020001e0,0x60000050 # of the CCW program started
166 .long 0x02000230,0x60000050 # by ipl and load the range
167 .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
168 .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
169 .long 0x02000320,0x60000050 # in memory. At the end of
170 .long 0x02000370,0x60000050 # the channel program the PSW
171 .long 0x020003c0,0x60000050 # at location 0 is loaded.
172 .long 0x02000410,0x60000050 # Initial processing starts
173 .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
174 .long 0x020004b0,0x60000050
175 .long 0x02000500,0x60000050
176 .long 0x02000550,0x60000050
177 .long 0x020005a0,0x60000050
178 .long 0x020005f0,0x60000050
179 .long 0x02000640,0x60000050
180 .long 0x02000690,0x60000050
181 .long 0x020006e0,0x20000050
182
183 .org 0xf0
184#
185# subroutine for loading cards from the reader
186#
187.Lloader:
188 la %r3,.Lorb # r3 = address of orb
189 la %r5,.Lirb # r5 = address of irb
190 la %r6,.Lccws
191 la %r7,20
192.Linit:
193 st %r2,4(%r6) # initialize CCW data addresses
194 la %r2,0x50(%r2)
195 la %r6,8(%r6)
196 bct 7,.Linit
197
198 lctl %c6,%c6,.Lcr6 # set IO subclass mask
199 slr %r2,%r2
200.Lldlp:
201 ssch 0(%r3) # load chunk of 1600 bytes
202 bnz .Llderr
203.Lwait4irq:
204 mvc __LC_IO_NEW_PSW(8),.Lnewpsw # set up IO interrupt psw
205 lpsw .Lwaitpsw
206.Lioint:
207 c %r1,0xb8 # compare subchannel number
208 bne .Lwait4irq
209 tsch 0(%r5)
210
211 slr %r0,%r0
212 ic %r0,8(%r5) # get device status
213 chi %r0,8 # channel end ?
214 be .Lcont
215 chi %r0,12 # channel end + device end ?
216 be .Lcont
217
218 l %r0,4(%r5)
219 s %r0,8(%r3) # r0/8 = number of ccws executed
220 mhi %r0,10 # *10 = number of bytes in ccws
221 lh %r3,10(%r5) # get residual count
222 sr %r0,%r3 # #ccws*80-residual=#bytes read
223 ar %r2,%r0
224
225 br %r14 # r2 contains the total size
226
227.Lcont:
228 ahi %r2,0x640 # add 0x640 to total size
229 la %r6,.Lccws
230 la %r7,20
231.Lincr:
232 l %r0,4(%r6) # update CCW data addresses
233 ahi %r0,0x640
234 st %r0,4(%r6)
235 ahi %r6,8
236 bct 7,.Lincr
237
238 b .Lldlp
239.Llderr:
240 lpsw .Lcrash
241
242 .align 8
243.Lorb: .long 0x00000000,0x0080ff00,.Lccws
244.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
245.Lcr6: .long 0xff000000
246.Lloadp:.long 0,0
247 .align 8
248.Lcrash:.long 0x000a0000,0x00000000
249.Lnewpsw:
250 .long 0x00080000,0x80000000+.Lioint
251.Lwaitpsw:
252 .long 0x020a0000,0x80000000+.Lioint
253
254 .align 8
255.Lccws: .rept 19
256 .long 0x02600050,0x00000000
257 .endr
258 .long 0x02200050,0x00000000
259#endif /* CONFIG_IPL_VM */
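
The card-reader loader derives its byte count indirectly: the difference between the CCW address reported in the IRB and the start of the channel program gives 8 bytes per executed CCW, multiplying by 10 turns that into 80 bytes per card (one CCW per 80-byte card), and the residual of the last CCW is subtracted, which is what "#ccws*80-residual=#bytes read" describes. A C sketch of the arithmetic, with all values made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long ccw_start = 0x1000; /* start of channel program  */
            unsigned long ccw_addr  = 0x1018; /* reported next-CCW address */
            int residual = 30;                /* bytes not read by last CCW */

            /* (ccw_addr - ccw_start) is 8 bytes per executed CCW;
             * multiplying by 10 makes it 80 bytes per card, i.e. #ccws*80 */
            long bytes = (ccw_addr - ccw_start) * 10 - residual;
            printf("%ld bytes read\n", bytes); /* 3 CCWs * 80 - 30 = 210 */
            return 0;
    }
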
260
261iplstart:
262 lh %r1,0xb8 # test if subchannel number
263 bct %r1,.Lnoload # is valid
264 l %r1,0xb8 # load ipl subchannel number
265 la %r2,IPL_BS # load start address
266 bas %r14,.Lloader # load rest of ipl image
267 l %r12,.Lparm # pointer to parameter area
268 st %r1,IPL_DEVICE-PARMAREA(%r12) # store ipl device number
269
270#
271# load parameter file from ipl device
272#
273.Lagain1:
274 l %r2,INITRD_START-PARMAREA(%r12) # use ramdisk location as temp
275 bas %r14,.Lloader # load parameter file
276 ltr %r2,%r2 # got anything ?
277 bz .Lnopf
278 chi %r2,895
279 bnh .Lnotrunc
280 la %r2,895
281.Lnotrunc:
282 l %r4,INITRD_START-PARMAREA(%r12)
283 clc 0(3,%r4),.L_hdr # if it is HDRx
284 bz .Lagain1 # skip dataset header
285 clc 0(3,%r4),.L_eof # if it is EOFx
286 bz .Lagain1 # skip dataset trailer
287 la %r5,0(%r4,%r2)
288 lr %r3,%r2
289.Lidebc:
290 tm 0(%r5),0x80 # high order bit set ?
291 bo .Ldocv # yes -> convert from EBCDIC
292 ahi %r5,-1
293 bct %r3,.Lidebc
294 b .Lnocv
295.Ldocv:
296 l %r3,.Lcvtab
297 tr 0(256,%r4),0(%r3) # convert parameters to ascii
298 tr 256(256,%r4),0(%r3)
299 tr 512(256,%r4),0(%r3)
300 tr 768(122,%r4),0(%r3)
301.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
302 mvc 0(256,%r3),0(%r4)
303 mvc 256(256,%r3),256(%r4)
304 mvc 512(256,%r3),512(%r4)
305 mvc 768(122,%r3),768(%r4)
306 slr %r0,%r0
307 b .Lcntlp
308.Ldelspc:
309 ic %r0,0(%r2,%r3)
310 chi %r0,0x20 # is it a space ?
311 be .Lcntlp
312 ahi %r2,1
313 b .Leolp
314.Lcntlp:
315 brct %r2,.Ldelspc
316.Leolp:
317 slr %r0,%r0
318 stc %r0,0(%r2,%r3) # terminate buffer
319.Lnopf:
320
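
The parameter-file code above decides whether a conversion is needed by scanning from the end of the buffer for any byte with the high-order bit set: EBCDIC letters and digits are 0x81 and above, while ASCII text stays below 0x80, so a hit means the buffer is EBCDIC and gets run through the _ebcasc translate table before the trailing blanks are trimmed and the command line is NUL-terminated. A C sketch of the same scan-and-translate flow (the tiny translate table below is illustrative, not the real 256-byte _ebcasc):

    #include <stdio.h>

    int main(void)
    {
            /* "RO  " in EBCDIC: 0xD9 0xD6 plus two blanks (0x40) */
            unsigned char buf[5] = { 0xD9, 0xD6, 0x40, 0x40 };
            int len = 4;

            /* EBCDIC detection: any byte with the high-order bit set? */
            int is_ebcdic = 0;
            for (int i = 0; i < len; i++)
                    if (buf[i] & 0x80)
                            is_ebcdic = 1;

            if (is_ebcdic) {
                    /* stand-in for tr with the _ebcasc table */
                    unsigned char ebcasc[256] = { 0 };
                    ebcasc[0xD9] = 'R'; ebcasc[0xD6] = 'O'; ebcasc[0x40] = ' ';
                    for (int i = 0; i < len; i++)
                            buf[i] = ebcasc[buf[i]];
            }

            /* trim trailing blanks and terminate, like .Ldelspc/.Leolp */
            while (len > 0 && buf[len - 1] == ' ')
                    len--;
            buf[len] = 0;
            printf("command line: \"%s\"\n", (char *) buf); /* "RO" */
            return 0;
    }
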
321#
322# load ramdisk from ipl device
323#
324.Lagain2:
325 l %r2,INITRD_START-PARMAREA(%r12) # load adr. of ramdisk
326 bas %r14,.Lloader # load ramdisk
327 st %r2,INITRD_SIZE-PARMAREA(%r12) # store size of ramdisk
328 ltr %r2,%r2
329 bnz .Lrdcont
330 st %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found, null it
331.Lrdcont:
332 l %r2,INITRD_START-PARMAREA(%r12)
333
334 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
335 bz .Lagain2
336 clc 0(3,%r2),.L_eof
337 bz .Lagain2
338
339#ifdef CONFIG_IPL_VM
340#
341# reset files in VM reader
342#
343 stidp __LC_CPUID # store cpuid
344 tm __LC_CPUID,0xff # running VM ?
345 bno .Lnoreset
346 la %r2,.Lreset
347 lhi %r3,26
348 .long 0x83230008
349.Lnoreset:
350#endif
351
352#
353# everything loaded, go for it
354#
355.Lnoload:
356 l %r1,.Lstartup
357 br %r1
358
359.Lparm: .long PARMAREA
360.Lstartup: .long startup
361.Lcvtab:.long _ebcasc # ebcdic to ascii table
362.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
363 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
364 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
365.L_eof: .long 0xc5d6c600 /* C'EOF' */
366.L_hdr: .long 0xc8c4d900 /* C'HDR' */
367
368#endif /* CONFIG_IPL */
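
When running under VM, the loader clears the spool files it has consumed by handing a CP command to diagnose 0x08, the CP command interface: %r2 points at the command text, %r3 holds its length (26), and 0x83230008 is the raw diag 2,3,8 opcode. The command itself is stored at .Lreset as EBCDIC bytes. A small host-side C sketch that decodes those bytes back to readable text (the partial EBCDIC mapping below covers only uppercase letters and blank, enough for this string):

    #include <stdio.h>

    int main(void)
    {
            /* the .Lreset byte string from above */
            unsigned char cmd[] = {
                    0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40,
                    0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6,
                    0xc8,0xd6,0xd3,0xc4
            };

            for (int i = 0; i < (int) sizeof(cmd); i++) {
                    unsigned char c = cmd[i];
                    if (c == 0x40)
                            putchar(' ');
                    else if (c >= 0xc1 && c <= 0xc9)
                            putchar('A' + c - 0xc1);   /* A-I */
                    else if (c >= 0xd1 && c <= 0xd9)
                            putchar('J' + c - 0xd1);   /* J-R */
                    else if (c >= 0xe2 && c <= 0xe9)
                            putchar('S' + c - 0xe2);   /* S-Z */
            }
            putchar('\n'); /* prints: CHANGE RDR ALL KEEP NOHOLD */
            return 0;
    }
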
369
370#
371# SALIPL loader support. Based on a patch by Rob van der Heij.
372# This entry point is called directly from the SALIPL loader and
373# doesn't need a builtin ipl record.
374#
375 .org 0x800
376 .globl start
377start:
378 stm %r0,%r15,0x07b0 # store registers
379 basr %r12,%r0
380.base:
381 l %r11,.parm
382 l %r8,.cmd # pointer to command buffer
383
384 ltr %r9,%r9 # do we have SALIPL parameters?
385 bp .sk8x8
386
387 mvc 0(64,%r8),0x00b0 # copy saved registers
388 xc 64(240-64,%r8),0(%r8) # remainder of buffer
389 tr 0(64,%r8),.lowcase
390 b .gotr
391.sk8x8:
392 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
393.gotr:
394 l %r10,.tbl # EBCDIC to ASCII table
395 tr 0(240,%r8),0(%r10)
396 stidp __LC_CPUID # Are we running on VM maybe
397 cli __LC_CPUID,0xff
398 bnz .test
399 .long 0x83300060 # diag 3,0,x'0060' - storage size
400 b .done
401.test:
402 mvc 0x68(8),.pgmnw # set up pgm check handler
403 l %r2,.fourmeg
404 lr %r3,%r2
405 bctr %r3,%r0 # 4M-1
406.loop: iske %r0,%r3
407 ar %r3,%r2
408.pgmx:
409 sr %r3,%r2
410 la %r3,1(%r3)
411.done:
412 l %r1,.memsize
413 st %r3,0(%r1)
414 slr %r0,%r0
415 st %r0,INITRD_SIZE-PARMAREA(%r11)
416 st %r0,INITRD_START-PARMAREA(%r11)
417 j startup # continue with startup
418.tbl: .long _ebcasc # translate table
419.cmd: .long COMMAND_LINE # address of command line buffer
420.parm: .long PARMAREA
421.memsize: .long memory_size
422.fourmeg: .long 0x00400000 # 4M
423.pgmnw: .long 0x00080000,.pgmx
424.lowcase:
425 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
426 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
427 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
428 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
429 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
430 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
431 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
432 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
433 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
434 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
435 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
436 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
437 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
438 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
439 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
440 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
441
442 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
443 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
444 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
445 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
446 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
447 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
448 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
449 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
450 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
451 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
452 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
453 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
454 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
455 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
456 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
457 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
458
459#
460# startup-code at 0x10000, running in real mode
461# this is called either by the ipl loader or directly by PSW restart
462# or linload or SALIPL
463#
464 .org 0x10000
465startup:basr %r13,0 # get base
466.LPG1: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
467 la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
468 # move IPL device to lowcore
469 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
470
471#
472# clear bss memory
473#
474 l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
475 l %r3,.Lbss_end-.LPG1(%r13) # end of bss
476 sr %r3,%r2 # length of bss
477 sr %r4,%r4 #
478 sr %r5,%r5 # set src,length and pad to zero
479 sr %r0,%r0 #
480 mvcle %r2,%r4,0 # clear mem
481 jo .-4 # branch back, if not finished
482
483 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
484.Lservicecall:
485 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
486
487 stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
488 la %r1,0x200 # set bit 22
489 o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
490 st %r1,.Lcr-.LPG1(%r13)
491 lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
492
493 mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
494 la %r1, .Lsclph-.LPG1(%r13)
495 a %r1,__LC_EXT_NEW_PSW+4 # set handler
496 st %r1,__LC_EXT_NEW_PSW+4
497
498 la %r4,_pstart-.LPG1(%r13) # %r4 is our index for sccb stuff
499 la %r1, .Lsccb-PARMAREA(%r4) # our sccb
500 .insn rre,0xb2200000,%r2,%r1 # service call
501 ipm %r1
502 srl %r1,28 # get cc code
503 xr %r3, %r3
504 chi %r1,3
505 be .Lfchunk-.LPG1(%r13) # leave
506 chi %r1,2
507 be .Lservicecall-.LPG1(%r13)
508 lpsw .Lwaitsclp-.LPG1(%r13)
509.Lsclph:
510 lh %r1,.Lsccbr-PARMAREA(%r4)
511 chi %r1,0x10 # 0x0010 is the success code
512 je .Lprocsccb # let's process the sccb
513 chi %r1,0x1f0
514 bne .Lfchunk-.LPG1(%r13) # unhandled error code
515 c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
516 bne .Lfchunk-.LPG1(%r13) # if no, give up
517 l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
518 b .Lservicecall-.LPG1(%r13)
519.Lprocsccb:
520 lh %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
521 chi %r1,0x00
522 jne .Lscnd
523 l %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
524.Lscnd:
525 xr %r3,%r3 # same logic
526 ic %r3,.Lscpa1-PARMAREA(%r4)
527 chi %r3,0x00
528 jne .Lcompmem
529 l %r3,.Lscpa2-PARMAREA(%r4)
530.Lcompmem:
531 mr %r2,%r1 # mem in MB in register pair %r2/%r3
532 l %r1,.Lonemb-.LPG1(%r13)
533 mr %r2,%r1 # mem size in bytes in %r3
534 b .Lfchunk-.LPG1(%r13)
535
536.Lpmask:
537 .byte 0
538.align 8
539.Lpcext:.long 0x00080000,0x80000000
540.Lcr:
541 .long 0x00 # place holder for cr0
542.Lwaitsclp:
543 .long 0x020A0000
544 .long .Lsclph
545.Lrcp:
546 .int 0x00120001 # Read SCP forced code
547.Lrcp2:
548 .int 0x00020001 # Read SCP code
549.Lonemb:
550 .int 0x100000
551.Lfchunk:
552
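
The service-call block above asks SCLP for the machine configuration (Read SCP Info forced first, plain Read SCP as fallback on error 0x1f0) and computes installed memory as: number of storage increments (scpincr1, or scpincr2 when the 16-bit field is zero) times the increment size in MB (scpa1, or scpa2 when the byte is zero) times 1MB (.Lonemb). A C sketch of that arithmetic, with the SCCB field values modeled rather than returned by a real service call:

    #include <stdio.h>

    int main(void)
    {
            /* modeled SCCB fields (real ones come from the service call) */
            unsigned short scpincr1 = 0;   /* 16-bit increment count  */
            unsigned long  scpincr2 = 512; /* used when scpincr1 == 0 */
            unsigned char  scpa1    = 0;   /* increment size in MB    */
            unsigned int   scpa2    = 4;   /* used when scpa1 == 0    */

            unsigned long incr    = scpincr1 ? scpincr1 : scpincr2;
            unsigned long size_mb = scpa1 ? scpa1 : scpa2;
            unsigned long bytes   = incr * size_mb * 0x100000UL; /* .Lonemb */

            printf("memory: %lu MB\n", bytes >> 20); /* 2048 MB */
            return 0;
    }
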
553#
554# find memory chunks.
555#
556 lr %r9,%r3 # end of mem
557 mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
558 la %r1,1 # test in increments of 128KB
559 sll %r1,17
560 l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
561 slr %r4,%r4 # set start of chunk to zero
562 slr %r5,%r5 # set end of chunk to zero
563 slr %r6,%r6 # set access code to zero
564 la %r10, MEMORY_CHUNKS # number of chunks
565.Lloop:
566 tprot 0(%r5),0 # test protection of first byte
567 ipm %r7
568 srl %r7,28
569 clr %r6,%r7 # compare cc with last access code
570 be .Lsame-.LPG1(%r13)
571 b .Lchkmem-.LPG1(%r13)
572.Lsame:
573 ar %r5,%r1 # add 128KB to end of chunk
574 bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
575.Lchkmem: # > 2GB or tprot got a program check
576 clr %r4,%r5 # chunk size > 0?
577 be .Lchkloop-.LPG1(%r13)
578 st %r4,0(%r3) # store start address of chunk
579 lr %r0,%r5
580 slr %r0,%r4
581 st %r0,4(%r3) # store size of chunk
582 st %r6,8(%r3) # store type of chunk
583 la %r3,12(%r3)
584 l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size
585 st %r5,0(%r4) # store last end to memory size
586 ahi %r10,-1 # update chunk number
587.Lchkloop:
588 lr %r6,%r7 # set access code to last cc
589 # we got an exception or we're starting a new
590 # chunk, we must check if we should
591 # still try to find valid memory (if we detected
592 # the amount of available storage), and if we
593 # have chunks left
594 xr %r0,%r0
595 clr %r0,%r9 # did we detect memory?
596 je .Ldonemem # if not, leave
597 chi %r10,0 # do we have chunks left?
598 je .Ldonemem
599 alr %r5,%r1 # add 128KB to end of chunk
600 lr %r4,%r5 # potential new chunk
601 clr %r5,%r9 # should we go on?
602 jl .Lloop
603.Ldonemem:
604 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
605#
606# find out if we are running under VM
607#
608 stidp __LC_CPUID # store cpuid
609 tm __LC_CPUID,0xff # running under VM ?
610 bno .Lnovm-.LPG1(%r13)
611 oi 3(%r12),1 # set VM flag
612.Lnovm:
613 lh %r0,__LC_CPUID+4 # get cpu version
614 chi %r0,0x7490 # running on a P/390 ?
615 bne .Lnop390-.LPG1(%r13)
616 oi 3(%r12),4 # set P/390 flag
617.Lnop390:
618
619#
620# find out if we have an IEEE fpu
621#
622 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
623 efpc %r0,0 # test IEEE extract fpc instruction
624 oi 3(%r12),2 # set IEEE fpu flag
625.Lchkfpu:
626
627#
628# find out if we have the CSP instruction
629#
630 mvc __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
631 la %r0,0
632 lr %r1,%r0
633 la %r2,4
634 csp %r0,%r2 # Test CSP instruction
635 oi 3(%r12),8 # set CSP flag
636.Lchkcsp:
637
638#
639# find out if we have the MVPG instruction
640#
641 mvc __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
642 sr %r0,%r0
643 la %r1,0
644 la %r2,0
645 mvpg %r1,%r2 # Test MVPG instruction
646 oi 3(%r12),16 # set MVPG flag
647.Lchkmvpg:
648
649#
650# find out if we have the IDTE instruction
651#
652 mvc __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
653 .long 0xb2b10000 # store facility list
654 tm 0xc8,0x08 # check bit for clearing-by-ASCE
655 bno .Lchkidte-.LPG1(%r13)
656 lhi %r1,2094
657 lhi %r2,0
658 .long 0xb98e2001
659 oi 3(%r12),0x80 # set IDTE flag
660.Lchkidte:
661
662 lpsw .Lentry-.LPG1(%r13) # jump to _stext in primary-space,
663 # virtual and never return ...
664 .align 8
665.Lentry:.long 0x00080000,0x80000000 + _stext
666.Lctl: .long 0x04b50002 # cr0: various things
667 .long 0 # cr1: primary space segment table
668 .long .Lduct # cr2: dispatchable unit control table
669 .long 0 # cr3: instruction authorization
670 .long 0 # cr4: instruction authorization
671 .long 0xffffffff # cr5: primary-aste origin
672 .long 0 # cr6: I/O interrupts
673 .long 0 # cr7: secondary space segment table
674 .long 0 # cr8: access registers translation
675 .long 0 # cr9: tracing off
676 .long 0 # cr10: tracing off
677 .long 0 # cr11: tracing off
678 .long 0 # cr12: tracing off
679 .long 0 # cr13: home space segment table
680 .long 0xc0000000 # cr14: machine check handling off
681 .long 0 # cr15: linkage stack operations
682.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
683.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
684.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
685.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
686.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
687.Lmemsize:.long memory_size
688.Lmchunk:.long memory_chunk
689.Lmflags:.long machine_flags
690.Lbss_bgn: .long __bss_start
691.Lbss_end: .long _end
692
693 .org PARMAREA-64
694.Lduct: .long 0,0,0,0,0,0,0,0
695 .long 0,0,0,0,0,0,0,0
696
697#
698# params at 10400 (setup.h)
699#
700 .org PARMAREA
701 .global _pstart
702_pstart:
703 .long 0,0 # IPL_DEVICE
704 .long 0,RAMDISK_ORIGIN # INITRD_START
705 .long 0,RAMDISK_SIZE # INITRD_SIZE
706
707 .org COMMAND_LINE
708 .byte "root=/dev/ram0 ro"
709 .byte 0
710 .org 0x11000
711.Lsccb:
712 .hword 0x1000 # length, one page
713 .byte 0x00,0x00,0x00
714 .byte 0x80 # variable response bit set
715.Lsccbr:
716 .hword 0x00 # response code
717.Lscpincr1:
718 .hword 0x00
719.Lscpa1:
720 .byte 0x00
721 .fill 89,1,0
722.Lscpa2:
723 .int 0x00
724.Lscpincr2:
725 .quad 0x00
726 .fill 3984,1,0
727 .org 0x12000
728 .global _pend
729_pend:
730
731#ifdef CONFIG_SHARED_KERNEL
732 .org 0x100000
733#endif
734
735#
736# startup-code, running in virtual mode
737#
738 .globl _stext
739_stext: basr %r13,0 # get base
740.LPG2:
741#
742# Setup stack
743#
744 l %r15,.Linittu-.LPG2(%r13)
745 mvc __LC_CURRENT(4),__TI_task(%r15)
746 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_thread_union + THREAD_SIZE
747 st %r15,__LC_KERNEL_STACK # set end of kernel stack
748 ahi %r15,-96
749 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
750
751# check control registers
752 stctl %c0,%c15,0(%r15)
753 oi 2(%r15),0x20 # enable sigp external interrupts
754 oi 0(%r15),0x10 # switch on low address protection
755 lctl %c0,%c15,0(%r15)
756
757#
758 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
759 l %r14,.Lstart-.LPG2(%r13)
760 basr %r14,%r14 # call start_kernel
761#
762# We returned from start_kernel ?!? PANIK
763#
764 basr %r13,0
765 lpsw .Ldw-.(%r13) # load disabled wait psw
766#
767 .align 8
768.Ldw: .long 0x000a0000,0x00000000
769.Linittu: .long init_thread_union
770.Lstart: .long start_kernel
771.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
772
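
The virtual-mode entry at _stext builds the initial kernel stack by taking init_thread_union, adding THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER)), and stepping back 96 bytes for the standard 31-bit stack frame, with the frame's backchain zeroed to terminate stack walks. A sketch of the address arithmetic, with PAGE_SHIFT, THREAD_ORDER and the base address chosen here for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define THREAD_ORDER 1  /* assumed: 8KB stacks, matching the 8192-byte
                             * alignment mentioned in init_task.c below */
    #define THREAD_SIZE  (1UL << (PAGE_SHIFT + THREAD_ORDER))
    #define STACK_FRAME  96 /* 31-bit register save area size */

    int main(void)
    {
            unsigned long thread_union = 0x200000; /* illustrative address */
            unsigned long stack_end = thread_union + THREAD_SIZE;
            unsigned long sp = stack_end - STACK_FRAME;

            printf("kernel stack end: %#lx\n", stack_end);
            printf("initial sp:       %#lx (backchain at sp+0 zeroed)\n", sp);
            return 0;
    }
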
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
new file mode 100644
index 000000000000..8366793bc371
--- /dev/null
+++ b/arch/s390/kernel/head64.S
@@ -0,0 +1,769 @@
1/*
2 * arch/s390/kernel/head64.S
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Rob van der Heij (rvdhei@iae.nl)
9 *
10 * There are 5 different IPL methods
11 * 1) load the image directly into ram at address 0 and do a PSW restart
12 * 2) linload will load the image from address 0x10000 to memory 0x10000
13 * and start the code through LPSW 0x0008000080010000 (VM only, deprecated)
14 * 3) generate the tape ipl header, store the generated image on a tape
15 * and ipl from it
16 * In case of SL tape you need to IPL 5 times to get past VOL1 etc
17 * 4) generate the vm reader ipl header, move the generated image to the
18 * VM reader (use option NOH!) and do an ipl from the reader (VM only)
19 * 5) direct call of start by the SALIPL loader
20 * We use the cpuid to distinguish between VM and native ipl
21 * params for kernel are pushed to 0x10400 (see setup.h)
22
23 Changes:
24 Oct 25 2000 <rvdheij@iae.nl>
25 added code to skip HDR and EOF to allow SL tape IPL (5 retries)
26 changed first CCW from rewind to backspace block
27
28 */
29
30#include <linux/config.h>
31#include <asm/setup.h>
32#include <asm/lowcore.h>
33#include <asm/offsets.h>
34#include <asm/thread_info.h>
35#include <asm/page.h>
36
37#ifndef CONFIG_IPL
38 .org 0
39 .long 0x00080000,0x80000000+startup # Just a restart PSW
40#else
41#ifdef CONFIG_IPL_TAPE
42#define IPL_BS 1024
43 .org 0
44 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
45 .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
46 .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
47 .long 0x00000000,0x00000000 # external old psw
48 .long 0x00000000,0x00000000 # svc old psw
49 .long 0x00000000,0x00000000 # program check old psw
50 .long 0x00000000,0x00000000 # machine check old psw
51 .long 0x00000000,0x00000000 # io old psw
52 .long 0x00000000,0x00000000
53 .long 0x00000000,0x00000000
54 .long 0x00000000,0x00000000
55 .long 0x000a0000,0x00000058 # external new psw
56 .long 0x000a0000,0x00000060 # svc new psw
57 .long 0x000a0000,0x00000068 # program check new psw
58 .long 0x000a0000,0x00000070 # machine check new psw
59 .long 0x00080000,0x80000000+.Lioint # io new psw
60
61 .org 0x100
62#
63# subroutine for loading from tape
64# Parameters:
65# R1 = device number
66# R2 = load address
67.Lloader:
68 st %r14,.Lldret
69 la %r3,.Lorbread # r3 = address of orb
70 la %r5,.Lirb # r5 = address of irb
71 st %r2,.Lccwread+4 # initialize CCW data addresses
72 lctl %c6,%c6,.Lcr6
73 slr %r2,%r2
74.Lldlp:
75 la %r6,3 # 3 retries
76.Lssch:
77 ssch 0(%r3) # load chunk of IPL_BS bytes
78 bnz .Llderr
79.Lw4end:
80 bas %r14,.Lwait4io
81 tm 8(%r5),0x82 # do we have a problem ?
82 bnz .Lrecov
83 slr %r7,%r7
84 icm %r7,3,10(%r5) # get residual count
85 lcr %r7,%r7
86 la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
87 ar %r2,%r7 # add to total size
88 tm 8(%r5),0x01 # found a tape mark ?
89 bnz .Ldone
90 l %r0,.Lccwread+4 # update CCW data addresses
91 ar %r0,%r7
92 st %r0,.Lccwread+4
93 b .Lldlp
94.Ldone:
95 l %r14,.Lldret
96 br %r14 # r2 contains the total size
97.Lrecov:
98 bas %r14,.Lsense # do the sensing
99 bct %r6,.Lssch # dec. retry count & branch
100 b .Llderr
101#
102# Sense subroutine
103#
104.Lsense:
105 st %r14,.Lsnsret
106 la %r7,.Lorbsense
107 ssch 0(%r7) # start sense command
108 bnz .Llderr
109 bas %r14,.Lwait4io
110 l %r14,.Lsnsret
111 tm 8(%r5),0x82 # do we have a problem ?
112 bnz .Llderr
113 br %r14
114#
115# Wait for interrupt subroutine
116#
117.Lwait4io:
118 lpsw .Lwaitpsw
119.Lioint:
120 c %r1,0xb8 # compare subchannel number
121 bne .Lwait4io
122 tsch 0(%r5)
123 slr %r0,%r0
124 tm 8(%r5),0x82 # do we have a problem ?
125 bnz .Lwtexit
126 tm 8(%r5),0x04 # got device end ?
127 bz .Lwait4io
128.Lwtexit:
129 br %r14
130.Llderr:
131 lpsw .Lcrash
132
133 .align 8
134.Lorbread:
135 .long 0x00000000,0x0080ff00,.Lccwread
136 .align 8
137.Lorbsense:
138 .long 0x00000000,0x0080ff00,.Lccwsense
139 .align 8
140.Lccwread:
141 .long 0x02200000+IPL_BS,0x00000000
142.Lccwsense:
143 .long 0x04200001,0x00000000
144.Lwaitpsw:
145 .long 0x020a0000,0x80000000+.Lioint
146
147.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
148.Lcr6: .long 0xff000000
149 .align 8
150.Lcrash:.long 0x000a0000,0x00000000
151.Lldret:.long 0
152.Lsnsret: .long 0
153#endif /* CONFIG_IPL_TAPE */
154
155#ifdef CONFIG_IPL_VM
156#define IPL_BS 0x730
157 .org 0
158 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
159 .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
160 .long 0x02000068,0x60000050 # (a PSW and two CCWs).
161 .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
162 .long 0x020000f0,0x60000050 # The next 160 byte are loaded
163 .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
164 .long 0x02000190,0x60000050 # They form the continuation
165 .long 0x020001e0,0x60000050 # of the CCW program started
166 .long 0x02000230,0x60000050 # by ipl and load the range
167 .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
168 .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
169 .long 0x02000320,0x60000050 # in memory. At the end of
170 .long 0x02000370,0x60000050 # the channel program the PSW
171 .long 0x020003c0,0x60000050 # at location 0 is loaded.
172 .long 0x02000410,0x60000050 # Initial processing starts
173 .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
174 .long 0x020004b0,0x60000050
175 .long 0x02000500,0x60000050
176 .long 0x02000550,0x60000050
177 .long 0x020005a0,0x60000050
178 .long 0x020005f0,0x60000050
179 .long 0x02000640,0x60000050
180 .long 0x02000690,0x60000050
181 .long 0x020006e0,0x20000050
182
183 .org 0xf0
184#
185# subroutine for loading cards from the reader
186#
187.Lloader:
188 la %r3,.Lorb # r3 = address of orb
189 la %r5,.Lirb # r5 = address of irb
190 la %r6,.Lccws
191 la %r7,20
192.Linit:
193 st %r2,4(%r6) # initialize CCW data addresses
194 la %r2,0x50(%r2)
195 la %r6,8(%r6)
196 bct 7,.Linit
197
198 lctl %c6,%c6,.Lcr6 # set IO subclass mask
199 slr %r2,%r2
200.Lldlp:
201 ssch 0(%r3) # load chunk of 1600 bytes
202 bnz .Llderr
203.Lwait4irq:
204 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
205 lpsw .Lwaitpsw
206.Lioint:
207 c %r1,0xb8 # compare subchannel number
208 bne .Lwait4irq
209 tsch 0(%r5)
210
211 slr %r0,%r0
212 ic %r0,8(%r5) # get device status
213 chi %r0,8 # channel end ?
214 be .Lcont
215 chi %r0,12 # channel end + device end ?
216 be .Lcont
217
218 l %r0,4(%r5)
219 s %r0,8(%r3) # r0/8 = number of ccws executed
220 mhi %r0,10 # *10 = number of bytes in ccws
221 lh %r3,10(%r5) # get residual count
222 sr %r0,%r3 # #ccws*80-residual=#bytes read
223 ar %r2,%r0
224
225 br %r14 # r2 contains the total size
226
227.Lcont:
228 ahi %r2,0x640 # add 0x640 to total size
229 la %r6,.Lccws
230 la %r7,20
231.Lincr:
232 l %r0,4(%r6) # update CCW data addresses
233 ahi %r0,0x640
234 st %r0,4(%r6)
235 ahi %r6,8
236 bct 7,.Lincr
237
238 b .Lldlp
239.Llderr:
240 lpsw .Lcrash
241
242 .align 8
243.Lorb: .long 0x00000000,0x0080ff00,.Lccws
244.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
245.Lcr6: .long 0xff000000
246.Lloadp:.long 0,0
247 .align 8
248.Lcrash:.long 0x000a0000,0x00000000
249.Lnewpsw:
250 .long 0x00080000,0x80000000+.Lioint
251.Lwaitpsw:
252 .long 0x020a0000,0x80000000+.Lioint
253
254 .align 8
255.Lccws: .rept 19
256 .long 0x02600050,0x00000000
257 .endr
258 .long 0x02200050,0x00000000
259#endif /* CONFIG_IPL_VM */
260
261iplstart:
262 lh %r1,0xb8 # test if subchannel number
263 bct %r1,.Lnoload # is valid
264 l %r1,0xb8 # load ipl subchannel number
265 la %r2,IPL_BS # load start address
266 bas %r14,.Lloader # load rest of ipl image
267 larl %r12,_pstart # pointer to parameter area
268 st %r1,IPL_DEVICE+4-PARMAREA(%r12) # store ipl device number
269
270#
271# load parameter file from ipl device
272#
273.Lagain1:
274 l %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp
275 bas %r14,.Lloader # load parameter file
276 ltr %r2,%r2 # got anything ?
277 bz .Lnopf
278 chi %r2,895
279 bnh .Lnotrunc
280 la %r2,895
281.Lnotrunc:
282 l %r4,INITRD_START+4-PARMAREA(%r12)
283 clc 0(3,%r4),.L_hdr # if it is HDRx
284 bz .Lagain1 # skip dataset header
285 clc 0(3,%r4),.L_eof # if it is EOFx
286 bz .Lagain1 # skip dataset trailer
287 la %r5,0(%r4,%r2)
288 lr %r3,%r2
289.Lidebc:
290 tm 0(%r5),0x80 # high order bit set ?
291 bo .Ldocv # yes -> convert from EBCDIC
292 ahi %r5,-1
293 bct %r3,.Lidebc
294 b .Lnocv
295.Ldocv:
296 l %r3,.Lcvtab
297 tr 0(256,%r4),0(%r3) # convert parameters to ascii
298 tr 256(256,%r4),0(%r3)
299 tr 512(256,%r4),0(%r3)
300 tr 768(122,%r4),0(%r3)
301.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
302 mvc 0(256,%r3),0(%r4)
303 mvc 256(256,%r3),256(%r4)
304 mvc 512(256,%r3),512(%r4)
305 mvc 768(122,%r3),768(%r4)
306 slr %r0,%r0
307 b .Lcntlp
308.Ldelspc:
309 ic %r0,0(%r2,%r3)
310 chi %r0,0x20 # is it a space ?
311 be .Lcntlp
312 ahi %r2,1
313 b .Leolp
314.Lcntlp:
315 brct %r2,.Ldelspc
316.Leolp:
317 slr %r0,%r0
318 stc %r0,0(%r2,%r3) # terminate buffer
319.Lnopf:
320
321#
322# load ramdisk from ipl device
323#
324.Lagain2:
325 l %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk
326 bas %r14,.Lloader # load ramdisk
327 st %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk
328 ltr %r2,%r2
329 bnz .Lrdcont
330 st %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it
331.Lrdcont:
332 l %r2,INITRD_START+4-PARMAREA(%r12)
333 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
334 bz .Lagain2
335 clc 0(3,%r2),.L_eof
336 bz .Lagain2
337
338#ifdef CONFIG_IPL_VM
339#
340# reset files in VM reader
341#
342 stidp __LC_CPUID # store cpuid
343 tm __LC_CPUID,0xff # running VM ?
344 bno .Lnoreset
345 la %r2,.Lreset
346 lhi %r3,26
347 .long 0x83230008
348.Lnoreset:
349#endif
350
351#
352# everything loaded, go for it
353#
354.Lnoload:
355 l %r1,.Lstartup
356 br %r1
357
358.Lstartup: .long startup
359.Lcvtab:.long _ebcasc # ebcdic to ascii table
360.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
361 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
362 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
363.L_eof: .long 0xc5d6c600 /* C'EOF' */
364.L_hdr: .long 0xc8c4d900 /* C'HDR' */
365#endif /* CONFIG_IPL */
366
367#
368# SALIPL loader support. Based on a patch by Rob van der Heij.
369# This entry point is called directly from the SALIPL loader and
370# doesn't need a builtin ipl record.
371#
372 .org 0x800
373 .globl start
374start:
375 stm %r0,%r15,0x07b0 # store registers
376 basr %r12,%r0
377.base:
378 l %r11,.parm
379 l %r8,.cmd # pointer to command buffer
380
381 ltr %r9,%r9 # do we have SALIPL parameters?
382 bp .sk8x8
383
384 mvc 0(64,%r8),0x00b0 # copy saved registers
385 xc 64(240-64,%r8),0(%r8) # remainder of buffer
386 tr 0(64,%r8),.lowcase
387 b .gotr
388.sk8x8:
389 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
390.gotr:
391 l %r10,.tbl # EBCDIC to ASCII table
392 tr 0(240,%r8),0(%r10)
393 stidp __LC_CPUID # Are we running on VM maybe
394 cli __LC_CPUID,0xff
395 bnz .test
396 .long 0x83300060 # diag 3,0,x'0060' - storage size
397 b .done
398.test:
399 mvc 0x68(8),.pgmnw # set up pgm check handler
400 l %r2,.fourmeg
401 lr %r3,%r2
402 bctr %r3,%r0 # 4M-1
403.loop: iske %r0,%r3
404 ar %r3,%r2
405.pgmx:
406 sr %r3,%r2
407 la %r3,1(%r3)
408.done:
409 l %r1,.memsize
410 st %r3,4(%r1)
411 slr %r0,%r0
412 st %r0,INITRD_SIZE+4-PARMAREA(%r11)
413 st %r0,INITRD_START+4-PARMAREA(%r11)
414 j startup # continue with startup
415.tbl: .long _ebcasc # translate table
416.cmd: .long COMMAND_LINE # address of command line buffer
417.parm: .long PARMAREA
418.fourmeg: .long 0x00400000 # 4M
419.pgmnw: .long 0x00080000,.pgmx
420.memsize: .long memory_size
421.lowcase:
422 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
423 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
424 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
425 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
426 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
427 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
428 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
429 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
430 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
431 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
432 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
433 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
434 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
435 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
436 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
437 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
438
439 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
440 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
441 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
442 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
443 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
444 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
445 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
446 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
447 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
448 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
449 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
450 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
451 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
452 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
453 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
454 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
455
456#
457# startup-code at 0x10000, running in real mode
458# this is called either by the ipl loader or directly by PSW restart
459# or linload or SALIPL
460#
461 .org 0x10000
462startup:basr %r13,0 # get base
463.LPG1: sll %r13,1 # remove high order bit
464 srl %r13,1
465 lhi %r1,1 # mode 1 = esame
466 slr %r0,%r0 # set cpuid to zero
467 sigp %r1,%r0,0x12 # switch to esame mode
468 sam64 # switch to 64 bit mode
469 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
470 larl %r12,_pstart # pointer to parameter area
471 # move IPL device to lowcore
472 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
473
474#
475# clear bss memory
476#
477 larl %r2,__bss_start # start of bss segment
478 larl %r3,_end # end of bss segment
479 sgr %r3,%r2 # length of bss
480 sgr %r4,%r4 #
481 sgr %r5,%r5 # set src,length and pad to zero
482 mvcle %r2,%r4,0 # clear mem
483 jo .-4 # branch back, if not finished
484
485 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
486.Lservicecall:
487 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
488
489 stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
490 la %r1,0x200 # set bit 22
491 og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
492 stg %r1,.Lcr-.LPG1(%r13)
493 lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
494
495 mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
496 larl %r1,.Lsclph
497 stg %r1,__LC_EXT_NEW_PSW+8 # set handler
498
499 larl %r4,_pstart # %r4 is our index for sccb stuff
500 la %r1,.Lsccb-PARMAREA(%r4) # our sccb
501 .insn rre,0xb2200000,%r2,%r1 # service call
502 ipm %r1
503 srl %r1,28 # get cc code
504 xr %r3,%r3
505 chi %r1,3
506 be .Lfchunk-.LPG1(%r13) # leave
507 chi %r1,2
508 be .Lservicecall-.LPG1(%r13)
509 lpsw .Lwaitsclp-.LPG1(%r13)
510.Lsclph:
511 lh %r1,.Lsccbr-PARMAREA(%r4)
512 chi %r1,0x10 # 0x0010 is the success code
513 je .Lprocsccb # let's process the sccb
514 chi %r1,0x1f0
515 bne .Lfchunk-.LPG1(%r13) # unhandled error code
516 c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
517 bne .Lfchunk-.LPG1(%r13) # if no, give up
518 l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
519 b .Lservicecall-.LPG1(%r13)
520.Lprocsccb:
521 lh %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
522 chi %r1,0x00
523 jne .Lscnd
524 lg %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
525.Lscnd:
526 xr %r3,%r3 # same logic
527 ic %r3,.Lscpa1-PARMAREA(%r4)
528 chi %r3,0x00
529 jne .Lcompmem
530 l %r3,.Lscpa2-PARMAREA(%r4)
531.Lcompmem:
532 mlgr %r2,%r1 # mem in MB in register pair %r2/%r3
533 l %r1,.Lonemb-.LPG1(%r13)
534 mlgr %r2,%r1 # mem size in bytes in %r3
535 b .Lfchunk-.LPG1(%r13)
536
537.Lpmask:
538 .byte 0
539 .align 8
540.Lcr:
541 .quad 0x00 # place holder for cr0
542.Lwaitsclp:
543 .long 0x020A0000
544 .quad .Lsclph
545.Lrcp:
546 .int 0x00120001 # Read SCP forced code
547.Lrcp2:
548 .int 0x00020001 # Read SCP code
549.Lonemb:
550 .int 0x100000
551
552.Lfchunk:
553 # set program check new psw mask
554 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
555
556#
557# find memory chunks.
558#
559 lgr %r9,%r3 # end of mem
560 larl %r1,.Lchkmem # set program check address
561 stg %r1,__LC_PGM_NEW_PSW+8
562 la %r1,1 # test in increments of 128KB
563 sllg %r1,%r1,17
564 larl %r3,memory_chunk
565 slgr %r4,%r4 # set start of chunk to zero
566 slgr %r5,%r5 # set end of chunk to zero
567 slr %r6,%r6 # set access code to zero
568 la %r10,MEMORY_CHUNKS # number of chunks
569.Lloop:
570 tprot 0(%r5),0 # test protection of first byte
571 ipm %r7
572 srl %r7,28
573 clr %r6,%r7 # compare cc with last access code
574 je .Lsame
575 j .Lchkmem
576.Lsame:
577 algr %r5,%r1 # add 128KB to end of chunk
578 # no need to check here,
579 brc 12,.Lloop # this is the same chunk
580.Lchkmem: # > 16EB or tprot got a program check
581 clgr %r4,%r5 # chunk size > 0?
582 je .Lchkloop
583 stg %r4,0(%r3) # store start address of chunk
584 lgr %r0,%r5
585 slgr %r0,%r4
586 stg %r0,8(%r3) # store size of chunk
587 st %r6,20(%r3) # store type of chunk
588 la %r3,24(%r3)
589 larl %r8,memory_size
590 stg %r5,0(%r8) # store memory size
591 ahi %r10,-1 # update chunk number
592.Lchkloop:
593 lr %r6,%r7 # set access code to last cc
594 # we got an exception or we're starting a new
595 # chunk, we must check if we should
596 # still try to find valid memory (if we detected
597 # the amount of available storage), and if we
598 # have chunks left
599 lghi %r4,1
600 sllg %r4,%r4,31
601 clgr %r5,%r4
602 je .Lhsaskip
603 xr %r0, %r0
604 clgr %r0, %r9 # did we detect memory?
605 je .Ldonemem # if not, leave
606 chi %r10, 0 # do we have chunks left?
607 je .Ldonemem
608.Lhsaskip:
609 algr %r5,%r1 # add 128KB to end of chunk
610 lgr %r4,%r5 # potential new chunk
611 clgr %r5,%r9 # should we go on?
612 jl .Lloop
613.Ldonemem:
614
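
The loop above probes storage with tprot in 128KB steps: as long as the condition code matches the last access code the current chunk grows; when it changes, or tprot takes a program check past the end of storage, the chunk's start, size and type are recorded and a new chunk begins (the 64-bit variant additionally skips the detected-size check right at the 2GB boundary for the HSA). A C sketch of the chunk-building logic, with a fake probe function standing in for tprot and the program-check exit:

    #include <stdio.h>

    #define STEP   (1UL << 17) /* 128KB */
    #define MEMEND (1UL << 20) /* pretend 1MB of storage */

    /* stand-in for tprot: access code 0 below MEMEND, "fault" (-1) above */
    static int probe(unsigned long addr)
    {
            return addr < MEMEND ? 0 : -1;
    }

    int main(void)
    {
            unsigned long start = 0, end = 0;
            int access = 0;

            for (;;) {
                    int cc = probe(end);
                    if (cc != access) {
                            if (end > start) /* record finished chunk */
                                    printf("chunk: start=%#lx size=%#lx type=%d\n",
                                           start, end - start, access);
                            if (cc < 0)
                                    break;   /* ran past end of storage */
                            access = cc;     /* start a new chunk */
                            start = end;
                    }
                    end += STEP;
            }
            return 0;
    }
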
615 larl %r12,machine_flags
616#
617# find out if we are running under VM
618#
619 stidp __LC_CPUID # store cpuid
620 tm __LC_CPUID,0xff # running under VM ?
621 bno 0f-.LPG1(%r13)
622 oi 7(%r12),1 # set VM flag
6230: lh %r0,__LC_CPUID+4 # get cpu version
624 chi %r0,0x7490 # running on a P/390 ?
625 bne 1f-.LPG1(%r13)
626 oi 7(%r12),4 # set P/390 flag
6271:
628
629#
630# find out if we have the MVPG instruction
631#
632 la %r1,0f-.LPG1(%r13) # set program check address
633 stg %r1,__LC_PGM_NEW_PSW+8
634 sgr %r0,%r0
635 lghi %r1,0
636 lghi %r2,0
637 mvpg %r1,%r2 # test MVPG instruction
638 oi 7(%r12),16 # set MVPG flag
6390:
640
641#
642# find out if the diag 0x44 works in 64 bit mode
643#
644 la %r1,0f-.LPG1(%r13) # set program check address
645 stg %r1,__LC_PGM_NEW_PSW+8
646 mvc __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13)
647 diag 0,0,0x44 # test diag 0x44
648 oi 7(%r12),32 # set diag44 flag
649 mvc __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13)
6500:
651
652#
653# find out if we have the IDTE instruction
654#
655 la %r1,0f-.LPG1(%r13) # set program check address
656 stg %r1,__LC_PGM_NEW_PSW+8
657 .long 0xb2b10000 # store facility list
658 tm 0xc8,0x08 # check bit for clearing-by-ASCE
659 bno 0f-.LPG1(%r13)
660 lhi %r1,2094
661 lhi %r2,0
662 .long 0xb98e2001
663 oi 7(%r12),0x80 # set IDTE flag
6640:
665
666 lpswe .Lentry-.LPG1(%r13) # jump to _stext in primary-space,
667 # virtual and never return ...
668 .align 16
669.Lentry:.quad 0x0000000180000000,_stext
670.Lctl: .quad 0x04b50002 # cr0: various things
671 .quad 0 # cr1: primary space segment table
672 .quad .Lduct # cr2: dispatchable unit control table
673 .quad 0 # cr3: instruction authorization
674 .quad 0 # cr4: instruction authorization
675 .quad 0xffffffffffffffff # cr5: primary-aste origin
676 .quad 0 # cr6: I/O interrupts
677 .quad 0 # cr7: secondary space segment table
678 .quad 0 # cr8: access registers translation
679 .quad 0 # cr9: tracing off
680 .quad 0 # cr10: tracing off
681 .quad 0 # cr11: tracing off
682 .quad 0 # cr12: tracing off
683 .quad 0 # cr13: home space segment table
684 .quad 0xc0000000 # cr14: machine check handling off
685 .quad 0 # cr15: linkage stack operations
686.Lpcmsk:.quad 0x0000000180000000
687.L4malign:.quad 0xffffffffffc00000
688.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
689.Lnop: .long 0x07000700
690.Ldiag44:.long 0x83000044
691
692 .org PARMAREA-64
693.Lduct: .long 0,0,0,0,0,0,0,0
694 .long 0,0,0,0,0,0,0,0
695
696#
697# params at 10400 (setup.h)
698#
699 .org PARMAREA
700 .global _pstart
701_pstart:
702 .quad 0 # IPL_DEVICE
703 .quad RAMDISK_ORIGIN # INITRD_START
704 .quad RAMDISK_SIZE # INITRD_SIZE
705
706 .org COMMAND_LINE
707 .byte "root=/dev/ram0 ro"
708 .byte 0
709 .org 0x11000
710.Lsccb:
711 .hword 0x1000 # length, one page
712 .byte 0x00,0x00,0x00
713 .byte 0x80 # variable response bit set
714.Lsccbr:
715 .hword 0x00 # response code
716.Lscpincr1:
717 .hword 0x00
718.Lscpa1:
719 .byte 0x00
720 .fill 89,1,0
721.Lscpa2:
722 .int 0x00
723.Lscpincr2:
724 .quad 0x00
725 .fill 3984,1,0
726 .org 0x12000
727 .global _pend
728_pend:
729
730#ifdef CONFIG_SHARED_KERNEL
731 .org 0x100000
732#endif
733
734#
735# startup-code, running in virtual mode
736#
737 .globl _stext
738_stext: basr %r13,0 # get base
739.LPG2:
740#
741# Setup stack
742#
743 larl %r15,init_thread_union
744 lg %r14,__TI_task(%r15) # cache current in lowcore
745 stg %r14,__LC_CURRENT
746 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_thread_union + THREAD_SIZE
747 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
748 aghi %r15,-160
749 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
750
751# check control registers
752 stctg %c0,%c15,0(%r15)
753 oi 6(%r15),0x20 # enable sigp external interrupts
754 oi 4(%r15),0x10 # switch on low address protection
755 lctlg %c0,%c15,0(%r15)
756
757#
758 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
759 brasl %r14,start_kernel # go to C code
760#
761# We returned from start_kernel ?!? PANIK
762#
763 basr %r13,0
764 lpswe .Ldw-.(%r13) # load disabled wait psw
765#
766 .align 8
767.Ldw: .quad 0x0002000180000000,0x0000000000000000
768.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
769
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
new file mode 100644
index 000000000000..d73a74013e73
--- /dev/null
+++ b/arch/s390/kernel/init_task.c
@@ -0,0 +1,44 @@
1/*
2 * arch/s390/kernel/init_task.c
3 *
4 * S390 version
5 *
6 * Derived from "arch/i386/kernel/init_task.c"
7 */
8
9#include <linux/mm.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/init_task.h>
13#include <linux/mqueue.h>
14
15#include <asm/uaccess.h>
16#include <asm/pgtable.h>
17
18static struct fs_struct init_fs = INIT_FS;
19static struct files_struct init_files = INIT_FILES;
20static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
21static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
22struct mm_struct init_mm = INIT_MM(init_mm);
23
24EXPORT_SYMBOL(init_mm);
25
26/*
27 * Initial thread structure.
28 *
29 * We need to make sure that this is 8192-byte aligned due to the
30 * way process stacks are handled. This is done by having a special
31 * "init_task" linker map entry..
32 */
33union thread_union init_thread_union
34 __attribute__((__section__(".data.init_task"))) =
35 { INIT_THREAD_INFO(init_task) };
36
37/*
38 * Initial task structure.
39 *
40 * All other task structs will be allocated on slabs in fork.c
41 */
42struct task_struct init_task = INIT_TASK(init_task);
43
44EXPORT_SYMBOL(init_task);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
new file mode 100644
index 000000000000..480b6a5fef3a
--- /dev/null
+++ b/arch/s390/kernel/irq.c
@@ -0,0 +1,105 @@
1/*
2 * arch/s390/kernel/irq.c
3 *
4 * S390 version
5 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *
8 * This file contains interrupt related functions.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/kernel_stat.h>
14#include <linux/interrupt.h>
15#include <linux/seq_file.h>
16#include <linux/cpu.h>
17
18/*
19 * show_interrupts is needed by /proc/interrupts.
20 */
21int show_interrupts(struct seq_file *p, void *v)
22{
23 static const char *intrclass_names[] = { "EXT", "I/O", };
24 int i = *(loff_t *) v, j;
25
26 if (i == 0) {
27 seq_puts(p, " ");
28 for_each_online_cpu(j)
29 seq_printf(p, "CPU%d ",j);
30 seq_putc(p, '\n');
31 }
32
33 if (i < NR_IRQS) {
34 seq_printf(p, "%s: ", intrclass_names[i]);
35#ifndef CONFIG_SMP
36 seq_printf(p, "%10u ", kstat_irqs(i));
37#else
38 for_each_online_cpu(j)
39 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
40#endif
41 seq_putc(p, '\n');
42
43 }
44
45 return 0;
46}
47
48/*
49 * For compatibility only. S/390 specific setup of interrupts et al. is done
50 * much later in init_channel_subsystem().
51 */
52void __init
53init_IRQ(void)
54{
55 /* nothing... */
56}
57
58/*
59 * Switch to the asynchronous interrupt stack for softirq execution.
60 */
61extern void __do_softirq(void);
62
63asmlinkage void do_softirq(void)
64{
65 unsigned long flags, old, new;
66
67 if (in_interrupt())
68 return;
69
70 local_irq_save(flags);
71
72 account_system_vtime(current);
73
74 local_bh_disable();
75
76 if (local_softirq_pending()) {
77 /* Get current stack pointer. */
78 asm volatile("la %0,0(15)" : "=a" (old));
79 /* Check against async. stack address range. */
80 new = S390_lowcore.async_stack;
81 if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
82 /* Need to switch to the async. stack. */
83 new -= STACK_FRAME_OVERHEAD;
84 ((struct stack_frame *) new)->back_chain = old;
85
86 asm volatile(" la 15,0(%0)\n"
87 " basr 14,%2\n"
88 " la 15,0(%1)\n"
89 : : "a" (new), "a" (old),
90 "a" (__do_softirq)
91 : "0", "1", "2", "3", "4", "5", "14",
92 "cc", "memory" );
93 } else
94 /* We are already on the async stack. */
95 __do_softirq();
96 }
97
98 account_system_vtime(current);
99
100 __local_bh_enable();
101
102 local_irq_restore(flags);
103}
104
105EXPORT_SYMBOL(do_softirq);
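
do_softirq() decides whether it is already on the async stack by testing whether the current stack pointer and the async stack address fall into the same THREAD_SIZE region: when (new - old) >> (PAGE_SHIFT + THREAD_ORDER) is non-zero they differ, so the code builds a frame on the async stack, chains the old stack pointer into its backchain, and runs __do_softirq there before switching back. A small C model of that range test (the addresses are made up; THREAD_ORDER of 1 is an assumption for 8KB stacks):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define THREAD_ORDER 1
    #define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)

    /* non-zero iff sp lies outside the stack region ending at async_stack */
    static int needs_switch(unsigned long async_stack, unsigned long sp)
    {
            return ((async_stack - sp) >> THREAD_SHIFT) != 0;
    }

    int main(void)
    {
            unsigned long async_stack = 0x40000; /* top of async stack */

            printf("%d\n", needs_switch(async_stack, 0x10000)); /* 1: switch  */
            printf("%d\n", needs_switch(async_stack, 0x3f000)); /* 0: already */
            return 0;
    }
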
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
new file mode 100644
index 000000000000..607d506689c8
--- /dev/null
+++ b/arch/s390/kernel/module.c
@@ -0,0 +1,405 @@
1/*
2 * arch/s390/kernel/module.c - Kernel module help for s390.
3 *
4 * S390 version
5 * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 *
10 * based on i386 version
11 * Copyright (C) 2001 Rusty Russell.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27#include <linux/module.h>
28#include <linux/elf.h>
29#include <linux/vmalloc.h>
30#include <linux/fs.h>
31#include <linux/string.h>
32#include <linux/kernel.h>
33
34#if 0
35#define DEBUGP printk
36#else
37#define DEBUGP(fmt , ...)
38#endif
39
40#ifndef CONFIG_ARCH_S390X
41#define PLT_ENTRY_SIZE 12
42#else /* CONFIG_ARCH_S390X */
43#define PLT_ENTRY_SIZE 20
44#endif /* CONFIG_ARCH_S390X */
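
PLT_ENTRY_SIZE reflects the stubs emitted later in apply_rela(): 12 bytes on 31-bit s390 (basr 1,0; l 1,6(1); br 1, followed by a 4-byte target address) and 20 bytes on 64-bit (basr 1,0; lg 1,10(1); br 1 plus padding, followed by an 8-byte target). A sketch of the two layouts as C structs (the struct names are illustrative, not from this file):

    #include <stdio.h>

    /* 31-bit stub: 8 bytes of code followed by a 4-byte target */
    struct plt_entry_31 {
            unsigned int insn[2];   /* 0x0d105810, 0x100607f1 */
            unsigned int target;
    };

    /* 64-bit stub: 12 bytes of code followed by an 8-byte target */
    struct plt_entry_64 {
            unsigned int insn[3];   /* 0x0d10e310, 0x100a0004, 0x07f10000 */
            unsigned int target_hi;
            unsigned int target_lo;
    };

    int main(void)
    {
            printf("31-bit PLT entry: %zu bytes\n",
                   sizeof(struct plt_entry_31)); /* 12 */
            printf("64-bit PLT entry: %zu bytes\n",
                   sizeof(struct plt_entry_64)); /* 20 */
            return 0;
    }
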
45
46void *module_alloc(unsigned long size)
47{
48 if (size == 0)
49 return NULL;
50 return vmalloc(size);
51}
52
53/* Free memory returned from module_alloc */
54void module_free(struct module *mod, void *module_region)
55{
56 vfree(module_region);
57 /* FIXME: If module_region == mod->init_region, trim exception
58 table entries. */
59}
60
61static inline void
62check_rela(Elf_Rela *rela, struct module *me)
63{
64 struct mod_arch_syminfo *info;
65
66 info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
67 switch (ELF_R_TYPE (rela->r_info)) {
68 case R_390_GOT12: /* 12 bit GOT offset. */
69 case R_390_GOT16: /* 16 bit GOT offset. */
70 case R_390_GOT20: /* 20 bit GOT offset. */
71 case R_390_GOT32: /* 32 bit GOT offset. */
72 case R_390_GOT64: /* 64 bit GOT offset. */
73 case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
74 case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
75 case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
76 case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
77 case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
78 case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
79 case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
80 if (info->got_offset == -1UL) {
81 info->got_offset = me->arch.got_size;
82 me->arch.got_size += sizeof(void*);
83 }
84 break;
85 case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
86 case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
87 case R_390_PLT32: /* 32 bit PC relative PLT address. */
88 case R_390_PLT64: /* 64 bit PC relative PLT address. */
89 case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
90 case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
91 case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
92 if (info->plt_offset == -1UL) {
93 info->plt_offset = me->arch.plt_size;
94 me->arch.plt_size += PLT_ENTRY_SIZE;
95 }
96 break;
97 case R_390_COPY:
98 case R_390_GLOB_DAT:
99 case R_390_JMP_SLOT:
100 case R_390_RELATIVE:
101 /* Only needed if we want to support loading of
102 modules linked with -shared. */
103 break;
104 }
105}
106
107/*
108 * Account for GOT and PLT relocations. We can't add sections for
109 * got and plt but we can increase the core module size.
110 */
111int
112module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
113 char *secstrings, struct module *me)
114{
115 Elf_Shdr *symtab;
116 Elf_Sym *symbols;
117 Elf_Rela *rela;
118 char *strings;
119 int nrela, i, j;
120
121 /* Find symbol table and string table. */
122 symtab = 0;
123 for (i = 0; i < hdr->e_shnum; i++)
124 switch (sechdrs[i].sh_type) {
125 case SHT_SYMTAB:
126 symtab = sechdrs + i;
127 break;
128 }
129 if (!symtab) {
130 printk(KERN_ERR "module %s: no symbol table\n", me->name);
131 return -ENOEXEC;
132 }
133
134 /* Allocate one syminfo structure per symbol. */
135 me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
136 me->arch.syminfo = vmalloc(me->arch.nsyms *
137 sizeof(struct mod_arch_syminfo));
138 if (!me->arch.syminfo)
139 return -ENOMEM;
140 symbols = (void *) hdr + symtab->sh_offset;
141 strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
142 for (i = 0; i < me->arch.nsyms; i++) {
143 if (symbols[i].st_shndx == SHN_UNDEF &&
144 strcmp(strings + symbols[i].st_name,
145 "_GLOBAL_OFFSET_TABLE_") == 0)
146 /* "Define" it as absolute. */
147 symbols[i].st_shndx = SHN_ABS;
148 me->arch.syminfo[i].got_offset = -1UL;
149 me->arch.syminfo[i].plt_offset = -1UL;
150 me->arch.syminfo[i].got_initialized = 0;
151 me->arch.syminfo[i].plt_initialized = 0;
152 }
153
154 /* Search for got/plt relocations. */
155 me->arch.got_size = me->arch.plt_size = 0;
156 for (i = 0; i < hdr->e_shnum; i++) {
157 if (sechdrs[i].sh_type != SHT_RELA)
158 continue;
159 nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
160 rela = (void *) hdr + sechdrs[i].sh_offset;
161 for (j = 0; j < nrela; j++)
162 check_rela(rela + j, me);
163 }
164
165 /* Increase core size by size of got & plt and set start
166 offsets for got and plt. */
167 me->core_size = ALIGN(me->core_size, 4);
168 me->arch.got_offset = me->core_size;
169 me->core_size += me->arch.got_size;
170 me->arch.plt_offset = me->core_size;
171 me->core_size += me->arch.plt_size;
172 return 0;
173}
174
175int
176apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
177 unsigned int relsec, struct module *me)
178{
179 printk(KERN_ERR "module %s: RELOCATION unsupported\n",
180 me->name);
181 return -ENOEXEC;
182}
183
184static inline int
185apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
186 struct module *me)
187{
188 struct mod_arch_syminfo *info;
189 Elf_Addr loc, val;
190 int r_type, r_sym;
191
192 /* This is where to make the change */
193 loc = base + rela->r_offset;
194 /* This is the symbol it is referring to. Note that all
195 undefined symbols have been resolved. */
196 r_sym = ELF_R_SYM(rela->r_info);
197 r_type = ELF_R_TYPE(rela->r_info);
198 info = me->arch.syminfo + r_sym;
199 val = symtab[r_sym].st_value;
200
201 switch (r_type) {
202 case R_390_8: /* Direct 8 bit. */
203 case R_390_12: /* Direct 12 bit. */
204 case R_390_16: /* Direct 16 bit. */
205 case R_390_20: /* Direct 20 bit. */
206 case R_390_32: /* Direct 32 bit. */
207 case R_390_64: /* Direct 64 bit. */
208 val += rela->r_addend;
209 if (r_type == R_390_8)
210 *(unsigned char *) loc = val;
211 else if (r_type == R_390_12)
212 *(unsigned short *) loc = (val & 0xfff) |
213 (*(unsigned short *) loc & 0xf000);
214 else if (r_type == R_390_16)
215 *(unsigned short *) loc = val;
216 else if (r_type == R_390_20)
217 *(unsigned int *) loc =
218 (*(unsigned int *) loc & 0xf00000ff) |
219 (val & 0xfff) << 16 | (val & 0xff000) >> 4;
220 else if (r_type == R_390_32)
221 *(unsigned int *) loc = val;
222 else if (r_type == R_390_64)
223 *(unsigned long *) loc = val;
224 break;
225 case R_390_PC16: /* PC relative 16 bit. */
226 case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
227 case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
228 case R_390_PC32: /* PC relative 32 bit. */
229 case R_390_PC64: /* PC relative 64 bit. */
230 val += rela->r_addend - loc;
231 if (r_type == R_390_PC16)
232 *(unsigned short *) loc = val;
233 else if (r_type == R_390_PC16DBL)
234 *(unsigned short *) loc = val >> 1;
235 else if (r_type == R_390_PC32DBL)
236 *(unsigned int *) loc = val >> 1;
237 else if (r_type == R_390_PC32)
238 *(unsigned int *) loc = val;
239 else if (r_type == R_390_PC64)
240 *(unsigned long *) loc = val;
241 break;
242 case R_390_GOT12: /* 12 bit GOT offset. */
243 case R_390_GOT16: /* 16 bit GOT offset. */
244 case R_390_GOT20: /* 20 bit GOT offset. */
245 case R_390_GOT32: /* 32 bit GOT offset. */
246 case R_390_GOT64: /* 64 bit GOT offset. */
247 case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
248 case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
249 case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
250 case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
251 case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
252 case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
253 case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
254 if (info->got_initialized == 0) {
255 Elf_Addr *gotent;
256
257 gotent = me->module_core + me->arch.got_offset +
258 info->got_offset;
259 *gotent = val;
260 info->got_initialized = 1;
261 }
262 val = info->got_offset + rela->r_addend;
263 if (r_type == R_390_GOT12 ||
264 r_type == R_390_GOTPLT12)
265 *(unsigned short *) loc = (val & 0xfff) |
266 (*(unsigned short *) loc & 0xf000);
267 else if (r_type == R_390_GOT16 ||
268 r_type == R_390_GOTPLT16)
269 *(unsigned short *) loc = val;
270 else if (r_type == R_390_GOT20 ||
271 r_type == R_390_GOTPLT20)
272 *(unsigned int *) loc =
273 (*(unsigned int *) loc & 0xf00000ff) |
274 (val & 0xfff) << 16 | (val & 0xff000) >> 4;
275 else if (r_type == R_390_GOT32 ||
276 r_type == R_390_GOTPLT32)
277 *(unsigned int *) loc = val;
278 else if (r_type == R_390_GOTENT ||
279 r_type == R_390_GOTPLTENT)
280 *(unsigned int *) loc =
281 (val + (Elf_Addr) me->module_core - loc) >> 1;
282 else if (r_type == R_390_GOT64 ||
283 r_type == R_390_GOTPLT64)
284 *(unsigned long *) loc = val;
285 break;
286 case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
287 case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
288 case R_390_PLT32: /* 32 bit PC relative PLT address. */
289 case R_390_PLT64: /* 64 bit PC relative PLT address. */
290 case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
291 case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
292 case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
293 if (info->plt_initialized == 0) {
294 unsigned int *ip;
295 ip = me->module_core + me->arch.plt_offset +
296 info->plt_offset;
297#ifndef CONFIG_ARCH_S390X
298 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
299 ip[1] = 0x100607f1;
300 ip[2] = val;
301#else /* CONFIG_ARCH_S390X */
302 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
303 ip[1] = 0x100a0004;
304 ip[2] = 0x07f10000;
305 ip[3] = (unsigned int) (val >> 32);
306 ip[4] = (unsigned int) val;
307#endif /* CONFIG_ARCH_S390X */
308 info->plt_initialized = 1;
309 }
310 if (r_type == R_390_PLTOFF16 ||
311 r_type == R_390_PLTOFF32 ||
312 r_type == R_390_PLTOFF64)
313 val = me->arch.plt_offset -
314 me->arch.got_offset +
315 info->plt_offset + rela->r_addend;
316 else
317 val = (Elf_Addr) me->module_core +
318 me->arch.plt_offset + info->plt_offset +
319 rela->r_addend - loc;
320 if (r_type == R_390_PLT16DBL)
321 *(unsigned short *) loc = val >> 1;
322 else if (r_type == R_390_PLTOFF16)
323 *(unsigned short *) loc = val;
324 else if (r_type == R_390_PLT32DBL)
325 *(unsigned int *) loc = val >> 1;
326 else if (r_type == R_390_PLT32 ||
327 r_type == R_390_PLTOFF32)
328 *(unsigned int *) loc = val;
329 else if (r_type == R_390_PLT64 ||
330 r_type == R_390_PLTOFF64)
331 *(unsigned long *) loc = val;
332 break;
333 case R_390_GOTOFF16: /* 16 bit offset to GOT. */
334 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
335 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
336 val = val + rela->r_addend -
337 ((Elf_Addr) me->module_core + me->arch.got_offset);
338 if (r_type == R_390_GOTOFF16)
339 *(unsigned short *) loc = val;
340 else if (r_type == R_390_GOTOFF32)
341 *(unsigned int *) loc = val;
342 else if (r_type == R_390_GOTOFF64)
343 *(unsigned long *) loc = val;
344 break;
345 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
346 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
347 val = (Elf_Addr) me->module_core + me->arch.got_offset +
348 rela->r_addend - loc;
349 if (r_type == R_390_GOTPC)
350 *(unsigned int *) loc = val;
351 else if (r_type == R_390_GOTPCDBL)
352 *(unsigned int *) loc = val >> 1;
353 break;
354 case R_390_COPY:
355 case R_390_GLOB_DAT: /* Create GOT entry. */
356 case R_390_JMP_SLOT: /* Create PLT entry. */
357 case R_390_RELATIVE: /* Adjust by program base. */
358 /* Only needed if we want to support loading of
359 modules linked with -shared. */
360 break;
361 default:
362 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
363 me->name, r_type);
364 return -ENOEXEC;
365 }
366 return 0;
367}
368
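The PC-relative "DBL" relocations above store their displacement in halfwords, because the s390 branch-relative instructions address in 2-byte units. A minimal standalone sketch of the R_390_PC32DBL arithmetic (illustrative only; resolve_pc32dbl is a made-up name, not part of this commit):

    #include <stdint.h>

    /* Patch a 32 bit PC-relative halfword displacement at loc. */
    static void resolve_pc32dbl(uint8_t *loc, uint64_t sym, int64_t addend)
    {
            /* byte distance from the patched site to the target */
            int64_t val = (int64_t) sym + addend - (int64_t) (uintptr_t) loc;

            /* the instruction encodes halfwords, hence the shift */
            *(uint32_t *) loc = (uint32_t) (val >> 1);
    }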
369int
370apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
371 unsigned int symindex, unsigned int relsec,
372 struct module *me)
373{
374 Elf_Addr base;
375 Elf_Sym *symtab;
376 Elf_Rela *rela;
377 unsigned long i, n;
378 int rc;
379
380 DEBUGP("Applying relocate section %u to %u\n",
381 relsec, sechdrs[relsec].sh_info);
382 base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
383 symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
384 rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
385 n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
386
387 for (i = 0; i < n; i++, rela++) {
388 rc = apply_rela(rela, base, symtab, me);
389 if (rc)
390 return rc;
391 }
392 return 0;
393}
394
395int module_finalize(const Elf_Ehdr *hdr,
396 const Elf_Shdr *sechdrs,
397 struct module *me)
398{
399 vfree(me->arch.syminfo);
400 return 0;
401}
402
403void module_arch_cleanup(struct module *mod)
404{
405}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
new file mode 100644
index 000000000000..7aea25d6e300
--- /dev/null
+++ b/arch/s390/kernel/process.c
@@ -0,0 +1,416 @@
1/*
2 * arch/s390/kernel/process.c
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *
10 * Derived from "arch/i386/kernel/process.c"
11 * Copyright (C) 1995, Linus Torvalds
12 */
13
14/*
15 * This file handles the architecture-dependent parts of process handling..
16 */
17
18#include <linux/config.h>
19#include <linux/compiler.h>
20#include <linux/cpu.h>
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/ptrace.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/user.h>
33#include <linux/a.out.h>
34#include <linux/interrupt.h>
35#include <linux/delay.h>
36#include <linux/reboot.h>
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/notifier.h>
40
41#include <asm/uaccess.h>
42#include <asm/pgtable.h>
43#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h>
46#include <asm/irq.h>
47#include <asm/timer.h>
48
49asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
50
51/*
52 * Return saved PC of a blocked thread. Used in kernel/sched.
53 * resume in entry.S does not create a new stack frame, it
54 * just stores the registers %r6-%r15 to the frame given by
55 * schedule. We want to return the address of the caller of
56 * schedule, so we have to walk the backchain once to find
57 * the frame where schedule() stored its return address.
58 */
59unsigned long thread_saved_pc(struct task_struct *tsk)
60{
61 struct stack_frame *sf;
62
63 sf = (struct stack_frame *) tsk->thread.ksp;
64 sf = (struct stack_frame *) sf->back_chain;
65 return sf->gprs[8];
66}
67
68/*
69 * Need to know about CPUs going idle?
70 */
71static struct notifier_block *idle_chain;
72
73int register_idle_notifier(struct notifier_block *nb)
74{
75 return notifier_chain_register(&idle_chain, nb);
76}
77EXPORT_SYMBOL(register_idle_notifier);
78
79int unregister_idle_notifier(struct notifier_block *nb)
80{
81 return notifier_chain_unregister(&idle_chain, nb);
82}
83EXPORT_SYMBOL(unregister_idle_notifier);
84
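The notifier pair above lets other code watch CPUs entering and leaving the idle loop. A hedged usage sketch (my_idle_notify and my_idle_nb are hypothetical names):

    static int my_idle_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            /* action is CPU_IDLE or CPU_NOT_IDLE, data carries the cpu number */
            if (action == CPU_IDLE)
                    ; /* cpu (long) data is about to load the wait PSW */
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    /* in some init path: register_idle_notifier(&my_idle_nb); */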
85void do_monitor_call(struct pt_regs *regs, long interruption_code)
86{
87 /* disable monitor call class 0 */
88 __ctl_clear_bit(8, 15);
89
90 notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
91 (void *)(long) smp_processor_id());
92}
93
94/*
95 * The idle loop on an S390...
96 */
97void default_idle(void)
98{
99 psw_t wait_psw;
100 unsigned long reg;
101 int cpu, rc;
102
103 local_irq_disable();
104 if (need_resched()) {
105 local_irq_enable();
106 schedule();
107 return;
108 }
109
110 /* CPU is going idle. */
111 cpu = smp_processor_id();
112 rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
113 if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
114 BUG();
115 if (rc != NOTIFY_OK) {
116 local_irq_enable();
117 return;
118 }
119
120 /* enable monitor call class 0 */
121 __ctl_set_bit(8, 15);
122
123#ifdef CONFIG_HOTPLUG_CPU
124 if (cpu_is_offline(smp_processor_id()))
125 cpu_die();
126#endif
127
128 /*
129 * Wait for external, I/O or machine check interrupt and
130 * switch off machine check bit after the wait has ended.
131 */
132 wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT |
133 PSW_MASK_IO | PSW_MASK_EXT;
134#ifndef CONFIG_ARCH_S390X
135 asm volatile (
136 " basr %0,0\n"
137 "0: la %0,1f-0b(%0)\n"
138 " st %0,4(%1)\n"
139 " oi 4(%1),0x80\n"
140 " lpsw 0(%1)\n"
141 "1: la %0,2f-1b(%0)\n"
142 " st %0,4(%1)\n"
143 " oi 4(%1),0x80\n"
144 " ni 1(%1),0xf9\n"
145 " lpsw 0(%1)\n"
146 "2:"
147 : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
148#else /* CONFIG_ARCH_S390X */
149 asm volatile (
150 " larl %0,0f\n"
151 " stg %0,8(%1)\n"
152 " lpswe 0(%1)\n"
153 "0: larl %0,1f\n"
154 " stg %0,8(%1)\n"
155 " ni 1(%1),0xf9\n"
156 " lpswe 0(%1)\n"
157 "1:"
158 : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
159#endif /* CONFIG_ARCH_S390X */
160}
161
162void cpu_idle(void)
163{
164 for (;;)
165 default_idle();
166}
167
168void show_regs(struct pt_regs *regs)
169{
170 struct task_struct *tsk = current;
171
172 printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted());
173 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
174 current->comm, current->pid, (void *) tsk,
175 (void *) tsk->thread.ksp);
176
177 show_registers(regs);
178 /* Show stack backtrace if pt_regs is from kernel mode */
179 if (!(regs->psw.mask & PSW_MASK_PSTATE))
180 show_trace(0,(unsigned long *) regs->gprs[15]);
181}
182
183extern void kernel_thread_starter(void);
184
185__asm__(".align 4\n"
186 "kernel_thread_starter:\n"
187 " la 2,0(10)\n"
188 " basr 14,9\n"
189 " la 2,0\n"
190 " br 11\n");
191
192int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
193{
194 struct pt_regs regs;
195
196 memset(&regs, 0, sizeof(regs));
197 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
198 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
199 regs.gprs[9] = (unsigned long) fn;
200 regs.gprs[10] = (unsigned long) arg;
201 regs.gprs[11] = (unsigned long) do_exit;
202 regs.orig_gpr2 = -1;
203
204 /* Ok, create the new process.. */
205 return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
206 0, &regs, 0, NULL, NULL);
207}
208
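kernel_thread_starter receives its operands in registers: %r10 holds arg (copied to %r2, the first argument register), %r9 holds fn, and %r11 holds do_exit, which is reached with exit code 0 once fn returns. A hedged usage sketch (my_thread_fn is a hypothetical example function):

    static int my_thread_fn(void *data)
    {
            /* runs in kernel context; CLONE_VM is forced by kernel_thread */
            return 0;  /* return value is discarded, the starter calls do_exit(0) */
    }

    /* spawn it: pid = kernel_thread(my_thread_fn, NULL, 0); */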
209/*
210 * Free current thread data structures etc..
211 */
212void exit_thread(void)
213{
214}
215
216void flush_thread(void)
217{
218 clear_used_math();
219 clear_tsk_thread_flag(current, TIF_USEDFPU);
220}
221
222void release_thread(struct task_struct *dead_task)
223{
224}
225
226int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
227 unsigned long unused,
228 struct task_struct * p, struct pt_regs * regs)
229{
230 struct fake_frame
231 {
232 struct stack_frame sf;
233 struct pt_regs childregs;
234 } *frame;
235
236 frame = ((struct fake_frame *)
237 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
238 p->thread.ksp = (unsigned long) frame;
239 /* Store access registers to kernel stack of new process. */
240 frame->childregs = *regs;
241 frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
242 frame->childregs.gprs[15] = new_stackp;
243 frame->sf.back_chain = 0;
244
245 /* new return point is ret_from_fork */
246 frame->sf.gprs[8] = (unsigned long) ret_from_fork;
247
248 /* fake return stack for resume(), don't go back to schedule */
249 frame->sf.gprs[9] = (unsigned long) frame;
250
251 /* Save access registers to new thread structure. */
252 save_access_regs(&p->thread.acrs[0]);
253
254#ifndef CONFIG_ARCH_S390X
255 /*
256 * save fprs to current->thread.fp_regs to merge them with
257 * the emulated registers and then copy the result to the child.
258 */
259 save_fp_regs(&current->thread.fp_regs);
260 memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
261 sizeof(s390_fp_regs));
262 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
263 /* Set a new TLS ? */
264 if (clone_flags & CLONE_SETTLS)
265 p->thread.acrs[0] = regs->gprs[6];
266#else /* CONFIG_ARCH_S390X */
267 /* Save the fpu registers to new thread structure. */
268 save_fp_regs(&p->thread.fp_regs);
269 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
270 /* Set a new TLS ? */
271 if (clone_flags & CLONE_SETTLS) {
272 if (test_thread_flag(TIF_31BIT)) {
273 p->thread.acrs[0] = (unsigned int) regs->gprs[6];
274 } else {
275 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
276 p->thread.acrs[1] = (unsigned int) regs->gprs[6];
277 }
278 }
279#endif /* CONFIG_ARCH_S390X */
280 /* start new process with ar4 pointing to the correct address space */
281 p->thread.mm_segment = get_fs();
282 /* Don't copy debug registers */
283 memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
284
285 return 0;
286}
287
288asmlinkage long sys_fork(struct pt_regs regs)
289{
290 return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL);
291}
292
293asmlinkage long sys_clone(struct pt_regs regs)
294{
295 unsigned long clone_flags;
296 unsigned long newsp;
297 int __user *parent_tidptr, *child_tidptr;
298
299 clone_flags = regs.gprs[3];
300 newsp = regs.orig_gpr2;
301 parent_tidptr = (int __user *) regs.gprs[4];
302 child_tidptr = (int __user *) regs.gprs[5];
303 if (!newsp)
304 newsp = regs.gprs[15];
305 return do_fork(clone_flags, newsp, &regs, 0,
306 parent_tidptr, child_tidptr);
307}
308
309/*
310 * This is trivial, and on the face of it looks like it
311 * could equally well be done in user mode.
312 *
313 * Not so, for quite unobvious reasons - register pressure.
314 * In user mode vfork() cannot have a stack frame, and if
315 * done by calling the "clone()" system call directly, you
316 * do not have enough call-clobbered registers to hold all
317 * the information you need.
318 */
319asmlinkage long sys_vfork(struct pt_regs regs)
320{
321 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
322 regs.gprs[15], &regs, 0, NULL, NULL);
323}
324
325/*
326 * sys_execve() executes a new program.
327 */
328asmlinkage long sys_execve(struct pt_regs regs)
329{
330 int error;
331 char * filename;
332
333 filename = getname((char __user *) regs.orig_gpr2);
334 error = PTR_ERR(filename);
335 if (IS_ERR(filename))
336 goto out;
337 error = do_execve(filename, (char __user * __user *) regs.gprs[3],
338 (char __user * __user *) regs.gprs[4], &regs);
339 if (error == 0) {
340 task_lock(current);
341 current->ptrace &= ~PT_DTRACE;
342 task_unlock(current);
343 current->thread.fp_regs.fpc = 0;
344 if (MACHINE_HAS_IEEE)
345 asm volatile("sfpc %0,%0" : : "d" (0));
346 }
347 putname(filename);
348out:
349 return error;
350}
351
352
353/*
354 * fill in the FPU structure for a core dump.
355 */
356int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
357{
358#ifndef CONFIG_ARCH_S390X
359 /*
360 * save fprs to current->thread.fp_regs to merge them with
361 * the emulated registers and then copy the result to the dump.
362 */
363 save_fp_regs(&current->thread.fp_regs);
364 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
365#else /* CONFIG_ARCH_S390X */
366 save_fp_regs(fpregs);
367#endif /* CONFIG_ARCH_S390X */
368 return 1;
369}
370
371/*
372 * fill in the user structure for a core dump..
373 */
374void dump_thread(struct pt_regs * regs, struct user * dump)
375{
376
377/* changed the size calculations - should hopefully work better. lbt */
378 dump->magic = CMAGIC;
379 dump->start_code = 0;
380 dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
381 dump->u_tsize = current->mm->end_code >> PAGE_SHIFT;
382 dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT;
383 dump->u_dsize -= dump->u_tsize;
384 dump->u_ssize = 0;
385 if (dump->start_stack < TASK_SIZE)
386 dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT;
387 memcpy(&dump->regs, regs, sizeof(s390_regs));
388 dump_fpu (regs, &dump->regs.fp_regs);
389 dump->regs.per_info = current->thread.per_info;
390}
391
392unsigned long get_wchan(struct task_struct *p)
393{
394 struct stack_frame *sf, *low, *high;
395 unsigned long return_address;
396 int count;
397
398 if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info)
399 return 0;
400 low = (struct stack_frame *) p->thread_info;
401 high = (struct stack_frame *)
402 ((unsigned long) p->thread_info + THREAD_SIZE) - 1;
403 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
404 if (sf <= low || sf > high)
405 return 0;
406 for (count = 0; count < 16; count++) {
407 sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
408 if (sf <= low || sf > high)
409 return 0;
410 return_address = sf->gprs[8] & PSW_ADDR_INSN;
411 if (!in_sched_functions(return_address))
412 return return_address;
413 }
414 return 0;
415}
416
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
new file mode 100644
index 000000000000..7ba777eec1a8
--- /dev/null
+++ b/arch/s390/kernel/profile.c
@@ -0,0 +1,20 @@
1/*
2 * arch/s390/kernel/profile.c
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
6 *
7 */
8#include <linux/proc_fs.h>
9#include <linux/profile.h>
10
11static struct proc_dir_entry * root_irq_dir;
12
13void init_irq_proc(void)
14{
15 /* create /proc/irq */
16 root_irq_dir = proc_mkdir("irq", 0);
17
18 /* create /proc/irq/prof_cpu_mask */
19 create_prof_cpu_mask(root_irq_dir);
20}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
new file mode 100644
index 000000000000..647233c02fc8
--- /dev/null
+++ b/arch/s390/kernel/ptrace.c
@@ -0,0 +1,738 @@
1/*
2 * arch/s390/kernel/ptrace.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Based on PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Derived from "arch/m68k/kernel/ptrace.c"
13 * Copyright (C) 1994 by Hamish Macdonald
14 * Taken from linux/kernel/ptrace.c and modified for M680x0.
15 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
16 *
17 * Modified by Cort Dougan (cort@cs.nmt.edu)
18 *
19 *
20 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file README.legal in the main directory of
22 * this archive for more details.
23 */
24
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/mm.h>
28#include <linux/smp.h>
29#include <linux/smp_lock.h>
30#include <linux/errno.h>
31#include <linux/ptrace.h>
32#include <linux/user.h>
33#include <linux/security.h>
34#include <linux/audit.h>
35
36#include <asm/segment.h>
37#include <asm/page.h>
38#include <asm/pgtable.h>
39#include <asm/pgalloc.h>
40#include <asm/system.h>
41#include <asm/uaccess.h>
42
43#ifdef CONFIG_S390_SUPPORT
44#include "compat_ptrace.h"
45#endif
46
47static void
48FixPerRegisters(struct task_struct *task)
49{
50 struct pt_regs *regs;
51 per_struct *per_info;
52
53 regs = __KSTK_PTREGS(task);
54 per_info = (per_struct *) &task->thread.per_info;
55 per_info->control_regs.bits.em_instruction_fetch =
56 per_info->single_step | per_info->instruction_fetch;
57
58 if (per_info->single_step) {
59 per_info->control_regs.bits.starting_addr = 0;
60#ifdef CONFIG_S390_SUPPORT
61 if (test_thread_flag(TIF_31BIT))
62 per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
63 else
64#endif
65 per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
66 } else {
67 per_info->control_regs.bits.starting_addr =
68 per_info->starting_addr;
69 per_info->control_regs.bits.ending_addr =
70 per_info->ending_addr;
71 }
72 /*
73 * if any of the control reg tracing bits are on
74 * we switch on per in the psw
75 */
76 if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
77 regs->psw.mask |= PSW_MASK_PER;
78 else
79 regs->psw.mask &= ~PSW_MASK_PER;
80
81 if (per_info->control_regs.bits.em_storage_alteration)
82 per_info->control_regs.bits.storage_alt_space_ctl = 1;
83 else
84 per_info->control_regs.bits.storage_alt_space_ctl = 0;
85}
86
87void
88set_single_step(struct task_struct *task)
89{
90 task->thread.per_info.single_step = 1;
91 FixPerRegisters(task);
92}
93
94void
95clear_single_step(struct task_struct *task)
96{
97 task->thread.per_info.single_step = 0;
98 FixPerRegisters(task);
99}
100
101/*
102 * Called by kernel/ptrace.c when detaching..
103 *
104 * Make sure single step bits etc are not set.
105 */
106void
107ptrace_disable(struct task_struct *child)
108{
109 /* make sure the single step bit is not set. */
110 clear_single_step(child);
111}
112
113#ifndef CONFIG_ARCH_S390X
114# define __ADDR_MASK 3
115#else
116# define __ADDR_MASK 7
117#endif
118
119/*
120 * Read the word at offset addr from the user area of a process. The
121 * trouble here is that the information is littered over different
122 * locations. The process registers are found on the kernel stack,
123 * the floating point stuff and the trace settings are stored in
124 * the task structure. In addition the different structures in
125 * struct user contain pad bytes that should be read as zeroes.
126 * Lovely...
127 */
128static int
129peek_user(struct task_struct *child, addr_t addr, addr_t data)
130{
131 struct user *dummy = NULL;
132 addr_t offset, tmp;
133
134 /*
135 * Stupid gdb peeks/pokes the access registers in 64 bit with
136 * an alignment of 4. Programmers from hell...
137 */
138 if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
139 return -EIO;
140
141 if (addr < (addr_t) &dummy->regs.acrs) {
142 /*
143 * psw and gprs are stored on the stack
144 */
145 tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
146 if (addr == (addr_t) &dummy->regs.psw.mask)
147 /* Remove per bit from user psw. */
148 tmp &= ~PSW_MASK_PER;
149
150 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
151 /*
152 * access registers are stored in the thread structure
153 */
154 offset = addr - (addr_t) &dummy->regs.acrs;
155 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
156
157 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
158 /*
159 * orig_gpr2 is stored on the kernel stack
160 */
161 tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
162
163 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
164 /*
165 * floating point regs. are stored in the thread structure
166 */
167 offset = addr - (addr_t) &dummy->regs.fp_regs;
168 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
169
170 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
171 /*
172 * per_info is found in the thread structure
173 */
174 offset = addr - (addr_t) &dummy->regs.per_info;
175 tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
176
177 } else
178 tmp = 0;
179
180 return put_user(tmp, (addr_t __user *) data);
181}
182
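The `struct user *dummy = NULL` idiom above is a hand-rolled offsetof(): with a NULL base, `(addr_t) &dummy->regs.acrs` is simply the byte offset of regs.acrs inside struct user, so the if/else ladder is a range check on offsets. An equivalent sketch (illustrative only):

    #include <stddef.h>

    /* same test as addr < (addr_t) &dummy->regs.acrs */
    static int addr_names_psw_or_gpr(addr_t addr)
    {
            return addr < offsetof(struct user, regs.acrs);
    }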
183/*
184 * Write a word to the user area of a process at location addr. This
185 * operation has an additional problem compared to peek_user:
186 * stores to the program status word and to the floating point
187 * control register need to be checked for validity.
188 */
189static int
190poke_user(struct task_struct *child, addr_t addr, addr_t data)
191{
192 struct user *dummy = NULL;
193 addr_t offset;
194
195 /*
196 * Stupid gdb peeks/pokes the access registers in 64 bit with
197 * an alignment of 4. Programmers from hell indeed...
198 */
199 if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
200 return -EIO;
201
202 if (addr < (addr_t) &dummy->regs.acrs) {
203 /*
204 * psw and gprs are stored on the stack
205 */
206 if (addr == (addr_t) &dummy->regs.psw.mask &&
207#ifdef CONFIG_S390_SUPPORT
208 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
209#endif
210 data != PSW_MASK_MERGE(PSW_USER_BITS, data))
211 /* Invalid psw mask. */
212 return -EINVAL;
213#ifndef CONFIG_ARCH_S390X
214 if (addr == (addr_t) &dummy->regs.psw.addr)
215 /* I'd like to reject addresses without the
216 high order bit but older gdb's rely on it */
217 data |= PSW_ADDR_AMODE;
218#endif
219 *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
220
221 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
222 /*
223 * access registers are stored in the thread structure
224 */
225 offset = addr - (addr_t) &dummy->regs.acrs;
226 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
227
228 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
229 /*
230 * orig_gpr2 is stored on the kernel stack
231 */
232 __KSTK_PTREGS(child)->orig_gpr2 = data;
233
234 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
235 /*
236 * floating point regs. are stored in the thread structure
237 */
238 if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
239 (data & ~FPC_VALID_MASK) != 0)
240 return -EINVAL;
241 offset = addr - (addr_t) &dummy->regs.fp_regs;
242 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
243
244 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
245 /*
246 * per_info is found in the thread structure
247 */
248 offset = addr - (addr_t) &dummy->regs.per_info;
249 *(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
250
251 }
252
253 FixPerRegisters(child);
254 return 0;
255}
256
257static int
258do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
259{
260 unsigned long tmp;
261 ptrace_area parea;
262 int copied, ret;
263
264 switch (request) {
265 case PTRACE_PEEKTEXT:
266 case PTRACE_PEEKDATA:
267 /* Remove high order bit from address (only for 31 bit). */
268 addr &= PSW_ADDR_INSN;
269 /* read word at location addr. */
270 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
271 if (copied != sizeof(tmp))
272 return -EIO;
273 return put_user(tmp, (unsigned long __user *) data);
274
275 case PTRACE_PEEKUSR:
276 /* read the word at location addr in the USER area. */
277 return peek_user(child, addr, data);
278
279 case PTRACE_POKETEXT:
280 case PTRACE_POKEDATA:
281 /* Remove high order bit from address (only for 31 bit). */
282 addr &= PSW_ADDR_INSN;
283 /* write the word at location addr. */
284 copied = access_process_vm(child, addr, &data, sizeof(data),1);
285 if (copied != sizeof(data))
286 return -EIO;
287 return 0;
288
289 case PTRACE_POKEUSR:
290 /* write the word at location addr in the USER area */
291 return poke_user(child, addr, data);
292
293 case PTRACE_PEEKUSR_AREA:
294 case PTRACE_POKEUSR_AREA:
295 if (copy_from_user(&parea, (void __user *) addr,
296 sizeof(parea)))
297 return -EFAULT;
298 addr = parea.kernel_addr;
299 data = parea.process_addr;
300 copied = 0;
301 while (copied < parea.len) {
302 if (request == PTRACE_PEEKUSR_AREA)
303 ret = peek_user(child, addr, data);
304 else {
305 addr_t tmp;
306 if (get_user (tmp, (addr_t __user *) data))
307 return -EFAULT;
308 ret = poke_user(child, addr, tmp);
309 }
310 if (ret)
311 return ret;
312 addr += sizeof(unsigned long);
313 data += sizeof(unsigned long);
314 copied += sizeof(unsigned long);
315 }
316 return 0;
317 }
318 return ptrace_request(child, request, addr, data);
319}
320
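PTRACE_PEEKUSR_AREA and PTRACE_POKEUSR_AREA transfer a whole block of the user area per call, described by the ptrace_area fields kernel_addr, process_addr and len used above. A hedged tracer-side sketch, assuming the s390 asm/ptrace.h definitions (buf and pid are placeholders):

    ptrace_area parea;
    unsigned long buf[4];

    parea.kernel_addr = 0;                     /* offset into struct user */
    parea.process_addr = (unsigned long) buf;  /* destination in the tracer */
    parea.len = sizeof(buf);
    ptrace(PTRACE_PEEKUSR_AREA, pid, (long) &parea, 0);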
321#ifdef CONFIG_S390_SUPPORT
322/*
323 * Now the fun part starts... a 31 bit program running in the
324 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
325 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
326 * to handle; the difference from the 64 bit versions of the requests
327 * is that the access is done in multiples of 4 bytes instead of
328 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
329 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
330 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
331 * is a 31 bit program too, the content of struct user can be
332 * emulated. A 31 bit program peeking into the struct user of
333 * a 64 bit program is a no-no.
334 */
335
336/*
337 * Same as peek_user but for a 31 bit program.
338 */
339static int
340peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
341{
342 struct user32 *dummy32 = NULL;
343 per_struct32 *dummy_per32 = NULL;
344 addr_t offset;
345 __u32 tmp;
346
347 if (!test_thread_flag(TIF_31BIT) ||
348 (addr & 3) || addr > sizeof(struct user) - 3)
349 return -EIO;
350
351 if (addr < (addr_t) &dummy32->regs.acrs) {
352 /*
353 * psw and gprs are stored on the stack
354 */
355 if (addr == (addr_t) &dummy32->regs.psw.mask) {
356 /* Fake a 31 bit psw mask. */
357 tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
358 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
359 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
360 /* Fake a 31 bit psw address. */
361 tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
362 PSW32_ADDR_AMODE31;
363 } else {
364 /* gpr 0-15 */
365 tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
366 addr*2 + 4);
367 }
368 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
369 /*
370 * access registers are stored in the thread structure
371 */
372 offset = addr - (addr_t) &dummy32->regs.acrs;
373 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
374
375 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
376 /*
377 * orig_gpr2 is stored on the kernel stack
378 */
379 tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
380
381 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
382 /*
383 * floating point regs. are stored in the thread structure
384 */
385 offset = addr - (addr_t) &dummy32->regs.fp_regs;
386 tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
387
388 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
389 /*
390 * per_info is found in the thread structure
391 */
392 offset = addr - (addr_t) &dummy32->regs.per_info;
393 /* This is magic. See per_struct and per_struct32. */
394 if ((offset >= (addr_t) &dummy_per32->control_regs &&
395 offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
396 (offset >= (addr_t) &dummy_per32->starting_addr &&
397 offset <= (addr_t) &dummy_per32->ending_addr) ||
398 offset == (addr_t) &dummy_per32->lowcore.words.address)
399 offset = offset*2 + 4;
400 else
401 offset = offset*2;
402 tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
403
404 } else
405 tmp = 0;
406
407 return put_user(tmp, (__u32 __user *) data);
408}
409
410/*
411 * Same as poke_user but for a 31 bit program.
412 */
413static int
414poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
415{
416 struct user32 *dummy32 = NULL;
417 per_struct32 *dummy_per32 = NULL;
418 addr_t offset;
419 __u32 tmp;
420
421 if (!test_thread_flag(TIF_31BIT) ||
422 (addr & 3) || addr > sizeof(struct user32) - 3)
423 return -EIO;
424
425 tmp = (__u32) data;
426
427 if (addr < (addr_t) &dummy32->regs.acrs) {
428 /*
429 * psw, gprs, acrs and orig_gpr2 are stored on the stack
430 */
431 if (addr == (addr_t) &dummy32->regs.psw.mask) {
432 /* Build a 64 bit psw mask from 31 bit mask. */
433 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
434 /* Invalid psw mask. */
435 return -EINVAL;
436 __KSTK_PTREGS(child)->psw.mask =
437 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
438 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
439 /* Build a 64 bit psw address from 31 bit address. */
440 __KSTK_PTREGS(child)->psw.addr =
441 (__u64) tmp & PSW32_ADDR_INSN;
442 } else {
443 /* gpr 0-15 */
444 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
445 + addr*2 + 4) = tmp;
446 }
447 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
448 /*
449 * access registers are stored in the thread structure
450 */
451 offset = addr - (addr_t) &dummy32->regs.acrs;
452 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
453
454 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
455 /*
456 * orig_gpr2 is stored on the kernel stack
457 */
458 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
459
460 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
461 /*
462 * floating point regs. are stored in the thread structure
463 */
464 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
465 (tmp & ~FPC_VALID_MASK) != 0)
466 /* Invalid floating point control. */
467 return -EINVAL;
468 offset = addr - (addr_t) &dummy32->regs.fp_regs;
469 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
470
471 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
472 /*
473 * per_info is found in the thread structure.
474 */
475 offset = addr - (addr_t) &dummy32->regs.per_info;
476 /*
477 * This is magic. See per_struct and per_struct32.
478 * By coincidence the offsets in per_struct are exactly
479 * twice the offsets in per_struct32 for all fields.
480 * The 8 byte fields need special handling though,
481 * because the second half (bytes 4-7) is needed and
482 * not the first half.
483 */
484 if ((offset >= (addr_t) &dummy_per32->control_regs &&
485 offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
486 (offset >= (addr_t) &dummy_per32->starting_addr &&
487 offset <= (addr_t) &dummy_per32->ending_addr) ||
488 offset == (addr_t) &dummy_per32->lowcore.words.address)
489 offset = offset*2 + 4;
490 else
491 offset = offset*2;
492 *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
493
494 }
495
496 FixPerRegisters(child);
497 return 0;
498}
499
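A worked example of the offset doubling (illustrative): a 4 byte field at offset 0x08 in per_struct32 sits at 0x08*2 = 0x10 in per_struct, while an 8 byte field whose low word is wanted maps to offset*2 + 4, picking bytes 4-7 of the 64 bit value. As a one-line helper (not part of this commit):

    static inline addr_t per_offset64(addr_t off32, int want_low_half)
    {
            return want_low_half ? off32 * 2 + 4 : off32 * 2;
    }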
500static int
501do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
502{
503 unsigned int tmp; /* 4 bytes !! */
504 ptrace_area_emu31 parea;
505 int copied, ret;
506
507 switch (request) {
508 case PTRACE_PEEKTEXT:
509 case PTRACE_PEEKDATA:
510 /* read word at location addr. */
511 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
512 if (copied != sizeof(tmp))
513 return -EIO;
514 return put_user(tmp, (unsigned int __user *) data);
515
516 case PTRACE_PEEKUSR:
517 /* read the word at location addr in the USER area. */
518 return peek_user_emu31(child, addr, data);
519
520 case PTRACE_POKETEXT:
521 case PTRACE_POKEDATA:
522 /* write the word at location addr. */
523 tmp = data;
524 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
525 if (copied != sizeof(tmp))
526 return -EIO;
527 return 0;
528
529 case PTRACE_POKEUSR:
530 /* write the word at location addr in the USER area */
531 return poke_user_emu31(child, addr, data);
532
533 case PTRACE_PEEKUSR_AREA:
534 case PTRACE_POKEUSR_AREA:
535 if (copy_from_user(&parea, (void __user *) addr,
536 sizeof(parea)))
537 return -EFAULT;
538 addr = parea.kernel_addr;
539 data = parea.process_addr;
540 copied = 0;
541 while (copied < parea.len) {
542 if (request == PTRACE_PEEKUSR_AREA)
543 ret = peek_user_emu31(child, addr, data);
544 else {
545 __u32 tmp;
546 if (get_user (tmp, (__u32 __user *) data))
547 return -EFAULT;
548 ret = poke_user_emu31(child, addr, tmp);
549 }
550 if (ret)
551 return ret;
552 addr += sizeof(unsigned int);
553 data += sizeof(unsigned int);
554 copied += sizeof(unsigned int);
555 }
556 return 0;
557 case PTRACE_GETEVENTMSG:
558 return put_user((__u32) child->ptrace_message,
559 (unsigned int __user *) data);
560 case PTRACE_GETSIGINFO:
561 if (child->last_siginfo == NULL)
562 return -EINVAL;
563 return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
564 child->last_siginfo);
565 case PTRACE_SETSIGINFO:
566 if (child->last_siginfo == NULL)
567 return -EINVAL;
568 return copy_siginfo_from_user32(child->last_siginfo,
569 (compat_siginfo_t __user *) data);
570 }
571 return ptrace_request(child, request, addr, data);
572}
573#endif
574
575#define PT32_IEEE_IP 0x13c
576
577static int
578do_ptrace(struct task_struct *child, long request, long addr, long data)
579{
580 int ret;
581
582 if (request == PTRACE_ATTACH)
583 return ptrace_attach(child);
584
585 /*
586 * Special cases to get/store the IEEE instruction pointer.
587 */
588 if (child == current) {
589 if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
590 return peek_user(child, addr, data);
591 if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
592 return poke_user(child, addr, data);
593#ifdef CONFIG_S390_SUPPORT
594 if (request == PTRACE_PEEKUSR &&
595 addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
596 return peek_user_emu31(child, addr, data);
597 if (request == PTRACE_POKEUSR &&
598 addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
599 return poke_user_emu31(child, addr, data);
600#endif
601 }
602
603 ret = ptrace_check_attach(child, request == PTRACE_KILL);
604 if (ret < 0)
605 return ret;
606
607 switch (request) {
608 case PTRACE_SYSCALL:
609 /* continue and stop at next (return from) syscall */
610 case PTRACE_CONT:
611 /* restart after signal. */
612 if ((unsigned long) data >= _NSIG)
613 return -EIO;
614 if (request == PTRACE_SYSCALL)
615 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
616 else
617 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
618 child->exit_code = data;
619 /* make sure the single step bit is not set. */
620 clear_single_step(child);
621 wake_up_process(child);
622 return 0;
623
624 case PTRACE_KILL:
625 /*
626 * Make the child exit. Best I can do is send it a sigkill.
627 * Perhaps it should be put in the status that it wants to
628 * exit.
629 */
630 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
631 return 0;
632 child->exit_code = SIGKILL;
633 /* make sure the single step bit is not set. */
634 clear_single_step(child);
635 wake_up_process(child);
636 return 0;
637
638 case PTRACE_SINGLESTEP:
639 /* set the trap flag. */
640 if ((unsigned long) data >= _NSIG)
641 return -EIO;
642 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
643 child->exit_code = data;
644 if (data)
645 set_tsk_thread_flag(child, TIF_SINGLE_STEP);
646 else
647 set_single_step(child);
648 /* give it a chance to run. */
649 wake_up_process(child);
650 return 0;
651
652 case PTRACE_DETACH:
653 /* detach a process that was attached. */
654 return ptrace_detach(child, data);
655
656
657 /* Do requests that differ for 31/64 bit */
658 default:
659#ifdef CONFIG_S390_SUPPORT
660 if (test_thread_flag(TIF_31BIT))
661 return do_ptrace_emu31(child, request, addr, data);
662#endif
663 return do_ptrace_normal(child, request, addr, data);
664 }
665 /* Not reached. */
666 return -EIO;
667}
668
669asmlinkage long
670sys_ptrace(long request, long pid, long addr, long data)
671{
672 struct task_struct *child;
673 int ret;
674
675 lock_kernel();
676
677 if (request == PTRACE_TRACEME) {
678 /* are we already being traced? */
679 ret = -EPERM;
680 if (current->ptrace & PT_PTRACED)
681 goto out;
682 ret = security_ptrace(current->parent, current);
683 if (ret)
684 goto out;
685 /* set the ptrace bit in the process flags. */
686 current->ptrace |= PT_PTRACED;
687 goto out;
688 }
689
690 ret = -EPERM;
691 if (pid == 1) /* you may not mess with init */
692 goto out;
693
694 ret = -ESRCH;
695 read_lock(&tasklist_lock);
696 child = find_task_by_pid(pid);
697 if (child)
698 get_task_struct(child);
699 read_unlock(&tasklist_lock);
700 if (!child)
701 goto out;
702
703 ret = do_ptrace(child, request, addr, data);
704
705 put_task_struct(child);
706out:
707 unlock_kernel();
708 return ret;
709}
710
711asmlinkage void
712syscall_trace(struct pt_regs *regs, int entryexit)
713{
714 if (unlikely(current->audit_context)) {
715 if (!entryexit)
716 audit_syscall_entry(current, regs->gprs[2],
717 regs->orig_gpr2, regs->gprs[3],
718 regs->gprs[4], regs->gprs[5]);
719 else
720 audit_syscall_exit(current, regs->gprs[2]);
721 }
722 if (!test_thread_flag(TIF_SYSCALL_TRACE))
723 return;
724 if (!(current->ptrace & PT_PTRACED))
725 return;
726 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
727 ? 0x80 : 0));
728
729 /*
730 * this isn't the same as continuing with a signal, but it will do
731 * for normal use. strace only continues with a signal if the
732 * stopping signal is not SIGTRAP. -brl
733 */
734 if (current->exit_code) {
735 send_sig(current->exit_code, current, 1);
736 current->exit_code = 0;
737 }
738}
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
new file mode 100644
index 000000000000..658e5ac484f9
--- /dev/null
+++ b/arch/s390/kernel/reipl.S
@@ -0,0 +1,78 @@
1/*
2 * arch/s390/kernel/reipl.S
3 *
4 * S390 version
5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 */
8
9#include <asm/lowcore.h>
10
11 .globl do_reipl
12do_reipl: basr %r13,0
13.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13)
15 stctl %c0,%c0,.Lctlsave-.Lpg0(%r13)
16 ni .Lctlsave-.Lpg0(%r13),0xef
17 lctl %c0,%c0,.Lctlsave-.Lpg0(%r13)
18 lr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13)
21 oi .Lschib+5-.Lpg0(%r13),0x84
22.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
23 msch .Lschib-.Lpg0(%r13)
24 lhi %r0,5
25.Lssch: ssch .Liplorb-.Lpg0(%r13)
26 jz .L001
27 brct %r0,.Lssch
28 bas %r14,.Ldisab-.Lpg0(%r13)
29.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
30.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
31.Lcont: c %r1,__LC_SUBCHANNEL_ID
32 jnz .Ltpi
33 clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
34 jnz .Ltpi
35 tsch .Liplirb-.Lpg0(%r13)
36 tm .Liplirb+9-.Lpg0(%r13),0xbf
37 jz .L002
38 bas %r14,.Ldisab-.Lpg0(%r13)
39.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
40 jz .L003
41 bas %r14,.Ldisab-.Lpg0(%r13)
42.L003: spx .Lnull-.Lpg0(%r13)
43 st %r1,__LC_SUBCHANNEL_ID
44 lpsw 0
45 sigp 0,0,0(6)
46.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
47 lpsw .Ldispsw-.Lpg0(%r13)
48 .align 8
49.Lall: .long 0xff000000
50.Lnull: .long 0x00000000
51.Lctlsave: .long 0x00000000
52 .align 8
53.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
54.Lpcnew: .long 0x00080000,0x80000000+.Lecs
55.Lionew: .long 0x00080000,0x80000000+.Lcont
56.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
57.Ldispsw: .long 0x000a0000,0x00000000
58.Liplccws: .long 0x02000000,0x60000018
59 .long 0x08000008,0x20000001
60.Liplorb: .long 0x0049504c,0x0040ff80
61 .long 0x00000000+.Liplccws
62.Lschib: .long 0x00000000,0x00000000
63 .long 0x00000000,0x00000000
64 .long 0x00000000,0x00000000
65 .long 0x00000000,0x00000000
66 .long 0x00000000,0x00000000
67 .long 0x00000000,0x00000000
68.Liplirb: .long 0x00000000,0x00000000
69 .long 0x00000000,0x00000000
70 .long 0x00000000,0x00000000
71 .long 0x00000000,0x00000000
72 .long 0x00000000,0x00000000
73 .long 0x00000000,0x00000000
74 .long 0x00000000,0x00000000
75 .long 0x00000000,0x00000000
76
77
78
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
new file mode 100644
index 000000000000..4d090d60f3ef
--- /dev/null
+++ b/arch/s390/kernel/reipl64.S
@@ -0,0 +1,96 @@
1/*
2 * arch/s390/kernel/reipl64.S
3 *
4 * S390 version
5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
8 */
9
10#include <asm/lowcore.h>
11 .globl do_reipl
12do_reipl: basr %r13,0
13.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
15 stctg %c0,%c0,.Lctlsave-.Lpg0(%r13)
16 ni .Lctlsave+4-.Lpg0(%r13),0xef
17 lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13)
18 lgr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13)
21 oi .Lschib+5-.Lpg0(%r13),0x84
22.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
23 msch .Lschib-.Lpg0(%r13)
24 lghi %r0,5
25.Lssch: ssch .Liplorb-.Lpg0(%r13)
26 jz .L001
27 brct %r0,.Lssch
28 bas %r14,.Ldisab-.Lpg0(%r13)
29.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
30.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
31.Lcont: c %r1,__LC_SUBCHANNEL_ID
32 jnz .Ltpi
33 clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
34 jnz .Ltpi
35 tsch .Liplirb-.Lpg0(%r13)
36 tm .Liplirb+9-.Lpg0(%r13),0xbf
37 jz .L002
38 bas %r14,.Ldisab-.Lpg0(%r13)
39.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
40 jz .L003
41 bas %r14,.Ldisab-.Lpg0(%r13)
42.L003: spx .Lnull-.Lpg0(%r13)
43 st %r1,__LC_SUBCHANNEL_ID
44 lhi %r1,0 # mode 0 = esa
45 slr %r0,%r0 # set cpuid to zero
46 sigp %r1,%r0,0x12 # switch to esa mode
47 lpsw 0
48.Ldisab: sll %r14,1
49 srl %r14,1 # need to kill hi bit to avoid specification exceptions.
50 st %r14,.Ldispsw+12-.Lpg0(%r13)
51 lpswe .Ldispsw-.Lpg0(%r13)
52 .align 8
53.Lall: .quad 0x00000000ff000000
54.Lctlsave: .quad 0x0000000000000000
55.Lnull: .long 0x0000000000000000
56 .align 16
57/*
58 * These addresses have to be 31 bit, otherwise
59 * the sigp will throw a specification exception
60 * when switching to ESA mode, as bit 31 would be
61 * set in the ESA psw.
62 * Bit 31 of the addresses has to be 0 for the
63 * 31 bit lpswe instruction, a fact that appears
64 * to have been omitted from the POP.
65 */
66.Lnewpsw: .quad 0x0000000080000000
67 .quad .Lpg1
68.Lpcnew: .quad 0x0000000080000000
69 .quad .Lecs
70.Lionew: .quad 0x0000000080000000
71 .quad .Lcont
72.Lwaitpsw: .quad 0x0202000080000000
73 .quad .Ltpi
74.Ldispsw: .quad 0x0002000080000000
75 .quad 0x0000000000000000
76.Liplccws: .long 0x02000000,0x60000018
77 .long 0x08000008,0x20000001
78.Liplorb: .long 0x0049504c,0x0040ff80
79 .long 0x00000000+.Liplccws
80.Lschib: .long 0x00000000,0x00000000
81 .long 0x00000000,0x00000000
82 .long 0x00000000,0x00000000
83 .long 0x00000000,0x00000000
84 .long 0x00000000,0x00000000
85 .long 0x00000000,0x00000000
86.Liplirb: .long 0x00000000,0x00000000
87 .long 0x00000000,0x00000000
88 .long 0x00000000,0x00000000
89 .long 0x00000000,0x00000000
90 .long 0x00000000,0x00000000
91 .long 0x00000000,0x00000000
92 .long 0x00000000,0x00000000
93 .long 0x00000000,0x00000000
94
95
96
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
new file mode 100644
index 000000000000..3bdd38ec71da
--- /dev/null
+++ b/arch/s390/kernel/s390_ext.c
@@ -0,0 +1,135 @@
1/*
2 * arch/s390/kernel/s390_ext.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/errno.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16
17#include <asm/lowcore.h>
18#include <asm/s390_ext.h>
19#include <asm/irq.h>
20
21/*
22 * Simple hash strategy: index = code & 0xff;
23 * ext_int_hash[index] is the start of the list for all external interrupts
24 * that hash to this index. With the current set of external interrupts
25 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
26 * iucv and 0x2603 pfault) this is always the first element.
27 */
28ext_int_info_t *ext_int_hash[256] = { 0, };
29
30int register_external_interrupt(__u16 code, ext_int_handler_t handler)
31{
32 ext_int_info_t *p;
33 int index;
34
35 p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
36 if (p == NULL)
37 return -ENOMEM;
38 p->code = code;
39 p->handler = handler;
40 index = code & 0xff;
41 p->next = ext_int_hash[index];
42 ext_int_hash[index] = p;
43 return 0;
44}
45
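A hedged usage sketch for the registration interface (my_ext_handler and my_ext_init are hypothetical; 0x1202 is the external-call code from the comment above):

    static void my_ext_handler(struct pt_regs *regs, __u16 code)
    {
            /* invoked from do_extint() with irq_enter() already done */
    }

    static int __init my_ext_init(void)
    {
            return register_external_interrupt(0x1202, my_ext_handler);
    }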
46int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
47 ext_int_info_t *p)
48{
49 int index;
50
51 if (p == NULL)
52 return -EINVAL;
53 p->code = code;
54 p->handler = handler;
55 index = code & 0xff;
56 p->next = ext_int_hash[index];
57 ext_int_hash[index] = p;
58 return 0;
59}
60
61int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
62{
63 ext_int_info_t *p, *q;
64 int index;
65
66 index = code & 0xff;
67 q = NULL;
68 p = ext_int_hash[index];
69 while (p != NULL) {
70 if (p->code == code && p->handler == handler)
71 break;
72 q = p;
73 p = p->next;
74 }
75 if (p == NULL)
76 return -ENOENT;
77 if (q != NULL)
78 q->next = p->next;
79 else
80 ext_int_hash[index] = p->next;
81 kfree(p);
82 return 0;
83}
84
85int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
86 ext_int_info_t *p)
87{
88 ext_int_info_t *q;
89 int index;
90
91 if (p == NULL || p->code != code || p->handler != handler)
92 return -EINVAL;
93 index = code & 0xff;
94 q = ext_int_hash[index];
95 if (p != q) {
96 while (q != NULL) {
97 if (q->next == p)
98 break;
99 q = q->next;
100 }
101 if (q == NULL)
102 return -ENOENT;
103 q->next = p->next;
104 } else
105 ext_int_hash[index] = p->next;
106 return 0;
107}
108
109void do_extint(struct pt_regs *regs, unsigned short code)
110{
111 ext_int_info_t *p;
112 int index;
113
114 irq_enter();
115 asm volatile ("mc 0,0");
116 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
117 /*
118 * Make sure that the i/o interrupt did not "overtake"
119 * the last HZ timer interrupt.
120 */
121 account_ticks(regs);
122 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
123 index = code & 0xff;
124 for (p = ext_int_hash[index]; p; p = p->next) {
125 if (likely(p->code == code)) {
126 if (likely(p->handler))
127 p->handler(regs, code);
128 }
129 }
130 irq_exit();
131}
132
133EXPORT_SYMBOL(register_external_interrupt);
134EXPORT_SYMBOL(unregister_external_interrupt);
135
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
new file mode 100644
index 000000000000..11fd6d556d8f
--- /dev/null
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -0,0 +1,65 @@
1/*
2 * arch/s390/kernel/s390_ksyms.c
3 *
4 * S390 version
5 */
6#include <linux/config.h>
7#include <linux/highuid.h>
8#include <linux/module.h>
9#include <linux/mm.h>
10#include <linux/smp.h>
11#include <linux/syscalls.h>
12#include <linux/interrupt.h>
13#include <linux/ioctl32.h>
14#include <asm/checksum.h>
15#include <asm/cpcmd.h>
16#include <asm/delay.h>
17#include <asm/pgalloc.h>
18#include <asm/setup.h>
19#ifdef CONFIG_IP_MULTICAST
20#include <net/arp.h>
21#endif
22
23/*
24 * memory management
25 */
26EXPORT_SYMBOL(_oi_bitmap);
27EXPORT_SYMBOL(_ni_bitmap);
28EXPORT_SYMBOL(_zb_findmap);
29EXPORT_SYMBOL(_sb_findmap);
30EXPORT_SYMBOL(__copy_from_user_asm);
31EXPORT_SYMBOL(__copy_to_user_asm);
32EXPORT_SYMBOL(__copy_in_user_asm);
33EXPORT_SYMBOL(__clear_user_asm);
34EXPORT_SYMBOL(__strncpy_from_user_asm);
35EXPORT_SYMBOL(__strnlen_user_asm);
36EXPORT_SYMBOL(diag10);
37EXPORT_SYMBOL(default_storage_key);
38
39/*
40 * semaphore ops
41 */
42EXPORT_SYMBOL(__up);
43EXPORT_SYMBOL(__down);
44EXPORT_SYMBOL(__down_interruptible);
45
46/*
47 * binfmt_elf loader
48 */
49extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
50EXPORT_SYMBOL(dump_fpu);
51EXPORT_SYMBOL(overflowuid);
52EXPORT_SYMBOL(overflowgid);
53EXPORT_SYMBOL(empty_zero_page);
54
55/*
56 * misc.
57 */
58EXPORT_SYMBOL(machine_flags);
59EXPORT_SYMBOL(__udelay);
60EXPORT_SYMBOL(kernel_thread);
61EXPORT_SYMBOL(csum_fold);
62EXPORT_SYMBOL(console_mode);
63EXPORT_SYMBOL(console_devno);
64EXPORT_SYMBOL(console_irq);
65EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
new file mode 100644
index 000000000000..8dfb690c159f
--- /dev/null
+++ b/arch/s390/kernel/semaphore.c
@@ -0,0 +1,108 @@
1/*
2 * linux/arch/s390/kernel/semaphore.c
3 *
4 * S390 version
5 * Copyright (C) 1998-2000 IBM Corporation
6 * Author(s): Martin Schwidefsky
7 *
8 * Derived from "linux/arch/i386/kernel/semaphore.c"
9 * Copyright (C) 1999, Linus Torvalds
10 *
11 */
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15
16#include <asm/semaphore.h>
17
18/*
19 * Atomically update sem->count. Equivalent to:
20 * old_val = sem->count.counter;
21 * new_val = ((old_val >= 0) ? old_val : 0) + incr;
22 * sem->count.counter = new_val;
23 * return old_val;
24 */
25static inline int __sem_update_count(struct semaphore *sem, int incr)
26{
27 int old_val, new_val;
28
29 __asm__ __volatile__(" l %0,0(%3)\n"
30 "0: ltr %1,%0\n"
31 " jhe 1f\n"
32 " lhi %1,0\n"
33 "1: ar %1,%4\n"
34 " cs %0,%1,0(%3)\n"
35 " jl 0b\n"
36 : "=&d" (old_val), "=&d" (new_val),
37 "=m" (sem->count)
38 : "a" (&sem->count), "d" (incr), "m" (sem->count)
39 : "cc" );
40 return old_val;
41}
42
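The assembly above implements the comment's update rule with a load and a compare-and-swap (cs) retry loop. A portable sketch of the same logic using a GCC builtin (illustrative, not kernel code):

    static int sem_update_count_sketch(int *count, int incr)
    {
            int old_val, new_val;

            do {
                    old_val = *count;
                    new_val = ((old_val >= 0) ? old_val : 0) + incr;
                    /* retry if another cpu changed *count meanwhile */
            } while (!__sync_bool_compare_and_swap(count, old_val, new_val));
            return old_val;
    }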
43/*
44 * The inline function up() incremented count but the result
45 * was <= 0. This indicates that some process is waiting on
46 * the semaphore. The semaphore is free and we'll wake the
47 * first sleeping process, so we set count to 1 unless some
48 * other cpu has called up in the meantime in which case
49 * we just increment count by 1.
50 */
51void __up(struct semaphore *sem)
52{
53 __sem_update_count(sem, 1);
54 wake_up(&sem->wait);
55}
56
57/*
58 * The inline function down() decremented count and the result
59 * was < 0. The wait loop will atomically test and update the
60 * semaphore counter following the rules:
61 * count > 0: decrement count, wake up queue and exit.
62 * count <= 0: set count to -1, go to sleep.
63 */
64void __sched __down(struct semaphore * sem)
65{
66 struct task_struct *tsk = current;
67 DECLARE_WAITQUEUE(wait, tsk);
68
69 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
70 add_wait_queue_exclusive(&sem->wait, &wait);
71 while (__sem_update_count(sem, -1) <= 0) {
72 schedule();
73 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
74 }
75 remove_wait_queue(&sem->wait, &wait);
76 __set_task_state(tsk, TASK_RUNNING);
77 wake_up(&sem->wait);
78}
79
80/*
81 * Same as __down() with an additional test for signals.
82 * If a signal is pending the count is updated as follows:
83 * count > 0: wake up queue and exit.
84 * count <= 0: set count to 0, wake up queue and exit.
85 */
86int __sched __down_interruptible(struct semaphore * sem)
87{
88 int retval = 0;
89 struct task_struct *tsk = current;
90 DECLARE_WAITQUEUE(wait, tsk);
91
92 __set_task_state(tsk, TASK_INTERRUPTIBLE);
93 add_wait_queue_exclusive(&sem->wait, &wait);
94 while (__sem_update_count(sem, -1) <= 0) {
95 if (signal_pending(current)) {
96 __sem_update_count(sem, 0);
97 retval = -EINTR;
98 break;
99 }
100 schedule();
101 set_task_state(tsk, TASK_INTERRUPTIBLE);
102 }
103 remove_wait_queue(&sem->wait, &wait);
104 __set_task_state(tsk, TASK_RUNNING);
105 wake_up(&sem->wait);
106 return retval;
107}
108
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
new file mode 100644
index 000000000000..c879c40aa7a5
--- /dev/null
+++ b/arch/s390/kernel/setup.c
@@ -0,0 +1,632 @@
1/*
2 * arch/s390/kernel/setup.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Derived from "arch/i386/kernel/setup.c"
10 * Copyright (C) 1995, Linus Torvalds
11 */
12
13/*
14 * This file handles the architecture-dependent parts of initialization
15 */
16
17#include <linux/errno.h>
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/tty.h>
29#include <linux/ioport.h>
30#include <linux/delay.h>
31#include <linux/config.h>
32#include <linux/init.h>
33#include <linux/initrd.h>
34#include <linux/bootmem.h>
35#include <linux/root_dev.h>
36#include <linux/console.h>
37#include <linux/seq_file.h>
38#include <linux/kernel_stat.h>
39
40#include <asm/uaccess.h>
41#include <asm/system.h>
42#include <asm/smp.h>
43#include <asm/mmu_context.h>
44#include <asm/cpcmd.h>
45#include <asm/lowcore.h>
46#include <asm/irq.h>
47
48/*
49 * Machine setup..
50 */
51unsigned int console_mode = 0;
52unsigned int console_devno = -1;
53unsigned int console_irq = -1;
54unsigned long memory_size = 0;
55unsigned long machine_flags = 0;
56unsigned int default_storage_key = 0;
57struct {
58 unsigned long addr, size, type;
59} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
60#define CHUNK_READ_WRITE 0
61#define CHUNK_READ_ONLY 1
62volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
63
64/*
65 * Setup options
66 */
67extern int _text,_etext, _edata, _end;
68
69/*
70 * This is set up by the setup-routine at boot-time.
71 * For S390 we need to find out what we have to set up,
72 * using address 0x10400 ...
73 */
74
75#include <asm/setup.h>
76
77static char command_line[COMMAND_LINE_SIZE] = { 0, };
78
79static struct resource code_resource = {
80 .name = "Kernel code",
81 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
82};
83
84static struct resource data_resource = {
85 .name = "Kernel data",
86 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
87};
88
89/*
90 * cpu_init() initializes state that is per-CPU.
91 */
92void __devinit cpu_init (void)
93{
94 int addr = hard_smp_processor_id();
95
96 /*
97 * Store processor id in lowcore (used e.g. in timer_interrupt)
98 */
99 asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
100 S390_lowcore.cpu_data.cpu_addr = addr;
101
102 /*
103 * Force FPU initialization:
104 */
105 clear_thread_flag(TIF_USEDFPU);
106 clear_used_math();
107
108 atomic_inc(&init_mm.mm_count);
109 current->active_mm = &init_mm;
110 if (current->mm)
111 BUG();
112 enter_lazy_tlb(&init_mm, current);
113}
114
115/*
116 * VM halt and poweroff setup routines
117 */
118char vmhalt_cmd[128] = "";
119char vmpoff_cmd[128] = "";
120
121static inline void strncpy_skip_quote(char *dst, char *src, int n)
122{
123 int sx, dx;
124
125 dx = 0;
126 for (sx = 0; src[sx] != 0; sx++) {
127 if (src[sx] == '"') continue;
128 dst[dx++] = src[sx];
129 if (dx >= n) break;
130 }
131}
132
133static int __init vmhalt_setup(char *str)
134{
135 strncpy_skip_quote(vmhalt_cmd, str, 127);
136 vmhalt_cmd[127] = 0;
137 return 1;
138}
139
140__setup("vmhalt=", vmhalt_setup);
141
142static int __init vmpoff_setup(char *str)
143{
144 strncpy_skip_quote(vmpoff_cmd, str, 127);
145 vmpoff_cmd[127] = 0;
146 return 1;
147}
148
149__setup("vmpoff=", vmpoff_setup);
150
151/*
152 * condev= and conmode= setup parameters.
153 */
154
155static int __init condev_setup(char *str)
156{
157 int vdev;
158
159 vdev = simple_strtoul(str, &str, 0);
160 if (vdev >= 0 && vdev < 65536) {
161 console_devno = vdev;
162 console_irq = -1;
163 }
164 return 1;
165}
166
167__setup("condev=", condev_setup);
168
169static int __init conmode_setup(char *str)
170{
171#if defined(CONFIG_SCLP_CONSOLE)
172 if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
173 SET_CONSOLE_SCLP;
174#endif
175#if defined(CONFIG_TN3215_CONSOLE)
176 if (strncmp(str, "3215", 5) == 0)
177 SET_CONSOLE_3215;
178#endif
179#if defined(CONFIG_TN3270_CONSOLE)
180 if (strncmp(str, "3270", 5) == 0)
181 SET_CONSOLE_3270;
182#endif
183 return 1;
184}
185
186__setup("conmode=", conmode_setup);
187
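/*
 * Pick a sensible default console if none was specified: under VM
 * ask CP about the console device and terminal mode, on a P390
 * prefer the 3215/3270 drivers, otherwise fall back to SCLP.
 */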
188static void __init conmode_default(void)
189{
190 char query_buffer[1024];
191 char *ptr;
192
193 if (MACHINE_IS_VM) {
194 __cpcmd("QUERY CONSOLE", query_buffer, 1024);
195 console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
196 ptr = strstr(query_buffer, "SUBCHANNEL =");
197 console_irq = simple_strtoul(ptr + 13, NULL, 16);
198 __cpcmd("QUERY TERM", query_buffer, 1024);
199 ptr = strstr(query_buffer, "CONMODE");
200 /*
201 * Set the conmode to 3215 so that the device recognition
202 * will set the cu_type of the console to 3215. If the
203 * conmode is 3270 and we don't set it back then both
204 * 3215 and the 3270 driver will try to access the console
205 * device (3215 as console and 3270 as normal tty).
206 */
207 __cpcmd("TERM CONMODE 3215", NULL, 0);
208 if (ptr == NULL) {
209#if defined(CONFIG_SCLP_CONSOLE)
210 SET_CONSOLE_SCLP;
211#endif
212 return;
213 }
214 if (strncmp(ptr + 8, "3270", 4) == 0) {
215#if defined(CONFIG_TN3270_CONSOLE)
216 SET_CONSOLE_3270;
217#elif defined(CONFIG_TN3215_CONSOLE)
218 SET_CONSOLE_3215;
219#elif defined(CONFIG_SCLP_CONSOLE)
220 SET_CONSOLE_SCLP;
221#endif
222 } else if (strncmp(ptr + 8, "3215", 4) == 0) {
223#if defined(CONFIG_TN3215_CONSOLE)
224 SET_CONSOLE_3215;
225#elif defined(CONFIG_TN3270_CONSOLE)
226 SET_CONSOLE_3270;
227#elif defined(CONFIG_SCLP_CONSOLE)
228 SET_CONSOLE_SCLP;
229#endif
230 }
231 } else if (MACHINE_IS_P390) {
232#if defined(CONFIG_TN3215_CONSOLE)
233 SET_CONSOLE_3215;
234#elif defined(CONFIG_TN3270_CONSOLE)
235 SET_CONSOLE_3270;
236#endif
237 } else {
238#if defined(CONFIG_SCLP_CONSOLE)
239 SET_CONSOLE_SCLP;
240#endif
241 }
242}
243
244#ifdef CONFIG_SMP
245extern void machine_restart_smp(char *);
246extern void machine_halt_smp(void);
247extern void machine_power_off_smp(void);
248
249void (*_machine_restart)(char *command) = machine_restart_smp;
250void (*_machine_halt)(void) = machine_halt_smp;
251void (*_machine_power_off)(void) = machine_power_off_smp;
252#else
253/*
254 * Reboot, halt and power_off routines for non SMP.
255 */
256extern void reipl(unsigned long devno);
257static void do_machine_restart_nonsmp(char * __unused)
258{
259 if (MACHINE_IS_VM)
260 cpcmd ("IPL", NULL, 0);
261 else
262 reipl (0x10000 | S390_lowcore.ipl_device);
263}
264
265static void do_machine_halt_nonsmp(void)
266{
267 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
268 cpcmd(vmhalt_cmd, NULL, 0);
269 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
270}
271
272static void do_machine_power_off_nonsmp(void)
273{
274 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
275 cpcmd(vmpoff_cmd, NULL, 0);
276 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
277}
278
279void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
280void (*_machine_halt)(void) = do_machine_halt_nonsmp;
281void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
282#endif
283
284 /*
285 * Reboot, halt and power_off stubs. They just call _machine_restart,
286 * _machine_halt or _machine_power_off.
287 */
288
289void machine_restart(char *command)
290{
291 console_unblank();
292 _machine_restart(command);
293}
294
295EXPORT_SYMBOL(machine_restart);
296
297void machine_halt(void)
298{
299 console_unblank();
300 _machine_halt();
301}
302
303EXPORT_SYMBOL(machine_halt);
304
305void machine_power_off(void)
306{
307 console_unblank();
308 _machine_power_off();
309}
310
311EXPORT_SYMBOL(machine_power_off);
312
313/*
314 * Setup function called from init/main.c just after the banner
315 * was printed.
316 */
317extern char _pstart, _pend, _stext;
318
319void __init setup_arch(char **cmdline_p)
320{
321 unsigned long bootmap_size;
322 unsigned long memory_start, memory_end;
323 char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
324 unsigned long start_pfn, end_pfn;
325 static unsigned int smptrap=0;
326 unsigned long delay = 0;
327 struct _lowcore *lc;
328 int i;
329
330 if (smptrap)
331 return;
332 smptrap=1;
333
334 /*
335 * print what head.S has found out about the machine
336 */
337#ifndef CONFIG_ARCH_S390X
338 printk((MACHINE_IS_VM) ?
339 "We are running under VM (31 bit mode)\n" :
340 "We are running native (31 bit mode)\n");
341 printk((MACHINE_HAS_IEEE) ?
342 "This machine has an IEEE fpu\n" :
343 "This machine has no IEEE fpu\n");
344#else /* CONFIG_ARCH_S390X */
345 printk((MACHINE_IS_VM) ?
346 "We are running under VM (64 bit mode)\n" :
347 "We are running native (64 bit mode)\n");
348#endif /* CONFIG_ARCH_S390X */
349
350 ROOT_DEV = Root_RAM0;
351 memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/
352#ifndef CONFIG_ARCH_S390X
353	memory_end = memory_size & ~0x3fffffUL; /* align memory end down to 4MB */
354 /*
355 * We need some free virtual space to be able to do vmalloc.
356 * On a machine with 2GB memory we make sure that we have at
357 * least 128 MB free space for vmalloc.
358 */
359 if (memory_end > 1920*1024*1024)
360 memory_end = 1920*1024*1024;
361#else /* CONFIG_ARCH_S390X */
362	memory_end = memory_size & ~0x1fffffUL; /* align down to 2MB; size detected in head.s */
363#endif /* CONFIG_ARCH_S390X */
364 init_mm.start_code = PAGE_OFFSET;
365 init_mm.end_code = (unsigned long) &_etext;
366 init_mm.end_data = (unsigned long) &_edata;
367 init_mm.brk = (unsigned long) &_end;
368
369 code_resource.start = (unsigned long) &_text;
370 code_resource.end = (unsigned long) &_etext - 1;
371 data_resource.start = (unsigned long) &_etext;
372 data_resource.end = (unsigned long) &_edata - 1;
373
374 /* Save unparsed command line copy for /proc/cmdline */
375 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
376 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
377
378 for (;;) {
379 /*
380 * "mem=XXX[kKmM]" sets memsize
381 */
382 if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
383 memory_end = simple_strtoul(from+4, &from, 0);
384 if ( *from == 'K' || *from == 'k' ) {
385 memory_end = memory_end << 10;
386 from++;
387 } else if ( *from == 'M' || *from == 'm' ) {
388 memory_end = memory_end << 20;
389 from++;
390 }
391 }
392 /*
393 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
394 */
395 if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
396 delay = simple_strtoul(from+9, &from, 0);
397 if (*from == 's' || *from == 'S') {
398 delay = delay*1000000;
399 from++;
400 } else if (*from == 'm' || *from == 'M') {
401 delay = delay*60*1000000;
402 from++;
403 }
404 /* now wait for the requested amount of time */
405 udelay(delay);
406 }
407 cn = *(from++);
408 if (!cn)
409 break;
410 if (cn == '\n')
411 cn = ' '; /* replace newlines with space */
412 if (cn == 0x0d)
413 cn = ' '; /* replace 0x0d with space */
414 if (cn == ' ' && c == ' ')
415 continue; /* remove additional spaces */
416 c = cn;
417	if (to - command_line >= COMMAND_LINE_SIZE - 1)
418 break;
419 *(to++) = c;
420 }
421 if (c == ' ' && to > command_line) to--;
422 *to = '\0';
423 *cmdline_p = command_line;
424
425 /*
426 * partially used pages are not usable - thus
427 * we are rounding upwards:
428 */
429 start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
430 end_pfn = max_pfn = memory_end >> PAGE_SHIFT;
431
432 /*
433 * Initialize the boot-time allocator (with low memory only):
434 */
435 bootmap_size = init_bootmem(start_pfn, end_pfn);
436
437 /*
438 * Register RAM areas with the bootmem allocator.
439 */
440	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
441 unsigned long start_chunk, end_chunk;
442
443 if (memory_chunk[i].type != CHUNK_READ_WRITE)
444 continue;
445 start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
446 start_chunk >>= PAGE_SHIFT;
447 end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
448 end_chunk >>= PAGE_SHIFT;
449 if (start_chunk < start_pfn)
450 start_chunk = start_pfn;
451 if (end_chunk > end_pfn)
452 end_chunk = end_pfn;
453 if (start_chunk < end_chunk)
454 free_bootmem(start_chunk << PAGE_SHIFT,
455 (end_chunk - start_chunk) << PAGE_SHIFT);
456 }
457
458 /*
459 * Reserve the bootmem bitmap itself as well. We do this in two
460 * steps (first step was init_bootmem()) because this catches
461 * the (very unlikely) case of us accidentally initializing the
462 * bootmem allocator with an invalid RAM area.
463 */
464 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
465
466#ifdef CONFIG_BLK_DEV_INITRD
467 if (INITRD_START) {
468 if (INITRD_START + INITRD_SIZE <= memory_end) {
469 reserve_bootmem(INITRD_START, INITRD_SIZE);
470 initrd_start = INITRD_START;
471 initrd_end = initrd_start + INITRD_SIZE;
472 } else {
473 printk("initrd extends beyond end of memory "
474 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
475	       INITRD_START + INITRD_SIZE, memory_end);
476 initrd_start = initrd_end = 0;
477 }
478 }
479#endif
480
481	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
482 struct resource *res;
483
484 res = alloc_bootmem_low(sizeof(struct resource));
485 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
486
487 switch (memory_chunk[i].type) {
488 case CHUNK_READ_WRITE:
489 res->name = "System RAM";
490 break;
491 case CHUNK_READ_ONLY:
492 res->name = "System ROM";
493 res->flags |= IORESOURCE_READONLY;
494 break;
495 default:
496 res->name = "reserved";
497 }
498 res->start = memory_chunk[i].addr;
499 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
500 request_resource(&iomem_resource, res);
501 request_resource(res, &code_resource);
502 request_resource(res, &data_resource);
503 }
504
505 /*
506 * Setup lowcore for boot cpu
507 */
508#ifndef CONFIG_ARCH_S390X
509 lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
510 memset(lc, 0, PAGE_SIZE);
511#else /* CONFIG_ARCH_S390X */
512 lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0);
513 memset(lc, 0, 2*PAGE_SIZE);
514#endif /* CONFIG_ARCH_S390X */
515 lc->restart_psw.mask = PSW_BASE_BITS;
516 lc->restart_psw.addr =
517 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
518 lc->external_new_psw.mask = PSW_KERNEL_BITS;
519 lc->external_new_psw.addr =
520 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
521 lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
522 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
523 lc->program_new_psw.mask = PSW_KERNEL_BITS;
524 lc->program_new_psw.addr =
525 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
526 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
527 lc->mcck_new_psw.addr =
528 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
529 lc->io_new_psw.mask = PSW_KERNEL_BITS;
530 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
531 lc->ipl_device = S390_lowcore.ipl_device;
532 lc->jiffy_timer = -1LL;
533 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
534 lc->async_stack = (unsigned long)
535 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
536#ifdef CONFIG_CHECK_STACK
537 lc->panic_stack = (unsigned long)
538 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
539#endif
540 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
541 lc->thread_info = (unsigned long) &init_thread_union;
542#ifdef CONFIG_ARCH_S390X
543 if (MACHINE_HAS_DIAG44)
544 lc->diag44_opcode = 0x83000044;
545 else
546 lc->diag44_opcode = 0x07000700;
547#endif /* CONFIG_ARCH_S390X */
548 set_prefix((u32)(unsigned long) lc);
549 cpu_init();
550 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
551
552 /*
553 * Create kernel page tables and switch to virtual addressing.
554 */
555 paging_init();
556
557 /* Setup default console */
558 conmode_default();
559}
560
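/*
 * Print version, identification and machine type of a cpu, plus
 * its physical address on SMP kernels.
 */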
561void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
562{
563 printk("cpu %d "
564#ifdef CONFIG_SMP
565 "phys_idx=%d "
566#endif
567 "vers=%02X ident=%06X machine=%04X unused=%04X\n",
568 cpuinfo->cpu_nr,
569#ifdef CONFIG_SMP
570 cpuinfo->cpu_addr,
571#endif
572 cpuinfo->cpu_id.version,
573 cpuinfo->cpu_id.ident,
574 cpuinfo->cpu_id.machine,
575 cpuinfo->cpu_id.unused);
576}
577
578/*
579 * show_cpuinfo - Get information on one CPU for use by procfs.
580 */
581
582static int show_cpuinfo(struct seq_file *m, void *v)
583{
584 struct cpuinfo_S390 *cpuinfo;
585 unsigned long n = (unsigned long) v - 1;
586
587 if (!n) {
588 seq_printf(m, "vendor_id : IBM/S390\n"
589 "# processors : %i\n"
590 "bogomips per cpu: %lu.%02lu\n",
591 num_online_cpus(), loops_per_jiffy/(500000/HZ),
592 (loops_per_jiffy/(5000/HZ))%100);
593 }
594 if (cpu_online(n)) {
595#ifdef CONFIG_SMP
596 if (smp_processor_id() == n)
597 cpuinfo = &S390_lowcore.cpu_data;
598 else
599 cpuinfo = &lowcore_ptr[n]->cpu_data;
600#else
601 cpuinfo = &S390_lowcore.cpu_data;
602#endif
603 seq_printf(m, "processor %li: "
604 "version = %02X, "
605 "identification = %06X, "
606 "machine = %04X\n",
607 n, cpuinfo->cpu_id.version,
608 cpuinfo->cpu_id.ident,
609 cpuinfo->cpu_id.machine);
610 }
611 return 0;
612}
613
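/*
 * seq_file iterator for /proc/cpuinfo: positions run from 1 to
 * NR_CPUS because a NULL (0) return from c_start would end the
 * sequence; show_cpuinfo subtracts 1 again to get the cpu number.
 */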
614static void *c_start(struct seq_file *m, loff_t *pos)
615{
616 return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
617}
618static void *c_next(struct seq_file *m, void *v, loff_t *pos)
619{
620 ++*pos;
621 return c_start(m, pos);
622}
623static void c_stop(struct seq_file *m, void *v)
624{
625}
626struct seq_operations cpuinfo_op = {
627 .start = c_start,
628 .next = c_next,
629 .stop = c_stop,
630 .show = show_cpuinfo,
631};
632
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
new file mode 100644
index 000000000000..610c1d03e975
--- /dev/null
+++ b/arch/s390/kernel/signal.c
@@ -0,0 +1,527 @@
1/*
2 * arch/s390/kernel/signal.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
7 *
8 * Based on Intel version
9 *
10 * Copyright (C) 1991, 1992 Linus Torvalds
11 *
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 */
14
15#include <linux/config.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/kernel.h>
21#include <linux/signal.h>
22#include <linux/errno.h>
23#include <linux/wait.h>
24#include <linux/ptrace.h>
25#include <linux/unistd.h>
26#include <linux/stddef.h>
27#include <linux/tty.h>
28#include <linux/personality.h>
29#include <linux/binfmts.h>
30#include <asm/ucontext.h>
31#include <asm/uaccess.h>
32#include <asm/lowcore.h>
33
34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
35
36
37typedef struct
38{
39 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
40 struct sigcontext sc;
41 _sigregs sregs;
42 int signo;
43 __u8 retcode[S390_SYSCALL_SIZE];
44} sigframe;
45
46typedef struct
47{
48 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
49 __u8 retcode[S390_SYSCALL_SIZE];
50 struct siginfo info;
51 struct ucontext uc;
52} rt_sigframe;
53
54int do_signal(struct pt_regs *regs, sigset_t *oldset);
55
56/*
57 * Atomically swap in the new signal mask, and wait for a signal.
58 */
59asmlinkage int
60sys_sigsuspend(struct pt_regs * regs, int history0, int history1,
61 old_sigset_t mask)
62{
63 sigset_t saveset;
64
65 mask &= _BLOCKABLE;
66 spin_lock_irq(&current->sighand->siglock);
67 saveset = current->blocked;
68 siginitset(&current->blocked, mask);
69 recalc_sigpending();
70 spin_unlock_irq(&current->sighand->siglock);
71 regs->gprs[2] = -EINTR;
72
73 while (1) {
74 set_current_state(TASK_INTERRUPTIBLE);
75 schedule();
76 if (do_signal(regs, &saveset))
77 return -EINTR;
78 }
79}
80
81asmlinkage long
82sys_rt_sigsuspend(struct pt_regs *regs, sigset_t __user *unewset,
83 size_t sigsetsize)
84{
85 sigset_t saveset, newset;
86
87 /* XXX: Don't preclude handling different sized sigset_t's. */
88 if (sigsetsize != sizeof(sigset_t))
89 return -EINVAL;
90
91 if (copy_from_user(&newset, unewset, sizeof(newset)))
92 return -EFAULT;
93 sigdelsetmask(&newset, ~_BLOCKABLE);
94
95 spin_lock_irq(&current->sighand->siglock);
96 saveset = current->blocked;
97 current->blocked = newset;
98 recalc_sigpending();
99 spin_unlock_irq(&current->sighand->siglock);
100 regs->gprs[2] = -EINTR;
101
102 while (1) {
103 set_current_state(TASK_INTERRUPTIBLE);
104 schedule();
105 if (do_signal(regs, &saveset))
106 return -EINTR;
107 }
108}
109
110asmlinkage long
111sys_sigaction(int sig, const struct old_sigaction __user *act,
112 struct old_sigaction __user *oact)
113{
114 struct k_sigaction new_ka, old_ka;
115 int ret;
116
117 if (act) {
118 old_sigset_t mask;
119 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
120 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
121 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
122 return -EFAULT;
123 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
124 __get_user(mask, &act->sa_mask);
125 siginitset(&new_ka.sa.sa_mask, mask);
126 }
127
128 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
129
130 if (!ret && oact) {
131 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
132 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
133 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
134 return -EFAULT;
135 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
136 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
137 }
138
139 return ret;
140}
141
142asmlinkage long
143sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
144 struct pt_regs *regs)
145{
146 return do_sigaltstack(uss, uoss, regs->gprs[15]);
147}
148
149
150
151/* Returns non-zero on fault. */
152static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
153{
154 unsigned long old_mask = regs->psw.mask;
155 int err;
156
157 save_access_regs(current->thread.acrs);
158
159 /* Copy a 'clean' PSW mask to the user to avoid leaking
160 information about whether PER is currently on. */
161 regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
162 err = __copy_to_user(&sregs->regs.psw, &regs->psw,
163 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
164 regs->psw.mask = old_mask;
165 if (err != 0)
166 return err;
167 err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
168 sizeof(sregs->regs.acrs));
169 if (err != 0)
170 return err;
171 /*
172 * We have to store the fp registers to current->thread.fp_regs
173 * to merge them with the emulated registers.
174 */
175 save_fp_regs(&current->thread.fp_regs);
176 return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
177 sizeof(s390_fp_regs));
178}
179
180/* Returns positive number on error */
181static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
182{
183 unsigned long old_mask = regs->psw.mask;
184 int err;
185
186	/* Always make any pending restarted system call return -EINTR */
187 current_thread_info()->restart_block.fn = do_no_restart_syscall;
188
189 err = __copy_from_user(&regs->psw, &sregs->regs.psw,
190 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
191 regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
192 regs->psw.addr |= PSW_ADDR_AMODE;
193 if (err)
194 return err;
195 err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs,
196 sizeof(sregs->regs.acrs));
197 if (err)
198 return err;
199 restore_access_regs(current->thread.acrs);
200
201 err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
202 sizeof(s390_fp_regs));
203 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
204 if (err)
205 return err;
206
207 restore_fp_regs(&current->thread.fp_regs);
208 regs->trap = -1; /* disable syscall checks */
209 return 0;
210}
211
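/*
 * Restore the blocked signal mask and the saved register set from
 * the signal frame that setup_frame placed on the user stack.
 */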
212asmlinkage long sys_sigreturn(struct pt_regs *regs)
213{
214 sigframe __user *frame = (sigframe __user *)regs->gprs[15];
215 sigset_t set;
216
217 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
218 goto badframe;
219 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
220 goto badframe;
221
222 sigdelsetmask(&set, ~_BLOCKABLE);
223 spin_lock_irq(&current->sighand->siglock);
224 current->blocked = set;
225 recalc_sigpending();
226 spin_unlock_irq(&current->sighand->siglock);
227
228 if (restore_sigregs(regs, &frame->sregs))
229 goto badframe;
230
231 return regs->gprs[2];
232
233badframe:
234 force_sig(SIGSEGV, current);
235 return 0;
236}
237
238asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
239{
240 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
241 sigset_t set;
242
243 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
244 goto badframe;
245 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
246 goto badframe;
247
248 sigdelsetmask(&set, ~_BLOCKABLE);
249 spin_lock_irq(&current->sighand->siglock);
250 current->blocked = set;
251 recalc_sigpending();
252 spin_unlock_irq(&current->sighand->siglock);
253
254 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
255 goto badframe;
256
257 /* It is more difficult to avoid calling this function than to
258 call it and ignore errors. */
259 do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]);
260 return regs->gprs[2];
261
262badframe:
263 force_sig(SIGSEGV, current);
264 return 0;
265}
266
267/*
268 * Set up a signal frame.
269 */
270
271
272/*
273 * Determine which stack to use..
274 */
275static inline void __user *
276get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
277{
278 unsigned long sp;
279
280 /* Default to using normal stack */
281 sp = regs->gprs[15];
282
283 /* This is the X/Open sanctioned signal stack switching. */
284 if (ka->sa.sa_flags & SA_ONSTACK) {
285 if (! sas_ss_flags(sp))
286 sp = current->sas_ss_sp + current->sas_ss_size;
287 }
288
289 /* This is the legacy signal stack switching. */
290 else if (!user_mode(regs) &&
291 !(ka->sa.sa_flags & SA_RESTORER) &&
292 ka->sa.sa_restorer) {
293 sp = (unsigned long) ka->sa.sa_restorer;
294 }
295
296 return (void __user *)((sp - frame_size) & -8ul);
297}
298
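/*
 * Map the signal number through the exec domain's inverse signal
 * map, if one is installed (used by non-Linux personalities).
 */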
299static inline int map_signal(int sig)
300{
301 if (current_thread_info()->exec_domain
302 && current_thread_info()->exec_domain->signal_invmap
303 && sig < 32)
304 return current_thread_info()->exec_domain->signal_invmap[sig];
305 else
306 return sig;
307}
308
309static void setup_frame(int sig, struct k_sigaction *ka,
310 sigset_t *set, struct pt_regs * regs)
311{
312 sigframe __user *frame;
313
314 frame = get_sigframe(ka, regs, sizeof(sigframe));
315 if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
316 goto give_sigsegv;
317
318 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
319 goto give_sigsegv;
320
321 if (save_sigregs(regs, &frame->sregs))
322 goto give_sigsegv;
323 if (__put_user(&frame->sregs, &frame->sc.sregs))
324 goto give_sigsegv;
325
326 /* Set up to return from userspace. If provided, use a stub
327 already in userspace. */
328 if (ka->sa.sa_flags & SA_RESTORER) {
329 regs->gprs[14] = (unsigned long)
330 ka->sa.sa_restorer | PSW_ADDR_AMODE;
331 } else {
332 regs->gprs[14] = (unsigned long)
333 frame->retcode | PSW_ADDR_AMODE;
334 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
335 (u16 __user *)(frame->retcode)))
336 goto give_sigsegv;
337 }
338
339 /* Set up backchain. */
340 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
341 goto give_sigsegv;
342
343 /* Set up registers for signal handler */
344 regs->gprs[15] = (unsigned long) frame;
345 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
346
347 regs->gprs[2] = map_signal(sig);
348 regs->gprs[3] = (unsigned long) &frame->sc;
349
350 /* We forgot to include these in the sigcontext.
351 To avoid breaking binary compatibility, they are passed as args. */
352 regs->gprs[4] = current->thread.trap_no;
353 regs->gprs[5] = current->thread.prot_addr;
354
355 /* Place signal number on stack to allow backtrace from handler. */
356 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
357 goto give_sigsegv;
358 return;
359
360give_sigsegv:
361 force_sigsegv(sig, current);
362}
363
364static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
365 sigset_t *set, struct pt_regs * regs)
366{
367 int err = 0;
368 rt_sigframe __user *frame;
369
370 frame = get_sigframe(ka, regs, sizeof(rt_sigframe));
371 if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
372 goto give_sigsegv;
373
374 if (copy_siginfo_to_user(&frame->info, info))
375 goto give_sigsegv;
376
377 /* Create the ucontext. */
378 err |= __put_user(0, &frame->uc.uc_flags);
379 err |= __put_user(0, &frame->uc.uc_link);
380	err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
381 err |= __put_user(sas_ss_flags(regs->gprs[15]),
382 &frame->uc.uc_stack.ss_flags);
383 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
384 err |= save_sigregs(regs, &frame->uc.uc_mcontext);
385 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
386 if (err)
387 goto give_sigsegv;
388
389 /* Set up to return from userspace. If provided, use a stub
390 already in userspace. */
391 if (ka->sa.sa_flags & SA_RESTORER) {
392 regs->gprs[14] = (unsigned long)
393 ka->sa.sa_restorer | PSW_ADDR_AMODE;
394 } else {
395 regs->gprs[14] = (unsigned long)
396 frame->retcode | PSW_ADDR_AMODE;
397 err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
398 (u16 __user *)(frame->retcode));
399 }
400
401 /* Set up backchain. */
402 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
403 goto give_sigsegv;
404
405 /* Set up registers for signal handler */
406 regs->gprs[15] = (unsigned long) frame;
407 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
408
409 regs->gprs[2] = map_signal(sig);
410 regs->gprs[3] = (unsigned long) &frame->info;
411 regs->gprs[4] = (unsigned long) &frame->uc;
412 return;
413
414give_sigsegv:
415 force_sigsegv(sig, current);
416}
417
418/*
419 * OK, we're invoking a handler
420 */
421
422static void
423handle_signal(unsigned long sig, struct k_sigaction *ka,
424 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
425{
426 /* Set up the stack frame */
427 if (ka->sa.sa_flags & SA_SIGINFO)
428 setup_rt_frame(sig, ka, info, oldset, regs);
429 else
430 setup_frame(sig, ka, oldset, regs);
431
432 if (!(ka->sa.sa_flags & SA_NODEFER)) {
433 spin_lock_irq(&current->sighand->siglock);
434 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
435 sigaddset(&current->blocked,sig);
436 recalc_sigpending();
437 spin_unlock_irq(&current->sighand->siglock);
438 }
439}
440
441/*
442 * Note that 'init' is a special process: it doesn't get signals it doesn't
443 * want to handle. Thus you cannot kill init even with a SIGKILL even by
444 * mistake.
445 *
446 * Note that we go through the signals twice: once to check the signals that
447 * the kernel can handle, and then we build all the user-level signal handling
448 * stack-frames in one go after that.
449 */
450int do_signal(struct pt_regs *regs, sigset_t *oldset)
451{
452 unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
453 siginfo_t info;
454 int signr;
455 struct k_sigaction ka;
456
457 /*
458 * We want the common case to go fast, which
459 * is why we may in certain cases get here from
460 * kernel mode. Just return without doing anything
461 * if so.
462 */
463 if (!user_mode(regs))
464 return 1;
465
466 if (!oldset)
467 oldset = &current->blocked;
468
469 /* Are we from a system call? */
470 if (regs->trap == __LC_SVC_OLD_PSW) {
471 continue_addr = regs->psw.addr;
472 restart_addr = continue_addr - regs->ilc;
473 retval = regs->gprs[2];
474
475 /* Prepare for system call restart. We do this here so that a
476 debugger will see the already changed PSW. */
477 if (retval == -ERESTARTNOHAND ||
478 retval == -ERESTARTSYS ||
479 retval == -ERESTARTNOINTR) {
480 regs->gprs[2] = regs->orig_gpr2;
481 regs->psw.addr = restart_addr;
482 } else if (retval == -ERESTART_RESTARTBLOCK) {
483 regs->gprs[2] = -EINTR;
484 }
485 }
486
487 /* Get signal to deliver. When running under ptrace, at this point
488 the debugger may change all our registers ... */
489 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
490
491 /* Depending on the signal settings we may need to revert the
492 decision to restart the system call. */
493 if (signr > 0 && regs->psw.addr == restart_addr) {
494 if (retval == -ERESTARTNOHAND
495 || (retval == -ERESTARTSYS
496 && !(current->sighand->action[signr-1].sa.sa_flags
497 & SA_RESTART))) {
498 regs->gprs[2] = -EINTR;
499 regs->psw.addr = continue_addr;
500 }
501 }
502
503 if (signr > 0) {
504 /* Whee! Actually deliver the signal. */
505#ifdef CONFIG_S390_SUPPORT
506 if (test_thread_flag(TIF_31BIT)) {
507 extern void handle_signal32(unsigned long sig,
508 struct k_sigaction *ka,
509 siginfo_t *info,
510 sigset_t *oldset,
511 struct pt_regs *regs);
512 handle_signal32(signr, &ka, &info, oldset, regs);
513 return 1;
514 }
515#endif
516 handle_signal(signr, &ka, &info, oldset, regs);
517 return 1;
518 }
519
520 /* Restart a different system call. */
521 if (retval == -ERESTART_RESTARTBLOCK
522 && regs->psw.addr == continue_addr) {
523 regs->gprs[2] = __NR_restart_syscall;
524 set_thread_flag(TIF_RESTART_SVC);
525 }
526 return 0;
527}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
new file mode 100644
index 000000000000..fdfcf0488b49
--- /dev/null
+++ b/arch/s390/kernel/smp.c
@@ -0,0 +1,840 @@
1/*
2 * arch/s390/kernel/smp.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Heiko Carstens (heiko.carstens@de.ibm.com)
9 *
10 * based on other smp stuff by
11 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
12 * (c) 1998 Ingo Molnar
13 *
14 * We work with logical cpu numbering everywhere we can. The only
15 * functions using the real cpu address (got from STAP) are the sigp
16 * functions. For all other functions we use the identity mapping.
17 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
18 * used e.g. to find the idle task belonging to a logical cpu. Every array
19 * in the kernel is sorted by the logical cpu number and not by the physical
20 * one which is causing all the confusion with __cpu_logical_map and
21 * cpu_number_map in other architectures.
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26
27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/kernel_stat.h>
30#include <linux/smp_lock.h>
31
32#include <linux/delay.h>
33#include <linux/cache.h>
34#include <linux/interrupt.h>
35#include <linux/cpu.h>
36
37#include <asm/sigp.h>
38#include <asm/pgalloc.h>
39#include <asm/irq.h>
40#include <asm/s390_ext.h>
41#include <asm/cpcmd.h>
42#include <asm/tlbflush.h>
43
44/* prototypes */
45
46extern volatile int __cpu_logical_map[];
47
48/*
49 * An array with a pointer to the lowcore of every CPU.
50 */
51
52struct _lowcore *lowcore_ptr[NR_CPUS];
53
54cpumask_t cpu_online_map;
55cpumask_t cpu_possible_map;
56
57static struct task_struct *current_set[NR_CPUS];
58
59EXPORT_SYMBOL(cpu_online_map);
60
61/*
62 * Reboot, halt and power_off routines for SMP.
63 */
64extern char vmhalt_cmd[];
65extern char vmpoff_cmd[];
66
67extern void reipl(unsigned long devno);
68
69static void smp_ext_bitcall(int, ec_bit_sig);
70static void smp_ext_bitcall_others(ec_bit_sig);
71
72/*
73 * Structure and data for smp_call_function(). This is designed to minimise
74 * static memory requirements. It also looks cleaner.
75 */
76static DEFINE_SPINLOCK(call_lock);
77
78struct call_data_struct {
79 void (*func) (void *info);
80 void *info;
81 atomic_t started;
82 atomic_t finished;
83 int wait;
84};
85
86static struct call_data_struct * call_data;
87
88/*
89 * 'Call function' interrupt callback
90 */
91static void do_call_function(void)
92{
93 void (*func) (void *info) = call_data->func;
94 void *info = call_data->info;
95 int wait = call_data->wait;
96
97 atomic_inc(&call_data->started);
98 (*func)(info);
99 if (wait)
100 atomic_inc(&call_data->finished);
101}
102
103/*
104 * this function sends a 'generic call function' IPI to all other CPUs
105 * in the system.
106 */
107
108int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
109 int wait)
110/*
111 * [SUMMARY] Run a function on all other CPUs.
112 * <func> The function to run. This must be fast and non-blocking.
113 * <info> An arbitrary pointer to pass to the function.
114 * <nonatomic> currently unused.
115 * <wait> If true, wait (atomically) until function has completed on other CPUs.
116 * [RETURNS] 0 on success, else a negative status code. Does not return until
117 * remote CPUs are about to execute <<func>> or have already executed it.
118 *
119 * You must not call this function with disabled interrupts or from a
120 * hardware interrupt handler or from a bottom half handler.
121 */
122{
123 struct call_data_struct data;
124 int cpus = num_online_cpus()-1;
125
126 if (cpus <= 0)
127 return 0;
128
129 /* Can deadlock when called with interrupts disabled */
130 WARN_ON(irqs_disabled());
131
132 data.func = func;
133 data.info = info;
134 atomic_set(&data.started, 0);
135 data.wait = wait;
136 if (wait)
137 atomic_set(&data.finished, 0);
138
139 spin_lock(&call_lock);
140 call_data = &data;
141 /* Send a message to all other CPUs and wait for them to respond */
142 smp_ext_bitcall_others(ec_call_function);
143
144 /* Wait for response */
145 while (atomic_read(&data.started) != cpus)
146 cpu_relax();
147
148 if (wait)
149 while (atomic_read(&data.finished) != cpus)
150 cpu_relax();
151 spin_unlock(&call_lock);
152
153 return 0;
154}
155
156/*
157 * Call a function on one CPU
158 * cpu : the CPU the function should be executed on
159 *
160 * You must not call this function with disabled interrupts or from a
161 * hardware interrupt handler. You may call it from a bottom half.
162 *
163 * It is guaranteed that the called function runs on the specified CPU,
164 * preemption is disabled.
165 */
166int smp_call_function_on(void (*func) (void *info), void *info,
167 int nonatomic, int wait, int cpu)
168{
169 struct call_data_struct data;
170 int curr_cpu;
171
172 if (!cpu_online(cpu))
173 return -EINVAL;
174
175 /* disable preemption for local function call */
176 curr_cpu = get_cpu();
177
178 if (curr_cpu == cpu) {
179 /* direct call to function */
180 func(info);
181 put_cpu();
182 return 0;
183 }
184
185 data.func = func;
186 data.info = info;
187 atomic_set(&data.started, 0);
188 data.wait = wait;
189 if (wait)
190 atomic_set(&data.finished, 0);
191
192 spin_lock_bh(&call_lock);
193 call_data = &data;
194 smp_ext_bitcall(cpu, ec_call_function);
195
196 /* Wait for response */
197 while (atomic_read(&data.started) != 1)
198 cpu_relax();
199
200 if (wait)
201 while (atomic_read(&data.finished) != 1)
202 cpu_relax();
203
204 spin_unlock_bh(&call_lock);
205 put_cpu();
206 return 0;
207}
208EXPORT_SYMBOL(smp_call_function_on);
209
210static inline void do_send_stop(void)
211{
212 int cpu, rc;
213
214 /* stop all processors */
215 for_each_online_cpu(cpu) {
216 if (cpu == smp_processor_id())
217 continue;
218 do {
219 rc = signal_processor(cpu, sigp_stop);
220 } while (rc == sigp_busy);
221 }
222}
223
224static inline void do_store_status(void)
225{
226 int cpu, rc;
227
228 /* store status of all processors in their lowcores (real 0) */
229 for_each_online_cpu(cpu) {
230 if (cpu == smp_processor_id())
231 continue;
232 do {
233 rc = signal_processor_p(
234 (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
235 sigp_store_status_at_address);
236 } while(rc == sigp_busy);
237 }
238}
239
240/*
241 * this function sends a 'stop' sigp to all other CPUs in the system.
242 * it goes straight through.
243 */
244void smp_send_stop(void)
245{
246 /* write magic number to zero page (absolute 0) */
247 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
248
249 /* stop other processors. */
250 do_send_stop();
251
252 /* store status of other processors. */
253 do_store_status();
254}
255
256/*
257 * Reboot, halt and power_off routines for SMP.
258 */
259
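/*
 * Only the first cpu to win the atomic_compare_and_swap performs
 * the restart; all other cpus stop themselves with sigp_stop.
 */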
260static void do_machine_restart(void * __unused)
261{
262 int cpu;
263 static atomic_t cpuid = ATOMIC_INIT(-1);
264
265 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
266 signal_processor(smp_processor_id(), sigp_stop);
267
268 /* Wait for all other cpus to enter stopped state */
269 for_each_online_cpu(cpu) {
270 if (cpu == smp_processor_id())
271 continue;
272 while(!smp_cpu_not_running(cpu))
273 cpu_relax();
274 }
275
276 /* Store status of other cpus. */
277 do_store_status();
278
279 /*
280 * Finally call reipl. Because we waited for all other
281 * cpus to enter this function we know that they do
282 * not hold any s390irq-locks (the cpus have been
283 * interrupted by an external interrupt and s390irq
284	 * locks are always taken with interrupts disabled).
285 */
286 if (MACHINE_IS_VM)
287 cpcmd ("IPL", NULL, 0);
288 else
289 reipl (0x10000 | S390_lowcore.ipl_device);
290}
291
292void machine_restart_smp(char * __unused)
293{
294 on_each_cpu(do_machine_restart, NULL, 0, 0);
295}
296
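/*
 * Mask all external and I/O interrupt subclasses in the control
 * registers, then loop in enabled wait so the cpu stays quiet.
 */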
297static void do_wait_for_stop(void)
298{
299 unsigned long cr[16];
300
301 __ctl_store(cr, 0, 15);
302 cr[0] &= ~0xffff;
303 cr[6] = 0;
304 __ctl_load(cr, 0, 15);
305 for (;;)
306 enabled_wait();
307}
308
309static void do_machine_halt(void * __unused)
310{
311 static atomic_t cpuid = ATOMIC_INIT(-1);
312
313 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
314 smp_send_stop();
315 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
316 cpcmd(vmhalt_cmd, NULL, 0);
317 signal_processor(smp_processor_id(),
318 sigp_stop_and_store_status);
319 }
320 do_wait_for_stop();
321}
322
323void machine_halt_smp(void)
324{
325 on_each_cpu(do_machine_halt, NULL, 0, 0);
326}
327
328static void do_machine_power_off(void * __unused)
329{
330 static atomic_t cpuid = ATOMIC_INIT(-1);
331
332 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
333 smp_send_stop();
334 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
335 cpcmd(vmpoff_cmd, NULL, 0);
336 signal_processor(smp_processor_id(),
337 sigp_stop_and_store_status);
338 }
339 do_wait_for_stop();
340}
341
342void machine_power_off_smp(void)
343{
344 on_each_cpu(do_machine_power_off, NULL, 0, 0);
345}
346
347/*
348 * This is the main routine where commands issued by other
349 * cpus are handled.
350 */
351
352void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
353{
354 unsigned long bits;
355
356 /*
357 * handle bit signal external calls
358 *
359 * For the ec_schedule signal we have to do nothing. All the work
360 * is done automatically when we return from the interrupt.
361 */
362 bits = xchg(&S390_lowcore.ext_call_fast, 0);
363
364 if (test_bit(ec_call_function, &bits))
365 do_call_function();
366}
367
368/*
369 * Send an external call sigp to another cpu and return without waiting
370 * for its completion.
371 */
372static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
373{
374 /*
375 * Set signaling bit in lowcore of target cpu and kick it
376 */
377 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
378 while(signal_processor(cpu, sigp_external_call) == sigp_busy)
379 udelay(10);
380}
381
382/*
383 * Send an external call sigp to every other cpu in the system and
384 * return without waiting for its completion.
385 */
386static void smp_ext_bitcall_others(ec_bit_sig sig)
387{
388 int cpu;
389
390 for_each_online_cpu(cpu) {
391 if (cpu == smp_processor_id())
392 continue;
393 /*
394 * Set signaling bit in lowcore of target cpu and kick it
395 */
396 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
397 while (signal_processor(cpu, sigp_external_call) == sigp_busy)
398 udelay(10);
399 }
400}
401
402#ifndef CONFIG_ARCH_S390X
403/*
404 * this function sends a 'purge tlb' signal to another CPU.
405 */
406void smp_ptlb_callback(void *info)
407{
408 local_flush_tlb();
409}
410
411void smp_ptlb_all(void)
412{
413 on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
414}
415EXPORT_SYMBOL(smp_ptlb_all);
416#endif /* ! CONFIG_ARCH_S390X */
417
418/*
419 * this function sends a 'reschedule' IPI to another CPU.
420 * it goes straight through and wastes no time serializing
421 * anything. Worst case is that we lose a reschedule ...
422 */
423void smp_send_reschedule(int cpu)
424{
425 smp_ext_bitcall(cpu, ec_schedule);
426}
427
428/*
429 * parameter area for the set/clear control bit callbacks
430 */
431typedef struct
432{
433 __u16 start_ctl;
434 __u16 end_ctl;
435 unsigned long orvals[16];
436 unsigned long andvals[16];
437} ec_creg_mask_parms;
438
439/*
440 * callback for setting/clearing control bits
441 */
442void smp_ctl_bit_callback(void *info) {
443 ec_creg_mask_parms *pp;
444 unsigned long cregs[16];
445 int i;
446
447 pp = (ec_creg_mask_parms *) info;
448 __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
449 for (i = pp->start_ctl; i <= pp->end_ctl; i++)
450 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
451 __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
452}
453
454/*
455 * Set a bit in a control register of all cpus
456 */
457void smp_ctl_set_bit(int cr, int bit) {
458 ec_creg_mask_parms parms;
459
460 parms.start_ctl = cr;
461 parms.end_ctl = cr;
462 parms.orvals[cr] = 1 << bit;
463 parms.andvals[cr] = -1L;
464 preempt_disable();
465 smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
466 __ctl_set_bit(cr, bit);
467 preempt_enable();
468}
469
470/*
471 * Clear a bit in a control register of all cpus
472 */
473void smp_ctl_clear_bit(int cr, int bit) {
474 ec_creg_mask_parms parms;
475
476 parms.start_ctl = cr;
477 parms.end_ctl = cr;
478 parms.orvals[cr] = 0;
479 parms.andvals[cr] = ~(1L << bit);
480 preempt_disable();
481 smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
482 __ctl_clear_bit(cr, bit);
483 preempt_enable();
484}
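/*
 * Usage sketch: smp_ctl_set_bit(0, 13) ors 1 << 13 into control
 * register 0 on every cpu, smp_ctl_clear_bit(0, 13) ands in
 * ~(1 << 13); the orvals/andvals arrays carry per-register masks.
 */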
485
486/*
487 * Let's check how many CPUs we have.
488 */
489
490void
491__init smp_check_cpus(unsigned int max_cpus)
492{
493 int cpu, num_cpus;
494 __u16 boot_cpu_addr;
495
496 /*
497 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
498 */
499
500 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
501 current_thread_info()->cpu = 0;
502 num_cpus = 1;
503 for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
504 if ((__u16) cpu == boot_cpu_addr)
505 continue;
506 __cpu_logical_map[num_cpus] = (__u16) cpu;
507 if (signal_processor(num_cpus, sigp_sense) ==
508 sigp_not_operational)
509 continue;
510 cpu_set(num_cpus, cpu_present_map);
511 num_cpus++;
512 }
513
514 for (cpu = 1; cpu < max_cpus; cpu++)
515 cpu_set(cpu, cpu_possible_map);
516
517	printk("Detected %d CPUs\n", (int) num_cpus);
518 printk("Boot cpu address %2X\n", boot_cpu_addr);
519}
520
521/*
522 * Activate a secondary processor.
523 */
524extern void init_cpu_timer(void);
525extern void init_cpu_vtimer(void);
526extern int pfault_init(void);
527extern void pfault_fini(void);
528
529int __devinit start_secondary(void *cpuvoid)
530{
531 /* Setup the cpu */
532 cpu_init();
533 /* init per CPU timer */
534 init_cpu_timer();
535#ifdef CONFIG_VIRT_TIMER
536 init_cpu_vtimer();
537#endif
538#ifdef CONFIG_PFAULT
539 /* Enable pfault pseudo page faults on this cpu. */
540 pfault_init();
541#endif
542 /* Mark this cpu as online */
543 cpu_set(smp_processor_id(), cpu_online_map);
544 /* Switch on interrupts */
545 local_irq_enable();
546 /* Print info about this processor */
547 print_cpu_info(&S390_lowcore.cpu_data);
548 /* cpu_idle will call schedule for us */
549 cpu_idle();
550 return 0;
551}
552
553static void __init smp_create_idle(unsigned int cpu)
554{
555 struct task_struct *p;
556
557 /*
558	 * We don't care about the psw and regs settings since we'll never
559	 * reschedule the forked idle task.
560 */
561 p = fork_idle(cpu);
562 if (IS_ERR(p))
563 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
564 current_set[cpu] = p;
565}
566
567/* Reserving and releasing of CPUs */
568
569static DEFINE_SPINLOCK(smp_reserve_lock);
570static int smp_cpu_reserved[NR_CPUS];
571
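/*
 * Reserve a cpu from cpu_mask, preferring one that is already
 * reserved. Returns the cpu number, or -ENODEV if no online cpu
 * in the mask is available; release it again with smp_put_cpu.
 */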
572int
573smp_get_cpu(cpumask_t cpu_mask)
574{
575 unsigned long flags;
576 int cpu;
577
578 spin_lock_irqsave(&smp_reserve_lock, flags);
579 /* Try to find an already reserved cpu. */
580 for_each_cpu_mask(cpu, cpu_mask) {
581 if (smp_cpu_reserved[cpu] != 0) {
582 smp_cpu_reserved[cpu]++;
583 /* Found one. */
584 goto out;
585 }
586 }
587 /* Reserve a new cpu from cpu_mask. */
588 for_each_cpu_mask(cpu, cpu_mask) {
589 if (cpu_online(cpu)) {
590 smp_cpu_reserved[cpu]++;
591 goto out;
592 }
593 }
594 cpu = -ENODEV;
595out:
596 spin_unlock_irqrestore(&smp_reserve_lock, flags);
597 return cpu;
598}
599
600void
601smp_put_cpu(int cpu)
602{
603 unsigned long flags;
604
605 spin_lock_irqsave(&smp_reserve_lock, flags);
606 smp_cpu_reserved[cpu]--;
607 spin_unlock_irqrestore(&smp_reserve_lock, flags);
608}
609
610static inline int
611cpu_stopped(int cpu)
612{
613 __u32 status;
614
615 /* Check for stopped state */
616 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
617 if (status & 0x40)
618 return 1;
619 }
620 return 0;
621}
622
623/* Upping and downing of CPUs */
624
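/*
 * Bring a secondary cpu online: find its physical cpu address,
 * point its prefix register at the prepared lowcore, build an
 * initial stack frame for the idle task and kick it with a
 * sigp restart.
 */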
625int
626__cpu_up(unsigned int cpu)
627{
628 struct task_struct *idle;
629 struct _lowcore *cpu_lowcore;
630 struct stack_frame *sf;
631 sigp_ccode ccode;
632 int curr_cpu;
633
634 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
635 __cpu_logical_map[cpu] = (__u16) curr_cpu;
636 if (cpu_stopped(cpu))
637 break;
638 }
639
640 if (!cpu_stopped(cpu))
641 return -ENODEV;
642
643 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
644 cpu, sigp_set_prefix);
645 if (ccode){
646 printk("sigp_set_prefix failed for cpu %d "
647 "with condition code %d\n",
648 (int) cpu, (int) ccode);
649 return -EIO;
650 }
651
652 idle = current_set[cpu];
653 cpu_lowcore = lowcore_ptr[cpu];
654 cpu_lowcore->kernel_stack = (unsigned long)
655 idle->thread_info + (THREAD_SIZE);
656 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
657 - sizeof(struct pt_regs)
658 - sizeof(struct stack_frame));
659 memset(sf, 0, sizeof(struct stack_frame));
660 sf->gprs[9] = (unsigned long) sf;
661 cpu_lowcore->save_area[15] = (unsigned long) sf;
662 __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
663 __asm__ __volatile__("stam 0,15,0(%0)"
664 : : "a" (&cpu_lowcore->access_regs_save_area)
665 : "memory");
666 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
667 cpu_lowcore->current_task = (unsigned long) idle;
668 cpu_lowcore->cpu_data.cpu_nr = cpu;
669 eieio();
670 signal_processor(cpu,sigp_restart);
671
672 while (!cpu_online(cpu))
673 cpu_relax();
674 return 0;
675}
676
677int
678__cpu_disable(void)
679{
680 unsigned long flags;
681 ec_creg_mask_parms cr_parms;
682
683 spin_lock_irqsave(&smp_reserve_lock, flags);
684 if (smp_cpu_reserved[smp_processor_id()] != 0) {
685 spin_unlock_irqrestore(&smp_reserve_lock, flags);
686 return -EBUSY;
687 }
688
689#ifdef CONFIG_PFAULT
690 /* Disable pfault pseudo page faults on this cpu. */
691 pfault_fini();
692#endif
693
694 /* disable all external interrupts */
695
696 cr_parms.start_ctl = 0;
697 cr_parms.end_ctl = 0;
698 cr_parms.orvals[0] = 0;
699 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
700 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
701 smp_ctl_bit_callback(&cr_parms);
702
703 /* disable all I/O interrupts */
704
705 cr_parms.start_ctl = 6;
706 cr_parms.end_ctl = 6;
707 cr_parms.orvals[6] = 0;
708 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
709 1<<27 | 1<<26 | 1<<25 | 1<<24);
710 smp_ctl_bit_callback(&cr_parms);
711
712 /* disable most machine checks */
713
714 cr_parms.start_ctl = 14;
715 cr_parms.end_ctl = 14;
716 cr_parms.orvals[14] = 0;
717 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
718 smp_ctl_bit_callback(&cr_parms);
719
720 spin_unlock_irqrestore(&smp_reserve_lock, flags);
721 return 0;
722}
723
724void
725__cpu_die(unsigned int cpu)
726{
727 /* Wait until target cpu is down */
728 while (!smp_cpu_not_running(cpu))
729 cpu_relax();
730 printk("Processor %d spun down\n", cpu);
731}
732
733void
734cpu_die(void)
735{
736 idle_task_exit();
737 signal_processor(smp_processor_id(), sigp_stop);
738 BUG();
739 for(;;);
740}
741
742/*
743 * Cycle through the processors and setup structures.
744 */
745
746void __init smp_prepare_cpus(unsigned int max_cpus)
747{
748 unsigned long stack;
749 unsigned int cpu;
750 int i;
751
752 /* request the 0x1202 external interrupt */
753 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
754 panic("Couldn't request external interrupt 0x1202");
755 smp_check_cpus(max_cpus);
756 memset(lowcore_ptr,0,sizeof(lowcore_ptr));
757 /*
758 * Initialize prefix pages and stacks for all possible cpus
759 */
760 print_cpu_info(&S390_lowcore.cpu_data);
761
762 for(i = 0; i < NR_CPUS; i++) {
763 if (!cpu_possible(i))
764 continue;
765 lowcore_ptr[i] = (struct _lowcore *)
766 __get_free_pages(GFP_KERNEL|GFP_DMA,
767 sizeof(void*) == 8 ? 1 : 0);
768 stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
769 if (lowcore_ptr[i] == NULL || stack == 0ULL)
770 panic("smp_boot_cpus failed to allocate memory\n");
771
772 *(lowcore_ptr[i]) = S390_lowcore;
773 lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
774#ifdef CONFIG_CHECK_STACK
775 stack = __get_free_pages(GFP_KERNEL,0);
776 if (stack == 0ULL)
777 panic("smp_boot_cpus failed to allocate memory\n");
778 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
779#endif
780 }
781 set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
782
783 for_each_cpu(cpu)
784 if (cpu != smp_processor_id())
785 smp_create_idle(cpu);
786}
787
788void __devinit smp_prepare_boot_cpu(void)
789{
790 BUG_ON(smp_processor_id() != 0);
791
792 cpu_set(0, cpu_online_map);
793 cpu_set(0, cpu_present_map);
794 cpu_set(0, cpu_possible_map);
795 S390_lowcore.percpu_offset = __per_cpu_offset[0];
796 current_set[0] = current;
797}
798
799void smp_cpus_done(unsigned int max_cpus)
800{
801 cpu_present_map = cpu_possible_map;
802}
803
804/*
805 * the frequency of the profiling timer can be changed
806 * by writing a multiplier value into /proc/profile.
807 *
808 * usually you want to run this on all CPUs ;)
809 */
810int setup_profiling_timer(unsigned int multiplier)
811{
812 return 0;
813}
814
815static DEFINE_PER_CPU(struct cpu, cpu_devices);
816
817static int __init topology_init(void)
818{
819 int cpu;
820 int ret;
821
822 for_each_cpu(cpu) {
823 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
824 if (ret)
825 printk(KERN_WARNING "topology_init: register_cpu %d "
826 "failed (%d)\n", cpu, ret);
827 }
828 return 0;
829}
830
831subsys_initcall(topology_init);
832
833EXPORT_SYMBOL(cpu_possible_map);
834EXPORT_SYMBOL(lowcore_ptr);
835EXPORT_SYMBOL(smp_ctl_set_bit);
836EXPORT_SYMBOL(smp_ctl_clear_bit);
837EXPORT_SYMBOL(smp_call_function);
838EXPORT_SYMBOL(smp_get_cpu);
839EXPORT_SYMBOL(smp_put_cpu);
840
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
new file mode 100644
index 000000000000..efe6b83b53f7
--- /dev/null
+++ b/arch/s390/kernel/sys_s390.c
@@ -0,0 +1,270 @@
1/*
2 * arch/s390/kernel/sys_s390.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Thomas Spatzier (tspat@de.ibm.com)
8 *
9 * Derived from "arch/i386/kernel/sys_i386.c"
10 *
11 * This file contains various random system calls that
12 * have a non-standard calling sequence on the Linux/s390
13 * platform.
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/sem.h>
22#include <linux/msg.h>
23#include <linux/shm.h>
24#include <linux/stat.h>
25#include <linux/syscalls.h>
26#include <linux/mman.h>
27#include <linux/file.h>
28#include <linux/utsname.h>
29#ifdef CONFIG_ARCH_S390X
30#include <linux/personality.h>
31#endif /* CONFIG_ARCH_S390X */
32
33#include <asm/uaccess.h>
34#include <asm/ipc.h>
35
36/*
37 * sys_pipe() is the normal C calling standard for creating
38 * a pipe. It's not the way Unix traditionally does this, though.
39 */
40asmlinkage long sys_pipe(unsigned long __user *fildes)
41{
42 int fd[2];
43 int error;
44
45 error = do_pipe(fd);
46 if (!error) {
47 if (copy_to_user(fildes, fd, 2*sizeof(int)))
48 error = -EFAULT;
49 }
50 return error;
51}
52
53/* common code for old and new mmaps */
54static inline long do_mmap2(
55 unsigned long addr, unsigned long len,
56 unsigned long prot, unsigned long flags,
57 unsigned long fd, unsigned long pgoff)
58{
59 long error = -EBADF;
60 struct file * file = NULL;
61
62 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
63 if (!(flags & MAP_ANONYMOUS)) {
64 file = fget(fd);
65 if (!file)
66 goto out;
67 }
68
69 down_write(&current->mm->mmap_sem);
70 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
71 up_write(&current->mm->mmap_sem);
72
73 if (file)
74 fput(file);
75out:
76 return error;
77}
78
79/*
80 * Perform the select(nd, in, out, ex, tv) and mmap() system
81 * calls. Linux for S/390 isn't able to handle more than 5
82 * system call parameters, so these system calls use a memory
83 * block for parameter passing.
84 */
85
86struct mmap_arg_struct {
87 unsigned long addr;
88 unsigned long len;
89 unsigned long prot;
90 unsigned long flags;
91 unsigned long fd;
92 unsigned long offset;
93};
94
95asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg)
96{
97 struct mmap_arg_struct a;
98 int error = -EFAULT;
99
100 if (copy_from_user(&a, arg, sizeof(a)))
101 goto out;
102 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
103out:
104 return error;
105}
106
107asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
108{
109 struct mmap_arg_struct a;
110 long error = -EFAULT;
111
112 if (copy_from_user(&a, arg, sizeof(a)))
113 goto out;
114
115 error = -EINVAL;
116 if (a.offset & ~PAGE_MASK)
117 goto out;
118
119 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
120out:
121 return error;
122}
123
124#ifndef CONFIG_ARCH_S390X
125struct sel_arg_struct {
126 unsigned long n;
127 fd_set *inp, *outp, *exp;
128 struct timeval *tvp;
129};
130
131asmlinkage long old_select(struct sel_arg_struct __user *arg)
132{
133 struct sel_arg_struct a;
134
135 if (copy_from_user(&a, arg, sizeof(a)))
136 return -EFAULT;
137 /* sys_select() does the appropriate kernel locking */
138 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
139
140}
141#endif /* CONFIG_ARCH_S390X */
142
143/*
144 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
145 *
146 * This is really horribly ugly.
147 */
148asmlinkage long sys_ipc(uint call, int first, unsigned long second,
149 unsigned long third, void __user *ptr)
150{
151 struct ipc_kludge tmp;
152 int ret;
153
154 switch (call) {
155 case SEMOP:
156 return sys_semtimedop(first, (struct sembuf __user *)ptr,
157 (unsigned)second, NULL);
158 case SEMTIMEDOP:
159 return sys_semtimedop(first, (struct sembuf __user *)ptr,
160 (unsigned)second,
161 (const struct timespec __user *) third);
162 case SEMGET:
163 return sys_semget(first, (int)second, third);
164 case SEMCTL: {
165 union semun fourth;
166 if (!ptr)
167 return -EINVAL;
168 if (get_user(fourth.__pad, (void __user * __user *) ptr))
169 return -EFAULT;
170 return sys_semctl(first, (int)second, third, fourth);
171 }
172 case MSGSND:
173 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
174 (size_t)second, third);
175 break;
176 case MSGRCV:
177 if (!ptr)
178 return -EINVAL;
179 if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
180 sizeof (struct ipc_kludge)))
181 return -EFAULT;
182 return sys_msgrcv (first, tmp.msgp,
183 (size_t)second, tmp.msgtyp, third);
184 case MSGGET:
185 return sys_msgget((key_t)first, (int)second);
186 case MSGCTL:
187 return sys_msgctl(first, (int)second,
188 (struct msqid_ds __user *)ptr);
189
190 case SHMAT: {
191 ulong raddr;
192 ret = do_shmat(first, (char __user *)ptr,
193 (int)second, &raddr);
194 if (ret)
195 return ret;
196 return put_user (raddr, (ulong __user *) third);
197 break;
198 }
199 case SHMDT:
200 return sys_shmdt ((char __user *)ptr);
201 case SHMGET:
202 return sys_shmget(first, (size_t)second, third);
203 case SHMCTL:
204 return sys_shmctl(first, (int)second,
205 (struct shmid_ds __user *) ptr);
206 default:
207 return -ENOSYS;
208
209 }
210
211 return -EINVAL;
212}
213
214#ifdef CONFIG_ARCH_S390X
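/*
 * uname for the 64 bit kernel: processes running with the
 * PER_LINUX32 personality get "s390" reported as the machine
 * type instead of "s390x".
 */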
215asmlinkage long s390x_newuname(struct new_utsname __user *name)
216{
217 int ret = sys_newuname(name);
218
219 if (current->personality == PER_LINUX32 && !ret) {
220 ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
221 if (ret) ret = -EFAULT;
222 }
223 return ret;
224}
225
226asmlinkage long s390x_personality(unsigned long personality)
227{
228 int ret;
229
230 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
231 personality = PER_LINUX32;
232 ret = sys_personality(personality);
233 if (ret == PER_LINUX32)
234 ret = PER_LINUX;
235
236 return ret;
237}
238#endif /* CONFIG_ARCH_S390X */
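The two mappings in s390x_personality keep a 31-bit compat task on PER_LINUX32 even when it asks for PER_LINUX, and translate the stored PER_LINUX32 back to PER_LINUX on the way out. A hedged userspace illustration of that round trip, assuming it runs in a 31-bit compat process with the glibc wrapper and constants from <sys/personality.h>:

#include <sys/personality.h>

int main(void)
{
        /* the kernel records PER_LINUX32 internally ... */
        long prev = personality(PER_LINUX);

        /* ... but the previous personality is reported as PER_LINUX */
        return prev == PER_LINUX ? 0 : 1;
}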
239
240/*
241 * Wrapper function for sys_fadvise64/fadvise64_64
242 */
243#ifndef CONFIG_ARCH_S390X
244
245asmlinkage long
246s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
247{
248 return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
249 len, advice);
250}
251
252#endif
253
254struct fadvise64_64_args {
255 int fd;
256 long long offset;
257 long long len;
258 int advice;
259};
260
261asmlinkage long
262s390_fadvise64_64(struct fadvise64_64_args __user *args)
263{
264 struct fadvise64_64_args a;
265
266 if ( copy_from_user(&a, args, sizeof(a)) )
267 return -EFAULT;
268 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
269}
270
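Both wrappers exist because the 31-bit ABI cannot pass a 64-bit file offset in a single register: s390_fadvise64 reassembles the offset from two 32-bit register halves, and s390_fadvise64_64 avoids running out of registers altogether by passing one struct pointer. The join, and the matching split a 31-bit caller would perform, isolated as a sketch:

#include <stdint.h>

/* join two 32-bit syscall arguments into the 64-bit offset,
 * exactly as s390_fadvise64 does */
static inline uint64_t join_offset(uint32_t high, uint32_t low)
{
        return ((uint64_t) high << 32) | low;
}

/* the inverse split performed on the caller's side */
static inline void split_offset(uint64_t off, uint32_t *high, uint32_t *low)
{
        *high = (uint32_t) (off >> 32);
        *low  = (uint32_t) off;
}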
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
new file mode 100644
index 000000000000..515938628f82
--- /dev/null
+++ b/arch/s390/kernel/syscalls.S
@@ -0,0 +1,292 @@
1/*
2 * Definitions for sys_call_table; each line represents an
3 * entry in the table in the form
4 * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
5 *
6 * This file is meant to be included from entry.S and entry64.S.
7 */
8
9#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
10
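Each including file defines SYSCALL to select one of the three columns before pulling in this table. A hedged sketch of what the 31-bit kernel's definition could look like; the authoritative definitions live in entry.S and entry64.S and may differ in detail:

#define SYSCALL(esa,esame,emu)	.long esa	/* pick the 31-bit column */
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL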
11NI_SYSCALL /* 0 */
12SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
13SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
14SYSCALL(sys_read,sys_read,sys32_read_wrapper)
15SYSCALL(sys_write,sys_write,sys32_write_wrapper)
16SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
17SYSCALL(sys_close,sys_close,sys32_close_wrapper)
18SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
19SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
20SYSCALL(sys_link,sys_link,sys32_link_wrapper)
21SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
22SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
23SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
24SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
25SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
26SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */
27SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/
28NI_SYSCALL /* old break syscall holder */
29NI_SYSCALL /* old stat syscall holder */
30SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
31SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
32SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
33SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
34SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/
35SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/
36SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */
37SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
38SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
39NI_SYSCALL /* old fstat syscall */
40SYSCALL(sys_pause,sys_pause,sys32_pause)
41SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper) /* 30 */
42NI_SYSCALL /* old stty syscall */
43NI_SYSCALL /* old gtty syscall */
44SYSCALL(sys_access,sys_access,sys32_access_wrapper)
45SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
46NI_SYSCALL /* 35 old ftime syscall */
47SYSCALL(sys_sync,sys_sync,sys_sync)
48SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
49SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
50SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
51SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper) /* 40 */
52SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
53SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
54SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
55NI_SYSCALL /* old prof syscall */
56SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper) /* 45 */
57SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper) /* old setgid16 syscall*/
58SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/
59SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
60SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */
61SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16) /* 50 old getegid16 syscall */
62SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
63SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
64NI_SYSCALL /* old lock syscall */
65SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
66SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper) /* 55 */
67NI_SYSCALL /* intel mpx syscall */
68SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
69NI_SYSCALL /* old ulimit syscall */
70NI_SYSCALL /* old uname syscall */
71SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper) /* 60 */
72SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
73SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
74SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
75SYSCALL(sys_getppid,sys_getppid,sys_getppid)
76SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
77SYSCALL(sys_setsid,sys_setsid,sys_setsid)
78SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper)
79NI_SYSCALL /* old sgetmask syscall*/
80NI_SYSCALL /* old ssetmask syscall*/
81SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */
82SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */
83SYSCALL(sys_sigsuspend_glue,sys_sigsuspend_glue,sys32_sigsuspend_glue)
84SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
85SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
86SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */
87SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
88SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper)
89SYSCALL(sys_gettimeofday,sys_gettimeofday,sys32_gettimeofday_wrapper)
90SYSCALL(sys_settimeofday,sys_settimeofday,sys32_settimeofday_wrapper)
91SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */
92SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */
93NI_SYSCALL /* old select syscall */
94SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
95NI_SYSCALL /* old lstat syscall */
96SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper) /* 85 */
97SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
98SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
99SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
100SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
101SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper) /* 90 */
102SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
103SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
104SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
105SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
106SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
107SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
108SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
109NI_SYSCALL /* old profil syscall */
110SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
111SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */
112NI_SYSCALL /* ioperm for i386 */
113SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
114SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
115SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
116SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */
117SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
118SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
119SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
120NI_SYSCALL /* old uname syscall */
121SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 110 */
122SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
123NI_SYSCALL /* old "idle" system call */
124NI_SYSCALL /* vm86old for i386 */
125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,sys32_sysinfo_wrapper)
128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
131SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */
135SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper)
136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
137SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
138NI_SYSCALL /* old "create module" */
139SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper)
140SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper)
141NI_SYSCALL /* 130: old get_kernel_syms */
142SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
143SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
144SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
145SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
146SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */
147SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
148NI_SYSCALL /* for afs_syscall */
149SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
150SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
151SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper) /* 140 */
152SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
153SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
154SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
155SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
156SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */
157SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
158SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
159SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
160SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
161SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */
162SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
163SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
164SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
165SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
166SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */
167SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
168SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
169SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
170SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
171SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */
172SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
173SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
174SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
175SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */
176SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old getresuid16 syscall */
177NI_SYSCALL /* for vm86 */
178NI_SYSCALL /* old sys_query_module */
179SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
180SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
184SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
185SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
186SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
187SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
188SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
189SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
190SYSCALL(sys_rt_sigsuspend_glue,sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue)
191SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */
192SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
193SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */
194SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
197SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
199NI_SYSCALL /* streams1 */
200NI_SYSCALL /* streams2 */
201SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */
202SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
203SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
204SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
205SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
206SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper) /* 195 */
207SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
208SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
209SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
210SYSCALL(sys_getuid,sys_getuid,sys_getuid)
211SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
212SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
213SYSCALL(sys_getegid,sys_getegid,sys_getegid)
214SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
215SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
216SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper) /* 205 */
217SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
218SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
219SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
220SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
221SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper) /* 210 */
222SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
223SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
224SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
225SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
226SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */
227SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
228SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
229SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
230SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
231SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
232SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
233SYSCALL(sys_readahead,sys_readahead,sys32_readahead)
234SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64)
235SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
236SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
237SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
238SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
239SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
240SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
241SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper) /* 230 */
242SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
243SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
244SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
245SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
246SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
247SYSCALL(sys_gettid,sys_gettid,sys_gettid)
248SYSCALL(sys_tkill,sys_tkill,sys_tkill)
249SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
250SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
251SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
252SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill)
253NI_SYSCALL /* reserved for TUX */
254SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
255SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
256SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper) /* 245 */
257SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
258SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
259SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
260SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
261SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */
262SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
263SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
264SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
265SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
266SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */
267SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
268SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
269SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
270SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
271SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
272SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
273SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
274NI_SYSCALL /* reserved for vserver */
275SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
276SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
277SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
278SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
279NI_SYSCALL /* 268 sys_mbind */
280NI_SYSCALL /* 269 sys_get_mempolicy */
281NI_SYSCALL /* 270 sys_set_mempolicy */
282SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
283SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
284SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
285SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
286SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
287SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
288NI_SYSCALL /* reserved for kexec */
289SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
290SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
291SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
292SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
new file mode 100644
index 000000000000..061e81138dc2
--- /dev/null
+++ b/arch/s390/kernel/time.c
@@ -0,0 +1,382 @@
1/*
2 * arch/s390/kernel/time.c
3 * Time of day based timer functions.
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Hartmut Penner (hp@de.ibm.com),
8 * Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
10 *
11 * Derived from "arch/i386/kernel/time.c"
12 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
13 */
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/param.h>
21#include <linux/string.h>
22#include <linux/mm.h>
23#include <linux/interrupt.h>
24#include <linux/time.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/smp.h>
28#include <linux/types.h>
29#include <linux/profile.h>
30#include <linux/timex.h>
31#include <linux/notifier.h>
32
33#include <asm/uaccess.h>
34#include <asm/delay.h>
35#include <asm/s390_ext.h>
36#include <asm/div64.h>
37#include <asm/irq.h>
38#include <asm/timer.h>
39
40/* change this if you have some constant time drift */
41#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
42#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
43
44/*
45 * Create a small time difference between the timer interrupts
46 * on the different cpus to avoid lock contention.
47 */
48#define CPU_DEVIATION (smp_processor_id() << 12)
49
50#define TICK_SIZE tick
51
52u64 jiffies_64 = INITIAL_JIFFIES;
53
54EXPORT_SYMBOL(jiffies_64);
55
56static ext_int_info_t ext_int_info_cc;
57static u64 init_timer_cc;
58static u64 jiffies_timer_cc;
59static u64 xtime_cc;
60
61extern unsigned long wall_jiffies;
62
63/*
64 * Scheduler clock - returns current time in nanosec units.
65 */
66unsigned long long sched_clock(void)
67{
68 return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
69}
70
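The shift by 12 recurs throughout this file because bit 51 of the TOD clock ticks once per microsecond, so the low 12 bits are a sub-microsecond fraction; that is also why CLK_TICKS_PER_JIFFY above is USECS_PER_JIFFY << 12 (with HZ=100, 10000 << 12). sched_clock multiplies by 1000 before shifting so those fractional bits still contribute to the nanosecond result. The conversion as a worked sketch:

#include <stdint.h>

/* (delta * 1000) >> 12: TOD clock units -> nanoseconds.
 * Example: delta == 1 << 12 (one microsecond) yields 1000 ns. */
static inline uint64_t tod_delta_to_ns(uint64_t tod_delta)
{
        return (tod_delta * 1000) >> 12;
}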
71void tod_to_timeval(__u64 todval, struct timespec *xtime)
72{
73 unsigned long long sec;
74
75 sec = todval >> 12;
76 do_div(sec, 1000000);
77 xtime->tv_sec = sec;
78 todval -= (sec * 1000000) << 12;
79 xtime->tv_nsec = ((todval * 1000) >> 12);
80}
81
82static inline unsigned long do_gettimeoffset(void)
83{
84 __u64 now;
85
86 now = (get_clock() - jiffies_timer_cc) >> 12;
87 /* We require the offset from the latest update of xtime */
88 now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
89 return (unsigned long) now;
90}
91
92/*
93 * This version of gettimeofday has microsecond resolution.
94 */
95void do_gettimeofday(struct timeval *tv)
96{
97 unsigned long flags;
98 unsigned long seq;
99 unsigned long usec, sec;
100
101 do {
102 seq = read_seqbegin_irqsave(&xtime_lock, flags);
103
104 sec = xtime.tv_sec;
105 usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
106 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
107
108 while (usec >= 1000000) {
109 usec -= 1000000;
110 sec++;
111 }
112
113 tv->tv_sec = sec;
114 tv->tv_usec = usec;
115}
116
117EXPORT_SYMBOL(do_gettimeofday);
118
119int do_settimeofday(struct timespec *tv)
120{
121 time_t wtm_sec, sec = tv->tv_sec;
122 long wtm_nsec, nsec = tv->tv_nsec;
123
124 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
125 return -EINVAL;
126
127 write_seqlock_irq(&xtime_lock);
128 /* This is revolting. We need to set the xtime.tv_nsec
129 * correctly. However, the value in this location is
130	 * the value at the last tick.
131 * Discover what correction gettimeofday
132 * would have done, and then undo it!
133 */
134 nsec -= do_gettimeoffset() * 1000;
135
136 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
137 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
138
139 set_normalized_timespec(&xtime, sec, nsec);
140 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
141
142 time_adjust = 0; /* stop active adjtime() */
143 time_status |= STA_UNSYNC;
144 time_maxerror = NTP_PHASE_LIMIT;
145 time_esterror = NTP_PHASE_LIMIT;
146 write_sequnlock_irq(&xtime_lock);
147 clock_was_set();
148 return 0;
149}
150
151EXPORT_SYMBOL(do_settimeofday);
152
153
154#ifdef CONFIG_PROFILING
155#define s390_do_profile(regs) profile_tick(CPU_PROFILING, regs)
156#else
157#define s390_do_profile(regs) do { ; } while(0)
158#endif /* CONFIG_PROFILING */
159
160
161/*
162 * account_ticks() needs to keep up the real-time clock,
163 * as well as call the "do_timer()" routine every clock tick
164 */
165void account_ticks(struct pt_regs *regs)
166{
167 __u64 tmp;
168 __u32 ticks, xticks;
169
170 /* Calculate how many ticks have passed. */
171 if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
172 /*
173 * We have to program the clock comparator even if
174 * no tick has passed. That happens if e.g. an i/o
175 * interrupt wakes up an idle processor that has
176 * switched off its hz timer.
177 */
178 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
179 asm volatile ("SCKC %0" : : "m" (tmp));
180 return;
181 }
182 tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
183 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
184 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
185 S390_lowcore.jiffy_timer +=
186 CLK_TICKS_PER_JIFFY * (__u64) ticks;
187 } else if (tmp >= CLK_TICKS_PER_JIFFY) {
188 ticks = 2;
189 S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
190 } else {
191 ticks = 1;
192 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
193 }
194
195 /* set clock comparator for next tick */
196 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
197 asm volatile ("SCKC %0" : : "m" (tmp));
198
199#ifdef CONFIG_SMP
200 /*
201 * Do not rely on the boot cpu to do the calls to do_timer.
202 * Spread it over all cpus instead.
203 */
204 write_seqlock(&xtime_lock);
205 if (S390_lowcore.jiffy_timer > xtime_cc) {
206 tmp = S390_lowcore.jiffy_timer - xtime_cc;
207 if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
208 xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
209 xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
210 } else {
211 xticks = 1;
212 xtime_cc += CLK_TICKS_PER_JIFFY;
213 }
214 while (xticks--)
215 do_timer(regs);
216 }
217 write_sequnlock(&xtime_lock);
218#else
219 for (xticks = ticks; xticks > 0; xticks--)
220 do_timer(regs);
221#endif
222
223#ifdef CONFIG_VIRT_CPU_ACCOUNTING
224 account_user_vtime(current);
225#else
226 while (ticks--)
227 update_process_times(user_mode(regs));
228#endif
229
230 s390_do_profile(regs);
231}
232
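Because S390_lowcore.jiffy_timer holds the comparator value for the next tick, a non-negative delta already means one full tick has elapsed, and each additional CLK_TICKS_PER_JIFFY of delta adds one more. The three-way classification above, condensed into a standalone sketch:

#include <stdint.h>

static unsigned int elapsed_ticks(uint64_t int_clock, uint64_t jiffy_timer,
                                  uint64_t clk_per_jiffy)
{
        uint64_t tmp = int_clock - jiffy_timer;

        if (tmp >= 2 * clk_per_jiffy)           /* e.g. 2.5 jiffies -> 3 */
                return (unsigned int) (tmp / clk_per_jiffy) + 1;
        if (tmp >= clk_per_jiffy)               /* 1.x jiffies -> 2 */
                return 2;
        return 1;                               /* 0.x jiffies -> 1 */
}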
233#ifdef CONFIG_NO_IDLE_HZ
234
235#ifdef CONFIG_NO_IDLE_HZ_INIT
236int sysctl_hz_timer = 0;
237#else
238int sysctl_hz_timer = 1;
239#endif
240
241/*
242 * Stop the HZ tick on the current CPU.
243 * Only cpu_idle may call this function.
244 */
245static inline void stop_hz_timer(void)
246{
247 __u64 timer;
248
249 if (sysctl_hz_timer != 0)
250 return;
251
252 cpu_set(smp_processor_id(), nohz_cpu_mask);
253
254 /*
255 * Leave the clock comparator set up for the next timer
256 * tick if either rcu or a softirq is pending.
257 */
258 if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
259 cpu_clear(smp_processor_id(), nohz_cpu_mask);
260 return;
261 }
262
263 /*
264 * This cpu is going really idle. Set up the clock comparator
265 * for the next event.
266 */
267 timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
268 timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
269 asm volatile ("SCKC %0" : : "m" (timer));
270}
271
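stop_hz_timer converts the distance to the next pending timer into an absolute TOD value by anchoring it to jiffies_timer_cc, which time_init below sets up as the TOD value corresponding to jiffies_64 == 0. The comparator arithmetic, isolated:

#include <stdint.h>

/* absolute TOD clock value at which the given jiffy is due */
static inline uint64_t jiffy_to_tod(uint64_t target_jiffies64,
                                    uint64_t jiffies_timer_cc,
                                    uint64_t clk_per_jiffy)
{
        return jiffies_timer_cc + target_jiffies64 * clk_per_jiffy;
}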
272/*
273 * Start the HZ tick on the current CPU.
274 * Only cpu_idle may call this function.
275 */
276static inline void start_hz_timer(void)
277{
278 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
279 return;
280 account_ticks(__KSTK_PTREGS(current));
281 cpu_clear(smp_processor_id(), nohz_cpu_mask);
282}
283
284static int nohz_idle_notify(struct notifier_block *self,
285 unsigned long action, void *hcpu)
286{
287 switch (action) {
288 case CPU_IDLE:
289 stop_hz_timer();
290 break;
291 case CPU_NOT_IDLE:
292 start_hz_timer();
293 break;
294 }
295 return NOTIFY_OK;
296}
297
298static struct notifier_block nohz_idle_nb = {
299 .notifier_call = nohz_idle_notify,
300};
301
302void __init nohz_init(void)
303{
304 if (register_idle_notifier(&nohz_idle_nb))
305 panic("Couldn't register idle notifier");
306}
307
308#endif
309
310/*
311 * Start the clock comparator on the current CPU.
312 */
313void init_cpu_timer(void)
314{
315 unsigned long cr0;
316 __u64 timer;
317
318 timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
319 S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
320 timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
321 asm volatile ("SCKC %0" : : "m" (timer));
322 /* allow clock comparator timer interrupt */
323 __ctl_store(cr0, 0, 0);
324 cr0 |= 0x800;
325 __ctl_load(cr0, 0, 0);
326}
327
328extern void vtime_init(void);
329
330/*
331 * Initialize the TOD clock and the CPU timer of
332 * the boot cpu.
333 */
334void __init time_init(void)
335{
336 __u64 set_time_cc;
337 int cc;
338
339 /* kick the TOD clock */
340 asm volatile ("STCK 0(%1)\n\t"
341 "IPM %0\n\t"
342 "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
343 : "memory", "cc");
344 switch (cc) {
345 case 0: /* clock in set state: all is fine */
346 break;
347 case 1: /* clock in non-set state: FIXME */
348 printk("time_init: TOD clock in non-set state\n");
349 break;
350 case 2: /* clock in error state: FIXME */
351 printk("time_init: TOD clock in error state\n");
352 break;
353 case 3: /* clock in stopped or not-operational state: FIXME */
354 printk("time_init: TOD clock stopped/non-operational\n");
355 break;
356 }
357 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
358
359 /* set xtime */
360 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
361 set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
362 (0x3c26700LL*1000000*4096);
363 tod_to_timeval(set_time_cc, &xtime);
364 set_normalized_timespec(&wall_to_monotonic,
365 -xtime.tv_sec, -xtime.tv_nsec);
366
367 /* request the clock comparator external interrupt */
368 if (register_early_external_interrupt(0x1004, 0,
369 &ext_int_info_cc) != 0)
370 panic("Couldn't request external interrupt 0x1004");
371
372 init_cpu_timer();
373
374#ifdef CONFIG_NO_IDLE_HZ
375 nohz_init();
376#endif
377
378#ifdef CONFIG_VIRT_TIMER
379 vtime_init();
380#endif
381}
382
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
new file mode 100644
index 000000000000..8b90e9528b91
--- /dev/null
+++ b/arch/s390/kernel/traps.c
@@ -0,0 +1,738 @@
1/*
2 * arch/s390/kernel/traps.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 *
9 * Derived from "arch/i386/kernel/traps.c"
10 * Copyright (C) 1991, 1992 Linus Torvalds
11 */
12
13/*
14 * 'traps.c' handles hardware traps and faults after we have saved
15 * some state in 'entry.S'.
16 */
17#include <linux/config.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/ptrace.h>
23#include <linux/timer.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/module.h>
31#include <linux/kallsyms.h>
32
33#include <asm/system.h>
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#include <asm/atomic.h>
37#include <asm/mathemu.h>
38#include <asm/cpcmd.h>
39#include <asm/s390_ext.h>
40#include <asm/lowcore.h>
41#include <asm/debug.h>
42
43/* Called from entry.S only */
44extern void handle_per_exception(struct pt_regs *regs);
45
46typedef void pgm_check_handler_t(struct pt_regs *, long);
47pgm_check_handler_t *pgm_check_table[128];
48
49#ifdef CONFIG_SYSCTL
50#ifdef CONFIG_PROCESS_DEBUG
51int sysctl_userprocess_debug = 1;
52#else
53int sysctl_userprocess_debug = 0;
54#endif
55#endif
56
57extern pgm_check_handler_t do_protection_exception;
58extern pgm_check_handler_t do_dat_exception;
59extern pgm_check_handler_t do_pseudo_page_fault;
60#ifdef CONFIG_PFAULT
61extern int pfault_init(void);
62extern void pfault_fini(void);
63extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
64static ext_int_info_t ext_int_pfault;
65#endif
66extern pgm_check_handler_t do_monitor_call;
67
68#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
69
70#ifndef CONFIG_ARCH_S390X
71#define FOURLONG "%08lx %08lx %08lx %08lx\n"
72static int kstack_depth_to_print = 12;
73#else /* CONFIG_ARCH_S390X */
74#define FOURLONG "%016lx %016lx %016lx %016lx\n"
75static int kstack_depth_to_print = 20;
76#endif /* CONFIG_ARCH_S390X */
77
78/*
79 * For show_trace we have three different stacks to consider:
80 *   - the panic stack which is used if the kernel stack has overflowed
81 * - the asynchronous interrupt stack (cpu related)
82 * - the synchronous kernel stack (process related)
83 * The stack trace can start at any of the three stacks and can potentially
84 * touch all of them. The order is: panic stack, async stack, sync stack.
85 */
86static unsigned long
87__show_trace(unsigned long sp, unsigned long low, unsigned long high)
88{
89 struct stack_frame *sf;
90 struct pt_regs *regs;
91
92 while (1) {
93 sp = sp & PSW_ADDR_INSN;
94 if (sp < low || sp > high - sizeof(*sf))
95 return sp;
96 sf = (struct stack_frame *) sp;
97 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
98 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
99 /* Follow the backchain. */
100 while (1) {
101 low = sp;
102 sp = sf->back_chain & PSW_ADDR_INSN;
103 if (!sp)
104 break;
105 if (sp <= low || sp > high - sizeof(*sf))
106 return sp;
107 sf = (struct stack_frame *) sp;
108 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
109 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
110 }
111 /* Zero backchain detected, check for interrupt frame. */
112 sp = (unsigned long) (sf + 1);
113 if (sp <= low || sp > high - sizeof(*regs))
114 return sp;
115 regs = (struct pt_regs *) sp;
116 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
117 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
118 low = sp;
119 sp = regs->gprs[15];
120 }
121}
122
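The walker depends on two conventions that can be read off the code above: the first word of every stack frame is the backchain pointer to the caller's frame (zero terminates the chain), and the saved r14 slot, gprs[8], holds the return address. A minimal sketch of the walk; the frame layout here is illustrative, the authoritative struct stack_frame lives in the architecture headers:

struct frame_sketch {
        unsigned long back_chain;       /* 0 ends the chain */
        unsigned long empty[5];
        unsigned long gprs[10];         /* gprs[8] == saved r14 */
};

static void walk_backchain(unsigned long sp, unsigned long low,
                           unsigned long high)
{
        struct frame_sketch *sf;

        while (sp && sp > low && sp <= high - sizeof(*sf)) {
                sf = (struct frame_sketch *) sp;
                /* sf->gprs[8] is the return address to report */
                sp = sf->back_chain;
        }
}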
123void show_trace(struct task_struct *task, unsigned long * stack)
124{
125 register unsigned long __r15 asm ("15");
126 unsigned long sp;
127
128 sp = (unsigned long) stack;
129 if (!sp)
130 sp = task ? task->thread.ksp : __r15;
131 printk("Call Trace:\n");
132#ifdef CONFIG_CHECK_STACK
133 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
134 S390_lowcore.panic_stack);
135#endif
136 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
137 S390_lowcore.async_stack);
138 if (task)
139 __show_trace(sp, (unsigned long) task->thread_info,
140 (unsigned long) task->thread_info + THREAD_SIZE);
141 else
142 __show_trace(sp, S390_lowcore.thread_info,
143 S390_lowcore.thread_info + THREAD_SIZE);
144 printk("\n");
145}
146
147void show_stack(struct task_struct *task, unsigned long *sp)
148{
149 register unsigned long * __r15 asm ("15");
150 unsigned long *stack;
151 int i;
152
153 // debugging aid: "show_stack(NULL);" prints the
154 // back trace for this cpu.
155
156 if (!sp)
157 sp = task ? (unsigned long *) task->thread.ksp : __r15;
158
159 stack = sp;
160 for (i = 0; i < kstack_depth_to_print; i++) {
161 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
162 break;
163 if (i && ((i * sizeof (long) % 32) == 0))
164 printk("\n ");
165 printk("%p ", (void *)*stack++);
166 }
167 printk("\n");
168 show_trace(task, sp);
169}
170
171/*
172 * The architecture-independent dump_stack generator
173 */
174void dump_stack(void)
175{
176	show_stack(NULL, NULL);
177}
178
179EXPORT_SYMBOL(dump_stack);
180
181void show_registers(struct pt_regs *regs)
182{
183 mm_segment_t old_fs;
184 char *mode;
185 int i;
186
187 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
188 printk("%s PSW : %p %p",
189 mode, (void *) regs->psw.mask,
190 (void *) regs->psw.addr);
191 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
192 printk("%s GPRS: " FOURLONG, mode,
193 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
194 printk(" " FOURLONG,
195 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
196 printk(" " FOURLONG,
197 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
198 printk(" " FOURLONG,
199 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
200
201#if 0
202 /* FIXME: this isn't needed any more but it changes the ksymoops
203 * input. To remove or not to remove ... */
204 save_access_regs(regs->acrs);
205 printk("%s ACRS: %08x %08x %08x %08x\n", mode,
206 regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
207 printk(" %08x %08x %08x %08x\n",
208 regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
209 printk(" %08x %08x %08x %08x\n",
210 regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
211 printk(" %08x %08x %08x %08x\n",
212 regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
213#endif
214
215 /*
216	 * Print the first 20 bytes of the instruction stream at the
217 * time of the fault.
218 */
219 old_fs = get_fs();
220 if (regs->psw.mask & PSW_MASK_PSTATE)
221 set_fs(USER_DS);
222 else
223 set_fs(KERNEL_DS);
224 printk("%s Code: ", mode);
225 for (i = 0; i < 20; i++) {
226 unsigned char c;
227 if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
228 printk(" Bad PSW.");
229 break;
230 }
231 printk("%02x ", c);
232 }
233 set_fs(old_fs);
234
235 printk("\n");
236}
237
238/* This is called from fs/proc/array.c */
239char *task_show_regs(struct task_struct *task, char *buffer)
240{
241 struct pt_regs *regs;
242
243 regs = __KSTK_PTREGS(task);
244 buffer += sprintf(buffer, "task: %p, ksp: %p\n",
245 task, (void *)task->thread.ksp);
246 buffer += sprintf(buffer, "User PSW : %p %p\n",
247 (void *) regs->psw.mask, (void *)regs->psw.addr);
248
249 buffer += sprintf(buffer, "User GPRS: " FOURLONG,
250 regs->gprs[0], regs->gprs[1],
251 regs->gprs[2], regs->gprs[3]);
252 buffer += sprintf(buffer, " " FOURLONG,
253 regs->gprs[4], regs->gprs[5],
254 regs->gprs[6], regs->gprs[7]);
255 buffer += sprintf(buffer, " " FOURLONG,
256 regs->gprs[8], regs->gprs[9],
257 regs->gprs[10], regs->gprs[11]);
258 buffer += sprintf(buffer, " " FOURLONG,
259 regs->gprs[12], regs->gprs[13],
260 regs->gprs[14], regs->gprs[15]);
261 buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
262 task->thread.acrs[0], task->thread.acrs[1],
263 task->thread.acrs[2], task->thread.acrs[3]);
264 buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
265 task->thread.acrs[4], task->thread.acrs[5],
266 task->thread.acrs[6], task->thread.acrs[7]);
267 buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
268 task->thread.acrs[8], task->thread.acrs[9],
269 task->thread.acrs[10], task->thread.acrs[11]);
270 buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
271 task->thread.acrs[12], task->thread.acrs[13],
272 task->thread.acrs[14], task->thread.acrs[15]);
273 return buffer;
274}
275
276DEFINE_SPINLOCK(die_lock);
277
278void die(const char * str, struct pt_regs * regs, long err)
279{
280 static int die_counter;
281
282 debug_stop_all();
283 console_verbose();
284 spin_lock_irq(&die_lock);
285 bust_spinlocks(1);
286 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
287 show_regs(regs);
288 bust_spinlocks(0);
289 spin_unlock_irq(&die_lock);
290 if (in_interrupt())
291 panic("Fatal exception in interrupt");
292 if (panic_on_oops)
293 panic("Fatal exception: panic_on_oops");
294 do_exit(SIGSEGV);
295}
296
297static inline void
298report_user_fault(long interruption_code, struct pt_regs *regs)
299{
300#if defined(CONFIG_SYSCTL)
301 if (!sysctl_userprocess_debug)
302 return;
303#endif
304#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
305 printk("User process fault: interruption code 0x%lX\n",
306 interruption_code);
307 show_regs(regs);
308#endif
309}
310
311static inline void do_trap(long interruption_code, int signr, char *str,
312 struct pt_regs *regs, siginfo_t *info)
313{
314 /*
315 * We got all needed information from the lowcore and can
316 * now safely switch on interrupts.
317 */
318 if (regs->psw.mask & PSW_MASK_PSTATE)
319 local_irq_enable();
320
321 if (regs->psw.mask & PSW_MASK_PSTATE) {
322 struct task_struct *tsk = current;
323
324 tsk->thread.trap_no = interruption_code & 0xffff;
325 force_sig_info(signr, info, tsk);
326 report_user_fault(interruption_code, regs);
327 } else {
328 const struct exception_table_entry *fixup;
329 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
330 if (fixup)
331 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
332 else
333 die(str, regs, interruption_code);
334 }
335}
336
337static inline void *get_check_address(struct pt_regs *regs)
338{
339 return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
340}
341
342void do_single_step(struct pt_regs *regs)
343{
344 if ((current->ptrace & PT_PTRACED) != 0)
345 force_sig(SIGTRAP, current);
346}
347
348asmlinkage void
349default_trap_handler(struct pt_regs * regs, long interruption_code)
350{
351 if (regs->psw.mask & PSW_MASK_PSTATE) {
352 local_irq_enable();
353 do_exit(SIGSEGV);
354 report_user_fault(interruption_code, regs);
355 } else
356 die("Unknown program exception", regs, interruption_code);
357}
358
359#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
360asmlinkage void name(struct pt_regs * regs, long interruption_code) \
361{ \
362 siginfo_t info; \
363 info.si_signo = signr; \
364 info.si_errno = 0; \
365 info.si_code = sicode; \
366 info.si_addr = (void *)siaddr; \
367 do_trap(interruption_code, signr, str, regs, &info); \
368}
369
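For reference, one instantiation of this macro, the divide_exception entry below, expands to:

asmlinkage void divide_exception(struct pt_regs * regs, long interruption_code)
{
        siginfo_t info;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = FPE_INTDIV;
        info.si_addr = (void *) get_check_address(regs);
        do_trap(interruption_code, SIGFPE,
                "fixpoint divide exception", regs, &info);
}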
370DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
371 ILL_ILLADR, get_check_address(regs))
372DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
373 ILL_ILLOPN, get_check_address(regs))
374DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
375 FPE_INTDIV, get_check_address(regs))
376DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
377 FPE_INTOVF, get_check_address(regs))
378DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
379 FPE_FLTOVF, get_check_address(regs))
380DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
381 FPE_FLTUND, get_check_address(regs))
382DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
383 FPE_FLTRES, get_check_address(regs))
384DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
385 FPE_FLTDIV, get_check_address(regs))
386DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
387 FPE_FLTINV, get_check_address(regs))
388DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
389 ILL_ILLOPN, get_check_address(regs))
390DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
391 ILL_PRVOPC, get_check_address(regs))
392DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
393 ILL_ILLOPN, get_check_address(regs))
394DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
395 ILL_ILLOPN, get_check_address(regs))
396
397static inline void
398do_fp_trap(struct pt_regs *regs, void *location,
399 int fpc, long interruption_code)
400{
401 siginfo_t si;
402
403 si.si_signo = SIGFPE;
404 si.si_errno = 0;
405 si.si_addr = location;
406 si.si_code = 0;
407 /* FPC[2] is Data Exception Code */
408 if ((fpc & 0x00000300) == 0) {
409 /* bits 6 and 7 of DXC are 0 iff IEEE exception */
410 if (fpc & 0x8000) /* invalid fp operation */
411 si.si_code = FPE_FLTINV;
412 else if (fpc & 0x4000) /* div by 0 */
413 si.si_code = FPE_FLTDIV;
414 else if (fpc & 0x2000) /* overflow */
415 si.si_code = FPE_FLTOVF;
416 else if (fpc & 0x1000) /* underflow */
417 si.si_code = FPE_FLTUND;
418 else if (fpc & 0x0800) /* inexact */
419 si.si_code = FPE_FLTRES;
420 }
421 current->thread.ieee_instruction_pointer = (addr_t) location;
422 do_trap(interruption_code, SIGFPE,
423 "floating point exception", regs, &si);
424}
425
426asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
427{
428 siginfo_t info;
429 __u8 opcode[6];
430 __u16 *location;
431 int signal = 0;
432
433 location = (__u16 *) get_check_address(regs);
434
435 /*
436 * We got all needed information from the lowcore and can
437 * now safely switch on interrupts.
438 */
439 if (regs->psw.mask & PSW_MASK_PSTATE)
440 local_irq_enable();
441
442 if (regs->psw.mask & PSW_MASK_PSTATE) {
443 get_user(*((__u16 *) opcode), (__u16 __user *) location);
444 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
445 if (current->ptrace & PT_PTRACED)
446 force_sig(SIGTRAP, current);
447 else
448 signal = SIGILL;
449#ifdef CONFIG_MATHEMU
450 } else if (opcode[0] == 0xb3) {
451 get_user(*((__u16 *) (opcode+2)), location+1);
452 signal = math_emu_b3(opcode, regs);
453 } else if (opcode[0] == 0xed) {
454 get_user(*((__u32 *) (opcode+2)),
455			 (__u32 __user *)(location+1));
456 signal = math_emu_ed(opcode, regs);
457 } else if (*((__u16 *) opcode) == 0xb299) {
458 get_user(*((__u16 *) (opcode+2)), location+1);
459 signal = math_emu_srnm(opcode, regs);
460 } else if (*((__u16 *) opcode) == 0xb29c) {
461 get_user(*((__u16 *) (opcode+2)), location+1);
462 signal = math_emu_stfpc(opcode, regs);
463 } else if (*((__u16 *) opcode) == 0xb29d) {
464 get_user(*((__u16 *) (opcode+2)), location+1);
465 signal = math_emu_lfpc(opcode, regs);
466#endif
467 } else
468 signal = SIGILL;
469 } else
470 signal = SIGILL;
471
472#ifdef CONFIG_MATHEMU
473 if (signal == SIGFPE)
474 do_fp_trap(regs, location,
475 current->thread.fp_regs.fpc, interruption_code);
476 else if (signal == SIGSEGV) {
477 info.si_signo = signal;
478 info.si_errno = 0;
479 info.si_code = SEGV_MAPERR;
480 info.si_addr = (void *) location;
481 do_trap(interruption_code, signal,
482 "user address fault", regs, &info);
483 } else
484#endif
485 if (signal) {
486 info.si_signo = signal;
487 info.si_errno = 0;
488 info.si_code = ILL_ILLOPC;
489 info.si_addr = (void *) location;
490 do_trap(interruption_code, signal,
491 "illegal operation", regs, &info);
492 }
493}
494
495
496#ifdef CONFIG_MATHEMU
497asmlinkage void
498specification_exception(struct pt_regs * regs, long interruption_code)
499{
500 __u8 opcode[6];
501 __u16 *location = NULL;
502 int signal = 0;
503
504 location = (__u16 *) get_check_address(regs);
505
506 /*
507 * We got all needed information from the lowcore and can
508 * now safely switch on interrupts.
509 */
510 if (regs->psw.mask & PSW_MASK_PSTATE)
511 local_irq_enable();
512
513 if (regs->psw.mask & PSW_MASK_PSTATE) {
514 get_user(*((__u16 *) opcode), location);
515 switch (opcode[0]) {
516 case 0x28: /* LDR Rx,Ry */
517 signal = math_emu_ldr(opcode);
518 break;
519 case 0x38: /* LER Rx,Ry */
520 signal = math_emu_ler(opcode);
521 break;
522 case 0x60: /* STD R,D(X,B) */
523 get_user(*((__u16 *) (opcode+2)), location+1);
524 signal = math_emu_std(opcode, regs);
525 break;
526 case 0x68: /* LD R,D(X,B) */
527 get_user(*((__u16 *) (opcode+2)), location+1);
528 signal = math_emu_ld(opcode, regs);
529 break;
530 case 0x70: /* STE R,D(X,B) */
531 get_user(*((__u16 *) (opcode+2)), location+1);
532 signal = math_emu_ste(opcode, regs);
533 break;
534 case 0x78: /* LE R,D(X,B) */
535 get_user(*((__u16 *) (opcode+2)), location+1);
536 signal = math_emu_le(opcode, regs);
537 break;
538 default:
539 signal = SIGILL;
540 break;
541 }
542 } else
543 signal = SIGILL;
544
545 if (signal == SIGFPE)
546 do_fp_trap(regs, location,
547 current->thread.fp_regs.fpc, interruption_code);
548 else if (signal) {
549 siginfo_t info;
550 info.si_signo = signal;
551 info.si_errno = 0;
552 info.si_code = ILL_ILLOPN;
553 info.si_addr = location;
554 do_trap(interruption_code, signal,
555 "specification exception", regs, &info);
556 }
557}
558#else
559DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
560 ILL_ILLOPN, get_check_address(regs));
561#endif
562
563asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
564{
565 __u16 *location;
566 int signal = 0;
567
568 location = (__u16 *) get_check_address(regs);
569
570 /*
571 * We got all needed information from the lowcore and can
572 * now safely switch on interrupts.
573 */
574 if (regs->psw.mask & PSW_MASK_PSTATE)
575 local_irq_enable();
576
577 if (MACHINE_HAS_IEEE)
578 __asm__ volatile ("stfpc %0\n\t"
579 : "=m" (current->thread.fp_regs.fpc));
580
581#ifdef CONFIG_MATHEMU
582 else if (regs->psw.mask & PSW_MASK_PSTATE) {
583 __u8 opcode[6];
584 get_user(*((__u16 *) opcode), location);
585 switch (opcode[0]) {
586 case 0x28: /* LDR Rx,Ry */
587 signal = math_emu_ldr(opcode);
588 break;
589 case 0x38: /* LER Rx,Ry */
590 signal = math_emu_ler(opcode);
591 break;
592 case 0x60: /* STD R,D(X,B) */
593 get_user(*((__u16 *) (opcode+2)), location+1);
594 signal = math_emu_std(opcode, regs);
595 break;
596 case 0x68: /* LD R,D(X,B) */
597 get_user(*((__u16 *) (opcode+2)), location+1);
598 signal = math_emu_ld(opcode, regs);
599 break;
600 case 0x70: /* STE R,D(X,B) */
601 get_user(*((__u16 *) (opcode+2)), location+1);
602 signal = math_emu_ste(opcode, regs);
603 break;
604 case 0x78: /* LE R,D(X,B) */
605 get_user(*((__u16 *) (opcode+2)), location+1);
606 signal = math_emu_le(opcode, regs);
607 break;
608 case 0xb3:
609 get_user(*((__u16 *) (opcode+2)), location+1);
610 signal = math_emu_b3(opcode, regs);
611 break;
612 case 0xed:
613 get_user(*((__u32 *) (opcode+2)),
614 (__u32 *)(location+1));
615 signal = math_emu_ed(opcode, regs);
616 break;
617 case 0xb2:
618 if (opcode[1] == 0x99) {
619 get_user(*((__u16 *) (opcode+2)), location+1);
620 signal = math_emu_srnm(opcode, regs);
621 } else if (opcode[1] == 0x9c) {
622 get_user(*((__u16 *) (opcode+2)), location+1);
623 signal = math_emu_stfpc(opcode, regs);
624 } else if (opcode[1] == 0x9d) {
625 get_user(*((__u16 *) (opcode+2)), location+1);
626 signal = math_emu_lfpc(opcode, regs);
627 } else
628 signal = SIGILL;
629 break;
630 default:
631 signal = SIGILL;
632 break;
633 }
634 }
635#endif
636 if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
637 signal = SIGFPE;
638 else
639 signal = SIGILL;
640 if (signal == SIGFPE)
641 do_fp_trap(regs, location,
642 current->thread.fp_regs.fpc, interruption_code);
643 else if (signal) {
644 siginfo_t info;
645 info.si_signo = signal;
646 info.si_errno = 0;
647 info.si_code = ILL_ILLOPN;
648 info.si_addr = location;
649 do_trap(interruption_code, signal,
650 "data exception", regs, &info);
651 }
652}
653
654asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code)
655{
656 siginfo_t info;
657
658 /* Set user psw back to home space mode. */
659 if (regs->psw.mask & PSW_MASK_PSTATE)
660 regs->psw.mask |= PSW_ASC_HOME;
661 /* Send SIGILL. */
662 info.si_signo = SIGILL;
663 info.si_errno = 0;
664 info.si_code = ILL_PRVOPC;
665 info.si_addr = get_check_address(regs);
666 do_trap(int_code, SIGILL, "space switch event", regs, &info);
667}
668
669asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
670{
671 die("Kernel stack overflow", regs, 0);
672 panic("Corrupt kernel stack, can't continue.");
673}
674
675
676/* init is done in lowcore.S and head.S */
677
678void __init trap_init(void)
679{
680 int i;
681
682 for (i = 0; i < 128; i++)
683 pgm_check_table[i] = &default_trap_handler;
684 pgm_check_table[1] = &illegal_op;
685 pgm_check_table[2] = &privileged_op;
686 pgm_check_table[3] = &execute_exception;
687 pgm_check_table[4] = &do_protection_exception;
688 pgm_check_table[5] = &addressing_exception;
689 pgm_check_table[6] = &specification_exception;
690 pgm_check_table[7] = &data_exception;
691 pgm_check_table[8] = &overflow_exception;
692 pgm_check_table[9] = &divide_exception;
693 pgm_check_table[0x0A] = &overflow_exception;
694 pgm_check_table[0x0B] = &divide_exception;
695 pgm_check_table[0x0C] = &hfp_overflow_exception;
696 pgm_check_table[0x0D] = &hfp_underflow_exception;
697 pgm_check_table[0x0E] = &hfp_significance_exception;
698 pgm_check_table[0x0F] = &hfp_divide_exception;
699 pgm_check_table[0x10] = &do_dat_exception;
700 pgm_check_table[0x11] = &do_dat_exception;
701 pgm_check_table[0x12] = &translation_exception;
702 pgm_check_table[0x13] = &special_op_exception;
703#ifndef CONFIG_ARCH_S390X
704 pgm_check_table[0x14] = &do_pseudo_page_fault;
705#else /* CONFIG_ARCH_S390X */
706 pgm_check_table[0x38] = &do_dat_exception;
707 pgm_check_table[0x39] = &do_dat_exception;
708 pgm_check_table[0x3A] = &do_dat_exception;
709 pgm_check_table[0x3B] = &do_dat_exception;
710#endif /* CONFIG_ARCH_S390X */
711 pgm_check_table[0x15] = &operand_exception;
712 pgm_check_table[0x1C] = &space_switch_exception;
713 pgm_check_table[0x1D] = &hfp_sqrt_exception;
714 pgm_check_table[0x40] = &do_monitor_call;
715
716 if (MACHINE_IS_VM) {
717 /*
718 * First try to get pfault pseudo page faults going.
719	 * If this isn't available, turn on pagex page faults.
720 */
721#ifdef CONFIG_PFAULT
722 /* request the 0x2603 external interrupt */
723 if (register_early_external_interrupt(0x2603, pfault_interrupt,
724 &ext_int_pfault) != 0)
725 panic("Couldn't request external interrupt 0x2603");
726
727 if (pfault_init() == 0)
728 return;
729
730 /* Tough luck, no pfault. */
731 unregister_early_external_interrupt(0x2603, pfault_interrupt,
732 &ext_int_pfault);
733#endif
734#ifndef CONFIG_ARCH_S390X
735 cpcmd("SET PAGEX ON", NULL, 0);
736#endif
737 }
738}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..89fdb3808bc0
--- /dev/null
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -0,0 +1,130 @@
1/* ld script to make s390 Linux kernel
2 * Written by Martin Schwidefsky (schwidefsky@de.ibm.com)
3 */
4
5#include <asm-generic/vmlinux.lds.h>
6#include <linux/config.h>
7
8#ifndef CONFIG_ARCH_S390X
9OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
10OUTPUT_ARCH(s390)
11ENTRY(_start)
12jiffies = jiffies_64 + 4;
13#else
14OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
15OUTPUT_ARCH(s390:64-bit)
16ENTRY(_start)
17jiffies = jiffies_64;
18#endif
19
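The 31-bit alias "jiffies = jiffies_64 + 4" works because s390 is big-endian: the least significant 32 bits of the 64-bit counter sit at byte offset 4. The same aliasing expressed in C, valid on a big-endian machine only (the helper name is illustrative):

#include <stdint.h>
#include <string.h>

static uint32_t low_word(const uint64_t *v64)
{
        uint32_t lo;

        /* bytes 4..7 hold the low word on a big-endian machine */
        memcpy(&lo, (const unsigned char *) v64 + 4, sizeof(lo));
        return lo;
}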
20SECTIONS
21{
22 . = 0x00000000;
23 _text = .; /* Text and read-only data */
24 .text : {
25 *(.text)
26 SCHED_TEXT
27 LOCK_TEXT
28 *(.fixup)
29 *(.gnu.warning)
30 } = 0x0700
31
32 _etext = .; /* End of text section */
33
34 . = ALIGN(16); /* Exception table */
35 __start___ex_table = .;
36 __ex_table : { *(__ex_table) }
37 __stop___ex_table = .;
38
39 RODATA
40
41#ifdef CONFIG_SHARED_KERNEL
42 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
43
44 _eshared = .; /* End of shareable data */
45#endif
46
47 .data : { /* Data */
48 *(.data)
49 CONSTRUCTORS
50 }
51
52 . = ALIGN(4096);
53 __nosave_begin = .;
54 .data_nosave : { *(.data.nosave) }
55 . = ALIGN(4096);
56 __nosave_end = .;
57
58 . = ALIGN(4096);
59 .data.page_aligned : { *(.data.idt) }
60
61 . = ALIGN(32);
62 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
63
64 _edata = .; /* End of data section */
65
66 . = ALIGN(8192); /* init_task */
67 .data.init_task : { *(.data.init_task) }
68
69 /* will be freed after init */
70 . = ALIGN(4096); /* Init code and data */
71 __init_begin = .;
72 .init.text : {
73 _sinittext = .;
74 *(.init.text)
75 _einittext = .;
76 }
77 .init.data : { *(.init.data) }
78 . = ALIGN(256);
79 __setup_start = .;
80 .init.setup : { *(.init.setup) }
81 __setup_end = .;
82 __initcall_start = .;
83 .initcall.init : {
84 *(.initcall1.init)
85 *(.initcall2.init)
86 *(.initcall3.init)
87 *(.initcall4.init)
88 *(.initcall5.init)
89 *(.initcall6.init)
90 *(.initcall7.init)
91 }
92 __initcall_end = .;
93 __con_initcall_start = .;
94 .con_initcall.init : { *(.con_initcall.init) }
95 __con_initcall_end = .;
96 SECURITY_INIT
97 . = ALIGN(256);
98 __initramfs_start = .;
99 .init.ramfs : { *(.init.initramfs) }
100 . = ALIGN(2);
101 __initramfs_end = .;
102 . = ALIGN(256);
103 __per_cpu_start = .;
104 .data.percpu : { *(.data.percpu) }
105 __per_cpu_end = .;
106 . = ALIGN(4096);
107 __init_end = .;
108 /* freed after init ends here */
109
110 __bss_start = .; /* BSS */
111 .bss : { *(.bss) }
112 . = ALIGN(2);
113 __bss_stop = .;
114
115 _end = . ;
116
117 /* Sections to be discarded */
118 /DISCARD/ : {
119 *(.exitcall.exit)
120 }
121
122 /* Stabs debugging sections. */
123 .stab 0 : { *(.stab) }
124 .stabstr 0 : { *(.stabstr) }
125 .stab.excl 0 : { *(.stab.excl) }
126 .stab.exclstr 0 : { *(.stab.exclstr) }
127 .stab.index 0 : { *(.stab.index) }
128 .stab.indexstr 0 : { *(.stab.indexstr) }
129 .comment 0 : { *(.comment) }
130}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
new file mode 100644
index 000000000000..bb6cf02418a2
--- /dev/null
+++ b/arch/s390/kernel/vtime.c
@@ -0,0 +1,565 @@
1/*
2 * arch/s390/kernel/vtime.c
3 * Virtual cpu timer based timer functions.
4 *
5 * S390 version
6 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/time.h>
14#include <linux/delay.h>
15#include <linux/init.h>
16#include <linux/smp.h>
17#include <linux/types.h>
18#include <linux/timex.h>
19#include <linux/notifier.h>
20#include <linux/kernel_stat.h>
21#include <linux/rcupdate.h>
22#include <linux/posix-timers.h>
23
24#include <asm/s390_ext.h>
25#include <asm/timer.h>
26
27#define VTIMER_MAGIC (TIMER_MAGIC + 1)
28static ext_int_info_t ext_int_info_timer;
29DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
30
31#ifdef CONFIG_VIRT_CPU_ACCOUNTING
32/*
33 * Update process times based on virtual cpu times stored by entry.S
34 * to the lowcore fields user_timer, system_timer & steal_clock.
35 */
36void account_user_vtime(struct task_struct *tsk)
37{
38 cputime_t cputime;
39 __u64 timer, clock;
40 int rcu_user_flag;
41
42 timer = S390_lowcore.last_update_timer;
43 clock = S390_lowcore.last_update_clock;
44 asm volatile (" STPT %0\n" /* Store current cpu timer value */
45 " STCK %1" /* Store current tod clock value */
46 : "=m" (S390_lowcore.last_update_timer),
47 "=m" (S390_lowcore.last_update_clock) );
48 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
49 S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
50
51 cputime = S390_lowcore.user_timer >> 12;
52 rcu_user_flag = cputime != 0;
53 S390_lowcore.user_timer -= cputime << 12;
54 S390_lowcore.steal_clock -= cputime << 12;
55 account_user_time(tsk, cputime);
56
57 cputime = S390_lowcore.system_timer >> 12;
58 S390_lowcore.system_timer -= cputime << 12;
59 S390_lowcore.steal_clock -= cputime << 12;
60 account_system_time(tsk, HARDIRQ_OFFSET, cputime);
61
62 cputime = S390_lowcore.steal_clock;
63 if ((__s64) cputime > 0) {
64 cputime >>= 12;
65 S390_lowcore.steal_clock -= cputime << 12;
66 account_steal_time(tsk, cputime);
67 }
68
69 run_local_timers();
70 if (rcu_pending(smp_processor_id()))
71 rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
72 scheduler_tick();
73 run_posix_cpu_timers(tsk);
74}
75
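/*
 * A note on the ">> 12" conversions above: bit 51 of the TOD clock and
 * of the CPU timer is one microsecond, so one clock unit is 2^-12 us
 * and 4096 units make a microsecond.  A minimal sketch of the
 * conversion with hypothetical helper names, assuming cputime_t counts
 * microseconds here:
 */
#if 0
static inline cputime_t clock_to_cputime(__u64 clk)
{
	return clk >> 12;		/* clock units -> microseconds */
}

static inline __u64 cputime_to_clock(cputime_t ct)
{
	return (__u64) ct << 12;	/* microseconds -> clock units */
}
#endif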
76/*
77 * Update the per-task system time based on the virtual cpu time
78 * stored by entry.S in the lowcore fields system_timer & steal_clock.
79 */
80void account_system_vtime(struct task_struct *tsk)
81{
82 cputime_t cputime;
83 __u64 timer;
84
85 timer = S390_lowcore.last_update_timer;
86 asm volatile (" STPT %0" /* Store current cpu timer value */
87 : "=m" (S390_lowcore.last_update_timer) );
88 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
89
90 cputime = S390_lowcore.system_timer >> 12;
91 S390_lowcore.system_timer -= cputime << 12;
92 S390_lowcore.steal_clock -= cputime << 12;
93 account_system_time(tsk, 0, cputime);
94}
95
96static inline void set_vtimer(__u64 expires)
97{
98 __u64 timer;
99
100 asm volatile (" STPT %0\n" /* Store current cpu timer value */
101		      " SPT %1"  /* Set new value immediately afterwards */
102 : "=m" (timer) : "m" (expires) );
103 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
104 S390_lowcore.last_update_timer = expires;
105
106 /* store expire time for this CPU timer */
107 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
108}
109#else
110static inline void set_vtimer(__u64 expires)
111{
112 S390_lowcore.last_update_timer = expires;
113 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
114
115 /* store expire time for this CPU timer */
116 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
117}
118#endif
119
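/*
 * CPU timer semantics used by both set_vtimer() variants above: SPT
 * loads the timer, which counts down and raises external interrupt
 * 0x1005 once it turns negative; STPT stores the current value without
 * stopping it.  Hypothetical helper reading the remaining slice:
 */
#if 0
static inline __u64 vtimer_remaining(void)
{
	__u64 left;

	asm volatile ("STPT %0" : "=m" (left));
	return left;	/* MSB set (negative) if the timer already expired */
}
#endif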
120static void start_cpu_timer(void)
121{
122 struct vtimer_queue *vt_list;
123
124 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
125 set_vtimer(vt_list->idle);
126}
127
128static void stop_cpu_timer(void)
129{
130 __u64 done;
131 struct vtimer_queue *vt_list;
132
133 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
134
135 /* nothing to do */
136 if (list_empty(&vt_list->list)) {
137 vt_list->idle = VTIMER_MAX_SLICE;
138 goto fire;
139 }
140
141 /* store progress */
142 asm volatile ("STPT %0" : "=m" (done));
143
144	/*
145	 * If done is negative we do not stop the CPU timer
146	 * because we will instantly get an interrupt that
147	 * will start the CPU timer again.
148	 */
149	if (done & (1ULL << 63))
150 return;
151 else
152 vt_list->offset += vt_list->to_expire - done;
153
154 /* save the actual expire value */
155 vt_list->idle = done;
156
157	/*
158	 * The CPU timer cannot be halted, so we write a value that
159	 * nearly never expires (2^63 clock units, about 71 years) and
160	 * rewrite the stored expire value when the timer is continued.
161	 */
162 fire:
163 set_vtimer(VTIMER_MAX_SLICE);
164}
165
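/*
 * Where the "71 years" above comes from: one CPU-timer unit is 2^-12 us
 * and VTIMER_MAX_SLICE is close to the largest positive value, 2^63
 * units:
 *
 *	2^63 units * 2^-12 us = 2^51 us ~= 2.25 * 10^9 s ~= 71 years
 *
 * so writing VTIMER_MAX_SLICE effectively parks the timer.
 */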
166/*
167 * Sorted add to a list. The list is searched linearly until the
168 * first element with a larger expiry value is found.
169 */
170static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
171{
172 struct vtimer_list *event;
173
174 list_for_each_entry(event, head, entry) {
175 if (event->expires > timer->expires) {
176 list_add_tail(&timer->entry, &event->entry);
177 return;
178 }
179 }
180 list_add_tail(&timer->entry, head);
181}
182
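/*
 * Ordering example for list_add_sorted() with hypothetical expiry
 * values: inserting 8, 3 and 5 into an empty list yields 3 -> 5 -> 8,
 * and a second timer with expiry 5 would queue behind the first one,
 * since insertion happens only before the first strictly larger
 * element (FIFO order for equal expiries):
 */
#if 0
static void sorted_add_example(void)
{
	LIST_HEAD(head);
	struct vtimer_list a = { .expires = 8 };
	struct vtimer_list b = { .expires = 3 };
	struct vtimer_list c = { .expires = 5 };

	list_add_sorted(&a, &head);
	list_add_sorted(&b, &head);
	list_add_sorted(&c, &head);	/* order: b(3), c(5), a(8) */
}
#endif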
183/*
184 * Do the callback functions of expired vtimer events.
185 * Called from within the interrupt handler.
186 */
187static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
188{
189 struct vtimer_queue *vt_list;
190 struct vtimer_list *event, *tmp;
191 void (*fn)(unsigned long, struct pt_regs*);
192 unsigned long data;
193
194 if (list_empty(cb_list))
195 return;
196
197 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
198
199 list_for_each_entry_safe(event, tmp, cb_list, entry) {
200 fn = event->function;
201 data = event->data;
202 fn(data, regs);
203
204 if (!event->interval)
205 /* delete one shot timer */
206 list_del_init(&event->entry);
207 else {
208 /* move interval timer back to list */
209 spin_lock(&vt_list->lock);
210 list_del_init(&event->entry);
211 list_add_sorted(event, &vt_list->list);
212 spin_unlock(&vt_list->lock);
213 }
214 }
215}
216
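/*
 * Shape of a vtimer callback: it runs in interrupt context and, as
 * do_callbacks() shows, outside of vt_list->lock (expired entries are
 * moved to a private list first).  Hypothetical example, name and
 * payload made up:
 */
#if 0
static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
{
	atomic_t *counter = (atomic_t *) data;

	atomic_inc(counter);	/* must not sleep: interrupt context */
}
#endif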
217/*
218 * Handler for the virtual CPU timer.
219 */
220static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
221{
222 int cpu;
223 __u64 next, delta;
224 struct vtimer_queue *vt_list;
225 struct vtimer_list *event, *tmp;
226 struct list_head *ptr;
227 /* the callback queue */
228 struct list_head cb_list;
229
230 INIT_LIST_HEAD(&cb_list);
231 cpu = smp_processor_id();
232 vt_list = &per_cpu(virt_cpu_timer, cpu);
233
234 /* walk timer list, fire all expired events */
235 spin_lock(&vt_list->lock);
236
237 if (vt_list->to_expire < VTIMER_MAX_SLICE)
238 vt_list->offset += vt_list->to_expire;
239
240 list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
241 if (event->expires > vt_list->offset)
242 /* found first unexpired event, leave */
243 break;
244
245 /* re-charge interval timer, we have to add the offset */
246 if (event->interval)
247 event->expires = event->interval + vt_list->offset;
248
249 /* move expired timer to the callback queue */
250 list_move_tail(&event->entry, &cb_list);
251 }
252 spin_unlock(&vt_list->lock);
253 do_callbacks(&cb_list, regs);
254
255 /* next event is first in list */
256 spin_lock(&vt_list->lock);
257 if (!list_empty(&vt_list->list)) {
258 ptr = vt_list->list.next;
259 event = list_entry(ptr, struct vtimer_list, entry);
260 next = event->expires - vt_list->offset;
261
262		/* add the time spent in this interrupt handler
263		 * and in the callback functions
264		 */
265 asm volatile ("STPT %0" : "=m" (delta));
266		delta = 0xffffffffffffffffULL - delta + 1;
267 vt_list->offset += delta;
268 next -= delta;
269 } else {
270 vt_list->offset = 0;
271 next = VTIMER_MAX_SLICE;
272 }
273 spin_unlock(&vt_list->lock);
274 set_vtimer(next);
275}
276
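/*
 * The "0xffffffffffffffffULL - delta + 1" in the handler above is
 * two's-complement negation: an expired CPU timer keeps counting down
 * below zero, so STPT returns a negative value and -delta is the time
 * spent since expiry.  Equivalent sketch:
 */
#if 0
static inline __u64 elapsed_since_expiry(void)
{
	__u64 stpt_value;

	asm volatile ("STPT %0" : "=m" (stpt_value));
	return -stpt_value;	/* same as ~stpt_value + 1 */
}
#endif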
277void init_virt_timer(struct vtimer_list *timer)
278{
279 timer->magic = VTIMER_MAGIC;
280 timer->function = NULL;
281 INIT_LIST_HEAD(&timer->entry);
282 spin_lock_init(&timer->lock);
283}
284EXPORT_SYMBOL(init_virt_timer);
285
286static inline int check_vtimer(struct vtimer_list *timer)
287{
288 if (timer->magic != VTIMER_MAGIC)
289 return -EINVAL;
290 return 0;
291}
292
293static inline int vtimer_pending(struct vtimer_list *timer)
294{
295	return !list_empty(&timer->entry);
296}
297
298/*
299 * This function must only run on the CPU the timer is queued for (timer->cpu).
300 */
301static void internal_add_vtimer(struct vtimer_list *timer)
302{
303 unsigned long flags;
304 __u64 done;
305 struct vtimer_list *event;
306 struct vtimer_queue *vt_list;
307
308 vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
309 spin_lock_irqsave(&vt_list->lock, flags);
310
311 if (timer->cpu != smp_processor_id())
312		printk("internal_add_vtimer: BUG, running on wrong CPU\n");
313
314 /* if list is empty we only have to set the timer */
315 if (list_empty(&vt_list->list)) {
316		/* reset the offset; this can happen if the last timer was
317		 * just deleted by mod_virt_timer and the expiry interrupt
318		 * has not fired yet
319		 */
320 vt_list->offset = 0;
321 goto fire;
322 }
323
324 /* save progress */
325 asm volatile ("STPT %0" : "=m" (done));
326
327 /* calculate completed work */
328 done = vt_list->to_expire - done + vt_list->offset;
329 vt_list->offset = 0;
330
331 list_for_each_entry(event, &vt_list->list, entry)
332 event->expires -= done;
333
334 fire:
335 list_add_sorted(timer, &vt_list->list);
336
337 /* get first element, which is the next vtimer slice */
338 event = list_entry(vt_list->list.next, struct vtimer_list, entry);
339
340 set_vtimer(event->expires);
341 spin_unlock_irqrestore(&vt_list->lock, flags);
342	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
343 put_cpu();
344}
345
346static inline int prepare_vtimer(struct vtimer_list *timer)
347{
348 if (check_vtimer(timer) || !timer->function) {
349 printk("add_virt_timer: uninitialized timer\n");
350 return -EINVAL;
351 }
352
353 if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
354 printk("add_virt_timer: invalid timer expire value!\n");
355 return -EINVAL;
356 }
357
358 if (vtimer_pending(timer)) {
359 printk("add_virt_timer: timer pending\n");
360 return -EBUSY;
361 }
362
363 timer->cpu = get_cpu();
364 return 0;
365}
366
367/*
368 * add_virt_timer - add a one-shot virtual CPU timer
369 */
370void add_virt_timer(void *new)
371{
372 struct vtimer_list *timer;
373
374 timer = (struct vtimer_list *)new;
375
376 if (prepare_vtimer(timer) < 0)
377 return;
378
379 timer->interval = 0;
380 internal_add_vtimer(timer);
381}
382EXPORT_SYMBOL(add_virt_timer);
383
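/*
 * Usage sketch for the one-shot API above.  demo_fn, demo_start and
 * the delay are hypothetical; "~1 second" assumes the 2^-12 us clock
 * unit:
 */
#if 0
static void demo_fn(unsigned long data, struct pt_regs *regs)
{
	printk("vtimer fired, data=%lu\n", data);
}

static void demo_start(void)
{
	static struct vtimer_list demo_timer;

	init_virt_timer(&demo_timer);
	demo_timer.function = demo_fn;
	demo_timer.data = 0;
	demo_timer.expires = 1ULL << 32;	/* ~1 second of cpu time */
	add_virt_timer(&demo_timer);
}
#endif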
384/*
385 * add_virt_timer_periodic - add an interval virtual CPU timer
386 */
387void add_virt_timer_periodic(void *new)
388{
389 struct vtimer_list *timer;
390
391 timer = (struct vtimer_list *)new;
392
393 if (prepare_vtimer(timer) < 0)
394 return;
395
396 timer->interval = timer->expires;
397 internal_add_vtimer(timer);
398}
399EXPORT_SYMBOL(add_virt_timer_periodic);
400
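/*
 * For the periodic variant the first expiry comes after 'expires'
 * clock units; do_cpu_timer_interrupt() then re-arms the timer with
 * the same period.  Hypothetical ~0.5 second period, reusing demo_fn
 * from the sketch above:
 */
#if 0
static void demo_start_periodic(void)
{
	static struct vtimer_list tick;

	init_virt_timer(&tick);
	tick.function = demo_fn;
	tick.expires = 1ULL << 31;	/* ~0.5 seconds of cpu time */
	add_virt_timer_periodic(&tick);
}
#endif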
401/*
402 * When changing a pending timer, this function must be called on the
403 * CPU the timer runs on, e.g. via smp_call_function_on().
404 *
405 * The original mod_timer adds the timer if it is not pending; for
406 * compatibility we do the same, adding it on the current CPU as a one-shot.
407 *
408 * Returns whether a pending timer was modified (1) or not (0).
409 */
410int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
411{
412 struct vtimer_queue *vt_list;
413 unsigned long flags;
414 int cpu;
415
416 if (check_vtimer(timer) || !timer->function) {
417 printk("mod_virt_timer: uninitialized timer\n");
418 return -EINVAL;
419 }
420
421 if (!expires || expires > VTIMER_MAX_SLICE) {
422 printk("mod_virt_timer: invalid expire range\n");
423 return -EINVAL;
424 }
425
426 /*
427 * This is a common optimization triggered by the
428 * networking code - if the timer is re-modified
429 * to be the same thing then just return:
430 */
431 if (timer->expires == expires && vtimer_pending(timer))
432 return 1;
433
434 cpu = get_cpu();
435 vt_list = &per_cpu(virt_cpu_timer, cpu);
436
437	/* disable interrupts before testing whether the timer is pending */
438 spin_lock_irqsave(&vt_list->lock, flags);
439
440 /* if timer isn't pending add it on the current CPU */
441 if (!vtimer_pending(timer)) {
442 spin_unlock_irqrestore(&vt_list->lock, flags);
443 /* we do not activate an interval timer with mod_virt_timer */
444 timer->interval = 0;
445 timer->expires = expires;
446 timer->cpu = cpu;
447 internal_add_vtimer(timer);
448 return 0;
449 }
450
451 /* check if we run on the right CPU */
452 if (timer->cpu != cpu) {
453 printk("mod_virt_timer: running on wrong CPU, check your code\n");
454 spin_unlock_irqrestore(&vt_list->lock, flags);
455 put_cpu();
456 return -EINVAL;
457 }
458
459 list_del_init(&timer->entry);
460 timer->expires = expires;
461
462 /* also change the interval if we have an interval timer */
463 if (timer->interval)
464 timer->interval = expires;
465
466 /* the timer can't expire anymore so we can release the lock */
467 spin_unlock_irqrestore(&vt_list->lock, flags);
468 internal_add_vtimer(timer);
469 return 1;
470}
471EXPORT_SYMBOL(mod_virt_timer);
472
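/*
 * Return values of mod_virt_timer() at a glance, per the comment
 * above; the function and the new expiry are hypothetical:
 */
#if 0
static void demo_retrigger(struct vtimer_list *timer)
{
	int ret = mod_virt_timer(timer, 1ULL << 33);

	/*
	 * ret == 1:  a pending timer was re-queued with the new expiry
	 * ret == 0:  timer was not pending, added as a one-shot instead
	 * ret  < 0:  uninitialized timer or invalid expiry value
	 */
	if (ret < 0)
		printk("demo_retrigger: invalid timer or expiry\n");
}
#endif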
473/*
474 * delete a virtual timer
475 *
476 * returns whether the deleted timer was pending (1) or not (0)
477 */
478int del_virt_timer(struct vtimer_list *timer)
479{
480 unsigned long flags;
481 struct vtimer_queue *vt_list;
482
483 if (check_vtimer(timer)) {
484 printk("del_virt_timer: timer not initialized\n");
485 return -EINVAL;
486 }
487
488 /* check if timer is pending */
489 if (!vtimer_pending(timer))
490 return 0;
491
492 vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
493 spin_lock_irqsave(&vt_list->lock, flags);
494
495 /* we don't interrupt a running timer, just let it expire! */
496 list_del_init(&timer->entry);
497
498 /* last timer removed */
499 if (list_empty(&vt_list->list)) {
500 vt_list->to_expire = 0;
501 vt_list->offset = 0;
502 }
503
504 spin_unlock_irqrestore(&vt_list->lock, flags);
505 return 1;
506}
507EXPORT_SYMBOL(del_virt_timer);
508
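/*
 * Teardown sketch: del_virt_timer() only unlinks a pending timer; as
 * the comment in the function notes, a callback that is already
 * running is not waited for.  Hypothetical shutdown path:
 */
#if 0
static void demo_stop(struct vtimer_list *timer)
{
	if (del_virt_timer(timer) == 1)
		printk("demo_stop: timer was still pending\n");
}
#endif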
509/*
510 * Start the virtual CPU timer on the current CPU.
511 */
512void init_cpu_vtimer(void)
513{
514 struct vtimer_queue *vt_list;
515 unsigned long cr0;
516
517 /* kick the virtual timer */
518 S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
519 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
520 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
521 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
522 __ctl_store(cr0, 0, 0);
523	cr0 |= 0x400;	/* enable the cpu timer external interrupt subclass */
524 __ctl_load(cr0, 0, 0);
525
526 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
527 INIT_LIST_HEAD(&vt_list->list);
528 spin_lock_init(&vt_list->lock);
529 vt_list->to_expire = 0;
530 vt_list->offset = 0;
531 vt_list->idle = 0;
532
533}
534
535static int vtimer_idle_notify(struct notifier_block *self,
536 unsigned long action, void *hcpu)
537{
538 switch (action) {
539 case CPU_IDLE:
540 stop_cpu_timer();
541 break;
542 case CPU_NOT_IDLE:
543 start_cpu_timer();
544 break;
545 }
546 return NOTIFY_OK;
547}
548
549static struct notifier_block vtimer_idle_nb = {
550 .notifier_call = vtimer_idle_notify,
551};
552
553void __init vtime_init(void)
554{
555 /* request the cpu timer external interrupt */
556 if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
557 &ext_int_info_timer) != 0)
558 panic("Couldn't request external interrupt 0x1005");
559
560 if (register_idle_notifier(&vtimer_idle_nb))
561 panic("Couldn't register idle notifier");
562
563 init_cpu_vtimer();
564}
565