aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390
diff options
context:
space:
mode:
Diffstat (limited to 'arch/s390')
-rw-r--r--arch/s390/Kconfig37
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/appldata/appldata_base.c8
-rw-r--r--arch/s390/appldata/appldata_os.c14
-rw-r--r--arch/s390/crypto/Makefile8
-rw-r--r--arch/s390/crypto/aes_s390.c276
-rw-r--r--arch/s390/crypto/crypt_s390.h (renamed from arch/s390/crypto/crypt_z990.h)267
-rw-r--r--arch/s390/crypto/crypt_s390_query.c129
-rw-r--r--arch/s390/crypto/crypt_z990_query.c111
-rw-r--r--arch/s390/crypto/des_s390.c466
-rw-r--r--arch/s390/crypto/des_z990.c284
-rw-r--r--arch/s390/crypto/sha1_s390.c (renamed from arch/s390/crypto/sha1_z990.c)32
-rw-r--r--arch/s390/crypto/sha256_s390.c166
-rw-r--r--arch/s390/defconfig65
-rw-r--r--arch/s390/kernel/Makefile18
-rw-r--r--arch/s390/kernel/binfmt_elf32.c2
-rw-r--r--arch/s390/kernel/compat_ioctl.c81
-rw-r--r--arch/s390/kernel/compat_linux.c35
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.S2
-rw-r--r--arch/s390/kernel/cpcmd.c16
-rw-r--r--arch/s390/kernel/crash.c2
-rw-r--r--arch/s390/kernel/entry64.S18
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/process.c59
-rw-r--r--arch/s390/kernel/ptrace.c79
-rw-r--r--arch/s390/kernel/reipl_diag.c2
-rw-r--r--arch/s390/kernel/s390_ksyms.c1
-rw-r--r--arch/s390/kernel/setup.c25
-rw-r--r--arch/s390/kernel/signal.c8
-rw-r--r--arch/s390/kernel/smp.c16
-rw-r--r--arch/s390/kernel/sys_s390.c12
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/traps.c16
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kernel/vtime.c27
-rw-r--r--arch/s390/lib/Makefile4
-rw-r--r--arch/s390/lib/spinlock.c9
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/fault.c18
-rw-r--r--arch/s390/mm/init.c8
-rw-r--r--arch/s390/mm/mmap.c2
-rw-r--r--arch/s390/oprofile/Makefile2
-rw-r--r--arch/s390/oprofile/backtrace.c79
-rw-r--r--arch/s390/oprofile/init.c4
47 files changed, 1531 insertions, 911 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 477ac2758bd5..b66602ad7b33 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -23,35 +23,22 @@ config GENERIC_BUST_SPINLOCK
23 23
24mainmenu "Linux Kernel Configuration" 24mainmenu "Linux Kernel Configuration"
25 25
26config ARCH_S390 26config S390
27 bool 27 bool
28 default y 28 default y
29 29
30config UID16
31 bool
32 default y
33 depends on ARCH_S390X = 'n'
34
35source "init/Kconfig" 30source "init/Kconfig"
36 31
37menu "Base setup" 32menu "Base setup"
38 33
39comment "Processor type and features" 34comment "Processor type and features"
40 35
41config ARCH_S390X 36config 64BIT
42 bool "64 bit kernel" 37 bool "64 bit kernel"
43 help 38 help
44 Select this option if you have a 64 bit IBM zSeries machine 39 Select this option if you have a 64 bit IBM zSeries machine
45 and want to use the 64 bit addressing mode. 40 and want to use the 64 bit addressing mode.
46 41
47config 64BIT
48 def_bool ARCH_S390X
49
50config ARCH_S390_31
51 bool
52 depends on ARCH_S390X = 'n'
53 default y
54
55config SMP 42config SMP
56 bool "Symmetric multi-processing support" 43 bool "Symmetric multi-processing support"
57 ---help--- 44 ---help---
@@ -101,20 +88,15 @@ config MATHEMU
101 on older S/390 machines. Say Y unless you know your machine doesn't 88 on older S/390 machines. Say Y unless you know your machine doesn't
102 need this. 89 need this.
103 90
104config S390_SUPPORT 91config COMPAT
105 bool "Kernel support for 31 bit emulation" 92 bool "Kernel support for 31 bit emulation"
106 depends on ARCH_S390X 93 depends on 64BIT
107 help 94 help
108 Select this option if you want to enable your system kernel to 95 Select this option if you want to enable your system kernel to
109 handle system-calls from ELF binaries for 31 bit ESA. This option 96 handle system-calls from ELF binaries for 31 bit ESA. This option
110 (and some other stuff like libraries and such) is needed for 97 (and some other stuff like libraries and such) is needed for
111 executing 31 bit applications. It is safe to say "Y". 98 executing 31 bit applications. It is safe to say "Y".
112 99
113config COMPAT
114 bool
115 depends on S390_SUPPORT
116 default y
117
118config SYSVIPC_COMPAT 100config SYSVIPC_COMPAT
119 bool 101 bool
120 depends on COMPAT && SYSVIPC 102 depends on COMPAT && SYSVIPC
@@ -122,7 +104,7 @@ config SYSVIPC_COMPAT
122 104
123config BINFMT_ELF32 105config BINFMT_ELF32
124 tristate "Kernel support for 31 bit ELF binaries" 106 tristate "Kernel support for 31 bit ELF binaries"
125 depends on S390_SUPPORT 107 depends on COMPAT
126 help 108 help
127 This allows you to run 32-bit Linux/ELF binaries on your zSeries 109 This allows you to run 32-bit Linux/ELF binaries on your zSeries
128 in 64 bit mode. Everybody wants this; say Y. 110 in 64 bit mode. Everybody wants this; say Y.
@@ -135,7 +117,7 @@ choice
135 117
136config MARCH_G5 118config MARCH_G5
137 bool "S/390 model G5 and G6" 119 bool "S/390 model G5 and G6"
138 depends on ARCH_S390_31 120 depends on !64BIT
139 help 121 help
140 Select this to build a 31 bit kernel that works 122 Select this to build a 31 bit kernel that works
141 on all S/390 and zSeries machines. 123 on all S/390 and zSeries machines.
@@ -240,8 +222,8 @@ config MACHCHK_WARNING
240config QDIO 222config QDIO
241 tristate "QDIO support" 223 tristate "QDIO support"
242 ---help--- 224 ---help---
243 This driver provides the Queued Direct I/O base support for the 225 This driver provides the Queued Direct I/O base support for
244 IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990). 226 IBM mainframes.
245 227
246 For details please refer to the documentation provided by IBM at 228 For details please refer to the documentation provided by IBM at
247 <http://www10.software.ibm.com/developerworks/opensource/linux390> 229 <http://www10.software.ibm.com/developerworks/opensource/linux390>
@@ -263,7 +245,8 @@ config QDIO_DEBUG
263 bool "Extended debugging information" 245 bool "Extended debugging information"
264 depends on QDIO 246 depends on QDIO
265 help 247 help
266 Say Y here to get extended debugging output in /proc/s390dbf/qdio... 248 Say Y here to get extended debugging output in
249 /sys/kernel/debug/s390dbf/qdio...
267 Warning: this option reduces the performance of the QDIO module. 250 Warning: this option reduces the performance of the QDIO module.
268 251
269 If unsure, say N. 252 If unsure, say N.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 73a09a6ee6c8..6c6b197898d0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,16 +13,14 @@
13# Copyright (C) 1994 by Linus Torvalds 13# Copyright (C) 1994 by Linus Torvalds
14# 14#
15 15
16ifdef CONFIG_ARCH_S390_31 16ifndef CONFIG_64BIT
17LDFLAGS := -m elf_s390 17LDFLAGS := -m elf_s390
18CFLAGS += -m31 18CFLAGS += -m31
19AFLAGS += -m31 19AFLAGS += -m31
20UTS_MACHINE := s390 20UTS_MACHINE := s390
21STACK_SIZE := 8192 21STACK_SIZE := 8192
22CHECKFLAGS += -D__s390__ 22CHECKFLAGS += -D__s390__
23endif 23else
24
25ifdef CONFIG_ARCH_S390X
26LDFLAGS := -m elf64_s390 24LDFLAGS := -m elf64_s390
27MODFLAGS += -fpic -D__PIC__ 25MODFLAGS += -fpic -D__PIC__
28CFLAGS += -m64 26CFLAGS += -m64
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index dee6ab54984d..d06a8d71c71d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -40,7 +40,7 @@
40 40
41#define TOD_MICRO 0x01000 /* nr. of TOD clock units 41#define TOD_MICRO 0x01000 /* nr. of TOD clock units
42 for 1 microsecond */ 42 for 1 microsecond */
43#ifndef CONFIG_ARCH_S390X 43#ifndef CONFIG_64BIT
44 44
45#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ 45#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
46#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ 46#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
@@ -54,13 +54,13 @@
54#define APPLDATA_GEN_EVENT_RECORD 0x82 54#define APPLDATA_GEN_EVENT_RECORD 0x82
55#define APPLDATA_START_CONFIG_REC 0x83 55#define APPLDATA_START_CONFIG_REC 0x83
56 56
57#endif /* CONFIG_ARCH_S390X */ 57#endif /* CONFIG_64BIT */
58 58
59 59
60/* 60/*
61 * Parameter list for DIAGNOSE X'DC' 61 * Parameter list for DIAGNOSE X'DC'
62 */ 62 */
63#ifndef CONFIG_ARCH_S390X 63#ifndef CONFIG_64BIT
64struct appldata_parameter_list { 64struct appldata_parameter_list {
65 u16 diag; /* The DIAGNOSE code X'00DC' */ 65 u16 diag; /* The DIAGNOSE code X'00DC' */
66 u8 function; /* The function code for the DIAGNOSE */ 66 u8 function; /* The function code for the DIAGNOSE */
@@ -82,7 +82,7 @@ struct appldata_parameter_list {
82 u64 product_id_addr; 82 u64 product_id_addr;
83 u64 buffer_addr; 83 u64 buffer_addr;
84}; 84};
85#endif /* CONFIG_ARCH_S390X */ 85#endif /* CONFIG_64BIT */
86 86
87/* 87/*
88 * /proc entries (sysctl) 88 * /proc entries (sysctl)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index e0a476bf4fd6..99ddd3bf2fba 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -141,19 +141,19 @@ static void appldata_get_os_data(void *data)
141 j = 0; 141 j = 0;
142 for_each_online_cpu(i) { 142 for_each_online_cpu(i) {
143 os_data->os_cpu[j].per_cpu_user = 143 os_data->os_cpu[j].per_cpu_user =
144 kstat_cpu(i).cpustat.user; 144 cputime_to_jiffies(kstat_cpu(i).cpustat.user);
145 os_data->os_cpu[j].per_cpu_nice = 145 os_data->os_cpu[j].per_cpu_nice =
146 kstat_cpu(i).cpustat.nice; 146 cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
147 os_data->os_cpu[j].per_cpu_system = 147 os_data->os_cpu[j].per_cpu_system =
148 kstat_cpu(i).cpustat.system; 148 cputime_to_jiffies(kstat_cpu(i).cpustat.system);
149 os_data->os_cpu[j].per_cpu_idle = 149 os_data->os_cpu[j].per_cpu_idle =
150 kstat_cpu(i).cpustat.idle; 150 cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
151 os_data->os_cpu[j].per_cpu_irq = 151 os_data->os_cpu[j].per_cpu_irq =
152 kstat_cpu(i).cpustat.irq; 152 cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
153 os_data->os_cpu[j].per_cpu_softirq = 153 os_data->os_cpu[j].per_cpu_softirq =
154 kstat_cpu(i).cpustat.softirq; 154 cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
155 os_data->os_cpu[j].per_cpu_iowait = 155 os_data->os_cpu[j].per_cpu_iowait =
156 kstat_cpu(i).cpustat.iowait; 156 cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
157 j++; 157 j++;
158 } 158 }
159 159
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 96a05e6b51e0..bfe2541dc5cf 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -2,7 +2,9 @@
2# Cryptographic API 2# Cryptographic API
3# 3#
4 4
5obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o 5obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
7 9
8obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o 10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
new file mode 100644
index 000000000000..c5ca2dc5d428
--- /dev/null
+++ b/arch/s390/crypto/aes_s390.c
@@ -0,0 +1,276 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the AES Cipher Algorithm.
5 *
6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 *
10 * Derived from "crypto/aes.c"
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/crypto.h>
22#include "crypt_s390.h"
23
24#define AES_MIN_KEY_SIZE 16
25#define AES_MAX_KEY_SIZE 32
26
27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16
29
30int has_aes_128 = 0;
31int has_aes_192 = 0;
32int has_aes_256 = 0;
33
34struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE];
36 u8 key[AES_MAX_KEY_SIZE];
37 int key_len;
38};
39
40static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len,
41 u32 *flags)
42{
43 struct s390_aes_ctx *sctx = ctx;
44
45 switch (key_len) {
46 case 16:
47 if (!has_aes_128)
48 goto fail;
49 break;
50 case 24:
51 if (!has_aes_192)
52 goto fail;
53
54 break;
55 case 32:
56 if (!has_aes_256)
57 goto fail;
58 break;
59 default:
60 /* invalid key length */
61 goto fail;
62 break;
63 }
64
65 sctx->key_len = key_len;
66 memcpy(sctx->key, in_key, key_len);
67 return 0;
68fail:
69 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
70 return -EINVAL;
71}
72
73static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
74{
75 const struct s390_aes_ctx *sctx = ctx;
76
77 switch (sctx->key_len) {
78 case 16:
79 crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
80 AES_BLOCK_SIZE);
81 break;
82 case 24:
83 crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
84 AES_BLOCK_SIZE);
85 break;
86 case 32:
87 crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
88 AES_BLOCK_SIZE);
89 break;
90 }
91}
92
93static void aes_decrypt(void *ctx, u8 *out, const u8 *in)
94{
95 const struct s390_aes_ctx *sctx = ctx;
96
97 switch (sctx->key_len) {
98 case 16:
99 crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
100 AES_BLOCK_SIZE);
101 break;
102 case 24:
103 crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
104 AES_BLOCK_SIZE);
105 break;
106 case 32:
107 crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
108 AES_BLOCK_SIZE);
109 break;
110 }
111}
112
113static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
114 const u8 *in, unsigned int nbytes)
115{
116 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
117 int ret;
118
119 /* only use complete blocks */
120 nbytes &= ~(AES_BLOCK_SIZE - 1);
121
122 switch (sctx->key_len) {
123 case 16:
124 ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
125 BUG_ON((ret < 0) || (ret != nbytes));
126 break;
127 case 24:
128 ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
129 BUG_ON((ret < 0) || (ret != nbytes));
130 break;
131 case 32:
132 ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
133 BUG_ON((ret < 0) || (ret != nbytes));
134 break;
135 }
136 return nbytes;
137}
138
139static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
140 const u8 *in, unsigned int nbytes)
141{
142 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
143 int ret;
144
145 /* only use complete blocks */
146 nbytes &= ~(AES_BLOCK_SIZE - 1);
147
148 switch (sctx->key_len) {
149 case 16:
150 ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
151 BUG_ON((ret < 0) || (ret != nbytes));
152 break;
153 case 24:
154 ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
155 BUG_ON((ret < 0) || (ret != nbytes));
156 break;
157 case 32:
158 ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
159 BUG_ON((ret < 0) || (ret != nbytes));
160 break;
161 }
162 return nbytes;
163}
164
165static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
166 const u8 *in, unsigned int nbytes)
167{
168 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
169 int ret;
170
171 /* only use complete blocks */
172 nbytes &= ~(AES_BLOCK_SIZE - 1);
173
174 memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
175 switch (sctx->key_len) {
176 case 16:
177 ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
178 BUG_ON((ret < 0) || (ret != nbytes));
179 break;
180 case 24:
181 ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
182 BUG_ON((ret < 0) || (ret != nbytes));
183 break;
184 case 32:
185 ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
186 BUG_ON((ret < 0) || (ret != nbytes));
187 break;
188 }
189 memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
190
191 return nbytes;
192}
193
194static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
195 const u8 *in, unsigned int nbytes)
196{
197 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
198 int ret;
199
200 /* only use complete blocks */
201 nbytes &= ~(AES_BLOCK_SIZE - 1);
202
203 memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
204 switch (sctx->key_len) {
205 case 16:
206 ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
207 BUG_ON((ret < 0) || (ret != nbytes));
208 break;
209 case 24:
210 ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
211 BUG_ON((ret < 0) || (ret != nbytes));
212 break;
213 case 32:
214 ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
215 BUG_ON((ret < 0) || (ret != nbytes));
216 break;
217 }
218 return nbytes;
219}
220
221
222static struct crypto_alg aes_alg = {
223 .cra_name = "aes",
224 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
225 .cra_blocksize = AES_BLOCK_SIZE,
226 .cra_ctxsize = sizeof(struct s390_aes_ctx),
227 .cra_module = THIS_MODULE,
228 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
229 .cra_u = {
230 .cipher = {
231 .cia_min_keysize = AES_MIN_KEY_SIZE,
232 .cia_max_keysize = AES_MAX_KEY_SIZE,
233 .cia_setkey = aes_set_key,
234 .cia_encrypt = aes_encrypt,
235 .cia_decrypt = aes_decrypt,
236 .cia_encrypt_ecb = aes_encrypt_ecb,
237 .cia_decrypt_ecb = aes_decrypt_ecb,
238 .cia_encrypt_cbc = aes_encrypt_cbc,
239 .cia_decrypt_cbc = aes_decrypt_cbc,
240 }
241 }
242};
243
244static int __init aes_init(void)
245{
246 int ret;
247
248 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
249 has_aes_128 = 1;
250 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
251 has_aes_192 = 1;
252 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
253 has_aes_256 = 1;
254
255 if (!has_aes_128 && !has_aes_192 && !has_aes_256)
256 return -ENOSYS;
257
258 ret = crypto_register_alg(&aes_alg);
259 if (ret != 0)
260 printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
261 return ret;
262}
263
264static void __exit aes_fini(void)
265{
266 crypto_unregister_alg(&aes_alg);
267}
268
269module_init(aes_init);
270module_exit(aes_fini);
271
272MODULE_ALIAS("aes");
273
274MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
275MODULE_LICENSE("GPL");
276
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_s390.h
index 4df660b99e5a..d1c259a7fe33 100644
--- a/arch/s390/crypto/crypt_z990.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Support for z990 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -12,84 +12,108 @@
12 * any later version. 12 * any later version.
13 * 13 *
14 */ 14 */
15#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H 15#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
16#define _CRYPTO_ARCH_S390_CRYPT_Z990_H 16#define _CRYPTO_ARCH_S390_CRYPT_S390_H
17 17
18#include <asm/errno.h> 18#include <asm/errno.h>
19 19
20#define CRYPT_Z990_OP_MASK 0xFF00 20#define CRYPT_S390_OP_MASK 0xFF00
21#define CRYPT_Z990_FUNC_MASK 0x00FF 21#define CRYPT_S390_FUNC_MASK 0x00FF
22 22
23 23/* s930 cryptographic operations */
24/*z990 cryptographic operations*/ 24enum crypt_s390_operations {
25enum crypt_z990_operations { 25 CRYPT_S390_KM = 0x0100,
26 CRYPT_Z990_KM = 0x0100, 26 CRYPT_S390_KMC = 0x0200,
27 CRYPT_Z990_KMC = 0x0200, 27 CRYPT_S390_KIMD = 0x0300,
28 CRYPT_Z990_KIMD = 0x0300, 28 CRYPT_S390_KLMD = 0x0400,
29 CRYPT_Z990_KLMD = 0x0400, 29 CRYPT_S390_KMAC = 0x0500
30 CRYPT_Z990_KMAC = 0x0500
31}; 30};
32 31
33/*function codes for KM (CIPHER MESSAGE) instruction*/ 32/* function codes for KM (CIPHER MESSAGE) instruction
34enum crypt_z990_km_func { 33 * 0x80 is the decipher modifier bit
35 KM_QUERY = CRYPT_Z990_KM | 0, 34 */
36 KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1, 35enum crypt_s390_km_func {
37 KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher 36 KM_QUERY = CRYPT_S390_KM | 0x0,
38 KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2, 37 KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1,
39 KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80, 38 KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80,
40 KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3, 39 KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
41 KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80, 40 KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
41 KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
42 KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
43 KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
44 KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80,
45 KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13,
46 KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
47 KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
48 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
42}; 49};
43 50
44/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/ 51/* function codes for KMC (CIPHER MESSAGE WITH CHAINING)
45enum crypt_z990_kmc_func { 52 * instruction
46 KMC_QUERY = CRYPT_Z990_KMC | 0, 53 */
47 KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1, 54enum crypt_s390_kmc_func {
48 KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher 55 KMC_QUERY = CRYPT_S390_KMC | 0x0,
49 KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2, 56 KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1,
50 KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80, 57 KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80,
51 KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3, 58 KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
52 KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80, 59 KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
60 KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
61 KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
62 KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12,
63 KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80,
64 KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13,
65 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
66 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
67 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
53}; 68};
54 69
55/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/ 70/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
56enum crypt_z990_kimd_func { 71 * instruction
57 KIMD_QUERY = CRYPT_Z990_KIMD | 0, 72 */
58 KIMD_SHA_1 = CRYPT_Z990_KIMD | 1, 73enum crypt_s390_kimd_func {
74 KIMD_QUERY = CRYPT_S390_KIMD | 0,
75 KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
76 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
59}; 77};
60 78
61/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/ 79/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
62enum crypt_z990_klmd_func { 80 * instruction
63 KLMD_QUERY = CRYPT_Z990_KLMD | 0, 81 */
64 KLMD_SHA_1 = CRYPT_Z990_KLMD | 1, 82enum crypt_s390_klmd_func {
83 KLMD_QUERY = CRYPT_S390_KLMD | 0,
84 KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
85 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
65}; 86};
66 87
67/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/ 88/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
68enum crypt_z990_kmac_func { 89 * instruction
69 KMAC_QUERY = CRYPT_Z990_KMAC | 0, 90 */
70 KMAC_DEA = CRYPT_Z990_KMAC | 1, 91enum crypt_s390_kmac_func {
71 KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2, 92 KMAC_QUERY = CRYPT_S390_KMAC | 0,
72 KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3 93 KMAC_DEA = CRYPT_S390_KMAC | 1,
94 KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
95 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
73}; 96};
74 97
75/*status word for z990 crypto instructions' QUERY functions*/ 98/* status word for s390 crypto instructions' QUERY functions */
76struct crypt_z990_query_status { 99struct crypt_s390_query_status {
77 u64 high; 100 u64 high;
78 u64 low; 101 u64 low;
79}; 102};
80 103
81/* 104/*
82 * Standard fixup and ex_table sections for crypt_z990 inline functions. 105 * Standard fixup and ex_table sections for crypt_s390 inline functions.
83 * label 0: the z990 crypto operation 106 * label 0: the s390 crypto operation
84 * label 1: just after 1 to catch illegal operation exception on non-z990 107 * label 1: just after 1 to catch illegal operation exception
108 * (unsupported model)
85 * label 6: the return point after fixup 109 * label 6: the return point after fixup
86 * label 7: set error value if exception _in_ crypto operation 110 * label 7: set error value if exception _in_ crypto operation
87 * label 8: set error value if illegal operation exception 111 * label 8: set error value if illegal operation exception
88 * [ret] is the variable to receive the error code 112 * [ret] is the variable to receive the error code
89 * [ERR] is the error code value 113 * [ERR] is the error code value
90 */ 114 */
91#ifndef __s390x__ 115#ifndef CONFIG_64BIT
92#define __crypt_z990_fixup \ 116#define __crypt_s390_fixup \
93 ".section .fixup,\"ax\" \n" \ 117 ".section .fixup,\"ax\" \n" \
94 "7: lhi %0,%h[e1] \n" \ 118 "7: lhi %0,%h[e1] \n" \
95 " bras 1,9f \n" \ 119 " bras 1,9f \n" \
@@ -105,8 +129,8 @@ struct crypt_z990_query_status {
105 " .long 0b,7b \n" \ 129 " .long 0b,7b \n" \
106 " .long 1b,8b \n" \ 130 " .long 1b,8b \n" \
107 ".previous" 131 ".previous"
108#else /* __s390x__ */ 132#else /* CONFIG_64BIT */
109#define __crypt_z990_fixup \ 133#define __crypt_s390_fixup \
110 ".section .fixup,\"ax\" \n" \ 134 ".section .fixup,\"ax\" \n" \
111 "7: lhi %0,%h[e1] \n" \ 135 "7: lhi %0,%h[e1] \n" \
112 " jg 6b \n" \ 136 " jg 6b \n" \
@@ -118,25 +142,25 @@ struct crypt_z990_query_status {
118 " .quad 0b,7b \n" \ 142 " .quad 0b,7b \n" \
119 " .quad 1b,8b \n" \ 143 " .quad 1b,8b \n" \
120 ".previous" 144 ".previous"
121#endif /* __s390x__ */ 145#endif /* CONFIG_64BIT */
122 146
123/* 147/*
124 * Standard code for setting the result of z990 crypto instructions. 148 * Standard code for setting the result of s390 crypto instructions.
125 * %0: the register which will receive the result 149 * %0: the register which will receive the result
126 * [result]: the register containing the result (e.g. second operand length 150 * [result]: the register containing the result (e.g. second operand length
127 * to compute number of processed bytes]. 151 * to compute number of processed bytes].
128 */ 152 */
129#ifndef __s390x__ 153#ifndef CONFIG_64BIT
130#define __crypt_z990_set_result \ 154#define __crypt_s390_set_result \
131 " lr %0,%[result] \n" 155 " lr %0,%[result] \n"
132#else /* __s390x__ */ 156#else /* CONFIG_64BIT */
133#define __crypt_z990_set_result \ 157#define __crypt_s390_set_result \
134 " lgr %0,%[result] \n" 158 " lgr %0,%[result] \n"
135#endif 159#endif
136 160
137/* 161/*
138 * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU. 162 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
139 * @param func: the function code passed to KM; see crypt_z990_km_func 163 * @param func: the function code passed to KM; see crypt_s390_km_func
140 * @param param: address of parameter block; see POP for details on each func 164 * @param param: address of parameter block; see POP for details on each func
141 * @param dest: address of destination memory area 165 * @param dest: address of destination memory area
142 * @param src: address of source memory area 166 * @param src: address of source memory area
@@ -145,9 +169,9 @@ struct crypt_z990_query_status {
145 * for encryption/decryption funcs 169 * for encryption/decryption funcs
146 */ 170 */
147static inline int 171static inline int
148crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) 172crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
149{ 173{
150 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 174 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
151 register void* __param asm("1") = param; 175 register void* __param asm("1") = param;
152 register u8* __dest asm("4") = dest; 176 register u8* __dest asm("4") = dest;
153 register const u8* __src asm("2") = src; 177 register const u8* __src asm("2") = src;
@@ -156,26 +180,26 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
156 180
157 ret = 0; 181 ret = 0;
158 __asm__ __volatile__ ( 182 __asm__ __volatile__ (
159 "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode 183 "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
160 "1: brc 1,0b \n" //handle partial completion 184 "1: brc 1,0b \n" /* handle partial completion */
161 __crypt_z990_set_result 185 __crypt_s390_set_result
162 "6: \n" 186 "6: \n"
163 __crypt_z990_fixup 187 __crypt_s390_fixup
164 : "+d" (ret), "+a" (__dest), "+a" (__src), 188 : "+d" (ret), "+a" (__dest), "+a" (__src),
165 [result] "+d" (__src_len) 189 [result] "+d" (__src_len)
166 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 190 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
167 "a" (__param) 191 "a" (__param)
168 : "cc", "memory" 192 : "cc", "memory"
169 ); 193 );
170 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 194 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
171 ret = src_len - ret; 195 ret = src_len - ret;
172 } 196 }
173 return ret; 197 return ret;
174} 198}
175 199
176/* 200/*
177 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU. 201 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
178 * @param func: the function code passed to KM; see crypt_z990_kmc_func 202 * @param func: the function code passed to KM; see crypt_s390_kmc_func
179 * @param param: address of parameter block; see POP for details on each func 203 * @param param: address of parameter block; see POP for details on each func
180 * @param dest: address of destination memory area 204 * @param dest: address of destination memory area
181 * @param src: address of source memory area 205 * @param src: address of source memory area
@@ -184,9 +208,9 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
184 * for encryption/decryption funcs 208 * for encryption/decryption funcs
185 */ 209 */
186static inline int 210static inline int
187crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 211crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
188{ 212{
189 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 213 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
190 register void* __param asm("1") = param; 214 register void* __param asm("1") = param;
191 register u8* __dest asm("4") = dest; 215 register u8* __dest asm("4") = dest;
192 register const u8* __src asm("2") = src; 216 register const u8* __src asm("2") = src;
@@ -195,18 +219,18 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
195 219
196 ret = 0; 220 ret = 0;
197 __asm__ __volatile__ ( 221 __asm__ __volatile__ (
198 "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode 222 "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
199 "1: brc 1,0b \n" //handle partial completion 223 "1: brc 1,0b \n" /* handle partial completion */
200 __crypt_z990_set_result 224 __crypt_s390_set_result
201 "6: \n" 225 "6: \n"
202 __crypt_z990_fixup 226 __crypt_s390_fixup
203 : "+d" (ret), "+a" (__dest), "+a" (__src), 227 : "+d" (ret), "+a" (__dest), "+a" (__src),
204 [result] "+d" (__src_len) 228 [result] "+d" (__src_len)
205 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 229 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
206 "a" (__param) 230 "a" (__param)
207 : "cc", "memory" 231 : "cc", "memory"
208 ); 232 );
209 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 233 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
210 ret = src_len - ret; 234 ret = src_len - ret;
211 } 235 }
212 return ret; 236 return ret;
@@ -214,8 +238,8 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
214 238
215/* 239/*
216 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 240 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
217 * of the z990 CPU. 241 * of the CPU.
218 * @param func: the function code passed to KM; see crypt_z990_kimd_func 242 * @param func: the function code passed to KM; see crypt_s390_kimd_func
219 * @param param: address of parameter block; see POP for details on each func 243 * @param param: address of parameter block; see POP for details on each func
220 * @param src: address of source memory area 244 * @param src: address of source memory area
221 * @param src_len: length of src operand in bytes 245 * @param src_len: length of src operand in bytes
@@ -223,9 +247,9 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
223 * for digest funcs 247 * for digest funcs
224 */ 248 */
225static inline int 249static inline int
226crypt_z990_kimd(long func, void* param, const u8* src, long src_len) 250crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
227{ 251{
228 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 252 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
229 register void* __param asm("1") = param; 253 register void* __param asm("1") = param;
230 register const u8* __src asm("2") = src; 254 register const u8* __src asm("2") = src;
231 register long __src_len asm("3") = src_len; 255 register long __src_len asm("3") = src_len;
@@ -233,25 +257,25 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
233 257
234 ret = 0; 258 ret = 0;
235 __asm__ __volatile__ ( 259 __asm__ __volatile__ (
236 "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode 260 "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
237 "1: brc 1,0b \n" /*handle partical completion of kimd*/ 261 "1: brc 1,0b \n" /* handle partical completion */
238 __crypt_z990_set_result 262 __crypt_s390_set_result
239 "6: \n" 263 "6: \n"
240 __crypt_z990_fixup 264 __crypt_s390_fixup
241 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 265 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
242 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 266 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
243 "a" (__param) 267 "a" (__param)
244 : "cc", "memory" 268 : "cc", "memory"
245 ); 269 );
246 if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){ 270 if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
247 ret = src_len - ret; 271 ret = src_len - ret;
248 } 272 }
249 return ret; 273 return ret;
250} 274}
251 275
252/* 276/*
253 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU. 277 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
254 * @param func: the function code passed to KM; see crypt_z990_klmd_func 278 * @param func: the function code passed to KM; see crypt_s390_klmd_func
255 * @param param: address of parameter block; see POP for details on each func 279 * @param param: address of parameter block; see POP for details on each func
256 * @param src: address of source memory area 280 * @param src: address of source memory area
257 * @param src_len: length of src operand in bytes 281 * @param src_len: length of src operand in bytes
@@ -259,9 +283,9 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
259 * for digest funcs 283 * for digest funcs
260 */ 284 */
261static inline int 285static inline int
262crypt_z990_klmd(long func, void* param, const u8* src, long src_len) 286crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
263{ 287{
264 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 288 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
265 register void* __param asm("1") = param; 289 register void* __param asm("1") = param;
266 register const u8* __src asm("2") = src; 290 register const u8* __src asm("2") = src;
267 register long __src_len asm("3") = src_len; 291 register long __src_len asm("3") = src_len;
@@ -269,17 +293,17 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
269 293
270 ret = 0; 294 ret = 0;
271 __asm__ __volatile__ ( 295 __asm__ __volatile__ (
272 "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode 296 "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
273 "1: brc 1,0b \n" /*handle partical completion of klmd*/ 297 "1: brc 1,0b \n" /* handle partical completion */
274 __crypt_z990_set_result 298 __crypt_s390_set_result
275 "6: \n" 299 "6: \n"
276 __crypt_z990_fixup 300 __crypt_s390_fixup
277 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 301 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
278 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 302 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
279 "a" (__param) 303 "a" (__param)
280 : "cc", "memory" 304 : "cc", "memory"
281 ); 305 );
282 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 306 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
283 ret = src_len - ret; 307 ret = src_len - ret;
284 } 308 }
285 return ret; 309 return ret;
@@ -287,8 +311,8 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
287 311
288/* 312/*
289 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 313 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
290 * of the z990 CPU. 314 * of the CPU.
291 * @param func: the function code passed to KM; see crypt_z990_klmd_func 315 * @param func: the function code passed to KM; see crypt_s390_klmd_func
292 * @param param: address of parameter block; see POP for details on each func 316 * @param param: address of parameter block; see POP for details on each func
293 * @param src: address of source memory area 317 * @param src: address of source memory area
294 * @param src_len: length of src operand in bytes 318 * @param src_len: length of src operand in bytes
@@ -296,9 +320,9 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
296 * for digest funcs 320 * for digest funcs
297 */ 321 */
298static inline int 322static inline int
299crypt_z990_kmac(long func, void* param, const u8* src, long src_len) 323crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
300{ 324{
301 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 325 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
302 register void* __param asm("1") = param; 326 register void* __param asm("1") = param;
303 register const u8* __src asm("2") = src; 327 register const u8* __src asm("2") = src;
304 register long __src_len asm("3") = src_len; 328 register long __src_len asm("3") = src_len;
@@ -306,58 +330,58 @@ crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
306 330
307 ret = 0; 331 ret = 0;
308 __asm__ __volatile__ ( 332 __asm__ __volatile__ (
309 "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode 333 "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
310 "1: brc 1,0b \n" /*handle partical completion of klmd*/ 334 "1: brc 1,0b \n" /* handle partical completion */
311 __crypt_z990_set_result 335 __crypt_s390_set_result
312 "6: \n" 336 "6: \n"
313 __crypt_z990_fixup 337 __crypt_s390_fixup
314 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 338 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
315 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 339 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
316 "a" (__param) 340 "a" (__param)
317 : "cc", "memory" 341 : "cc", "memory"
318 ); 342 );
319 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 343 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
320 ret = src_len - ret; 344 ret = src_len - ret;
321 } 345 }
322 return ret; 346 return ret;
323} 347}
324 348
325/** 349/**
326 * Tests if a specific z990 crypto function is implemented on the machine. 350 * Tests if a specific crypto function is implemented on the machine.
327 * @param func: the function code of the specific function; 0 if op in general 351 * @param func: the function code of the specific function; 0 if op in general
328 * @return 1 if func available; 0 if func or op in general not available 352 * @return 1 if func available; 0 if func or op in general not available
329 */ 353 */
330static inline int 354static inline int
331crypt_z990_func_available(int func) 355crypt_s390_func_available(int func)
332{ 356{
333 int ret; 357 int ret;
334 358
335 struct crypt_z990_query_status status = { 359 struct crypt_s390_query_status status = {
336 .high = 0, 360 .high = 0,
337 .low = 0 361 .low = 0
338 }; 362 };
339 switch (func & CRYPT_Z990_OP_MASK){ 363 switch (func & CRYPT_S390_OP_MASK){
340 case CRYPT_Z990_KM: 364 case CRYPT_S390_KM:
341 ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); 365 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
342 break; 366 break;
343 case CRYPT_Z990_KMC: 367 case CRYPT_S390_KMC:
344 ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); 368 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
345 break; 369 break;
346 case CRYPT_Z990_KIMD: 370 case CRYPT_S390_KIMD:
347 ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); 371 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
348 break; 372 break;
349 case CRYPT_Z990_KLMD: 373 case CRYPT_S390_KLMD:
350 ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); 374 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
351 break; 375 break;
352 case CRYPT_Z990_KMAC: 376 case CRYPT_S390_KMAC:
353 ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); 377 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
354 break; 378 break;
355 default: 379 default:
356 ret = 0; 380 ret = 0;
357 return ret; 381 return ret;
358 } 382 }
359 if (ret >= 0){ 383 if (ret >= 0){
360 func &= CRYPT_Z990_FUNC_MASK; 384 func &= CRYPT_S390_FUNC_MASK;
361 func &= 0x7f; //mask modifier bit 385 func &= 0x7f; //mask modifier bit
362 if (func < 64){ 386 if (func < 64){
363 ret = (status.high >> (64 - func - 1)) & 0x1; 387 ret = (status.high >> (64 - func - 1)) & 0x1;
@@ -370,5 +394,4 @@ crypt_z990_func_available(int func)
370 return ret; 394 return ret;
371} 395}
372 396
373 397#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H
374#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
new file mode 100644
index 000000000000..def02bdc44a4
--- /dev/null
+++ b/arch/s390/crypto/crypt_s390_query.c
@@ -0,0 +1,129 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
58 /* query available KIMD fucntions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
85static int init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c
deleted file mode 100644
index 7133983d1384..000000000000
--- a/arch/s390/crypto/crypt_z990_query.c
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for z990 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_z990.h"
21
22static void
23query_available_functions(void)
24{
25 printk(KERN_INFO "#####################\n");
26 //query available KM functions
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_z990_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_z990_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_z990_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_z990_func_available(KM_TDEA_192_ENCRYPT));
35 //query available KMC functions
36 printk(KERN_INFO "KMC_QUERY: %d\n",
37 crypt_z990_func_available(KMC_QUERY));
38 printk(KERN_INFO "KMC_DEA: %d\n",
39 crypt_z990_func_available(KMC_DEA_ENCRYPT));
40 printk(KERN_INFO "KMC_TDEA_128: %d\n",
41 crypt_z990_func_available(KMC_TDEA_128_ENCRYPT));
42 printk(KERN_INFO "KMC_TDEA_192: %d\n",
43 crypt_z990_func_available(KMC_TDEA_192_ENCRYPT));
44 //query available KIMD fucntions
45 printk(KERN_INFO "KIMD_QUERY: %d\n",
46 crypt_z990_func_available(KIMD_QUERY));
47 printk(KERN_INFO "KIMD_SHA_1: %d\n",
48 crypt_z990_func_available(KIMD_SHA_1));
49 //query available KLMD functions
50 printk(KERN_INFO "KLMD_QUERY: %d\n",
51 crypt_z990_func_available(KLMD_QUERY));
52 printk(KERN_INFO "KLMD_SHA_1: %d\n",
53 crypt_z990_func_available(KLMD_SHA_1));
54 //query available KMAC functions
55 printk(KERN_INFO "KMAC_QUERY: %d\n",
56 crypt_z990_func_available(KMAC_QUERY));
57 printk(KERN_INFO "KMAC_DEA: %d\n",
58 crypt_z990_func_available(KMAC_DEA));
59 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
60 crypt_z990_func_available(KMAC_TDEA_128));
61 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
62 crypt_z990_func_available(KMAC_TDEA_192));
63}
64
65static int
66init(void)
67{
68 struct crypt_z990_query_status status = {
69 .high = 0,
70 .low = 0
71 };
72
73 printk(KERN_INFO "crypt_z990: querying available crypto functions\n");
74 crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
75 printk(KERN_INFO "KM: %016llx %016llx\n",
76 (unsigned long long) status.high,
77 (unsigned long long) status.low);
78 status.high = status.low = 0;
79 crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
80 printk(KERN_INFO "KMC: %016llx %016llx\n",
81 (unsigned long long) status.high,
82 (unsigned long long) status.low);
83 status.high = status.low = 0;
84 crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
85 printk(KERN_INFO "KIMD: %016llx %016llx\n",
86 (unsigned long long) status.high,
87 (unsigned long long) status.low);
88 status.high = status.low = 0;
89 crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
90 printk(KERN_INFO "KLMD: %016llx %016llx\n",
91 (unsigned long long) status.high,
92 (unsigned long long) status.low);
93 status.high = status.low = 0;
94 crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
95 printk(KERN_INFO "KMAC: %016llx %016llx\n",
96 (unsigned long long) status.high,
97 (unsigned long long) status.low);
98
99 query_available_functions();
100 return -1;
101}
102
103static void __exit
104cleanup(void)
105{
106}
107
108module_init(init);
109module_exit(cleanup);
110
111MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
new file mode 100644
index 000000000000..e3c37aa0a199
--- /dev/null
+++ b/arch/s390/crypto/des_s390.c
@@ -0,0 +1,466 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the DES Cipher Algorithm.
5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/crypto.h>
19
20#include "crypt_s390.h"
21#include "crypto_des.h"
22
23#define DES_BLOCK_SIZE 8
24#define DES_KEY_SIZE 8
25
26#define DES3_128_KEY_SIZE (2 * DES_KEY_SIZE)
27#define DES3_128_BLOCK_SIZE DES_BLOCK_SIZE
28
29#define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
30#define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE
31
32struct crypt_s390_des_ctx {
33 u8 iv[DES_BLOCK_SIZE];
34 u8 key[DES_KEY_SIZE];
35};
36
37struct crypt_s390_des3_128_ctx {
38 u8 iv[DES_BLOCK_SIZE];
39 u8 key[DES3_128_KEY_SIZE];
40};
41
42struct crypt_s390_des3_192_ctx {
43 u8 iv[DES_BLOCK_SIZE];
44 u8 key[DES3_192_KEY_SIZE];
45};
46
47static int des_setkey(void *ctx, const u8 *key, unsigned int keylen,
48 u32 *flags)
49{
50 struct crypt_s390_des_ctx *dctx = ctx;
51 int ret;
52
53 /* test if key is valid (not a weak key) */
54 ret = crypto_des_check_key(key, keylen, flags);
55 if (ret == 0)
56 memcpy(dctx->key, key, keylen);
57 return ret;
58}
59
60static void des_encrypt(void *ctx, u8 *out, const u8 *in)
61{
62 struct crypt_s390_des_ctx *dctx = ctx;
63
64 crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
65}
66
67static void des_decrypt(void *ctx, u8 *out, const u8 *in)
68{
69 struct crypt_s390_des_ctx *dctx = ctx;
70
71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
72}
73
74static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
75 const u8 *in, unsigned int nbytes)
76{
77 struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
78 int ret;
79
80 /* only use complete blocks */
81 nbytes &= ~(DES_BLOCK_SIZE - 1);
82 ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes);
83 BUG_ON((ret < 0) || (ret != nbytes));
84
85 return nbytes;
86}
87
88static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
89 const u8 *in, unsigned int nbytes)
90{
91 struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
92 int ret;
93
94 /* only use complete blocks */
95 nbytes &= ~(DES_BLOCK_SIZE - 1);
96 ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes);
97 BUG_ON((ret < 0) || (ret != nbytes));
98
99 return nbytes;
100}
101
102static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
103 const u8 *in, unsigned int nbytes)
104{
105 struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
106 int ret;
107
108 /* only use complete blocks */
109 nbytes &= ~(DES_BLOCK_SIZE - 1);
110
111 memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE);
112 ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes);
113 BUG_ON((ret < 0) || (ret != nbytes));
114
115 memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE);
116 return nbytes;
117}
118
119static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
120 const u8 *in, unsigned int nbytes)
121{
122 struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
123 int ret;
124
125 /* only use complete blocks */
126 nbytes &= ~(DES_BLOCK_SIZE - 1);
127
128 memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE);
129 ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes);
130 BUG_ON((ret < 0) || (ret != nbytes));
131
132 return nbytes;
133}
134
135static struct crypto_alg des_alg = {
136 .cra_name = "des",
137 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
138 .cra_blocksize = DES_BLOCK_SIZE,
139 .cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
140 .cra_module = THIS_MODULE,
141 .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
142 .cra_u = {
143 .cipher = {
144 .cia_min_keysize = DES_KEY_SIZE,
145 .cia_max_keysize = DES_KEY_SIZE,
146 .cia_setkey = des_setkey,
147 .cia_encrypt = des_encrypt,
148 .cia_decrypt = des_decrypt,
149 .cia_encrypt_ecb = des_encrypt_ecb,
150 .cia_decrypt_ecb = des_decrypt_ecb,
151 .cia_encrypt_cbc = des_encrypt_cbc,
152 .cia_decrypt_cbc = des_decrypt_cbc,
153 }
154 }
155};
156
157/*
158 * RFC2451:
159 *
160 * For DES-EDE3, there is no known need to reject weak or
161 * complementation keys. Any weakness is obviated by the use of
162 * multiple keys.
163 *
164 * However, if the two independent 64-bit keys are equal,
165 * then the DES3 operation is simply the same as DES.
166 * Implementers MUST reject keys that exhibit this property.
167 *
168 */
169static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen,
170 u32 *flags)
171{
172 int i, ret;
173 struct crypt_s390_des3_128_ctx *dctx = ctx;
174 const u8* temp_key = key;
175
176 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
177 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
178 return -EINVAL;
179 }
180 for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
181 ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
182 if (ret < 0)
183 return ret;
184 }
185 memcpy(dctx->key, key, keylen);
186 return 0;
187}
188
189static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
190{
191 struct crypt_s390_des3_128_ctx *dctx = ctx;
192
193 crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
194 DES3_128_BLOCK_SIZE);
195}
196
197static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
198{
199 struct crypt_s390_des3_128_ctx *dctx = ctx;
200
201 crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
202 DES3_128_BLOCK_SIZE);
203}
204
205static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc,
206 u8 *out, const u8 *in,
207 unsigned int nbytes)
208{
209 struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
210 int ret;
211
212 /* only use complete blocks */
213 nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
214 ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes);
215 BUG_ON((ret < 0) || (ret != nbytes));
216
217 return nbytes;
218}
219
220static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc,
221 u8 *out, const u8 *in,
222 unsigned int nbytes)
223{
224 struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
225 int ret;
226
227 /* only use complete blocks */
228 nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
229 ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes);
230 BUG_ON((ret < 0) || (ret != nbytes));
231
232 return nbytes;
233}
234
235static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc,
236 u8 *out, const u8 *in,
237 unsigned int nbytes)
238{
239 struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
240 int ret;
241
242 /* only use complete blocks */
243 nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
244
245 memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
246 ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes);
247 BUG_ON((ret < 0) || (ret != nbytes));
248
249 memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE);
250 return nbytes;
251}
252
253static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc,
254 u8 *out, const u8 *in,
255 unsigned int nbytes)
256{
257 struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
258 int ret;
259
260 /* only use complete blocks */
261 nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
262
263 memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
264 ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes);
265 BUG_ON((ret < 0) || (ret != nbytes));
266
267 return nbytes;
268}
269
270static struct crypto_alg des3_128_alg = {
271 .cra_name = "des3_ede128",
272 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
273 .cra_blocksize = DES3_128_BLOCK_SIZE,
274 .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
275 .cra_module = THIS_MODULE,
276 .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
277 .cra_u = {
278 .cipher = {
279 .cia_min_keysize = DES3_128_KEY_SIZE,
280 .cia_max_keysize = DES3_128_KEY_SIZE,
281 .cia_setkey = des3_128_setkey,
282 .cia_encrypt = des3_128_encrypt,
283 .cia_decrypt = des3_128_decrypt,
284 .cia_encrypt_ecb = des3_128_encrypt_ecb,
285 .cia_decrypt_ecb = des3_128_decrypt_ecb,
286 .cia_encrypt_cbc = des3_128_encrypt_cbc,
287 .cia_decrypt_cbc = des3_128_decrypt_cbc,
288 }
289 }
290};
291
292/*
293 * RFC2451:
294 *
295 * For DES-EDE3, there is no known need to reject weak or
296 * complementation keys. Any weakness is obviated by the use of
297 * multiple keys.
298 *
299 * However, if the first two or last two independent 64-bit keys are
300 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
301 * same as DES. Implementers MUST reject keys that exhibit this
302 * property.
303 *
304 */
305static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen,
306 u32 *flags)
307{
308 int i, ret;
309 struct crypt_s390_des3_192_ctx *dctx = ctx;
310 const u8* temp_key = key;
311
312 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
313 memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
314 DES_KEY_SIZE))) {
315
316 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
317 return -EINVAL;
318 }
319 for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
320 ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
321 if (ret < 0)
322 return ret;
323 }
324 memcpy(dctx->key, key, keylen);
325 return 0;
326}
327
328static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
329{
330 struct crypt_s390_des3_192_ctx *dctx = ctx;
331
332 crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
333 DES3_192_BLOCK_SIZE);
334}
335
336static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
337{
338 struct crypt_s390_des3_192_ctx *dctx = ctx;
339
340 crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
341 DES3_192_BLOCK_SIZE);
342}
343
344static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc,
345 u8 *out, const u8 *in,
346 unsigned int nbytes)
347{
348 struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
349 int ret;
350
351 /* only use complete blocks */
352 nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
353 ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes);
354 BUG_ON((ret < 0) || (ret != nbytes));
355
356 return nbytes;
357}
358
359static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc,
360 u8 *out, const u8 *in,
361 unsigned int nbytes)
362{
363 struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
364 int ret;
365
366 /* only use complete blocks */
367 nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
368 ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes);
369 BUG_ON((ret < 0) || (ret != nbytes));
370
371 return nbytes;
372}
373
374static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc,
375 u8 *out, const u8 *in,
376 unsigned int nbytes)
377{
378 struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
379 int ret;
380
381 /* only use complete blocks */
382 nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
383
384 memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
385 ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes);
386 BUG_ON((ret < 0) || (ret != nbytes));
387
388 memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE);
389 return nbytes;
390}
391
392static unsigned int des3_192_decrypt_cbc(const struct cipher_desc *desc,
393 u8 *out, const u8 *in,
394 unsigned int nbytes)
395{
396 struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
397 int ret;
398
399 /* only use complete blocks */
400 nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
401
402 memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
403 ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes);
404 BUG_ON((ret < 0) || (ret != nbytes));
405
406 return nbytes;
407}
408
409static struct crypto_alg des3_192_alg = {
410 .cra_name = "des3_ede",
411 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
412 .cra_blocksize = DES3_192_BLOCK_SIZE,
413 .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
414 .cra_module = THIS_MODULE,
415 .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
416 .cra_u = {
417 .cipher = {
418 .cia_min_keysize = DES3_192_KEY_SIZE,
419 .cia_max_keysize = DES3_192_KEY_SIZE,
420 .cia_setkey = des3_192_setkey,
421 .cia_encrypt = des3_192_encrypt,
422 .cia_decrypt = des3_192_decrypt,
423 .cia_encrypt_ecb = des3_192_encrypt_ecb,
424 .cia_decrypt_ecb = des3_192_decrypt_ecb,
425 .cia_encrypt_cbc = des3_192_encrypt_cbc,
426 .cia_decrypt_cbc = des3_192_decrypt_cbc,
427 }
428 }
429};
430
431static int init(void)
432{
433 int ret = 0;
434
435 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
436 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
437 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
438 return -ENOSYS;
439
440 ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1;
441 ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2;
442 ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 0:4;
443 if (ret) {
444 crypto_unregister_alg(&des3_192_alg);
445 crypto_unregister_alg(&des3_128_alg);
446 crypto_unregister_alg(&des_alg);
447 return -EEXIST;
448 }
449 return 0;
450}
451
452static void __exit fini(void)
453{
454 crypto_unregister_alg(&des3_192_alg);
455 crypto_unregister_alg(&des3_128_alg);
456 crypto_unregister_alg(&des_alg);
457}
458
459module_init(init);
460module_exit(fini);
461
462MODULE_ALIAS("des");
463MODULE_ALIAS("des3_ede");
464
465MODULE_LICENSE("GPL");
466MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_z990.c
deleted file mode 100644
index 813cf37b1177..000000000000
--- a/arch/s390/crypto/des_z990.c
+++ /dev/null
@@ -1,284 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * z990 implementation of the DES Cipher Algorithm.
5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/errno.h>
20#include <asm/scatterlist.h>
21#include <linux/crypto.h>
22#include "crypt_z990.h"
23#include "crypto_des.h"
24
25#define DES_BLOCK_SIZE 8
26#define DES_KEY_SIZE 8
27
28#define DES3_128_KEY_SIZE (2 * DES_KEY_SIZE)
29#define DES3_128_BLOCK_SIZE DES_BLOCK_SIZE
30
31#define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
32#define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE
33
34struct crypt_z990_des_ctx {
35 u8 iv[DES_BLOCK_SIZE];
36 u8 key[DES_KEY_SIZE];
37};
38
39struct crypt_z990_des3_128_ctx {
40 u8 iv[DES_BLOCK_SIZE];
41 u8 key[DES3_128_KEY_SIZE];
42};
43
44struct crypt_z990_des3_192_ctx {
45 u8 iv[DES_BLOCK_SIZE];
46 u8 key[DES3_192_KEY_SIZE];
47};
48
49static int
50des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
51{
52 struct crypt_z990_des_ctx *dctx;
53 int ret;
54
55 dctx = ctx;
56 //test if key is valid (not a weak key)
57 ret = crypto_des_check_key(key, keylen, flags);
58 if (ret == 0){
59 memcpy(dctx->key, key, keylen);
60 }
61 return ret;
62}
63
64
65static void
66des_encrypt(void *ctx, u8 *dst, const u8 *src)
67{
68 struct crypt_z990_des_ctx *dctx;
69
70 dctx = ctx;
71 crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
72}
73
74static void
75des_decrypt(void *ctx, u8 *dst, const u8 *src)
76{
77 struct crypt_z990_des_ctx *dctx;
78
79 dctx = ctx;
80 crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
81}
82
83static struct crypto_alg des_alg = {
84 .cra_name = "des",
85 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
86 .cra_blocksize = DES_BLOCK_SIZE,
87 .cra_ctxsize = sizeof(struct crypt_z990_des_ctx),
88 .cra_module = THIS_MODULE,
89 .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
90 .cra_u = { .cipher = {
91 .cia_min_keysize = DES_KEY_SIZE,
92 .cia_max_keysize = DES_KEY_SIZE,
93 .cia_setkey = des_setkey,
94 .cia_encrypt = des_encrypt,
95 .cia_decrypt = des_decrypt } }
96};
97
98/*
99 * RFC2451:
100 *
101 * For DES-EDE3, there is no known need to reject weak or
102 * complementation keys. Any weakness is obviated by the use of
103 * multiple keys.
104 *
105 * However, if the two independent 64-bit keys are equal,
106 * then the DES3 operation is simply the same as DES.
107 * Implementers MUST reject keys that exhibit this property.
108 *
109 */
110static int
111des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
112{
113 int i, ret;
114 struct crypt_z990_des3_128_ctx *dctx;
115 const u8* temp_key = key;
116
117 dctx = ctx;
118 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
119
120 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
121 return -EINVAL;
122 }
123 for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
124 ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
125 if (ret < 0)
126 return ret;
127 }
128 memcpy(dctx->key, key, keylen);
129 return 0;
130}
131
132static void
133des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
134{
135 struct crypt_z990_des3_128_ctx *dctx;
136
137 dctx = ctx;
138 crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
139 DES3_128_BLOCK_SIZE);
140}
141
142static void
143des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
144{
145 struct crypt_z990_des3_128_ctx *dctx;
146
147 dctx = ctx;
148 crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
149 DES3_128_BLOCK_SIZE);
150}
151
152static struct crypto_alg des3_128_alg = {
153 .cra_name = "des3_ede128",
154 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
155 .cra_blocksize = DES3_128_BLOCK_SIZE,
156 .cra_ctxsize = sizeof(struct crypt_z990_des3_128_ctx),
157 .cra_module = THIS_MODULE,
158 .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
159 .cra_u = { .cipher = {
160 .cia_min_keysize = DES3_128_KEY_SIZE,
161 .cia_max_keysize = DES3_128_KEY_SIZE,
162 .cia_setkey = des3_128_setkey,
163 .cia_encrypt = des3_128_encrypt,
164 .cia_decrypt = des3_128_decrypt } }
165};
166
167/*
168 * RFC2451:
169 *
170 * For DES-EDE3, there is no known need to reject weak or
171 * complementation keys. Any weakness is obviated by the use of
172 * multiple keys.
173 *
174 * However, if the first two or last two independent 64-bit keys are
175 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
176 * same as DES. Implementers MUST reject keys that exhibit this
177 * property.
178 *
179 */
180static int
181des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
182{
183 int i, ret;
184 struct crypt_z990_des3_192_ctx *dctx;
185 const u8* temp_key;
186
187 dctx = ctx;
188 temp_key = key;
189 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
190 memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
191 DES_KEY_SIZE))) {
192
193 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
194 return -EINVAL;
195 }
196 for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
197 ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
198 if (ret < 0){
199 return ret;
200 }
201 }
202 memcpy(dctx->key, key, keylen);
203 return 0;
204}
205
206static void
207des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
208{
209 struct crypt_z990_des3_192_ctx *dctx;
210
211 dctx = ctx;
212 crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
213 DES3_192_BLOCK_SIZE);
214}
215
216static void
217des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
218{
219 struct crypt_z990_des3_192_ctx *dctx;
220
221 dctx = ctx;
222 crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
223 DES3_192_BLOCK_SIZE);
224}
225
226static struct crypto_alg des3_192_alg = {
227 .cra_name = "des3_ede",
228 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
229 .cra_blocksize = DES3_192_BLOCK_SIZE,
230 .cra_ctxsize = sizeof(struct crypt_z990_des3_192_ctx),
231 .cra_module = THIS_MODULE,
232 .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
233 .cra_u = { .cipher = {
234 .cia_min_keysize = DES3_192_KEY_SIZE,
235 .cia_max_keysize = DES3_192_KEY_SIZE,
236 .cia_setkey = des3_192_setkey,
237 .cia_encrypt = des3_192_encrypt,
238 .cia_decrypt = des3_192_decrypt } }
239};
240
241
242
243static int
244init(void)
245{
246 int ret;
247
248 if (!crypt_z990_func_available(KM_DEA_ENCRYPT) ||
249 !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) ||
250 !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){
251 return -ENOSYS;
252 }
253
254 ret = 0;
255 ret |= (crypto_register_alg(&des_alg) == 0)? 0:1;
256 ret |= (crypto_register_alg(&des3_128_alg) == 0)? 0:2;
257 ret |= (crypto_register_alg(&des3_192_alg) == 0)? 0:4;
258 if (ret){
259 crypto_unregister_alg(&des3_192_alg);
260 crypto_unregister_alg(&des3_128_alg);
261 crypto_unregister_alg(&des_alg);
262 return -EEXIST;
263 }
264
265 printk(KERN_INFO "crypt_z990: des_z990 loaded.\n");
266 return 0;
267}
268
269static void __exit
270fini(void)
271{
272 crypto_unregister_alg(&des3_192_alg);
273 crypto_unregister_alg(&des3_128_alg);
274 crypto_unregister_alg(&des_alg);
275}
276
277module_init(init);
278module_exit(fini);
279
280MODULE_ALIAS("des");
281MODULE_ALIAS("des3_ede");
282
283MODULE_LICENSE("GPL");
284MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_s390.c
index 298174ddf5b1..98c896b86dcd 100644
--- a/arch/s390/crypto/sha1_z990.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * z990 implementation of the SHA1 Secure Hash Algorithm. 4 * s390 implementation of the SHA1 Secure Hash Algorithm.
5 * 5 *
6 * Derived from cryptoapi implementation, adapted for in-place 6 * Derived from cryptoapi implementation, adapted for in-place
7 * scatterlist interface. Originally based on the public domain 7 * scatterlist interface. Originally based on the public domain
@@ -28,22 +28,22 @@
28#include <linux/crypto.h> 28#include <linux/crypto.h>
29#include <asm/scatterlist.h> 29#include <asm/scatterlist.h>
30#include <asm/byteorder.h> 30#include <asm/byteorder.h>
31#include "crypt_z990.h" 31#include "crypt_s390.h"
32 32
33#define SHA1_DIGEST_SIZE 20 33#define SHA1_DIGEST_SIZE 20
34#define SHA1_BLOCK_SIZE 64 34#define SHA1_BLOCK_SIZE 64
35 35
36struct crypt_z990_sha1_ctx { 36struct crypt_s390_sha1_ctx {
37 u64 count; 37 u64 count;
38 u32 state[5]; 38 u32 state[5];
39 u32 buf_len; 39 u32 buf_len;
40 u8 buffer[2 * SHA1_BLOCK_SIZE]; 40 u8 buffer[2 * SHA1_BLOCK_SIZE];
41}; 41};
42 42
43static void 43static void
44sha1_init(void *ctx) 44sha1_init(void *ctx)
45{ 45{
46 static const struct crypt_z990_sha1_ctx initstate = { 46 static const struct crypt_s390_sha1_ctx initstate = {
47 .state = { 47 .state = {
48 0x67452301, 48 0x67452301,
49 0xEFCDAB89, 49 0xEFCDAB89,
@@ -58,7 +58,7 @@ sha1_init(void *ctx)
58static void 58static void
59sha1_update(void *ctx, const u8 *data, unsigned int len) 59sha1_update(void *ctx, const u8 *data, unsigned int len)
60{ 60{
61 struct crypt_z990_sha1_ctx *sctx; 61 struct crypt_s390_sha1_ctx *sctx;
62 long imd_len; 62 long imd_len;
63 63
64 sctx = ctx; 64 sctx = ctx;
@@ -69,7 +69,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
69 //complete full block and hash 69 //complete full block and hash
70 memcpy(sctx->buffer + sctx->buf_len, data, 70 memcpy(sctx->buffer + sctx->buf_len, data,
71 SHA1_BLOCK_SIZE - sctx->buf_len); 71 SHA1_BLOCK_SIZE - sctx->buf_len);
72 crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
73 SHA1_BLOCK_SIZE); 73 SHA1_BLOCK_SIZE);
74 data += SHA1_BLOCK_SIZE - sctx->buf_len; 74 data += SHA1_BLOCK_SIZE - sctx->buf_len;
75 len -= SHA1_BLOCK_SIZE - sctx->buf_len; 75 len -= SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -79,7 +79,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
79 //rest of data contains full blocks? 79 //rest of data contains full blocks?
80 imd_len = len & ~0x3ful; 80 imd_len = len & ~0x3ful;
81 if (imd_len){ 81 if (imd_len){
82 crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
83 data += imd_len; 83 data += imd_len;
84 len -= imd_len; 84 len -= imd_len;
85 } 85 }
@@ -92,7 +92,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
92 92
93 93
94static void 94static void
95pad_message(struct crypt_z990_sha1_ctx* sctx) 95pad_message(struct crypt_s390_sha1_ctx* sctx)
96{ 96{
97 int index; 97 int index;
98 98
@@ -113,11 +113,11 @@ pad_message(struct crypt_z990_sha1_ctx* sctx)
113static void 113static void
114sha1_final(void* ctx, u8 *out) 114sha1_final(void* ctx, u8 *out)
115{ 115{
116 struct crypt_z990_sha1_ctx *sctx = ctx; 116 struct crypt_s390_sha1_ctx *sctx = ctx;
117 117
118 //must perform manual padding 118 //must perform manual padding
119 pad_message(sctx); 119 pad_message(sctx);
120 crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); 120 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
121 //copy digest to out 121 //copy digest to out
122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
123 /* Wipe context */ 123 /* Wipe context */
@@ -128,7 +128,7 @@ static struct crypto_alg alg = {
128 .cra_name = "sha1", 128 .cra_name = "sha1",
129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
130 .cra_blocksize = SHA1_BLOCK_SIZE, 130 .cra_blocksize = SHA1_BLOCK_SIZE,
131 .cra_ctxsize = sizeof(struct crypt_z990_sha1_ctx), 131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
132 .cra_module = THIS_MODULE, 132 .cra_module = THIS_MODULE,
133 .cra_list = LIST_HEAD_INIT(alg.cra_list), 133 .cra_list = LIST_HEAD_INIT(alg.cra_list),
134 .cra_u = { .digest = { 134 .cra_u = { .digest = {
@@ -143,10 +143,10 @@ init(void)
143{ 143{
144 int ret = -ENOSYS; 144 int ret = -ENOSYS;
145 145
146 if (crypt_z990_func_available(KIMD_SHA_1)){ 146 if (crypt_s390_func_available(KIMD_SHA_1)){
147 ret = crypto_register_alg(&alg); 147 ret = crypto_register_alg(&alg);
148 if (ret == 0){ 148 if (ret == 0){
149 printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n"); 149 printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
150 } 150 }
151 } 151 }
152 return ret; 152 return ret;
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
new file mode 100644
index 000000000000..1ec5e92b3454
--- /dev/null
+++ b/arch/s390/crypto/sha256_s390.c
@@ -0,0 +1,166 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the SHA256 Secure Hash Algorithm.
5 *
6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 *
10 * Derived from "crypto/sha256.c"
11 * and "arch/s390/crypto/sha1_s390.c"
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 *
18 */
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/crypto.h>
22
23#include "crypt_s390.h"
24
25#define SHA256_DIGEST_SIZE 32
26#define SHA256_BLOCK_SIZE 64
27
/*
 * SHA-256 transform state.  The buffer is twice the block size so that
 * pad_message() can always append the 0x80 byte, the zero fill and the
 * 64-bit length without overflowing, even when the partial block is
 * nearly full.
 */
struct s390_sha256_ctx {
	u64 count;	/* total message length in bits */
	u32 state[8];	/* intermediate hash value H0..H7, as used by KIMD */
	u8 buf[2 * SHA256_BLOCK_SIZE];	/* partial block + padding area */
};
33
34static void sha256_init(void *ctx)
35{
36 struct s390_sha256_ctx *sctx = ctx;
37
38 sctx->state[0] = 0x6a09e667;
39 sctx->state[1] = 0xbb67ae85;
40 sctx->state[2] = 0x3c6ef372;
41 sctx->state[3] = 0xa54ff53a;
42 sctx->state[4] = 0x510e527f;
43 sctx->state[5] = 0x9b05688c;
44 sctx->state[6] = 0x1f83d9ab;
45 sctx->state[7] = 0x5be0cd19;
46 sctx->count = 0;
47 memset(sctx->buf, 0, sizeof(sctx->buf));
48}
49
50static void sha256_update(void *ctx, const u8 *data, unsigned int len)
51{
52 struct s390_sha256_ctx *sctx = ctx;
53 unsigned int index;
54 int ret;
55
56 /* how much is already in the buffer? */
57 index = sctx->count / 8 & 0x3f;
58
59 /* update message bit length */
60 sctx->count += len * 8;
61
62 if ((index + len) < SHA256_BLOCK_SIZE)
63 goto store;
64
65 /* process one stored block */
66 if (index) {
67 memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index);
68 ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
69 SHA256_BLOCK_SIZE);
70 BUG_ON(ret != SHA256_BLOCK_SIZE);
71 data += SHA256_BLOCK_SIZE - index;
72 len -= SHA256_BLOCK_SIZE - index;
73 }
74
75 /* process as many blocks as possible */
76 if (len >= SHA256_BLOCK_SIZE) {
77 ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, data,
78 len & ~(SHA256_BLOCK_SIZE - 1));
79 BUG_ON(ret != (len & ~(SHA256_BLOCK_SIZE - 1)));
80 data += ret;
81 len -= ret;
82 }
83
84store:
85 /* anything left? */
86 if (len)
87 memcpy(sctx->buf + index , data, len);
88}
89
/*
 * Append the SHA-256 padding to the buffered data: a single 0x80 byte,
 * zero fill up to 8 bytes before a block boundary, then the 64-bit
 * message bit length.  s390 is big-endian, so a plain memcpy of the u64
 * count yields the byte order FIPS 180-2 requires.
 * On return sctx->count holds the padded length in bits, telling the
 * caller how many buffer bytes to hash (one or two blocks).
 */
static void pad_message(struct s390_sha256_ctx* sctx)
{
	int index, end;

	index = sctx->count / 8 & 0x3f;
	/* one block suffices only if 0x80 + length still fit (index < 56) */
	end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;

	/* start pad with 1 */
	sctx->buf[index] = 0x80;

	/* pad with zeros */
	index++;
	memset(sctx->buf + index, 0x00, end - index - 8);

	/* append message length */
	memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);

	sctx->count = end * 8;
}
109
/*
 * Add padding and return the message digest.
 * Hashes the padded tail with KIMD, copies the 32-byte digest to @out
 * and wipes the context so no key/hash material lingers in memory.
 */
static void sha256_final(void* ctx, u8 *out)
{
	struct s390_sha256_ctx *sctx = ctx;

	/* must perform manual padding */
	pad_message(sctx);

	/* pad_message() left the padded bit length in count */
	crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
			sctx->count / 8);

	/* copy digest to out */
	memcpy(out, sctx->state, SHA256_DIGEST_SIZE);

	/* wipe context */
	memset(sctx, 0, sizeof *sctx);
}
127
/* SHA-256 digest descriptor registered with the kernel crypto API. */
static struct crypto_alg alg = {
	.cra_name	=	"sha256",
	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
	.cra_blocksize	=	SHA256_BLOCK_SIZE,
	.cra_ctxsize	=	sizeof(struct s390_sha256_ctx),
	.cra_module	=	THIS_MODULE,
	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
	.cra_u		=	{ .digest = {
	.dia_digestsize	=	SHA256_DIGEST_SIZE,
	.dia_init	=	sha256_init,
	.dia_update	=	sha256_update,
	.dia_final	=	sha256_final } }
};
141
142static int init(void)
143{
144 int ret;
145
146 if (!crypt_s390_func_available(KIMD_SHA_256))
147 return -ENOSYS;
148
149 ret = crypto_register_alg(&alg);
150 if (ret != 0)
151 printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
152 return ret;
153}
154
/* Module exit: remove the SHA-256 digest from the crypto API. */
static void __exit fini(void)
{
	crypto_unregister_alg(&alg);
}
159
module_init(init);
module_exit(fini);

/* allow automatic module loading when "sha256" is requested */
MODULE_ALIAS("sha256");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 45d44c6bb39d..7d23edc6facb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,12 +1,12 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.14-rc1 3# Linux kernel version: 2.6.15-rc2
4# Wed Sep 14 16:46:19 2005 4# Mon Nov 21 13:51:30 2005
5# 5#
6CONFIG_MMU=y 6CONFIG_MMU=y
7CONFIG_RWSEM_XCHGADD_ALGORITHM=y 7CONFIG_RWSEM_XCHGADD_ALGORITHM=y
8CONFIG_GENERIC_CALIBRATE_DELAY=y 8CONFIG_GENERIC_CALIBRATE_DELAY=y
9CONFIG_ARCH_S390=y 9CONFIG_S390=y
10CONFIG_UID16=y 10CONFIG_UID16=y
11 11
12# 12#
@@ -65,15 +65,31 @@ CONFIG_KMOD=y
65CONFIG_STOP_MACHINE=y 65CONFIG_STOP_MACHINE=y
66 66
67# 67#
68# Block layer
69#
70# CONFIG_LBD is not set
71
72#
73# IO Schedulers
74#
75CONFIG_IOSCHED_NOOP=y
76CONFIG_IOSCHED_AS=y
77CONFIG_IOSCHED_DEADLINE=y
78CONFIG_IOSCHED_CFQ=y
79CONFIG_DEFAULT_AS=y
80# CONFIG_DEFAULT_DEADLINE is not set
81# CONFIG_DEFAULT_CFQ is not set
82# CONFIG_DEFAULT_NOOP is not set
83CONFIG_DEFAULT_IOSCHED="anticipatory"
84
85#
68# Base setup 86# Base setup
69# 87#
70 88
71# 89#
72# Processor type and features 90# Processor type and features
73# 91#
74# CONFIG_ARCH_S390X is not set
75# CONFIG_64BIT is not set 92# CONFIG_64BIT is not set
76CONFIG_ARCH_S390_31=y
77CONFIG_SMP=y 93CONFIG_SMP=y
78CONFIG_NR_CPUS=32 94CONFIG_NR_CPUS=32
79CONFIG_HOTPLUG_CPU=y 95CONFIG_HOTPLUG_CPU=y
@@ -97,6 +113,7 @@ CONFIG_FLATMEM_MANUAL=y
97CONFIG_FLATMEM=y 113CONFIG_FLATMEM=y
98CONFIG_FLAT_NODE_MEM_MAP=y 114CONFIG_FLAT_NODE_MEM_MAP=y
99# CONFIG_SPARSEMEM_STATIC is not set 115# CONFIG_SPARSEMEM_STATIC is not set
116CONFIG_SPLIT_PTLOCK_CPUS=4
100 117
101# 118#
102# I/O subsystem configuration 119# I/O subsystem configuration
@@ -188,10 +205,18 @@ CONFIG_IPV6=y
188# CONFIG_NET_DIVERT is not set 205# CONFIG_NET_DIVERT is not set
189# CONFIG_ECONET is not set 206# CONFIG_ECONET is not set
190# CONFIG_WAN_ROUTER is not set 207# CONFIG_WAN_ROUTER is not set
208
209#
210# QoS and/or fair queueing
211#
191CONFIG_NET_SCHED=y 212CONFIG_NET_SCHED=y
192CONFIG_NET_SCH_CLK_JIFFIES=y 213CONFIG_NET_SCH_CLK_JIFFIES=y
193# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set 214# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
194# CONFIG_NET_SCH_CLK_CPU is not set 215# CONFIG_NET_SCH_CLK_CPU is not set
216
217#
218# Queueing/Scheduling
219#
195CONFIG_NET_SCH_CBQ=m 220CONFIG_NET_SCH_CBQ=m
196# CONFIG_NET_SCH_HTB is not set 221# CONFIG_NET_SCH_HTB is not set
197# CONFIG_NET_SCH_HFSC is not set 222# CONFIG_NET_SCH_HFSC is not set
@@ -204,8 +229,10 @@ CONFIG_NET_SCH_GRED=m
204CONFIG_NET_SCH_DSMARK=m 229CONFIG_NET_SCH_DSMARK=m
205# CONFIG_NET_SCH_NETEM is not set 230# CONFIG_NET_SCH_NETEM is not set
206# CONFIG_NET_SCH_INGRESS is not set 231# CONFIG_NET_SCH_INGRESS is not set
207CONFIG_NET_QOS=y 232
208CONFIG_NET_ESTIMATOR=y 233#
234# Classification
235#
209CONFIG_NET_CLS=y 236CONFIG_NET_CLS=y
210# CONFIG_NET_CLS_BASIC is not set 237# CONFIG_NET_CLS_BASIC is not set
211CONFIG_NET_CLS_TCINDEX=m 238CONFIG_NET_CLS_TCINDEX=m
@@ -214,18 +241,18 @@ CONFIG_NET_CLS_ROUTE=y
214CONFIG_NET_CLS_FW=m 241CONFIG_NET_CLS_FW=m
215CONFIG_NET_CLS_U32=m 242CONFIG_NET_CLS_U32=m
216# CONFIG_CLS_U32_PERF is not set 243# CONFIG_CLS_U32_PERF is not set
217# CONFIG_NET_CLS_IND is not set
218CONFIG_NET_CLS_RSVP=m 244CONFIG_NET_CLS_RSVP=m
219CONFIG_NET_CLS_RSVP6=m 245CONFIG_NET_CLS_RSVP6=m
220# CONFIG_NET_EMATCH is not set 246# CONFIG_NET_EMATCH is not set
221# CONFIG_NET_CLS_ACT is not set 247# CONFIG_NET_CLS_ACT is not set
222CONFIG_NET_CLS_POLICE=y 248CONFIG_NET_CLS_POLICE=y
249# CONFIG_NET_CLS_IND is not set
250CONFIG_NET_ESTIMATOR=y
223 251
224# 252#
225# Network testing 253# Network testing
226# 254#
227# CONFIG_NET_PKTGEN is not set 255# CONFIG_NET_PKTGEN is not set
228# CONFIG_NETFILTER_NETLINK is not set
229# CONFIG_HAMRADIO is not set 256# CONFIG_HAMRADIO is not set
230# CONFIG_IRDA is not set 257# CONFIG_IRDA is not set
231# CONFIG_BT is not set 258# CONFIG_BT is not set
@@ -276,6 +303,7 @@ CONFIG_SCSI_FC_ATTRS=y
276# 303#
277# SCSI low-level drivers 304# SCSI low-level drivers
278# 305#
306# CONFIG_ISCSI_TCP is not set
279# CONFIG_SCSI_SATA is not set 307# CONFIG_SCSI_SATA is not set
280# CONFIG_SCSI_DEBUG is not set 308# CONFIG_SCSI_DEBUG is not set
281CONFIG_ZFCP=y 309CONFIG_ZFCP=y
@@ -292,7 +320,6 @@ CONFIG_BLK_DEV_RAM=y
292CONFIG_BLK_DEV_RAM_COUNT=16 320CONFIG_BLK_DEV_RAM_COUNT=16
293CONFIG_BLK_DEV_RAM_SIZE=4096 321CONFIG_BLK_DEV_RAM_SIZE=4096
294CONFIG_BLK_DEV_INITRD=y 322CONFIG_BLK_DEV_INITRD=y
295# CONFIG_LBD is not set
296# CONFIG_CDROM_PKTCDVD is not set 323# CONFIG_CDROM_PKTCDVD is not set
297 324
298# 325#
@@ -305,15 +332,8 @@ CONFIG_DASD_PROFILE=y
305CONFIG_DASD_ECKD=y 332CONFIG_DASD_ECKD=y
306CONFIG_DASD_FBA=y 333CONFIG_DASD_FBA=y
307CONFIG_DASD_DIAG=y 334CONFIG_DASD_DIAG=y
335CONFIG_DASD_EER=m
308# CONFIG_DASD_CMB is not set 336# CONFIG_DASD_CMB is not set
309
310#
311# IO Schedulers
312#
313CONFIG_IOSCHED_NOOP=y
314CONFIG_IOSCHED_AS=y
315CONFIG_IOSCHED_DEADLINE=y
316CONFIG_IOSCHED_CFQ=y
317# CONFIG_ATA_OVER_ETH is not set 337# CONFIG_ATA_OVER_ETH is not set
318 338
319# 339#
@@ -378,7 +398,6 @@ CONFIG_S390_TAPE_34XX=m
378# CONFIG_VMLOGRDR is not set 398# CONFIG_VMLOGRDR is not set
379# CONFIG_VMCP is not set 399# CONFIG_VMCP is not set
380# CONFIG_MONREADER is not set 400# CONFIG_MONREADER is not set
381# CONFIG_DCSS_SHM is not set
382 401
383# 402#
384# Cryptographic devices 403# Cryptographic devices
@@ -593,6 +612,8 @@ CONFIG_DEBUG_PREEMPT=y
593# CONFIG_DEBUG_KOBJECT is not set 612# CONFIG_DEBUG_KOBJECT is not set
594# CONFIG_DEBUG_INFO is not set 613# CONFIG_DEBUG_INFO is not set
595CONFIG_DEBUG_FS=y 614CONFIG_DEBUG_FS=y
615# CONFIG_DEBUG_VM is not set
616# CONFIG_RCU_TORTURE_TEST is not set
596 617
597# 618#
598# Security options 619# Security options
@@ -609,17 +630,19 @@ CONFIG_CRYPTO=y
609# CONFIG_CRYPTO_MD4 is not set 630# CONFIG_CRYPTO_MD4 is not set
610# CONFIG_CRYPTO_MD5 is not set 631# CONFIG_CRYPTO_MD5 is not set
611# CONFIG_CRYPTO_SHA1 is not set 632# CONFIG_CRYPTO_SHA1 is not set
612# CONFIG_CRYPTO_SHA1_Z990 is not set 633# CONFIG_CRYPTO_SHA1_S390 is not set
613# CONFIG_CRYPTO_SHA256 is not set 634# CONFIG_CRYPTO_SHA256 is not set
635# CONFIG_CRYPTO_SHA256_S390 is not set
614# CONFIG_CRYPTO_SHA512 is not set 636# CONFIG_CRYPTO_SHA512 is not set
615# CONFIG_CRYPTO_WP512 is not set 637# CONFIG_CRYPTO_WP512 is not set
616# CONFIG_CRYPTO_TGR192 is not set 638# CONFIG_CRYPTO_TGR192 is not set
617# CONFIG_CRYPTO_DES is not set 639# CONFIG_CRYPTO_DES is not set
618# CONFIG_CRYPTO_DES_Z990 is not set 640# CONFIG_CRYPTO_DES_S390 is not set
619# CONFIG_CRYPTO_BLOWFISH is not set 641# CONFIG_CRYPTO_BLOWFISH is not set
620# CONFIG_CRYPTO_TWOFISH is not set 642# CONFIG_CRYPTO_TWOFISH is not set
621# CONFIG_CRYPTO_SERPENT is not set 643# CONFIG_CRYPTO_SERPENT is not set
622# CONFIG_CRYPTO_AES is not set 644# CONFIG_CRYPTO_AES is not set
645# CONFIG_CRYPTO_AES_S390 is not set
623# CONFIG_CRYPTO_CAST5 is not set 646# CONFIG_CRYPTO_CAST5 is not set
624# CONFIG_CRYPTO_CAST6 is not set 647# CONFIG_CRYPTO_CAST6 is not set
625# CONFIG_CRYPTO_TEA is not set 648# CONFIG_CRYPTO_TEA is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7434c32bc631..9269b5788fac 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -8,31 +8,25 @@ obj-y := bitmap.o traps.o time.o process.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o 9 semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
13
11extra-y += head.o init_task.o vmlinux.lds 14extra-y += head.o init_task.o vmlinux.lds
12 15
13obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 16obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
14obj-$(CONFIG_SMP) += smp.o 17obj-$(CONFIG_SMP) += smp.o
15 18
16obj-$(CONFIG_S390_SUPPORT) += compat_linux.o compat_signal.o \ 19obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
17 compat_ioctl.o compat_wrapper.o \ 20 compat_wrapper.o compat_exec_domain.o
18 compat_exec_domain.o
19obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o 21obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
20 22
21obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o
22obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o
23
24obj-$(CONFIG_VIRT_TIMER) += vtime.o 23obj-$(CONFIG_VIRT_TIMER) += vtime.o
25 24
26# Kexec part 25# Kexec part
27S390_KEXEC_OBJS := machine_kexec.o crash.o 26S390_KEXEC_OBJS := machine_kexec.o crash.o
28ifeq ($(CONFIG_ARCH_S390X),y) 27S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
29S390_KEXEC_OBJS += relocate_kernel64.o
30else
31S390_KEXEC_OBJS += relocate_kernel.o
32endif
33obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) 28obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
34 29
35
36# 30#
37# This is just to get the dependencies... 31# This is just to get the dependencies...
38# 32#
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 03ba5893f17b..1f451c2cb071 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -112,7 +112,7 @@ static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
112 112
113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs) 113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
114{ 114{
115 struct pt_regs *ptregs = __KSTK_PTREGS(tsk); 115 struct pt_regs *ptregs = task_pt_regs(tsk);
116 int i; 116 int i;
117 117
118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4); 118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
deleted file mode 100644
index 6504c4e69986..000000000000
--- a/arch/s390/kernel/compat_ioctl.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
3 *
4 * S390 version
5 * Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Gerhard Tonn (ton@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com)
8 *
9 * Original implementation from 32-bit Sparc compat code which is
10 * Copyright (C) 2000 Silicon Graphics, Inc.
11 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
12 */
13
14#include "compat_linux.h"
15#define INCLUDES
16#define CODE
17#include "../../../fs/compat_ioctl.c"
18#include <asm/dasd.h>
19#include <asm/cmb.h>
20#include <asm/tape390.h>
21#include <asm/ccwdev.h>
22#include "../../../drivers/s390/char/raw3270.h"
23
24static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
25 unsigned long arg, struct file *f)
26{
27 return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
28}
29
30static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd,
31 unsigned long arg, struct file *f)
32{
33 return sys_ioctl(fd, cmd, arg);
34}
35
36#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer)
37#define ULONG_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong)
38#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
39
40struct ioctl_trans ioctl_start[] = {
41/* architecture independent ioctls */
42#include <linux/compat_ioctl.h>
43#define DECLARES
44#include "../../../fs/compat_ioctl.c"
45
46/* s390 only ioctls */
47COMPATIBLE_IOCTL(DASDAPIVER)
48COMPATIBLE_IOCTL(BIODASDDISABLE)
49COMPATIBLE_IOCTL(BIODASDENABLE)
50COMPATIBLE_IOCTL(BIODASDRSRV)
51COMPATIBLE_IOCTL(BIODASDRLSE)
52COMPATIBLE_IOCTL(BIODASDSLCK)
53COMPATIBLE_IOCTL(BIODASDINFO)
54COMPATIBLE_IOCTL(BIODASDINFO2)
55COMPATIBLE_IOCTL(BIODASDFMT)
56COMPATIBLE_IOCTL(BIODASDPRRST)
57COMPATIBLE_IOCTL(BIODASDQUIESCE)
58COMPATIBLE_IOCTL(BIODASDRESUME)
59COMPATIBLE_IOCTL(BIODASDPRRD)
60COMPATIBLE_IOCTL(BIODASDPSRD)
61COMPATIBLE_IOCTL(BIODASDGATTR)
62COMPATIBLE_IOCTL(BIODASDSATTR)
63COMPATIBLE_IOCTL(BIODASDCMFENABLE)
64COMPATIBLE_IOCTL(BIODASDCMFDISABLE)
65COMPATIBLE_IOCTL(BIODASDREADALLCMB)
66
67COMPATIBLE_IOCTL(TUBICMD)
68COMPATIBLE_IOCTL(TUBOCMD)
69COMPATIBLE_IOCTL(TUBGETI)
70COMPATIBLE_IOCTL(TUBGETO)
71COMPATIBLE_IOCTL(TUBSETMOD)
72COMPATIBLE_IOCTL(TUBGETMOD)
73
74COMPATIBLE_IOCTL(TAPE390_DISPLAY)
75
76/* s390 doesn't need handlers here */
77COMPATIBLE_IOCTL(TIOCGSERIAL)
78COMPATIBLE_IOCTL(TIOCSSERIAL)
79};
80
81int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ed877d0f27e6..bf9a7a361b34 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -55,6 +55,7 @@
55#include <linux/syscalls.h> 55#include <linux/syscalls.h>
56#include <linux/sysctl.h> 56#include <linux/sysctl.h>
57#include <linux/binfmts.h> 57#include <linux/binfmts.h>
58#include <linux/capability.h>
58#include <linux/compat.h> 59#include <linux/compat.h>
59#include <linux/vfs.h> 60#include <linux/vfs.h>
60#include <linux/ptrace.h> 61#include <linux/ptrace.h>
@@ -279,7 +280,7 @@ asmlinkage long sys32_getegid16(void)
279 280
280static inline long get_tv32(struct timeval *o, struct compat_timeval *i) 281static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
281{ 282{
282 return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) || 283 return (!access_ok(VERIFY_READ, o, sizeof(*o)) ||
283 (__get_user(o->tv_sec, &i->tv_sec) || 284 (__get_user(o->tv_sec, &i->tv_sec) ||
284 __get_user(o->tv_usec, &i->tv_usec))); 285 __get_user(o->tv_usec, &i->tv_usec)));
285} 286}
@@ -1014,38 +1015,6 @@ asmlinkage long sys32_clone(struct pt_regs regs)
1014} 1015}
1015 1016
1016/* 1017/*
1017 * Wrapper function for sys_timer_create.
1018 */
1019extern asmlinkage long
1020sys_timer_create(clockid_t, struct sigevent *, timer_t *);
1021
1022asmlinkage long
1023sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
1024 timer_t *timer_id)
1025{
1026 struct sigevent se;
1027 timer_t ktimer_id;
1028 mm_segment_t old_fs;
1029 long ret;
1030
1031 if (se32 == NULL)
1032 return sys_timer_create(which_clock, NULL, timer_id);
1033
1034 if (get_compat_sigevent(&se, se32))
1035 return -EFAULT;
1036
1037 old_fs = get_fs();
1038 set_fs(KERNEL_DS);
1039 ret = sys_timer_create(which_clock, &se, &ktimer_id);
1040 set_fs(old_fs);
1041
1042 if (!ret)
1043 ret = put_user (ktimer_id, timer_id);
1044
1045 return ret;
1046}
1047
1048/*
1049 * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64. 1018 * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
1050 * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE} 1019 * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
1051 * because the 31 bit values differ from the 64 bit values. 1020 * because the 31 bit values differ from the 64 bit values.
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4ff6808456ea..fa2b3bc22f20 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -467,8 +467,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
467 if (err) 467 if (err)
468 goto badframe; 468 goto badframe;
469 469
470 /* It is more difficult to avoid calling this function than to
471 call it and ignore errors. */
472 set_fs (KERNEL_DS); 470 set_fs (KERNEL_DS);
473 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); 471 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
474 set_fs (old_fs); 472 set_fs (old_fs);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 23fe94e58688..cfde1905d07d 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1289,7 +1289,7 @@ sys32_timer_create_wrapper:
1289 lgfr %r2,%r2 # timer_t (int) 1289 lgfr %r2,%r2 # timer_t (int)
1290 llgtr %r3,%r3 # struct compat_sigevent * 1290 llgtr %r3,%r3 # struct compat_sigevent *
1291 llgtr %r4,%r4 # timer_t * 1291 llgtr %r4,%r4 # timer_t *
1292 jg sys32_timer_create 1292 jg compat_sys_timer_create
1293 1293
1294 .globl sys32_timer_settime_wrapper 1294 .globl sys32_timer_settime_wrapper
1295sys32_timer_settime_wrapper: 1295sys32_timer_settime_wrapper:
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d47fecb42cc5..4ef44e536b2c 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -39,7 +39,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
39 39
40 if (response != NULL && rlen > 0) { 40 if (response != NULL && rlen > 0) {
41 memset(response, 0, rlen); 41 memset(response, 0, rlen);
42#ifndef CONFIG_ARCH_S390X 42#ifndef CONFIG_64BIT
43 asm volatile ( "lra 2,0(%2)\n" 43 asm volatile ( "lra 2,0(%2)\n"
44 "lr 4,%3\n" 44 "lr 4,%3\n"
45 "o 4,%6\n" 45 "o 4,%6\n"
@@ -55,7 +55,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
55 : "a" (cpcmd_buf), "d" (cmdlen), 55 : "a" (cpcmd_buf), "d" (cmdlen),
56 "a" (response), "d" (rlen), "m" (mask) 56 "a" (response), "d" (rlen), "m" (mask)
57 : "cc", "2", "3", "4", "5" ); 57 : "cc", "2", "3", "4", "5" );
58#else /* CONFIG_ARCH_S390X */ 58#else /* CONFIG_64BIT */
59 asm volatile ( "lrag 2,0(%2)\n" 59 asm volatile ( "lrag 2,0(%2)\n"
60 "lgr 4,%3\n" 60 "lgr 4,%3\n"
61 "o 4,%6\n" 61 "o 4,%6\n"
@@ -73,11 +73,11 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
73 : "a" (cpcmd_buf), "d" (cmdlen), 73 : "a" (cpcmd_buf), "d" (cmdlen),
74 "a" (response), "d" (rlen), "m" (mask) 74 "a" (response), "d" (rlen), "m" (mask)
75 : "cc", "2", "3", "4", "5" ); 75 : "cc", "2", "3", "4", "5" );
76#endif /* CONFIG_ARCH_S390X */ 76#endif /* CONFIG_64BIT */
77 EBCASC(response, rlen); 77 EBCASC(response, rlen);
78 } else { 78 } else {
79 return_len = 0; 79 return_len = 0;
80#ifndef CONFIG_ARCH_S390X 80#ifndef CONFIG_64BIT
81 asm volatile ( "lra 2,0(%1)\n" 81 asm volatile ( "lra 2,0(%1)\n"
82 "lr 3,%2\n" 82 "lr 3,%2\n"
83 "diag 2,3,0x8\n" 83 "diag 2,3,0x8\n"
@@ -85,7 +85,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
85 : "=d" (return_code) 85 : "=d" (return_code)
86 : "a" (cpcmd_buf), "d" (cmdlen) 86 : "a" (cpcmd_buf), "d" (cmdlen)
87 : "2", "3" ); 87 : "2", "3" );
88#else /* CONFIG_ARCH_S390X */ 88#else /* CONFIG_64BIT */
89 asm volatile ( "lrag 2,0(%1)\n" 89 asm volatile ( "lrag 2,0(%1)\n"
90 "lgr 3,%2\n" 90 "lgr 3,%2\n"
91 "sam31\n" 91 "sam31\n"
@@ -95,7 +95,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
95 : "=d" (return_code) 95 : "=d" (return_code)
96 : "a" (cpcmd_buf), "d" (cmdlen) 96 : "a" (cpcmd_buf), "d" (cmdlen)
97 : "2", "3" ); 97 : "2", "3" );
98#endif /* CONFIG_ARCH_S390X */ 98#endif /* CONFIG_64BIT */
99 } 99 }
100 spin_unlock_irqrestore(&cpcmd_lock, flags); 100 spin_unlock_irqrestore(&cpcmd_lock, flags);
101 if (response_code != NULL) 101 if (response_code != NULL)
@@ -105,7 +105,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
105 105
106EXPORT_SYMBOL(__cpcmd); 106EXPORT_SYMBOL(__cpcmd);
107 107
108#ifdef CONFIG_ARCH_S390X 108#ifdef CONFIG_64BIT
109int cpcmd(const char *cmd, char *response, int rlen, int *response_code) 109int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
110{ 110{
111 char *lowbuf; 111 char *lowbuf;
@@ -129,4 +129,4 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
129} 129}
130 130
131EXPORT_SYMBOL(cpcmd); 131EXPORT_SYMBOL(cpcmd);
132#endif /* CONFIG_ARCH_S390X */ 132#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 7bd169c58b0c..926cceeae0fa 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -10,8 +10,6 @@
10#include <linux/threads.h> 10#include <linux/threads.h>
11#include <linux/kexec.h> 11#include <linux/kexec.h>
12 12
13note_buf_t crash_notes[NR_CPUS];
14
15void machine_crash_shutdown(struct pt_regs *regs) 13void machine_crash_shutdown(struct pt_regs *regs)
16{ 14{
17} 15}
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4eb71ffcf484..369ab4413ec7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -213,7 +213,7 @@ sysc_nr_ok:
213 mvc SP_ARGS(8,%r15),SP_R7(%r15) 213 mvc SP_ARGS(8,%r15),SP_R7(%r15)
214sysc_do_restart: 214sysc_do_restart:
215 larl %r10,sys_call_table 215 larl %r10,sys_call_table
216#ifdef CONFIG_S390_SUPPORT 216#ifdef CONFIG_COMPAT
217 tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? 217 tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
218 jno sysc_noemu 218 jno sysc_noemu
219 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 219 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
@@ -361,7 +361,7 @@ sys_clone_glue:
361 la %r2,SP_PTREGS(%r15) # load pt_regs 361 la %r2,SP_PTREGS(%r15) # load pt_regs
362 jg sys_clone # branch to sys_clone 362 jg sys_clone # branch to sys_clone
363 363
364#ifdef CONFIG_S390_SUPPORT 364#ifdef CONFIG_COMPAT
365sys32_clone_glue: 365sys32_clone_glue:
366 la %r2,SP_PTREGS(%r15) # load pt_regs 366 la %r2,SP_PTREGS(%r15) # load pt_regs
367 jg sys32_clone # branch to sys32_clone 367 jg sys32_clone # branch to sys32_clone
@@ -383,7 +383,7 @@ sys_execve_glue:
383 bnz 0(%r12) # it did fail -> store result in gpr2 383 bnz 0(%r12) # it did fail -> store result in gpr2
384 b 6(%r12) # SKIP STG 2,SP_R2(15) in 384 b 6(%r12) # SKIP STG 2,SP_R2(15) in
385 # system_call/sysc_tracesys 385 # system_call/sysc_tracesys
386#ifdef CONFIG_S390_SUPPORT 386#ifdef CONFIG_COMPAT
387sys32_execve_glue: 387sys32_execve_glue:
388 la %r2,SP_PTREGS(%r15) # load pt_regs 388 la %r2,SP_PTREGS(%r15) # load pt_regs
389 lgr %r12,%r14 # save return address 389 lgr %r12,%r14 # save return address
@@ -398,7 +398,7 @@ sys_sigreturn_glue:
398 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 398 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
399 jg sys_sigreturn # branch to sys_sigreturn 399 jg sys_sigreturn # branch to sys_sigreturn
400 400
401#ifdef CONFIG_S390_SUPPORT 401#ifdef CONFIG_COMPAT
402sys32_sigreturn_glue: 402sys32_sigreturn_glue:
403 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 403 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
404 jg sys32_sigreturn # branch to sys32_sigreturn 404 jg sys32_sigreturn # branch to sys32_sigreturn
@@ -408,7 +408,7 @@ sys_rt_sigreturn_glue:
408 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 408 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
409 jg sys_rt_sigreturn # branch to sys_sigreturn 409 jg sys_rt_sigreturn # branch to sys_sigreturn
410 410
411#ifdef CONFIG_S390_SUPPORT 411#ifdef CONFIG_COMPAT
412sys32_rt_sigreturn_glue: 412sys32_rt_sigreturn_glue:
413 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 413 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
414 jg sys32_rt_sigreturn # branch to sys32_sigreturn 414 jg sys32_rt_sigreturn # branch to sys32_sigreturn
@@ -429,7 +429,7 @@ sys_sigsuspend_glue:
429 la %r14,6(%r14) # skip store of return value 429 la %r14,6(%r14) # skip store of return value
430 jg sys_sigsuspend # branch to sys_sigsuspend 430 jg sys_sigsuspend # branch to sys_sigsuspend
431 431
432#ifdef CONFIG_S390_SUPPORT 432#ifdef CONFIG_COMPAT
433sys32_sigsuspend_glue: 433sys32_sigsuspend_glue:
434 llgfr %r4,%r4 # unsigned long 434 llgfr %r4,%r4 # unsigned long
435 lgr %r5,%r4 # move mask back 435 lgr %r5,%r4 # move mask back
@@ -449,7 +449,7 @@ sys_rt_sigsuspend_glue:
449 la %r14,6(%r14) # skip store of return value 449 la %r14,6(%r14) # skip store of return value
450 jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend 450 jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend
451 451
452#ifdef CONFIG_S390_SUPPORT 452#ifdef CONFIG_COMPAT
453sys32_rt_sigsuspend_glue: 453sys32_rt_sigsuspend_glue:
454 llgfr %r3,%r3 # size_t 454 llgfr %r3,%r3 # size_t
455 lgr %r4,%r3 # move sigsetsize parameter 455 lgr %r4,%r3 # move sigsetsize parameter
@@ -464,7 +464,7 @@ sys_sigaltstack_glue:
464 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter 464 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
465 jg sys_sigaltstack # branch to sys_sigreturn 465 jg sys_sigaltstack # branch to sys_sigreturn
466 466
467#ifdef CONFIG_S390_SUPPORT 467#ifdef CONFIG_COMPAT
468sys32_sigaltstack_glue: 468sys32_sigaltstack_glue:
469 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter 469 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
470 jg sys32_sigaltstack_wrapper # branch to sys_sigreturn 470 jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
@@ -1009,7 +1009,7 @@ sys_call_table:
1009#include "syscalls.S" 1009#include "syscalls.S"
1010#undef SYSCALL 1010#undef SYSCALL
1011 1011
1012#ifdef CONFIG_S390_SUPPORT 1012#ifdef CONFIG_COMPAT
1013 1013
1014#define SYSCALL(esa,esame,emu) .long emu 1014#define SYSCALL(esa,esame,emu) .long emu
1015 .globl sys_call_table_emu 1015 .globl sys_call_table_emu
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d31a97c89f68..ea88d066bf04 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -30,7 +30,7 @@
30#include <asm/thread_info.h> 30#include <asm/thread_info.h>
31#include <asm/page.h> 31#include <asm/page.h>
32 32
33#ifdef CONFIG_ARCH_S390X 33#ifdef CONFIG_64BIT
34#define ARCH_OFFSET 4 34#define ARCH_OFFSET 4
35#else 35#else
36#define ARCH_OFFSET 0 36#define ARCH_OFFSET 0
@@ -539,7 +539,7 @@ ipl_devno:
539 .word 0 539 .word 0
540.endm 540.endm
541 541
542#ifdef CONFIG_ARCH_S390X 542#ifdef CONFIG_64BIT
543#include "head64.S" 543#include "head64.S"
544#else 544#else
545#include "head31.S" 545#include "head31.S"
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 5aa71b05b8ae..f0ed5c642c74 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
85 pfault_fini(); 85 pfault_fini();
86#endif 86#endif
87 87
88 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) 88 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
89 signal_processor(smp_processor_id(), sigp_stop); 89 signal_processor(smp_processor_id(), sigp_stop);
90 90
91 /* Wait for all other cpus to enter stopped state */ 91 /* Wait for all other cpus to enter stopped state */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 607d506689c8..c271cdab58e2 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -37,11 +37,11 @@
37#define DEBUGP(fmt , ...) 37#define DEBUGP(fmt , ...)
38#endif 38#endif
39 39
40#ifndef CONFIG_ARCH_S390X 40#ifndef CONFIG_64BIT
41#define PLT_ENTRY_SIZE 12 41#define PLT_ENTRY_SIZE 12
42#else /* CONFIG_ARCH_S390X */ 42#else /* CONFIG_64BIT */
43#define PLT_ENTRY_SIZE 20 43#define PLT_ENTRY_SIZE 20
44#endif /* CONFIG_ARCH_S390X */ 44#endif /* CONFIG_64BIT */
45 45
46void *module_alloc(unsigned long size) 46void *module_alloc(unsigned long size)
47{ 47{
@@ -294,17 +294,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
294 unsigned int *ip; 294 unsigned int *ip;
295 ip = me->module_core + me->arch.plt_offset + 295 ip = me->module_core + me->arch.plt_offset +
296 info->plt_offset; 296 info->plt_offset;
297#ifndef CONFIG_ARCH_S390X 297#ifndef CONFIG_64BIT
298 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ 298 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
299 ip[1] = 0x100607f1; 299 ip[1] = 0x100607f1;
300 ip[2] = val; 300 ip[2] = val;
301#else /* CONFIG_ARCH_S390X */ 301#else /* CONFIG_64BIT */
302 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 302 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
303 ip[1] = 0x100a0004; 303 ip[1] = 0x100a0004;
304 ip[2] = 0x07f10000; 304 ip[2] = 0x07f10000;
305 ip[3] = (unsigned int) (val >> 32); 305 ip[3] = (unsigned int) (val >> 32);
306 ip[4] = (unsigned int) val; 306 ip[4] = (unsigned int) val;
307#endif /* CONFIG_ARCH_S390X */ 307#endif /* CONFIG_64BIT */
308 info->plt_initialized = 1; 308 info->plt_initialized = 1;
309 } 309 }
310 if (r_type == R_390_PLTOFF16 || 310 if (r_type == R_390_PLTOFF16 ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 78b64fe5e7c2..008c74526fd3 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -58,10 +58,18 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
58 */ 58 */
59unsigned long thread_saved_pc(struct task_struct *tsk) 59unsigned long thread_saved_pc(struct task_struct *tsk)
60{ 60{
61 struct stack_frame *sf; 61 struct stack_frame *sf, *low, *high;
62 62
63 sf = (struct stack_frame *) tsk->thread.ksp; 63 if (!tsk || !task_stack_page(tsk))
64 sf = (struct stack_frame *) sf->back_chain; 64 return 0;
65 low = task_stack_page(tsk);
66 high = (struct stack_frame *) task_pt_regs(tsk);
67 sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
68 if (sf <= low || sf > high)
69 return 0;
70 sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
71 if (sf <= low || sf > high)
72 return 0;
65 return sf->gprs[8]; 73 return sf->gprs[8];
66} 74}
67 75
@@ -153,7 +161,7 @@ void show_regs(struct pt_regs *regs)
153{ 161{
154 struct task_struct *tsk = current; 162 struct task_struct *tsk = current;
155 163
156 printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted()); 164 printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
157 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 165 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
158 current->comm, current->pid, (void *) tsk, 166 current->comm, current->pid, (void *) tsk,
159 (void *) tsk->thread.ksp); 167 (void *) tsk->thread.ksp);
@@ -217,8 +225,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
217 struct pt_regs childregs; 225 struct pt_regs childregs;
218 } *frame; 226 } *frame;
219 227
220 frame = ((struct fake_frame *) 228 frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
221 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
222 p->thread.ksp = (unsigned long) frame; 229 p->thread.ksp = (unsigned long) frame;
223 /* Store access registers to kernel stack of new process. */ 230 /* Store access registers to kernel stack of new process. */
224 frame->childregs = *regs; 231 frame->childregs = *regs;
@@ -235,7 +242,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
235 /* Save access registers to new thread structure. */ 242 /* Save access registers to new thread structure. */
236 save_access_regs(&p->thread.acrs[0]); 243 save_access_regs(&p->thread.acrs[0]);
237 244
238#ifndef CONFIG_ARCH_S390X 245#ifndef CONFIG_64BIT
239 /* 246 /*
240 * save fprs to current->thread.fp_regs to merge them with 247 * save fprs to current->thread.fp_regs to merge them with
241 * the emulated registers and then copy the result to the child. 248 * the emulated registers and then copy the result to the child.
@@ -247,7 +254,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
247 /* Set a new TLS ? */ 254 /* Set a new TLS ? */
248 if (clone_flags & CLONE_SETTLS) 255 if (clone_flags & CLONE_SETTLS)
249 p->thread.acrs[0] = regs->gprs[6]; 256 p->thread.acrs[0] = regs->gprs[6];
250#else /* CONFIG_ARCH_S390X */ 257#else /* CONFIG_64BIT */
251 /* Save the fpu registers to new thread structure. */ 258 /* Save the fpu registers to new thread structure. */
252 save_fp_regs(&p->thread.fp_regs); 259 save_fp_regs(&p->thread.fp_regs);
253 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE; 260 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
@@ -260,7 +267,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
260 p->thread.acrs[1] = (unsigned int) regs->gprs[6]; 267 p->thread.acrs[1] = (unsigned int) regs->gprs[6];
261 } 268 }
262 } 269 }
263#endif /* CONFIG_ARCH_S390X */ 270#endif /* CONFIG_64BIT */
264 /* start new process with ar4 pointing to the correct address space */ 271 /* start new process with ar4 pointing to the correct address space */
265 p->thread.mm_segment = get_fs(); 272 p->thread.mm_segment = get_fs();
266 /* Don't copy debug registers */ 273 /* Don't copy debug registers */
@@ -339,51 +346,29 @@ out:
339 */ 346 */
340int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 347int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
341{ 348{
342#ifndef CONFIG_ARCH_S390X 349#ifndef CONFIG_64BIT
343 /* 350 /*
344 * save fprs to current->thread.fp_regs to merge them with 351 * save fprs to current->thread.fp_regs to merge them with
345 * the emulated registers and then copy the result to the dump. 352 * the emulated registers and then copy the result to the dump.
346 */ 353 */
347 save_fp_regs(&current->thread.fp_regs); 354 save_fp_regs(&current->thread.fp_regs);
348 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs)); 355 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
349#else /* CONFIG_ARCH_S390X */ 356#else /* CONFIG_64BIT */
350 save_fp_regs(fpregs); 357 save_fp_regs(fpregs);
351#endif /* CONFIG_ARCH_S390X */ 358#endif /* CONFIG_64BIT */
352 return 1; 359 return 1;
353} 360}
354 361
355/*
356 * fill in the user structure for a core dump..
357 */
358void dump_thread(struct pt_regs * regs, struct user * dump)
359{
360
361/* changed the size calculations - should hopefully work better. lbt */
362 dump->magic = CMAGIC;
363 dump->start_code = 0;
364 dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
365 dump->u_tsize = current->mm->end_code >> PAGE_SHIFT;
366 dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT;
367 dump->u_dsize -= dump->u_tsize;
368 dump->u_ssize = 0;
369 if (dump->start_stack < TASK_SIZE)
370 dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT;
371 memcpy(&dump->regs, regs, sizeof(s390_regs));
372 dump_fpu (regs, &dump->regs.fp_regs);
373 dump->regs.per_info = current->thread.per_info;
374}
375
376unsigned long get_wchan(struct task_struct *p) 362unsigned long get_wchan(struct task_struct *p)
377{ 363{
378 struct stack_frame *sf, *low, *high; 364 struct stack_frame *sf, *low, *high;
379 unsigned long return_address; 365 unsigned long return_address;
380 int count; 366 int count;
381 367
382 if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info) 368 if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
383 return 0; 369 return 0;
384 low = (struct stack_frame *) p->thread_info; 370 low = task_stack_page(p);
385 high = (struct stack_frame *) 371 high = (struct stack_frame *) task_pt_regs(p);
386 ((unsigned long) p->thread_info + THREAD_SIZE) - 1;
387 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); 372 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
388 if (sf <= low || sf > high) 373 if (sf <= low || sf > high)
389 return 0; 374 return 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 06afa3103ace..37dfe33dab73 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,7 +42,7 @@
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/unistd.h> 43#include <asm/unistd.h>
44 44
45#ifdef CONFIG_S390_SUPPORT 45#ifdef CONFIG_COMPAT
46#include "compat_ptrace.h" 46#include "compat_ptrace.h"
47#endif 47#endif
48 48
@@ -52,14 +52,14 @@ FixPerRegisters(struct task_struct *task)
52 struct pt_regs *regs; 52 struct pt_regs *regs;
53 per_struct *per_info; 53 per_struct *per_info;
54 54
55 regs = __KSTK_PTREGS(task); 55 regs = task_pt_regs(task);
56 per_info = (per_struct *) &task->thread.per_info; 56 per_info = (per_struct *) &task->thread.per_info;
57 per_info->control_regs.bits.em_instruction_fetch = 57 per_info->control_regs.bits.em_instruction_fetch =
58 per_info->single_step | per_info->instruction_fetch; 58 per_info->single_step | per_info->instruction_fetch;
59 59
60 if (per_info->single_step) { 60 if (per_info->single_step) {
61 per_info->control_regs.bits.starting_addr = 0; 61 per_info->control_regs.bits.starting_addr = 0;
62#ifdef CONFIG_S390_SUPPORT 62#ifdef CONFIG_COMPAT
63 if (test_thread_flag(TIF_31BIT)) 63 if (test_thread_flag(TIF_31BIT))
64 per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 64 per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
65 else 65 else
@@ -112,7 +112,7 @@ ptrace_disable(struct task_struct *child)
112 clear_single_step(child); 112 clear_single_step(child);
113} 113}
114 114
115#ifndef CONFIG_ARCH_S390X 115#ifndef CONFIG_64BIT
116# define __ADDR_MASK 3 116# define __ADDR_MASK 3
117#else 117#else
118# define __ADDR_MASK 7 118# define __ADDR_MASK 7
@@ -138,7 +138,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
138 * an alignment of 4. Programmers from hell... 138 * an alignment of 4. Programmers from hell...
139 */ 139 */
140 mask = __ADDR_MASK; 140 mask = __ADDR_MASK;
141#ifdef CONFIG_ARCH_S390X 141#ifdef CONFIG_64BIT
142 if (addr >= (addr_t) &dummy->regs.acrs && 142 if (addr >= (addr_t) &dummy->regs.acrs &&
143 addr < (addr_t) &dummy->regs.orig_gpr2) 143 addr < (addr_t) &dummy->regs.orig_gpr2)
144 mask = 3; 144 mask = 3;
@@ -150,7 +150,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
150 /* 150 /*
151 * psw and gprs are stored on the stack 151 * psw and gprs are stored on the stack
152 */ 152 */
153 tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr); 153 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
154 if (addr == (addr_t) &dummy->regs.psw.mask) 154 if (addr == (addr_t) &dummy->regs.psw.mask)
155 /* Remove per bit from user psw. */ 155 /* Remove per bit from user psw. */
156 tmp &= ~PSW_MASK_PER; 156 tmp &= ~PSW_MASK_PER;
@@ -160,7 +160,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
160 * access registers are stored in the thread structure 160 * access registers are stored in the thread structure
161 */ 161 */
162 offset = addr - (addr_t) &dummy->regs.acrs; 162 offset = addr - (addr_t) &dummy->regs.acrs;
163#ifdef CONFIG_ARCH_S390X 163#ifdef CONFIG_64BIT
164 /* 164 /*
165 * Very special case: old & broken 64 bit gdb reading 165 * Very special case: old & broken 64 bit gdb reading
166 * from acrs[15]. Result is a 64 bit value. Read the 166 * from acrs[15]. Result is a 64 bit value. Read the
@@ -176,7 +176,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
176 /* 176 /*
177 * orig_gpr2 is stored on the kernel stack 177 * orig_gpr2 is stored on the kernel stack
178 */ 178 */
179 tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2; 179 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
180 180
181 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 181 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
182 /* 182 /*
@@ -218,7 +218,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
218 * an alignment of 4. Programmers from hell indeed... 218 * an alignment of 4. Programmers from hell indeed...
219 */ 219 */
220 mask = __ADDR_MASK; 220 mask = __ADDR_MASK;
221#ifdef CONFIG_ARCH_S390X 221#ifdef CONFIG_64BIT
222 if (addr >= (addr_t) &dummy->regs.acrs && 222 if (addr >= (addr_t) &dummy->regs.acrs &&
223 addr < (addr_t) &dummy->regs.orig_gpr2) 223 addr < (addr_t) &dummy->regs.orig_gpr2)
224 mask = 3; 224 mask = 3;
@@ -231,26 +231,26 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
231 * psw and gprs are stored on the stack 231 * psw and gprs are stored on the stack
232 */ 232 */
233 if (addr == (addr_t) &dummy->regs.psw.mask && 233 if (addr == (addr_t) &dummy->regs.psw.mask &&
234#ifdef CONFIG_S390_SUPPORT 234#ifdef CONFIG_COMPAT
235 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && 235 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
236#endif 236#endif
237 data != PSW_MASK_MERGE(PSW_USER_BITS, data)) 237 data != PSW_MASK_MERGE(PSW_USER_BITS, data))
238 /* Invalid psw mask. */ 238 /* Invalid psw mask. */
239 return -EINVAL; 239 return -EINVAL;
240#ifndef CONFIG_ARCH_S390X 240#ifndef CONFIG_64BIT
241 if (addr == (addr_t) &dummy->regs.psw.addr) 241 if (addr == (addr_t) &dummy->regs.psw.addr)
242 /* I'd like to reject addresses without the 242 /* I'd like to reject addresses without the
243 high order bit but older gdb's rely on it */ 243 high order bit but older gdb's rely on it */
244 data |= PSW_ADDR_AMODE; 244 data |= PSW_ADDR_AMODE;
245#endif 245#endif
246 *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data; 246 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
247 247
248 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 248 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
249 /* 249 /*
250 * access registers are stored in the thread structure 250 * access registers are stored in the thread structure
251 */ 251 */
252 offset = addr - (addr_t) &dummy->regs.acrs; 252 offset = addr - (addr_t) &dummy->regs.acrs;
253#ifdef CONFIG_ARCH_S390X 253#ifdef CONFIG_64BIT
254 /* 254 /*
255 * Very special case: old & broken 64 bit gdb writing 255 * Very special case: old & broken 64 bit gdb writing
256 * to acrs[15] with a 64 bit value. Ignore the lower 256 * to acrs[15] with a 64 bit value. Ignore the lower
@@ -267,7 +267,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
267 /* 267 /*
268 * orig_gpr2 is stored on the kernel stack 268 * orig_gpr2 is stored on the kernel stack
269 */ 269 */
270 __KSTK_PTREGS(child)->orig_gpr2 = data; 270 task_pt_regs(child)->orig_gpr2 = data;
271 271
272 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 272 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
273 /* 273 /*
@@ -357,7 +357,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
357 return ptrace_request(child, request, addr, data); 357 return ptrace_request(child, request, addr, data);
358} 358}
359 359
360#ifdef CONFIG_S390_SUPPORT 360#ifdef CONFIG_COMPAT
361/* 361/*
362 * Now the fun part starts... a 31 bit program running in the 362 * Now the fun part starts... a 31 bit program running in the
363 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, 363 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
@@ -393,15 +393,15 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
393 */ 393 */
394 if (addr == (addr_t) &dummy32->regs.psw.mask) { 394 if (addr == (addr_t) &dummy32->regs.psw.mask) {
395 /* Fake a 31 bit psw mask. */ 395 /* Fake a 31 bit psw mask. */
396 tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32); 396 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); 397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
399 /* Fake a 31 bit psw address. */ 399 /* Fake a 31 bit psw address. */
400 tmp = (__u32) __KSTK_PTREGS(child)->psw.addr | 400 tmp = (__u32) task_pt_regs(child)->psw.addr |
401 PSW32_ADDR_AMODE31; 401 PSW32_ADDR_AMODE31;
402 } else { 402 } else {
403 /* gpr 0-15 */ 403 /* gpr 0-15 */
404 tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw + 404 tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
405 addr*2 + 4); 405 addr*2 + 4);
406 } 406 }
407 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 407 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -415,7 +415,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
415 /* 415 /*
416 * orig_gpr2 is stored on the kernel stack 416 * orig_gpr2 is stored on the kernel stack
417 */ 417 */
418 tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4); 418 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
419 419
420 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 420 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
421 /* 421 /*
@@ -472,15 +472,15 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) 472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
473 /* Invalid psw mask. */ 473 /* Invalid psw mask. */
474 return -EINVAL; 474 return -EINVAL;
475 __KSTK_PTREGS(child)->psw.mask = 475 task_pt_regs(child)->psw.mask =
476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); 476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
478 /* Build a 64 bit psw address from 31 bit address. */ 478 /* Build a 64 bit psw address from 31 bit address. */
479 __KSTK_PTREGS(child)->psw.addr = 479 task_pt_regs(child)->psw.addr =
480 (__u64) tmp & PSW32_ADDR_INSN; 480 (__u64) tmp & PSW32_ADDR_INSN;
481 } else { 481 } else {
482 /* gpr 0-15 */ 482 /* gpr 0-15 */
483 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw 483 *(__u32*)((addr_t) &task_pt_regs(child)->psw
484 + addr*2 + 4) = tmp; 484 + addr*2 + 4) = tmp;
485 } 485 }
486 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 486 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -494,7 +494,7 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
494 /* 494 /*
495 * orig_gpr2 is stored on the kernel stack 495 * orig_gpr2 is stored on the kernel stack
496 */ 496 */
497 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp; 497 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
498 498
499 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 499 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
500 /* 500 /*
@@ -629,7 +629,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
629 return peek_user(child, addr, data); 629 return peek_user(child, addr, data);
630 if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP) 630 if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
631 return poke_user(child, addr, data); 631 return poke_user(child, addr, data);
632#ifdef CONFIG_S390_SUPPORT 632#ifdef CONFIG_COMPAT
633 if (request == PTRACE_PEEKUSR && 633 if (request == PTRACE_PEEKUSR &&
634 addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT)) 634 addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
635 return peek_user_emu31(child, addr, data); 635 return peek_user_emu31(child, addr, data);
@@ -695,7 +695,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
695 695
696 /* Do requests that differ for 31/64 bit */ 696 /* Do requests that differ for 31/64 bit */
697 default: 697 default:
698#ifdef CONFIG_S390_SUPPORT 698#ifdef CONFIG_COMPAT
699 if (test_thread_flag(TIF_31BIT)) 699 if (test_thread_flag(TIF_31BIT))
700 return do_ptrace_emu31(child, request, addr, data); 700 return do_ptrace_emu31(child, request, addr, data);
701#endif 701#endif
@@ -712,35 +712,18 @@ sys_ptrace(long request, long pid, long addr, long data)
712 int ret; 712 int ret;
713 713
714 lock_kernel(); 714 lock_kernel();
715
716 if (request == PTRACE_TRACEME) { 715 if (request == PTRACE_TRACEME) {
717 /* are we already being traced? */ 716 ret = ptrace_traceme();
718 ret = -EPERM; 717 goto out;
719 if (current->ptrace & PT_PTRACED)
720 goto out;
721 ret = security_ptrace(current->parent, current);
722 if (ret)
723 goto out;
724 /* set the ptrace bit in the process flags. */
725 current->ptrace |= PT_PTRACED;
726 goto out;
727 } 718 }
728 719
729 ret = -EPERM; 720 child = ptrace_get_task_struct(pid);
730 if (pid == 1) /* you may not mess with init */ 721 if (IS_ERR(child)) {
731 goto out; 722 ret = PTR_ERR(child);
732
733 ret = -ESRCH;
734 read_lock(&tasklist_lock);
735 child = find_task_by_pid(pid);
736 if (child)
737 get_task_struct(child);
738 read_unlock(&tasklist_lock);
739 if (!child)
740 goto out; 723 goto out;
724 }
741 725
742 ret = do_ptrace(child, request, addr, data); 726 ret = do_ptrace(child, request, addr, data);
743
744 put_task_struct(child); 727 put_task_struct(child);
745out: 728out:
746 unlock_kernel(); 729 unlock_kernel();
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
index 83cb42bc0b76..1f33951ba439 100644
--- a/arch/s390/kernel/reipl_diag.c
+++ b/arch/s390/kernel/reipl_diag.c
@@ -26,7 +26,7 @@ void reipl_diag(void)
26 " st %%r4,%0\n" 26 " st %%r4,%0\n"
27 " st %%r5,%1\n" 27 " st %%r5,%1\n"
28 ".section __ex_table,\"a\"\n" 28 ".section __ex_table,\"a\"\n"
29#ifdef __s390x__ 29#ifdef CONFIG_64BIT
30 " .align 8\n" 30 " .align 8\n"
31 " .quad 0b, 0b\n" 31 " .quad 0b, 0b\n"
32#else 32#else
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index bee654abb6d3..4176c77670c4 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -10,7 +10,6 @@
10#include <linux/smp.h> 10#include <linux/smp.h>
11#include <linux/syscalls.h> 11#include <linux/syscalls.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/ioctl32.h>
14#include <asm/checksum.h> 13#include <asm/checksum.h>
15#include <asm/cpcmd.h> 14#include <asm/cpcmd.h>
16#include <asm/delay.h> 15#include <asm/delay.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 31e7b19348b7..de8784267473 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -268,7 +268,7 @@ static void do_machine_restart_nonsmp(char * __unused)
268 reipl_diag(); 268 reipl_diag();
269 269
270 if (MACHINE_IS_VM) 270 if (MACHINE_IS_VM)
271 cpcmd ("IPL", NULL, 0); 271 cpcmd ("IPL", NULL, 0, NULL);
272 else 272 else
273 reipl (0x10000 | S390_lowcore.ipl_device); 273 reipl (0x10000 | S390_lowcore.ipl_device);
274} 274}
@@ -276,14 +276,14 @@ static void do_machine_restart_nonsmp(char * __unused)
276static void do_machine_halt_nonsmp(void) 276static void do_machine_halt_nonsmp(void)
277{ 277{
278 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) 278 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
279 cpcmd(vmhalt_cmd, NULL, 0); 279 cpcmd(vmhalt_cmd, NULL, 0, NULL);
280 signal_processor(smp_processor_id(), sigp_stop_and_store_status); 280 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
281} 281}
282 282
283static void do_machine_power_off_nonsmp(void) 283static void do_machine_power_off_nonsmp(void)
284{ 284{
285 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) 285 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
286 cpcmd(vmpoff_cmd, NULL, 0); 286 cpcmd(vmpoff_cmd, NULL, 0, NULL);
287 signal_processor(smp_processor_id(), sigp_stop_and_store_status); 287 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
288} 288}
289 289
@@ -315,6 +315,11 @@ void machine_power_off(void)
315 _machine_power_off(); 315 _machine_power_off();
316} 316}
317 317
318/*
319 * Dummy power off function.
320 */
321void (*pm_power_off)(void) = machine_power_off;
322
318static void __init 323static void __init
319add_memory_hole(unsigned long start, unsigned long end) 324add_memory_hole(unsigned long start, unsigned long end)
320{ 325{
@@ -427,7 +432,7 @@ setup_lowcore(void)
427 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; 432 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
428 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 433 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
429 lc->thread_info = (unsigned long) &init_thread_union; 434 lc->thread_info = (unsigned long) &init_thread_union;
430#ifndef CONFIG_ARCH_S390X 435#ifndef CONFIG_64BIT
431 if (MACHINE_HAS_IEEE) { 436 if (MACHINE_HAS_IEEE) {
432 lc->extended_save_area_addr = (__u32) 437 lc->extended_save_area_addr = (__u32)
433 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); 438 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
@@ -562,21 +567,21 @@ setup_arch(char **cmdline_p)
562 /* 567 /*
563 * print what head.S has found out about the machine 568 * print what head.S has found out about the machine
564 */ 569 */
565#ifndef CONFIG_ARCH_S390X 570#ifndef CONFIG_64BIT
566 printk((MACHINE_IS_VM) ? 571 printk((MACHINE_IS_VM) ?
567 "We are running under VM (31 bit mode)\n" : 572 "We are running under VM (31 bit mode)\n" :
568 "We are running native (31 bit mode)\n"); 573 "We are running native (31 bit mode)\n");
569 printk((MACHINE_HAS_IEEE) ? 574 printk((MACHINE_HAS_IEEE) ?
570 "This machine has an IEEE fpu\n" : 575 "This machine has an IEEE fpu\n" :
571 "This machine has no IEEE fpu\n"); 576 "This machine has no IEEE fpu\n");
572#else /* CONFIG_ARCH_S390X */ 577#else /* CONFIG_64BIT */
573 printk((MACHINE_IS_VM) ? 578 printk((MACHINE_IS_VM) ?
574 "We are running under VM (64 bit mode)\n" : 579 "We are running under VM (64 bit mode)\n" :
575 "We are running native (64 bit mode)\n"); 580 "We are running native (64 bit mode)\n");
576#endif /* CONFIG_ARCH_S390X */ 581#endif /* CONFIG_64BIT */
577 582
578 ROOT_DEV = Root_RAM0; 583 ROOT_DEV = Root_RAM0;
579#ifndef CONFIG_ARCH_S390X 584#ifndef CONFIG_64BIT
580 memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ 585 memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */
581 /* 586 /*
582 * We need some free virtual space to be able to do vmalloc. 587 * We need some free virtual space to be able to do vmalloc.
@@ -585,9 +590,9 @@ setup_arch(char **cmdline_p)
585 */ 590 */
586 if (memory_end > 1920*1024*1024) 591 if (memory_end > 1920*1024*1024)
587 memory_end = 1920*1024*1024; 592 memory_end = 1920*1024*1024;
588#else /* CONFIG_ARCH_S390X */ 593#else /* CONFIG_64BIT */
589 memory_end = memory_size & ~0x200000UL; /* detected in head.s */ 594 memory_end = memory_size & ~0x200000UL; /* detected in head.s */
590#endif /* CONFIG_ARCH_S390X */ 595#endif /* CONFIG_64BIT */
591 596
592 init_mm.start_code = PAGE_OFFSET; 597 init_mm.start_code = PAGE_OFFSET;
593 init_mm.end_code = (unsigned long) &_etext; 598 init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6e0110d71191..6ae4a77270b5 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -254,9 +254,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
254 if (restore_sigregs(regs, &frame->uc.uc_mcontext)) 254 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
255 goto badframe; 255 goto badframe;
256 256
257 /* It is more difficult to avoid calling this function than to 257 if (do_sigaltstack(&frame->uc.uc_stack, NULL,
258 call it and ignore errors. */ 258 regs->gprs[15]) == -EFAULT)
259 do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]); 259 goto badframe;
260 return regs->gprs[2]; 260 return regs->gprs[2];
261 261
262badframe: 262badframe:
@@ -501,7 +501,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
501 501
502 if (signr > 0) { 502 if (signr > 0) {
503 /* Whee! Actually deliver the signal. */ 503 /* Whee! Actually deliver the signal. */
504#ifdef CONFIG_S390_SUPPORT 504#ifdef CONFIG_COMPAT
505 if (test_thread_flag(TIF_31BIT)) { 505 if (test_thread_flag(TIF_31BIT)) {
506 extern void handle_signal32(unsigned long sig, 506 extern void handle_signal32(unsigned long sig,
507 struct k_sigaction *ka, 507 struct k_sigaction *ka,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5856b3fda6bf..cbfcfd02a43a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
263 int cpu; 263 int cpu;
264 static atomic_t cpuid = ATOMIC_INIT(-1); 264 static atomic_t cpuid = ATOMIC_INIT(-1);
265 265
266 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) 266 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
267 signal_processor(smp_processor_id(), sigp_stop); 267 signal_processor(smp_processor_id(), sigp_stop);
268 268
269 /* Wait for all other cpus to enter stopped state */ 269 /* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
313{ 313{
314 static atomic_t cpuid = ATOMIC_INIT(-1); 314 static atomic_t cpuid = ATOMIC_INIT(-1);
315 315
316 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { 316 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
317 smp_send_stop(); 317 smp_send_stop();
318 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) 318 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
319 cpcmd(vmhalt_cmd, NULL, 0, NULL); 319 cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
332{ 332{
333 static atomic_t cpuid = ATOMIC_INIT(-1); 333 static atomic_t cpuid = ATOMIC_INIT(-1);
334 334
335 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { 335 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
336 smp_send_stop(); 336 smp_send_stop();
337 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) 337 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
338 cpcmd(vmpoff_cmd, NULL, 0, NULL); 338 cpcmd(vmpoff_cmd, NULL, 0, NULL);
@@ -402,7 +402,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig)
402 } 402 }
403} 403}
404 404
405#ifndef CONFIG_ARCH_S390X 405#ifndef CONFIG_64BIT
406/* 406/*
407 * this function sends a 'purge tlb' signal to another CPU. 407 * this function sends a 'purge tlb' signal to another CPU.
408 */ 408 */
@@ -416,7 +416,7 @@ void smp_ptlb_all(void)
416 on_each_cpu(smp_ptlb_callback, NULL, 0, 1); 416 on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
417} 417}
418EXPORT_SYMBOL(smp_ptlb_all); 418EXPORT_SYMBOL(smp_ptlb_all);
419#endif /* ! CONFIG_ARCH_S390X */ 419#endif /* ! CONFIG_64BIT */
420 420
421/* 421/*
422 * this function sends a 'reschedule' IPI to another CPU. 422 * this function sends a 'reschedule' IPI to another CPU.
@@ -657,7 +657,7 @@ __cpu_up(unsigned int cpu)
657 idle = current_set[cpu]; 657 idle = current_set[cpu];
658 cpu_lowcore = lowcore_ptr[cpu]; 658 cpu_lowcore = lowcore_ptr[cpu];
659 cpu_lowcore->kernel_stack = (unsigned long) 659 cpu_lowcore->kernel_stack = (unsigned long)
660 idle->thread_info + (THREAD_SIZE); 660 task_stack_page(idle) + (THREAD_SIZE);
661 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 661 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
662 - sizeof(struct pt_regs) 662 - sizeof(struct pt_regs)
663 - sizeof(struct stack_frame)); 663 - sizeof(struct stack_frame));
@@ -783,7 +783,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
783 if (stack == 0ULL) 783 if (stack == 0ULL)
784 panic("smp_boot_cpus failed to allocate memory\n"); 784 panic("smp_boot_cpus failed to allocate memory\n");
785 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); 785 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
786#ifndef __s390x__ 786#ifndef CONFIG_64BIT
787 if (MACHINE_HAS_IEEE) { 787 if (MACHINE_HAS_IEEE) {
788 lowcore_ptr[i]->extended_save_area_addr = 788 lowcore_ptr[i]->extended_save_area_addr =
789 (__u32) __get_free_pages(GFP_KERNEL,0); 789 (__u32) __get_free_pages(GFP_KERNEL,0);
@@ -793,7 +793,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
793 } 793 }
794#endif 794#endif
795 } 795 }
796#ifndef __s390x__ 796#ifndef CONFIG_64BIT
797 if (MACHINE_HAS_IEEE) 797 if (MACHINE_HAS_IEEE)
798 ctl_set_bit(14, 29); /* enable extended save area */ 798 ctl_set_bit(14, 29); /* enable extended save area */
799#endif 799#endif
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index efe6b83b53f7..6a63553493c5 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -26,9 +26,7 @@
26#include <linux/mman.h> 26#include <linux/mman.h>
27#include <linux/file.h> 27#include <linux/file.h>
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29#ifdef CONFIG_ARCH_S390X
30#include <linux/personality.h> 29#include <linux/personality.h>
31#endif /* CONFIG_ARCH_S390X */
32 30
33#include <asm/uaccess.h> 31#include <asm/uaccess.h>
34#include <asm/ipc.h> 32#include <asm/ipc.h>
@@ -121,7 +119,7 @@ out:
121 return error; 119 return error;
122} 120}
123 121
124#ifndef CONFIG_ARCH_S390X 122#ifndef CONFIG_64BIT
125struct sel_arg_struct { 123struct sel_arg_struct {
126 unsigned long n; 124 unsigned long n;
127 fd_set *inp, *outp, *exp; 125 fd_set *inp, *outp, *exp;
@@ -138,7 +136,7 @@ asmlinkage long old_select(struct sel_arg_struct __user *arg)
138 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); 136 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
139 137
140} 138}
141#endif /* CONFIG_ARCH_S390X */ 139#endif /* CONFIG_64BIT */
142 140
143/* 141/*
144 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 142 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
@@ -211,7 +209,7 @@ asmlinkage long sys_ipc(uint call, int first, unsigned long second,
211 return -EINVAL; 209 return -EINVAL;
212} 210}
213 211
214#ifdef CONFIG_ARCH_S390X 212#ifdef CONFIG_64BIT
215asmlinkage long s390x_newuname(struct new_utsname __user *name) 213asmlinkage long s390x_newuname(struct new_utsname __user *name)
216{ 214{
217 int ret = sys_newuname(name); 215 int ret = sys_newuname(name);
@@ -235,12 +233,12 @@ asmlinkage long s390x_personality(unsigned long personality)
235 233
236 return ret; 234 return ret;
237} 235}
238#endif /* CONFIG_ARCH_S390X */ 236#endif /* CONFIG_64BIT */
239 237
240/* 238/*
241 * Wrapper function for sys_fadvise64/fadvise64_64 239 * Wrapper function for sys_fadvise64/fadvise64_64
242 */ 240 */
243#ifndef CONFIG_ARCH_S390X 241#ifndef CONFIG_64BIT
244 242
245asmlinkage long 243asmlinkage long
246s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice) 244s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c36353e8c140..7c0fe152a111 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,7 +214,7 @@ void account_ticks(struct pt_regs *regs)
214#endif 214#endif
215 215
216#ifdef CONFIG_VIRT_CPU_ACCOUNTING 216#ifdef CONFIG_VIRT_CPU_ACCOUNTING
217 account_user_vtime(current); 217 account_tick_vtime(current);
218#else 218#else
219 while (ticks--) 219 while (ticks--)
220 update_process_times(user_mode(regs)); 220 update_process_times(user_mode(regs));
@@ -282,7 +282,7 @@ static inline void start_hz_timer(void)
282{ 282{
283 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 283 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
284 return; 284 return;
285 account_ticks(__KSTK_PTREGS(current)); 285 account_ticks(task_pt_regs(current));
286 cpu_clear(smp_processor_id(), nohz_cpu_mask); 286 cpu_clear(smp_processor_id(), nohz_cpu_mask);
287} 287}
288 288
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5bd36fae56b..5d21e9e6e7b4 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -67,13 +67,13 @@ extern pgm_check_handler_t do_monitor_call;
67 67
68#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 68#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
69 69
70#ifndef CONFIG_ARCH_S390X 70#ifndef CONFIG_64BIT
71#define FOURLONG "%08lx %08lx %08lx %08lx\n" 71#define FOURLONG "%08lx %08lx %08lx %08lx\n"
72static int kstack_depth_to_print = 12; 72static int kstack_depth_to_print = 12;
73#else /* CONFIG_ARCH_S390X */ 73#else /* CONFIG_64BIT */
74#define FOURLONG "%016lx %016lx %016lx %016lx\n" 74#define FOURLONG "%016lx %016lx %016lx %016lx\n"
75static int kstack_depth_to_print = 20; 75static int kstack_depth_to_print = 20;
76#endif /* CONFIG_ARCH_S390X */ 76#endif /* CONFIG_64BIT */
77 77
78/* 78/*
79 * For show_trace we have tree different stack to consider: 79 * For show_trace we have tree different stack to consider:
@@ -136,8 +136,8 @@ void show_trace(struct task_struct *task, unsigned long * stack)
136 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 136 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
137 S390_lowcore.async_stack); 137 S390_lowcore.async_stack);
138 if (task) 138 if (task)
139 __show_trace(sp, (unsigned long) task->thread_info, 139 __show_trace(sp, (unsigned long) task_stack_page(task),
140 (unsigned long) task->thread_info + THREAD_SIZE); 140 (unsigned long) task_stack_page(task) + THREAD_SIZE);
141 else 141 else
142 __show_trace(sp, S390_lowcore.thread_info, 142 __show_trace(sp, S390_lowcore.thread_info,
143 S390_lowcore.thread_info + THREAD_SIZE); 143 S390_lowcore.thread_info + THREAD_SIZE);
@@ -240,7 +240,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
240{ 240{
241 struct pt_regs *regs; 241 struct pt_regs *regs;
242 242
243 regs = __KSTK_PTREGS(task); 243 regs = task_pt_regs(task);
244 buffer += sprintf(buffer, "task: %p, ksp: %p\n", 244 buffer += sprintf(buffer, "task: %p, ksp: %p\n",
245 task, (void *)task->thread.ksp); 245 task, (void *)task->thread.ksp);
246 buffer += sprintf(buffer, "User PSW : %p %p\n", 246 buffer += sprintf(buffer, "User PSW : %p %p\n",
@@ -702,12 +702,12 @@ void __init trap_init(void)
702 pgm_check_table[0x11] = &do_dat_exception; 702 pgm_check_table[0x11] = &do_dat_exception;
703 pgm_check_table[0x12] = &translation_exception; 703 pgm_check_table[0x12] = &translation_exception;
704 pgm_check_table[0x13] = &special_op_exception; 704 pgm_check_table[0x13] = &special_op_exception;
705#ifdef CONFIG_ARCH_S390X 705#ifdef CONFIG_64BIT
706 pgm_check_table[0x38] = &do_dat_exception; 706 pgm_check_table[0x38] = &do_dat_exception;
707 pgm_check_table[0x39] = &do_dat_exception; 707 pgm_check_table[0x39] = &do_dat_exception;
708 pgm_check_table[0x3A] = &do_dat_exception; 708 pgm_check_table[0x3A] = &do_dat_exception;
709 pgm_check_table[0x3B] = &do_dat_exception; 709 pgm_check_table[0x3B] = &do_dat_exception;
710#endif /* CONFIG_ARCH_S390X */ 710#endif /* CONFIG_64BIT */
711 pgm_check_table[0x15] = &operand_exception; 711 pgm_check_table[0x15] = &operand_exception;
712 pgm_check_table[0x1C] = &space_switch_exception; 712 pgm_check_table[0x1C] = &space_switch_exception;
713 pgm_check_table[0x1D] = &hfp_sqrt_exception; 713 pgm_check_table[0x1D] = &hfp_sqrt_exception;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89fdb3808bc0..9289face3027 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -5,7 +5,7 @@
5#include <asm-generic/vmlinux.lds.h> 5#include <asm-generic/vmlinux.lds.h>
6#include <linux/config.h> 6#include <linux/config.h>
7 7
8#ifndef CONFIG_ARCH_S390X 8#ifndef CONFIG_64BIT
9OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 9OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
10OUTPUT_ARCH(s390) 10OUTPUT_ARCH(s390)
11ENTRY(_start) 11ENTRY(_start)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 22a895ecb7a4..dfe6f0856617 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -32,7 +32,7 @@ DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
32 * Update process times based on virtual cpu times stored by entry.S 32 * Update process times based on virtual cpu times stored by entry.S
33 * to the lowcore fields user_timer, system_timer & steal_clock. 33 * to the lowcore fields user_timer, system_timer & steal_clock.
34 */ 34 */
35void account_user_vtime(struct task_struct *tsk) 35void account_tick_vtime(struct task_struct *tsk)
36{ 36{
37 cputime_t cputime; 37 cputime_t cputime;
38 __u64 timer, clock; 38 __u64 timer, clock;
@@ -76,6 +76,31 @@ void account_user_vtime(struct task_struct *tsk)
76 * Update process times based on virtual cpu times stored by entry.S 76 * Update process times based on virtual cpu times stored by entry.S
77 * to the lowcore fields user_timer, system_timer & steal_clock. 77 * to the lowcore fields user_timer, system_timer & steal_clock.
78 */ 78 */
/*
 * Account the user and system cpu time accumulated for a task, based on
 * the virtual cpu timer deltas entry.S has stored into the lowcore
 * fields user_timer and system_timer.  Called on task switch (as opposed
 * to account_tick_vtime, which runs from the timer tick).
 */
void account_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	/* Charge the cpu time elapsed since the last timer update to system
	   time: read the running cpu timer and take the difference. */
	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"	/* Store current cpu timer value */
		: "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	/* NOTE(review): the >> 12 converts lowcore timer units to cputime_t
	   units — presumably CPU-timer format to microseconds; confirm
	   against asm-s390/cputime.h.  The residue below 2^12 stays in the
	   lowcore accumulator so no time is lost to rounding. */
	cputime = S390_lowcore.user_timer >> 12;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}
99
100/*
101 * Update process times based on virtual cpu times stored by entry.S
102 * to the lowcore fields user_timer, system_timer & steal_clock.
103 */
79void account_system_vtime(struct task_struct *tsk) 104void account_system_vtime(struct task_struct *tsk)
80{ 105{
81 cputime_t cputime; 106 cputime_t cputime;
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b701efa1f00e..f20b51ff1d86 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -5,5 +5,5 @@
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o 7lib-y += delay.o string.o
8lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o 8lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
9lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o 9lib-$(CONFIG_SMP) += spinlock.o \ No newline at end of file
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2dc14e9c8327..60f80a4eed4e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -13,7 +13,6 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <asm/io.h> 14#include <asm/io.h>
15 15
16atomic_t spin_retry_counter;
17int spin_retry = 1000; 16int spin_retry = 1000;
18 17
19/** 18/**
@@ -29,7 +28,7 @@ __setup("spin_retry=", spin_retry_setup);
29static inline void 28static inline void
30_diag44(void) 29_diag44(void)
31{ 30{
32#ifdef __s390x__ 31#ifdef CONFIG_64BIT
33 if (MACHINE_HAS_DIAG44) 32 if (MACHINE_HAS_DIAG44)
34#endif 33#endif
35 asm volatile("diag 0,0,0x44"); 34 asm volatile("diag 0,0,0x44");
@@ -45,7 +44,6 @@ _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
45 _diag44(); 44 _diag44();
46 count = spin_retry; 45 count = spin_retry;
47 } 46 }
48 atomic_inc(&spin_retry_counter);
49 if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0) 47 if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
50 return; 48 return;
51 } 49 }
@@ -58,7 +56,6 @@ _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
58 int count = spin_retry; 56 int count = spin_retry;
59 57
60 while (count-- > 0) { 58 while (count-- > 0) {
61 atomic_inc(&spin_retry_counter);
62 if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0) 59 if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
63 return 1; 60 return 1;
64 } 61 }
@@ -77,7 +74,6 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
77 _diag44(); 74 _diag44();
78 count = spin_retry; 75 count = spin_retry;
79 } 76 }
80 atomic_inc(&spin_retry_counter);
81 old = rw->lock & 0x7fffffffU; 77 old = rw->lock & 0x7fffffffU;
82 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 78 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
83 return; 79 return;
@@ -92,7 +88,6 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
92 int count = spin_retry; 88 int count = spin_retry;
93 89
94 while (count-- > 0) { 90 while (count-- > 0) {
95 atomic_inc(&spin_retry_counter);
96 old = rw->lock & 0x7fffffffU; 91 old = rw->lock & 0x7fffffffU;
97 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 92 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
98 return 1; 93 return 1;
@@ -111,7 +106,6 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
111 _diag44(); 106 _diag44();
112 count = spin_retry; 107 count = spin_retry;
113 } 108 }
114 atomic_inc(&spin_retry_counter);
115 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 109 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
116 return; 110 return;
117 } 111 }
@@ -124,7 +118,6 @@ _raw_write_trylock_retry(raw_rwlock_t *rw)
124 int count = spin_retry; 118 int count = spin_retry;
125 119
126 while (count-- > 0) { 120 while (count-- > 0) {
127 atomic_inc(&spin_retry_counter);
128 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 121 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
129 return 1; 122 return 1;
130 } 123 }
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 506a33b51e4f..a9566bcab682 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -143,7 +143,7 @@ dcss_diag (__u8 func, void *parameter,
143 rx = (unsigned long) parameter; 143 rx = (unsigned long) parameter;
144 ry = (unsigned long) func; 144 ry = (unsigned long) func;
145 __asm__ __volatile__( 145 __asm__ __volatile__(
146#ifdef CONFIG_ARCH_S390X 146#ifdef CONFIG_64BIT
147 " sam31\n" // switch to 31 bit 147 " sam31\n" // switch to 31 bit
148 " diag %0,%1,0x64\n" 148 " diag %0,%1,0x64\n"
149 " sam64\n" // switch back to 64 bit 149 " sam64\n" // switch back to 64 bit
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fb2607c369ed..81ade401b073 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,17 +31,17 @@
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33 33
34#ifndef CONFIG_ARCH_S390X 34#ifndef CONFIG_64BIT
35#define __FAIL_ADDR_MASK 0x7ffff000 35#define __FAIL_ADDR_MASK 0x7ffff000
36#define __FIXUP_MASK 0x7fffffff 36#define __FIXUP_MASK 0x7fffffff
37#define __SUBCODE_MASK 0x0200 37#define __SUBCODE_MASK 0x0200
38#define __PF_RES_FIELD 0ULL 38#define __PF_RES_FIELD 0ULL
39#else /* CONFIG_ARCH_S390X */ 39#else /* CONFIG_64BIT */
40#define __FAIL_ADDR_MASK -4096L 40#define __FAIL_ADDR_MASK -4096L
41#define __FIXUP_MASK ~0L 41#define __FIXUP_MASK ~0L
42#define __SUBCODE_MASK 0x0600 42#define __SUBCODE_MASK 0x0600
43#define __PF_RES_FIELD 0x8000000000000000ULL 43#define __PF_RES_FIELD 0x8000000000000000ULL
44#endif /* CONFIG_ARCH_S390X */ 44#endif /* CONFIG_64BIT */
45 45
46#ifdef CONFIG_SYSCTL 46#ifdef CONFIG_SYSCTL
47extern int sysctl_userprocess_debug; 47extern int sysctl_userprocess_debug;
@@ -393,11 +393,11 @@ int pfault_init(void)
393 "2:\n" 393 "2:\n"
394 ".section __ex_table,\"a\"\n" 394 ".section __ex_table,\"a\"\n"
395 " .align 4\n" 395 " .align 4\n"
396#ifndef CONFIG_ARCH_S390X 396#ifndef CONFIG_64BIT
397 " .long 0b,1b\n" 397 " .long 0b,1b\n"
398#else /* CONFIG_ARCH_S390X */ 398#else /* CONFIG_64BIT */
399 " .quad 0b,1b\n" 399 " .quad 0b,1b\n"
400#endif /* CONFIG_ARCH_S390X */ 400#endif /* CONFIG_64BIT */
401 ".previous" 401 ".previous"
402 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" ); 402 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
403 __ctl_set_bit(0, 9); 403 __ctl_set_bit(0, 9);
@@ -417,11 +417,11 @@ void pfault_fini(void)
417 "0:\n" 417 "0:\n"
418 ".section __ex_table,\"a\"\n" 418 ".section __ex_table,\"a\"\n"
419 " .align 4\n" 419 " .align 4\n"
420#ifndef CONFIG_ARCH_S390X 420#ifndef CONFIG_64BIT
421 " .long 0b,0b\n" 421 " .long 0b,0b\n"
422#else /* CONFIG_ARCH_S390X */ 422#else /* CONFIG_64BIT */
423 " .quad 0b,0b\n" 423 " .quad 0b,0b\n"
424#endif /* CONFIG_ARCH_S390X */ 424#endif /* CONFIG_64BIT */
425 ".previous" 425 ".previous"
426 : : "a" (&refbk), "m" (refbk) : "cc" ); 426 : : "a" (&refbk), "m" (refbk) : "cc" );
427} 427}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6ec5cd981e74..df953383724d 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,7 +44,7 @@ void diag10(unsigned long addr)
44{ 44{
45 if (addr >= 0x7ff00000) 45 if (addr >= 0x7ff00000)
46 return; 46 return;
47#ifdef __s390x__ 47#ifdef CONFIG_64BIT
48 asm volatile ( 48 asm volatile (
49 " sam31\n" 49 " sam31\n"
50 " diag %0,%0,0x10\n" 50 " diag %0,%0,0x10\n"
@@ -106,7 +106,7 @@ extern unsigned long __initdata zholes_size[];
106 * paging_init() sets up the page tables 106 * paging_init() sets up the page tables
107 */ 107 */
108 108
109#ifndef CONFIG_ARCH_S390X 109#ifndef CONFIG_64BIT
110void __init paging_init(void) 110void __init paging_init(void)
111{ 111{
112 pgd_t * pg_dir; 112 pgd_t * pg_dir;
@@ -175,7 +175,7 @@ void __init paging_init(void)
175 return; 175 return;
176} 176}
177 177
178#else /* CONFIG_ARCH_S390X */ 178#else /* CONFIG_64BIT */
179void __init paging_init(void) 179void __init paging_init(void)
180{ 180{
181 pgd_t * pg_dir; 181 pgd_t * pg_dir;
@@ -256,7 +256,7 @@ void __init paging_init(void)
256 256
257 return; 257 return;
258} 258}
259#endif /* CONFIG_ARCH_S390X */ 259#endif /* CONFIG_64BIT */
260 260
261void __init mem_init(void) 261void __init mem_init(void)
262{ 262{
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index fb187e5a54b4..356257c171de 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -50,7 +50,7 @@ static inline unsigned long mmap_base(void)
50 50
51static inline int mmap_is_legacy(void) 51static inline int mmap_is_legacy(void)
52{ 52{
53#ifdef CONFIG_ARCH_S390X 53#ifdef CONFIG_64BIT
54 /* 54 /*
55 * Force standard allocation for 64 bit programs. 55 * Force standard allocation for 64 bit programs.
56 */ 56 */
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index ec349276258a..537b2d840e69 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
6 oprofilefs.o oprofile_stats.o \ 6 oprofilefs.o oprofile_stats.o \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) init.o 9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
new file mode 100644
index 000000000000..bc4b84a35cad
--- /dev/null
+++ b/arch/s390/oprofile/backtrace.c
@@ -0,0 +1,79 @@
1/**
2 * arch/s390/oprofile/backtrace.c
3 *
4 * S390 Version
5 * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH.
6 * Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
7 */
8
9#include <linux/oprofile.h>
10
11#include <asm/processor.h> /* for struct stack_frame */
12
/*
 * Walk one kernel stack in [low, high) starting at frame address sp,
 * feeding each return address to oprofile_add_trace(), at most *depth
 * entries in total (*depth is decremented per recorded entry).
 *
 * Frames are linked via the s390 backchain convention: sf->back_chain
 * points to the caller's frame, sf->gprs[8] holds the saved return
 * address (r14).  A zero backchain may mean either end-of-stack or an
 * interrupt frame (struct pt_regs) sitting directly above the current
 * frame, from which the walk continues at regs->gprs[15].
 *
 * Returns the stack pointer at which the walk stopped, so the caller
 * can resume on the next stack (async stack -> task stack).
 */
static unsigned long
__show_trace(unsigned int *depth, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (*depth) {
		/* Mask off the 31-bit addressing bit before dereferencing. */
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		(*depth)--;
		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);

		/* Follow the backchain. */
		while (*depth) {
			/* Frames must strictly ascend (sp <= low rejects
			   cycles and downward chains). */
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			(*depth)--;
			oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);

		}

		if (*depth == 0)
			break;

		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		(*depth)--;
		/* NOTE(review): this records sf->gprs[8] a second time for the
		   interrupt frame — regs->psw.addr would seem the natural
		   sample here; confirm intent before changing. */
		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		/* Resume the outer loop at the interrupted context's sp. */
		sp = regs->gprs[15];
	}
	return sp;
}
57
58void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
59{
60 unsigned long head;
61 struct stack_frame* head_sf;
62
63 if (user_mode (regs))
64 return;
65
66 head = regs->gprs[15];
67 head_sf = (struct stack_frame*)head;
68
69 if (!head_sf->back_chain)
70 return;
71
72 head = head_sf->back_chain;
73
74 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
75 S390_lowcore.async_stack);
76
77 __show_trace(&depth, head, S390_lowcore.thread_info,
78 S390_lowcore.thread_info + THREAD_SIZE);
79}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a65ead0e200a..7a995113b918 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -12,8 +12,12 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14 14
15
16extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
17
/*
 * Arch hook for oprofile initialization: install the s390 stack walker
 * as the backtrace callback.  Returns -ENODEV since no hardware
 * sampling support is provided here — presumably oprofile's timer-mode
 * fallback still consults ops->backtrace, which is why it is set even
 * though we report failure; confirm against drivers/oprofile.
 */
int __init oprofile_arch_init(struct oprofile_operations* ops)
{
	ops->backtrace = s390_backtrace;
	return -ENODEV;
}
19 23