path: root/arch/s390
author		Jeff Garzik <jeff@garzik.org>	2006-09-22 20:10:23 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-22 20:10:23 -0400
commit		28eb177dfa5982d132edceed891cb3885df258bb (patch)
tree		5f8fdc37ad1d8d0793e9c47da7d908b97c814ffb /arch/s390
parent		fd8ae94eea9bb4269d6dff1b47b9dc741bd70d0b (diff)
parent		db392219c5f572610645696e3672f6ea38783a65 (diff)
Merge branch 'master' into upstream
Conflicts:
	net/ieee80211/ieee80211_crypt_tkip.c
	net/ieee80211/ieee80211_crypt_wep.c
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/Kconfig	|   17
-rw-r--r--	arch/s390/appldata/appldata.h	|   16
-rw-r--r--	arch/s390/appldata/appldata_base.c	|   81
-rw-r--r--	arch/s390/appldata/appldata_os.c	|    1
-rw-r--r--	arch/s390/crypto/aes_s390.c	|  285
-rw-r--r--	arch/s390/crypto/crypt_s390.h	|    3
-rw-r--r--	arch/s390/crypto/des_s390.c	|  559
-rw-r--r--	arch/s390/crypto/sha1_s390.c	|    2
-rw-r--r--	arch/s390/crypto/sha256_s390.c	|    2
-rw-r--r--	arch/s390/defconfig	|    1
-rw-r--r--	arch/s390/hypfs/hypfs.h	|    2
-rw-r--r--	arch/s390/hypfs/hypfs_diag.c	|   16
-rw-r--r--	arch/s390/hypfs/hypfs_diag.h	|    2
-rw-r--r--	arch/s390/hypfs/inode.c	|   12
-rw-r--r--	arch/s390/kernel/Makefile	|    3
-rw-r--r--	arch/s390/kernel/entry.S	|   12
-rw-r--r--	arch/s390/kernel/entry64.S	|   16
-rw-r--r--	arch/s390/kernel/head.S	|   69
-rw-r--r--	arch/s390/kernel/head31.S	|   48
-rw-r--r--	arch/s390/kernel/head64.S	|   59
-rw-r--r--	arch/s390/kernel/ipl.c	|  942
-rw-r--r--	arch/s390/kernel/kprobes.c	|  657
-rw-r--r--	arch/s390/kernel/reipl.S	|   33
-rw-r--r--	arch/s390/kernel/reipl64.S	|   34
-rw-r--r--	arch/s390/kernel/reipl_diag.c	|   39
-rw-r--r--	arch/s390/kernel/s390_ksyms.c	|    6
-rw-r--r--	arch/s390/kernel/setup.c	|  272
-rw-r--r--	arch/s390/kernel/signal.c	|   40
-rw-r--r--	arch/s390/kernel/smp.c	|   10
-rw-r--r--	arch/s390/kernel/traps.c	|   31
-rw-r--r--	arch/s390/kernel/vmlinux.lds.S	|    3
-rw-r--r--	arch/s390/lib/Makefile	|    4
-rw-r--r--	arch/s390/lib/uaccess.S	|  211
-rw-r--r--	arch/s390/lib/uaccess64.S	|  207
-rw-r--r--	arch/s390/lib/uaccess_mvcos.c	|  156
-rw-r--r--	arch/s390/lib/uaccess_std.c	|  340
-rw-r--r--	arch/s390/mm/cmm.c	|   30
-rw-r--r--	arch/s390/mm/fault.c	|   40
-rw-r--r--	arch/s390/mm/init.c	|   36
39 files changed, 3031 insertions(+), 1266 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2f4f70c4dbb2..b216ca659cdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -460,8 +460,7 @@ config S390_HYPFS_FS
 	  information in an s390 hypervisor environment.
 
 config KEXEC
-	bool "kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	bool "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -487,8 +486,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function. register_kprobe() establishes
+	  a probepoint and specifies the callback. Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 71d65eb30650..0429481dea63 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -29,22 +29,6 @@
 #define CTL_APPLDATA_NET_SUM	2125
 #define CTL_APPLDATA_PROC	2126
 
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC	0x00	/* Function codes for */
-#define APPLDATA_STOP_REC		0x01	/* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_RECORD	0x02
-#define APPLDATA_START_CONFIG_REC	0x03
-
-#else
-
-#define APPLDATA_START_INTERVAL_REC	0x80
-#define APPLDATA_STOP_REC		0x81
-#define APPLDATA_GEN_EVENT_RECORD	0x82
-#define APPLDATA_START_CONFIG_REC	0x83
-
-#endif /* CONFIG_64BIT */
-
 #define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
 #define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
 #define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a0a94e0ef8d1..b69ed742f981 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -14,20 +14,20 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/smp.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/page-flags.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
 #include <linux/sysctl.h>
-#include <asm/timer.h>
-//#include <linux/kernel_stat.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/workqueue.h>
+#include <asm/appldata.h>
+#include <asm/timer.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/smp.h>
 
 #include "appldata.h"
 
@@ -39,34 +39,6 @@
 
 #define TOD_MICRO	0x01000		/* nr. of TOD clock units
 					   for 1 microsecond */
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-#ifndef CONFIG_64BIT
-struct appldata_parameter_list {
-	u16 diag;		/* The DIAGNOSE code X'00DC' */
-	u8  function;		/* The function code for the DIAGNOSE */
-	u8  parlist_length;	/* Length of the parameter list */
-	u32 product_id_addr;	/* Address of the 16-byte product ID */
-	u16 reserved;
-	u16 buffer_length;	/* Length of the application data buffer */
-	u32 buffer_addr;	/* Address of the application data buffer */
-};
-#else
-struct appldata_parameter_list {
-	u16 diag;
-	u8  function;
-	u8  parlist_length;
-	u32 unused01;
-	u16 reserved;
-	u16 buffer_length;
-	u32 unused02;
-	u64 product_id_addr;
-	u64 buffer_addr;
-};
-#endif /* CONFIG_64BIT */
-
 /*
  * /proc entries (sysctl)
  */
@@ -181,46 +153,17 @@ static void appldata_work_fn(void *data)
 int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 		  u16 length, char *mod_lvl)
 {
-	unsigned long ry;
-	struct appldata_product_id {
-		char prod_nr[7];	/* product nr. */
-		char prod_fn[2];	/* product function */
-		char record_nr;		/* record nr. */
-		char version_nr[2];	/* version */
-		char release_nr[2];	/* release */
-		char mod_lvl[2];	/* modification lvl. */
-	} appldata_product_id = {
-		/* all strings are EBCDIC, record_nr is byte */
+	struct appldata_product_id id = {
 		.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
 			    0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
-		.prod_fn = {0xD5, 0xD3},	/* "NL" */
+		.prod_fn = 0xD5D3,		/* "NL" */
 		.record_nr = record_nr,
-		.version_nr = {0xF2, 0xF6},	/* "26" */
-		.release_nr = {0xF0, 0xF1},	/* "01" */
-		.mod_lvl = {mod_lvl[0], mod_lvl[1]},
-	};
-	struct appldata_parameter_list appldata_parameter_list = {
-		.diag = 0xDC,
-		.function = function,
-		.parlist_length =
-			sizeof(appldata_parameter_list),
-		.buffer_length = length,
-		.product_id_addr =
-			(unsigned long) &appldata_product_id,
-		.buffer_addr = virt_to_phys((void *) buffer)
+		.version_nr = 0xF2F6,		/* "26" */
+		.release_nr = 0xF0F1,		/* "01" */
+		.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1],
 	};
 
-	if (!MACHINE_IS_VM)
-		return -ENOSYS;
-	ry = -1;
-	asm volatile(
-			"diag %1,%0,0xDC\n\t"
-			: "=d" (ry)
-			: "d" (&appldata_parameter_list),
-			  "m" (appldata_parameter_list),
-			  "m" (appldata_product_id)
-			: "cc");
-	return (int) ry;
+	return appldata_asm(&id, function, (void *) buffer, length);
 }
 /************************ timer, work, DIAG <END> ****************************/
 
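The open-coded parameter list and DIAGNOSE X'DC' call removed above move behind appldata_asm() from <asm/appldata.h>, which this diff does not show. A rough sketch of what such a wrapper could look like, reconstructed from the deleted 64-bit code rather than copied from the real header:

/* Sketch only: layout taken from the deleted 64-bit
 * struct appldata_parameter_list; the real definitions live in
 * include/asm-s390/appldata.h and may differ in detail. */
struct appldata_parameter_list {
	u16 diag;		/* DIAGNOSE code X'00DC' */
	u8  function;		/* function code */
	u8  parlist_length;	/* length of this list */
	u32 unused01;
	u16 reserved;
	u16 buffer_length;	/* length of the data buffer */
	u32 unused02;
	u64 product_id_addr;	/* address of the product ID */
	u64 buffer_addr;	/* address of the data buffer */
} __attribute__ ((packed));

static inline int appldata_asm(struct appldata_product_id *id,
			       unsigned short fn, void *buffer,
			       unsigned short length)
{
	struct appldata_parameter_list parm_list;
	int ry;

	if (!MACHINE_IS_VM)
		return -ENOSYS;
	parm_list.diag = 0xdc;
	parm_list.function = fn;
	parm_list.parlist_length = sizeof(parm_list);
	parm_list.buffer_length = length;
	parm_list.product_id_addr = (unsigned long) id;
	parm_list.buffer_addr = virt_to_phys(buffer);
	ry = -1;
	asm volatile(
		"diag %1,%0,0xdc"	/* same DIAG X'DC' as the removed code */
		: "=d" (ry)
		: "d" (&parm_list), "m" (parm_list), "m" (*id)
		: "cc");
	return ry;
}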
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 161acc5c8a1b..76a15523ae9e 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -16,6 +16,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
+#include <asm/appldata.h>
 #include <asm/smp.h>
 
 #include "appldata.h"
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 5713c7e5bd16..15c9eec02928 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -16,9 +16,9 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include "crypt_s390.h"
 
 #define AES_MIN_KEY_SIZE	16
@@ -34,13 +34,16 @@ int has_aes_256 = 0;
 struct s390_aes_ctx {
 	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
+	long enc;
+	long dec;
 	int key_len;
 };
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
 
 	switch (key_len) {
 	case 16:
@@ -110,133 +113,206 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	}
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
-{
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+static struct crypto_alg aes_alg = {
+	.cra_name = "aes",
+	.cra_driver_name = "aes-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct s390_aes_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = AES_MIN_KEY_SIZE,
+			.cia_max_keysize = AES_MAX_KEY_SIZE,
+			.cia_setkey = aes_set_key,
+			.cia_encrypt = aes_encrypt,
+			.cia_decrypt = aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	switch (sctx->key_len) {
+	switch (key_len) {
 	case 16:
-		ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_128_ENCRYPT;
+		sctx->dec = KM_AES_128_DECRYPT;
 		break;
 	case 24:
-		ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_192_ENCRYPT;
+		sctx->dec = KM_AES_192_DECRYPT;
 		break;
 	case 32:
-		ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KM_AES_256_ENCRYPT;
+		sctx->dec = KM_AES_256_DECRYPT;
 		break;
 	}
-	return nbytes;
+
+	return aes_set_key(tfm, in_key, key_len);
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+			 struct blkcipher_walk *walk)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	while ((nbytes = walk->nbytes)) {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	switch (sctx->key_len) {
-	case 16:
-		ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 24:
-		ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 32:
-		ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
-	return nbytes;
+
+	return ret;
 }
 
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+}
 
-	memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
-	switch (sctx->key_len) {
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+}
+
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "ecb-aes-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct s390_aes_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = ecb_aes_set_key,
+			.encrypt = ecb_aes_encrypt,
+			.decrypt = ecb_aes_decrypt,
+		}
+	}
+};
+
+static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	switch (key_len) {
 	case 16:
-		ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_128_ENCRYPT;
+		sctx->dec = KMC_AES_128_DECRYPT;
 		break;
 	case 24:
-		ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_192_ENCRYPT;
+		sctx->dec = KMC_AES_192_DECRYPT;
 		break;
 	case 32:
-		ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
+		sctx->enc = KMC_AES_256_ENCRYPT;
+		sctx->dec = KMC_AES_256_DECRYPT;
 		break;
 	}
-	memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
 
-	return nbytes;
+	return aes_set_key(tfm, in_key, key_len);
 }
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+			 struct blkcipher_walk *walk)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
 
-	/* only use complete blocks */
-	nbytes &= ~(AES_BLOCK_SIZE - 1);
+	if (!nbytes)
+		goto out;
 
-	memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
-	switch (sctx->key_len) {
-	case 16:
-		ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 24:
-		ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	case 32:
-		ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
-		BUG_ON((ret < 0) || (ret != nbytes));
-		break;
-	}
-	return nbytes;
+	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	do {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
+
+		ret = crypt_s390_kmc(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+
+out:
+	return ret;
 }
 
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-static struct crypto_alg aes_alg = {
-	.cra_name = "aes",
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+}
+
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "cbc-aes-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct s390_aes_ctx),
+	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
 	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = AES_MIN_KEY_SIZE,
-			.cia_max_keysize = AES_MAX_KEY_SIZE,
-			.cia_setkey = aes_set_key,
-			.cia_encrypt = aes_encrypt,
-			.cia_decrypt = aes_decrypt,
-			.cia_encrypt_ecb = aes_encrypt_ecb,
-			.cia_decrypt_ecb = aes_decrypt_ecb,
-			.cia_encrypt_cbc = aes_encrypt_cbc,
-			.cia_decrypt_cbc = aes_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = cbc_aes_set_key,
+			.encrypt = cbc_aes_encrypt,
+			.decrypt = cbc_aes_decrypt,
 		}
 	}
 };
@@ -256,13 +332,40 @@ static int __init aes_init(void)
 		return -ENOSYS;
 
 	ret = crypto_register_alg(&aes_alg);
-	if (ret != 0)
-		printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
+	if (ret != 0) {
+		printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
+		goto aes_err;
+	}
+
+	ret = crypto_register_alg(&ecb_aes_alg);
+	if (ret != 0) {
+		printk(KERN_INFO
+		       "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
+		goto ecb_aes_err;
+	}
+
+	ret = crypto_register_alg(&cbc_aes_alg);
+	if (ret != 0) {
+		printk(KERN_INFO
+		       "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
+		goto cbc_aes_err;
+	}
+
+out:
 	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	goto out;
 }
 
 static void __exit aes_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
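With the cia_encrypt_ecb/_cbc hooks gone, ECB and CBC are now exposed as separate "ecb(aes)" / "cbc(aes)" blkcipher algorithms. For orientation only — a sketch assuming the 2.6.19-era blkcipher interface, not code from this patch — a caller would reach the CBC mode roughly like this:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical helper: encrypt nbytes already mapped in scatterlists. */
static int cbc_aes_encrypt_example(const u8 *key, unsigned int keylen, u8 *iv,
				   struct scatterlist *dst,
				   struct scatterlist *src,
				   unsigned int nbytes)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int ret;

	/* Resolves to cbc-aes-s390 when the CPACF instructions are present,
	   because of its higher cra_priority. */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;
	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (!ret) {
		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
		ret = crypto_blkcipher_encrypt(&desc, dst, src, nbytes);
	}
	crypto_free_blkcipher(tfm);
	return ret;
}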
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index d1c259a7fe33..efd836c2e4a6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -20,6 +20,9 @@
 #define CRYPT_S390_OP_MASK	0xFF00
 #define CRYPT_S390_FUNC_MASK	0x00FF
 
+#define CRYPT_S390_PRIORITY 300
+#define CRYPT_S390_COMPOSITE_PRIORITY 400
+
 /* s930 cryptographic operations */
 enum crypt_s390_operations {
 	CRYPT_S390_KM = 0x0100,
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b3f7496a79b4..2aba04852fe3 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -13,9 +13,10 @@
  * (at your option) any later version.
  *
  */
+
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #include "crypt_s390.h"
 #include "crypto_des.h"
@@ -45,9 +46,10 @@ struct crypt_s390_des3_192_ctx {
 };
 
 static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
-		      unsigned int keylen, u32 *flags)
+		      unsigned int keylen)
 {
 	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
 	int ret;
 
 	/* test if key is valid (not a weak key) */
@@ -71,85 +73,159 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
 }
 
-static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg des_alg = {
+	.cra_name = "des",
+	.cra_driver_name = "des-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = DES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = DES_KEY_SIZE,
+			.cia_max_keysize = DES_KEY_SIZE,
+			.cia_setkey = des_setkey,
+			.cia_encrypt = des_encrypt,
+			.cia_decrypt = des_decrypt,
+		}
+	}
+};
+
+static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+			    void *param, struct blkcipher_walk *walk)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes;
+
+	while ((nbytes = walk->nbytes)) {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
 
-	return nbytes;
+		nbytes &= DES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	}
+
+	return ret;
 }
 
-static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+			    void *param, struct blkcipher_walk *walk)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
+
+	if (!nbytes)
+		goto out;
+
+	memcpy(param, walk->iv, DES_BLOCK_SIZE);
+	do {
+		/* only use complete blocks */
+		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+		u8 *out = walk->dst.virt.addr;
+		u8 *in = walk->src.virt.addr;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+		ret = crypt_s390_kmc(func, param, out, in, n);
+		BUG_ON((ret < 0) || (ret != n));
 
-	return nbytes;
+		nbytes &= DES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+	memcpy(walk->iv, param, DES_BLOCK_SIZE);
+
+out:
+	return ret;
 }
 
-static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_des_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
+}
 
-	memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg ecb_des_alg = {
+	.cra_name = "ecb(des)",
+	.cra_driver_name = "ecb-des-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = DES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.setkey = des_setkey,
+			.encrypt = ecb_des_encrypt,
+			.decrypt = ecb_des_decrypt,
+		}
+	}
+};
+
+static int cbc_des_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES_BLOCK_SIZE - 1);
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
+}
 
-	memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des_alg = {
-	.cra_name = "des",
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des_alg = {
+	.cra_name = "cbc(des)",
+	.cra_driver_name = "cbc-des-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
 	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = DES_KEY_SIZE,
-			.cia_max_keysize = DES_KEY_SIZE,
-			.cia_setkey = des_setkey,
-			.cia_encrypt = des_encrypt,
-			.cia_decrypt = des_decrypt,
-			.cia_encrypt_ecb = des_encrypt_ecb,
-			.cia_decrypt_ecb = des_decrypt_ecb,
-			.cia_encrypt_cbc = des_encrypt_cbc,
-			.cia_decrypt_cbc = des_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			.setkey = des_setkey,
+			.encrypt = cbc_des_encrypt,
+			.decrypt = cbc_des_decrypt,
 		}
 	}
 };
@@ -167,11 +243,12 @@ static struct crypto_alg des_alg = {
  *
  */
 static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen, u32 *flags)
+			   unsigned int keylen)
 {
 	int i, ret;
 	struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8* temp_key = key;
+	const u8 *temp_key = key;
+	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
@@ -202,89 +279,111 @@ static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 			DES3_128_BLOCK_SIZE);
 }
 
-static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+static struct crypto_alg des3_128_alg = {
+	.cra_name = "des3_ede128",
+	.cra_driver_name = "des3_ede128-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = DES3_128_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = DES3_128_KEY_SIZE,
+			.cia_max_keysize = DES3_128_KEY_SIZE,
+			.cia_setkey = des3_128_setkey,
+			.cia_encrypt = des3_128_encrypt,
+			.cia_decrypt = des3_128_decrypt,
+		}
+	}
+};
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des3_128_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int ecb_des3_128_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
-
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
+static struct crypto_alg ecb_des3_128_alg = {
+	.cra_name = "ecb(des3_ede128)",
+	.cra_driver_name = "ecb-des3_ede128-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = DES3_128_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(
+				ecb_des3_128_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = DES3_128_KEY_SIZE,
+			.max_keysize = DES3_128_KEY_SIZE,
+			.setkey = des3_128_setkey,
+			.encrypt = ecb_des3_128_encrypt,
+			.decrypt = ecb_des3_128_decrypt,
+		}
+	}
+};
 
-	memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des3_128_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk);
 }
 
-static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int cbc_des3_128_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
-
-	memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+	struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des3_128_alg = {
-	.cra_name = "des3_ede128",
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des3_128_alg = {
+	.cra_name = "cbc(des3_ede128)",
+	.cra_driver_name = "cbc-des3_ede128-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES3_128_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
+	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(
+				cbc_des3_128_alg.cra_list),
 	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = DES3_128_KEY_SIZE,
-			.cia_max_keysize = DES3_128_KEY_SIZE,
-			.cia_setkey = des3_128_setkey,
-			.cia_encrypt = des3_128_encrypt,
-			.cia_decrypt = des3_128_decrypt,
-			.cia_encrypt_ecb = des3_128_encrypt_ecb,
-			.cia_decrypt_ecb = des3_128_decrypt_ecb,
-			.cia_encrypt_cbc = des3_128_encrypt_cbc,
-			.cia_decrypt_cbc = des3_128_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize = DES3_128_KEY_SIZE,
+			.max_keysize = DES3_128_KEY_SIZE,
+			.ivsize = DES3_128_BLOCK_SIZE,
+			.setkey = des3_128_setkey,
+			.encrypt = cbc_des3_128_encrypt,
+			.decrypt = cbc_des3_128_decrypt,
 		}
 	}
 };
@@ -303,11 +402,12 @@ static struct crypto_alg des3_128_alg = {
  *
  */
 static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen, u32 *flags)
+			   unsigned int keylen)
 {
 	int i, ret;
 	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u8* temp_key = key;
+	const u8 *temp_key = key;
+	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
 	    memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
@@ -341,89 +441,111 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 			DES3_192_BLOCK_SIZE);
 }
 
-static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+static struct crypto_alg des3_192_alg = {
+	.cra_name = "des3_ede",
+	.cra_driver_name = "des3_ede-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = DES3_192_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = DES3_192_KEY_SIZE,
+			.cia_max_keysize = DES3_192_KEY_SIZE,
+			.cia_setkey = des3_192_setkey,
+			.cia_encrypt = des3_192_encrypt,
+			.cia_decrypt = des3_192_decrypt,
+		}
+	}
+};
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-	ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
 }
 
-static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
-{
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
-
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
+static struct crypto_alg ecb_des3_192_alg = {
+	.cra_name = "ecb(des3_ede)",
+	.cra_driver_name = "ecb-des3_ede-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = DES3_192_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(
+				ecb_des3_192_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = DES3_192_KEY_SIZE,
+			.max_keysize = DES3_192_KEY_SIZE,
+			.setkey = des3_192_setkey,
+			.encrypt = ecb_des3_192_encrypt,
+			.decrypt = ecb_des3_192_decrypt,
+		}
+	}
+};
 
-	memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
+static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE);
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
 }
 
-static unsigned int des3_192_decrypt_cbc(const struct cipher_desc *desc,
-					 u8 *out, const u8 *in,
-					 unsigned int nbytes)
+static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
+				struct scatterlist *dst,
+				struct scatterlist *src, unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
-	int ret;
+	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
 
-	/* only use complete blocks */
-	nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
-
-	memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
-	ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes);
-	BUG_ON((ret < 0) || (ret != nbytes));
-
-	return nbytes;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
 }
 
-static struct crypto_alg des3_192_alg = {
-	.cra_name = "des3_ede",
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_des3_192_alg = {
+	.cra_name = "cbc(des3_ede)",
+	.cra_driver_name = "cbc-des3_ede-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES3_192_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(
+				cbc_des3_192_alg.cra_list),
 	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = DES3_192_KEY_SIZE,
-			.cia_max_keysize = DES3_192_KEY_SIZE,
-			.cia_setkey = des3_192_setkey,
-			.cia_encrypt = des3_192_encrypt,
-			.cia_decrypt = des3_192_decrypt,
-			.cia_encrypt_ecb = des3_192_encrypt_ecb,
-			.cia_decrypt_ecb = des3_192_decrypt_ecb,
-			.cia_encrypt_cbc = des3_192_encrypt_cbc,
-			.cia_decrypt_cbc = des3_192_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize = DES3_192_KEY_SIZE,
+			.max_keysize = DES3_192_KEY_SIZE,
+			.ivsize = DES3_192_BLOCK_SIZE,
+			.setkey = des3_192_setkey,
+			.encrypt = cbc_des3_192_encrypt,
+			.decrypt = cbc_des3_192_decrypt,
 		}
 	}
 };
@@ -437,22 +559,69 @@ static int init(void)
 	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
 		return -ENOSYS;
 
-	ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1;
-	ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2;
-	ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 0:4;
-	if (ret) {
-		crypto_unregister_alg(&des3_192_alg);
-		crypto_unregister_alg(&des3_128_alg);
-		crypto_unregister_alg(&des_alg);
-		return -EEXIST;
-	}
-	return 0;
+	ret = crypto_register_alg(&des_alg);
+	if (ret)
+		goto des_err;
+	ret = crypto_register_alg(&ecb_des_alg);
+	if (ret)
+		goto ecb_des_err;
+	ret = crypto_register_alg(&cbc_des_alg);
+	if (ret)
+		goto cbc_des_err;
+
+	ret = crypto_register_alg(&des3_128_alg);
+	if (ret)
+		goto des3_128_err;
+	ret = crypto_register_alg(&ecb_des3_128_alg);
+	if (ret)
+		goto ecb_des3_128_err;
+	ret = crypto_register_alg(&cbc_des3_128_alg);
+	if (ret)
+		goto cbc_des3_128_err;
+
+	ret = crypto_register_alg(&des3_192_alg);
+	if (ret)
+		goto des3_192_err;
+	ret = crypto_register_alg(&ecb_des3_192_alg);
+	if (ret)
+		goto ecb_des3_192_err;
+	ret = crypto_register_alg(&cbc_des3_192_alg);
+	if (ret)
+		goto cbc_des3_192_err;
+
+out:
+	return ret;
+
+cbc_des3_192_err:
+	crypto_unregister_alg(&ecb_des3_192_alg);
+ecb_des3_192_err:
+	crypto_unregister_alg(&des3_192_alg);
+des3_192_err:
+	crypto_unregister_alg(&cbc_des3_128_alg);
+cbc_des3_128_err:
+	crypto_unregister_alg(&ecb_des3_128_alg);
+ecb_des3_128_err:
+	crypto_unregister_alg(&des3_128_alg);
+des3_128_err:
+	crypto_unregister_alg(&cbc_des_alg);
+cbc_des_err:
+	crypto_unregister_alg(&ecb_des_alg);
+ecb_des_err:
+	crypto_unregister_alg(&des_alg);
+des_err:
+	goto out;
 }
 
 static void __exit fini(void)
 {
+	crypto_unregister_alg(&cbc_des3_192_alg);
+	crypto_unregister_alg(&ecb_des3_192_alg);
 	crypto_unregister_alg(&des3_192_alg);
+	crypto_unregister_alg(&cbc_des3_128_alg);
+	crypto_unregister_alg(&ecb_des3_128_alg);
 	crypto_unregister_alg(&des3_128_alg);
+	crypto_unregister_alg(&cbc_des_alg);
+	crypto_unregister_alg(&ecb_des_alg);
 	crypto_unregister_alg(&des_alg);
 }
 
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 9d34a35b1aa5..49ca8690ee39 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -126,6 +126,8 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
 
 static struct crypto_alg alg = {
 	.cra_name = "sha1",
+	.cra_driver_name = "sha1-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize = SHA1_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index f573df30f31d..8e4e67503fe7 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -127,6 +127,8 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
 
 static struct crypto_alg alg = {
 	.cra_name = "sha256",
+	.cra_driver_name = "sha256-s390",
+	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize = SHA256_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct s390_sha256_ctx),
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f1d4591eddbb..35da53986b1b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -428,6 +428,7 @@ CONFIG_S390_TAPE_34XX=m
 # CONFIG_VMLOGRDR is not set
 # CONFIG_VMCP is not set
 # CONFIG_MONREADER is not set
+CONFIG_MONWRITER=m
 
 #
 # Cryptographic devices
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index ea5567be00fc..f3dbd91965c6 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -1,5 +1,5 @@
 /*
- *  fs/hypfs/hypfs.h
+ *  arch/s390/hypfs/hypfs.h
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 1785bce2b919..75144efbb92b 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -1,5 +1,5 @@
 /*
- *  fs/hypfs/hypfs_diag.c
+ *  arch/s390/hypfs/hypfs_diag.c
  *    Hypervisor filesystem for Linux on s390. Diag 204 and 224
  *    implementation.
  *
@@ -432,12 +432,14 @@ static int diag204_probe(void)
 
 	buf = diag204_get_buffer(INFO_EXT, &pages);
 	if (!IS_ERR(buf)) {
-		if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) {
+		if (diag204((unsigned long)SUBC_STIB7 |
+			    (unsigned long)INFO_EXT, pages, buf) >= 0) {
 			diag204_store_sc = SUBC_STIB7;
 			diag204_info_type = INFO_EXT;
 			goto out;
 		}
-		if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) {
+		if (diag204((unsigned long)SUBC_STIB6 |
+			    (unsigned long)INFO_EXT, pages, buf) >= 0) {
 			diag204_store_sc = SUBC_STIB7;
 			diag204_info_type = INFO_EXT;
 			goto out;
@@ -452,7 +454,8 @@ static int diag204_probe(void)
 		rc = PTR_ERR(buf);
 		goto fail_alloc;
 	}
-	if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) {
+	if (diag204((unsigned long)SUBC_STIB4 |
+		    (unsigned long)INFO_SIMPLE, pages, buf) >= 0) {
 		diag204_store_sc = SUBC_STIB4;
 		diag204_info_type = INFO_SIMPLE;
 		goto out;
@@ -476,7 +479,8 @@ static void *diag204_store(void)
 	buf = diag204_get_buffer(diag204_info_type, &pages);
 	if (IS_ERR(buf))
 		goto out;
-	if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0)
+	if (diag204((unsigned long)diag204_store_sc |
+		    (unsigned long)diag204_info_type, pages, buf) < 0)
 		return ERR_PTR(-ENOSYS);
 out:
 	return buf;
@@ -531,7 +535,7 @@ __init int hypfs_diag_init(void)
 	return rc;
 }
 
-__exit void hypfs_diag_exit(void)
+void hypfs_diag_exit(void)
 {
 	diag224_delete_name_table();
 	diag204_free_buffer();
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
index 793dea6b9bb6..256b384aebe1 100644
--- a/arch/s390/hypfs/hypfs_diag.h
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -1,5 +1,5 @@
 /*
- *  fs/hypfs/hypfs_diag.h
+ *  arch/s390/hypfs_diag.h
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 18c091925ea5..bdade5f2e325 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -1,5 +1,5 @@
 /*
- *  fs/hypfs/inode.c
+ *  arch/s390/hypfs/inode.c
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright (C) IBM Corp. 2006
@@ -312,10 +312,12 @@ static void hypfs_kill_super(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
 
-	hypfs_delete_tree(sb->s_root);
-	hypfs_remove(sb_info->update_file);
-	kfree(sb->s_fs_info);
-	sb->s_fs_info = NULL;
+	if (sb->s_root) {
+		hypfs_delete_tree(sb->s_root);
+		hypfs_remove(sb_info->update_file);
+		kfree(sb->s_fs_info);
+		sb->s_fs_info = NULL;
+	}
 	kill_litter_super(sb);
 }
 
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9a33ed6ca696..aa978978d3d1 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
 
 obj-y	:=  bitmap.o traps.o time.o process.o \
             setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-            semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
+            semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
 
 obj-y	+=  $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+=  $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -24,6 +24,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5b5799ac8f83..0c712b78a7e8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -505,6 +505,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
@@ -536,6 +538,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 56f5f613b868..29bbfbab7332 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -518,6 +518,8 @@ pgm_no_vtime2:
518#endif 518#endif
519 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 519 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
520 lg %r1,__TI_task(%r9) 520 lg %r1,__TI_task(%r9)
521 tm SP_PSW+1(%r15),0x01 # kernel per event ?
522 jz kernel_per
521 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 523 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
522 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 524 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
523 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 525 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -553,6 +555,16 @@ pgm_no_vtime3:
553 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
554 j sysc_do_svc 556 j sysc_do_svc
555 557
558#
559# per was called from kernel, must be kprobes
560#
561kernel_per:
562 lhi %r0,__LC_PGM_OLD_PSW
563 sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
564 la %r2,SP_PTREGS(%r15) # address of register-save area
565 larl %r14,sysc_leave # load adr. of system ret, no work
566 jg do_single_step # branch to do_single_step
567
556/* 568/*
557 * IO interrupt handler routine 569 * IO interrupt handler routine
558 */ 570 */
@@ -815,7 +827,7 @@ restart_go:
815 */ 827 */
816stack_overflow: 828stack_overflow:
817 lg %r15,__LC_PANIC_STACK # change to panic stack 829 lg %r15,__LC_PANIC_STACK # change to panic stack
818 aghi %r1,-SP_SIZE 830 aghi %r15,-SP_SIZE
819 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 831 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
820 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 832 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
821 la %r1,__LC_SAVE_AREA 833 la %r1,__LC_SAVE_AREA
@@ -823,7 +835,7 @@ stack_overflow:
823 je 0f 835 je 0f
824 chi %r12,__LC_PGM_OLD_PSW 836 chi %r12,__LC_PGM_OLD_PSW
825 je 0f 837 je 0f
826 la %r1,__LC_SAVE_AREA+16 838 la %r1,__LC_SAVE_AREA+32
8270: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack 8390: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
828 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain 840 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
829 la %r2,SP_PTREGS(%r15) # load pt_regs 841 la %r2,SP_PTREGS(%r15) # load pt_regs
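Both the 31-bit and the 64-bit stub route a PER event taken in kernel mode to do_single_step() with the saved pt_regs as argument, which is how a kprobes single step gets completed. A compressed sketch of that C side follows; the real do_single_step() lives in arch/s390/kernel/traps.c and differs in detail, so treat the body below only as an illustration of the die-notifier hand-off (DIE_SSTEP is consumed by kprobe_exceptions_notify() added later in this patch):

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>

void do_single_step(struct pt_regs *regs)
{
        /* Give kprobes (and other die-chain users) the first shot. */
        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
                return;
        /* Otherwise treat it as an ordinary single-step trap for ptrace. */
        if (current->ptrace & PT_PTRACED)
                force_sig(SIGTRAP, current);
}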
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index adad8863ee2f..0f1db268a8a9 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -272,7 +272,7 @@ iplstart:
272# load parameter file from ipl device 272# load parameter file from ipl device
273# 273#
274.Lagain1: 274.Lagain1:
275 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # ramdisk loc. is temp 275 l %r2,.Linitrd # ramdisk loc. is temp
276 bas %r14,.Lloader # load parameter file 276 bas %r14,.Lloader # load parameter file
277 ltr %r2,%r2 # got anything ? 277 ltr %r2,%r2 # got anything ?
278 bz .Lnopf 278 bz .Lnopf
@@ -280,7 +280,7 @@ iplstart:
280 bnh .Lnotrunc 280 bnh .Lnotrunc
281 la %r2,895 281 la %r2,895
282.Lnotrunc: 282.Lnotrunc:
283 l %r4,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 283 l %r4,.Linitrd
284 clc 0(3,%r4),.L_hdr # if it is HDRx 284 clc 0(3,%r4),.L_hdr # if it is HDRx
285 bz .Lagain1 # skip dataset header 285 bz .Lagain1 # skip dataset header
286 clc 0(3,%r4),.L_eof # if it is EOFx 286 clc 0(3,%r4),.L_eof # if it is EOFx
@@ -323,14 +323,15 @@ iplstart:
323# load ramdisk from ipl device 323# load ramdisk from ipl device
324# 324#
325.Lagain2: 325.Lagain2:
326 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # addr of ramdisk 326 l %r2,.Linitrd # addr of ramdisk
327 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
327 bas %r14,.Lloader # load ramdisk 328 bas %r14,.Lloader # load ramdisk
328 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk 329 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
329 ltr %r2,%r2 330 ltr %r2,%r2
330 bnz .Lrdcont 331 bnz .Lrdcont
331 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found 332 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
332.Lrdcont: 333.Lrdcont:
333 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 334 l %r2,.Linitrd
334 335
335 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx 336 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
336 bz .Lagain2 337 bz .Lagain2
@@ -379,6 +380,7 @@ iplstart:
379 l %r1,.Lstartup 380 l %r1,.Lstartup
380 br %r1 381 br %r1
381 382
383.Linitrd:.long _end + 0x400000 # default address of initrd
382.Lparm: .long PARMAREA 384.Lparm: .long PARMAREA
383.Lstartup: .long startup 385.Lstartup: .long startup
384.Lcvtab:.long _ebcasc # ebcdic to ascii table 386.Lcvtab:.long _ebcasc # ebcdic to ascii table
@@ -479,65 +481,6 @@ start:
479 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 481 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
480 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff 482 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
481 483
482.macro GET_IPL_DEVICE
483.Lget_ipl_device:
484 l %r1,0xb8 # get sid
485 sll %r1,15 # test if subchannel is enabled
486 srl %r1,31
487 ltr %r1,%r1
488 bz 2f-.LPG1(%r13) # subchannel disabled
489 l %r1,0xb8
490 la %r5,.Lipl_schib-.LPG1(%r13)
491 stsch 0(%r5) # get schib of subchannel
492 bnz 2f-.LPG1(%r13) # schib not available
493 tm 5(%r5),0x01 # devno valid?
494 bno 2f-.LPG1(%r13)
495 la %r6,ipl_parameter_flags-.LPG1(%r13)
496 oi 3(%r6),0x01 # set flag
497 la %r2,ipl_devno-.LPG1(%r13)
498 mvc 0(2,%r2),6(%r5) # store devno
499 tm 4(%r5),0x80 # qdio capable device?
500 bno 2f-.LPG1(%r13)
501 oi 3(%r6),0x02 # set flag
502
503 # copy ipl parameters
504
505 lhi %r0,4096
506 l %r2,20(%r0) # get address of parameter list
507 lhi %r3,IPL_PARMBLOCK_ORIGIN
508 st %r3,20(%r0)
509 lhi %r4,1
510 cr %r2,%r3 # start parameters < destination ?
511 jl 0f
512 lhi %r1,1 # copy direction is upwards
513 j 1f
5140: lhi %r1,-1 # copy direction is downwards
515 ar %r2,%r0
516 ar %r3,%r0
517 ar %r2,%r1
518 ar %r3,%r1
5191: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
520 ar %r3,%r1
521 ar %r2,%r1
522 sr %r0,%r4
523 jne 1b
524 b 2f-.LPG1(%r13)
525
526 .align 4
527.Lipl_schib:
528 .rept 13
529 .long 0
530 .endr
531
532 .globl ipl_parameter_flags
533ipl_parameter_flags:
534 .long 0
535 .globl ipl_devno
536ipl_devno:
537 .word 0
5382:
539.endm
540
541#ifdef CONFIG_64BIT 484#ifdef CONFIG_64BIT
542#include "head64.S" 485#include "head64.S"
543#else 486#else
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index a4dc61f3285e..1fa9fa1ca740 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
26# 26#
27 .org PARMAREA 27 .org PARMAREA
28 .long 0,0 # IPL_DEVICE 28 .long 0,0 # IPL_DEVICE
29 .long 0,RAMDISK_ORIGIN # INITRD_START 29 .long 0,0 # INITRD_START
30 .long 0,RAMDISK_SIZE # INITRD_SIZE 30 .long 0,0 # INITRD_SIZE
31 31
32 .org COMMAND_LINE 32 .org COMMAND_LINE
33 .byte "root=/dev/ram0 ro" 33 .byte "root=/dev/ram0 ro"
@@ -37,12 +37,23 @@ startup:basr %r13,0 # get base
37 37
38startup_continue: 38startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: GET_IPL_DEVICE 40.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
41 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 41 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
42 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 42 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
43 # move IPL device to lowcore 43 # move IPL device to lowcore
44 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) 44 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
45#
46# Setup stack
47#
48 l %r15,.Linittu-.LPG1(%r13)
49 mvc __LC_CURRENT(4),__TI_task(%r15)
50 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
45 54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
46# 57#
47# clear bss memory 58# clear bss memory
48# 59#
@@ -114,6 +125,10 @@ startup_continue:
114 b .Lfchunk-.LPG1(%r13) 125 b .Lfchunk-.LPG1(%r13)
115 126
116 .align 4 127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
117.Lpmask: 132.Lpmask:
118 .byte 0 133 .byte 0
119.align 8 134.align 8
@@ -273,7 +288,23 @@ startup_continue:
273.Lbss_end: .long _end 288.Lbss_end: .long _end
274.Lparmaddr: .long PARMAREA 289.Lparmaddr: .long PARMAREA
275.Lsccbaddr: .long .Lsccb 290.Lsccbaddr: .long .Lsccb
291
292 .globl ipl_schib
293ipl_schib:
294 .rept 13
295 .long 0
296 .endr
297
298 .globl ipl_flags
299ipl_flags:
300 .long 0
301 .globl ipl_devno
302ipl_devno:
303 .word 0
304
276 .org 0x12000 305 .org 0x12000
306.globl s390_readinfo_sccb
307s390_readinfo_sccb:
277.Lsccb: 308.Lsccb:
278 .hword 0x1000 # length, one page 309 .hword 0x1000 # length, one page
279 .byte 0x00,0x00,0x00 310 .byte 0x00,0x00,0x00
@@ -302,16 +333,6 @@ startup_continue:
302 .globl _stext 333 .globl _stext
303_stext: basr %r13,0 # get base 334_stext: basr %r13,0 # get base
304.LPG3: 335.LPG3:
305#
306# Setup stack
307#
308 l %r15,.Linittu-.LPG3(%r13)
309 mvc __LC_CURRENT(4),__TI_task(%r15)
310 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
311 st %r15,__LC_KERNEL_STACK # set end of kernel stack
312 ahi %r15,-96
313 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
314
315# check control registers 336# check control registers
316 stctl %c0,%c15,0(%r15) 337 stctl %c0,%c15,0(%r15)
317 oi 2(%r15),0x40 # enable sigp emergency signal 338 oi 2(%r15),0x40 # enable sigp emergency signal
@@ -330,6 +351,5 @@ _stext: basr %r13,0 # get base
330# 351#
331 .align 8 352 .align 8
332.Ldw: .long 0x000a0000,0x00000000 353.Ldw: .long 0x000a0000,0x00000000
333.Linittu:.long init_thread_union
334.Lstart:.long start_kernel 354.Lstart:.long start_kernel
335.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 355.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 9d80c5b1ef95..a8bdd96494c7 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
26# 26#
27 .org PARMAREA 27 .org PARMAREA
28 .quad 0 # IPL_DEVICE 28 .quad 0 # IPL_DEVICE
29 .quad RAMDISK_ORIGIN # INITRD_START 29 .quad 0 # INITRD_START
30 .quad RAMDISK_SIZE # INITRD_SIZE 30 .quad 0 # INITRD_SIZE
31 31
32 .org COMMAND_LINE 32 .org COMMAND_LINE
33 .byte "root=/dev/ram0 ro" 33 .byte "root=/dev/ram0 ro"
@@ -39,8 +39,8 @@ startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: sll %r13,1 # remove high order bit 40.LPG1: sll %r13,1 # remove high order bit
41 srl %r13,1 41 srl %r13,1
42 GET_IPL_DEVICE
43 lhi %r1,1 # mode 1 = esame 42 lhi %r1,1 # mode 1 = esame
43 mvi __LC_AR_MODE_ID,1 # set esame flag
44 slr %r0,%r0 # set cpuid to zero 44 slr %r0,%r0 # set cpuid to zero
45 sigp %r1,%r0,0x12 # switch to esame mode 45 sigp %r1,%r0,0x12 # switch to esame mode
46 sam64 # switch to 64 bit mode 46 sam64 # switch to 64 bit mode
@@ -48,7 +48,18 @@ startup_continue:
48 lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area 48 lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
49 # move IPL device to lowcore 49 # move IPL device to lowcore
50 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) 50 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
51#
52# Setup stack
53#
54 larl %r15,init_thread_union
55 lg %r14,__TI_task(%r15) # cache current in lowcore
56 stg %r14,__LC_CURRENT
57 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
51 61
62 brasl %r14,ipl_save_parameters
52# 63#
53# clear bss memory 64# clear bss memory
54# 65#
@@ -239,6 +250,19 @@ startup_continue:
239 oi 7(%r12),0x80 # set IDTE flag 250 oi 7(%r12),0x80 # set IDTE flag
2400: 2510:
241 252
253#
254# find out if we have the MVCOS instruction
255#
256 la %r1,0f-.LPG1(%r13) # set program check address
257 stg %r1,__LC_PGM_NEW_PSW+8
258 .short 0xc800 # mvcos 0(%r0),0(%r0),%r0
259 .short 0x0000
260 .short 0x0000
2610: tm 0x8f,0x13 # special-operation exception?
262 bno 1f-.LPG1(%r13) # if yes, MVCOS is present
263 oi 6(%r12),2 # set MVCOS flag
2641:
265
242 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, 266 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
243 # virtual and never return ... 267 # virtual and never return ...
244 .align 16 268 .align 16
@@ -268,7 +292,22 @@ startup_continue:
268.Lparmaddr: 292.Lparmaddr:
269 .quad PARMAREA 293 .quad PARMAREA
270 294
295 .globl ipl_schib
296ipl_schib:
297 .rept 13
298 .long 0
299 .endr
300
301 .globl ipl_flags
302ipl_flags:
303 .long 0
304 .globl ipl_devno
305ipl_devno:
306 .word 0
307
271 .org 0x12000 308 .org 0x12000
309.globl s390_readinfo_sccb
310s390_readinfo_sccb:
272.Lsccb: 311.Lsccb:
273 .hword 0x1000 # length, one page 312 .hword 0x1000 # length, one page
274 .byte 0x00,0x00,0x00 313 .byte 0x00,0x00,0x00
@@ -297,24 +336,12 @@ startup_continue:
297 .globl _stext 336 .globl _stext
298_stext: basr %r13,0 # get base 337_stext: basr %r13,0 # get base
299.LPG3: 338.LPG3:
300#
301# Setup stack
302#
303 larl %r15,init_thread_union
304 lg %r14,__TI_task(%r15) # cache current in lowcore
305 stg %r14,__LC_CURRENT
306 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
307 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
308 aghi %r15,-160
309 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
310
311# check control registers 339# check control registers
312 stctg %c0,%c15,0(%r15) 340 stctg %c0,%c15,0(%r15)
313 oi 6(%r15),0x40 # enable sigp emergency signal 341 oi 6(%r15),0x40 # enable sigp emergency signal
314 oi 4(%r15),0x10 # switch on low address protection 342 oi 4(%r15),0x10 # switch on low address protection
315 lctlg %c0,%c15,0(%r15) 343 lctlg %c0,%c15,0(%r15)
316 344
317#
318 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess 345 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
319 brasl %r14,start_kernel # go to C code 346 brasl %r14,start_kernel # go to C code
320# 347#
@@ -322,7 +349,7 @@ _stext: basr %r13,0 # get base
322# 349#
323 basr %r13,0 350 basr %r13,0
324 lpswe .Ldw-.(%r13) # load disabled wait psw 351 lpswe .Ldw-.(%r13) # load disabled wait psw
325# 352
326 .align 8 353 .align 8
327.Ldw: .quad 0x0002000180000000,0x0000000000000000 354.Ldw: .quad 0x0002000180000000,0x0000000000000000
328.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 355.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
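The MVCOS probe above only records a machine flag; nothing in this file acts on it yet. As an illustration of how such a boot-time flag can later pick an MVCOS-based user-copy implementation over the standard one, here is a hedged sketch; the uaccess_ops layout, the variant names and the flag test are assumptions made for the example, not an interface this patch defines:

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/init.h>

/* Assumed operations vector; a real one would carry more entry points. */
struct uaccess_ops {
        size_t (*copy_from_user)(size_t n, const void __user *from, void *to);
        size_t (*copy_to_user)(size_t n, void __user *to, const void *from);
};

extern struct uaccess_ops uaccess_mvcos;        /* MVCOS-based variant (assumed) */
extern struct uaccess_ops uaccess_std;          /* standard fallback (assumed) */

struct uaccess_ops uaccess;

static void __init select_uaccess(unsigned long machine_flags)
{
        /* Flag value 2 mirrors the "oi 6(%r12),2" above (layout assumed). */
        if (machine_flags & 2)
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
}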
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
new file mode 100644
index 000000000000..6555cc48e28f
--- /dev/null
+++ b/arch/s390/kernel/ipl.c
@@ -0,0 +1,942 @@
1/*
2 * arch/s390/kernel/ipl.c
3 * ipl/reipl/dump support for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2005,2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Volker Sameske <sameske@de.ibm.com>
9 */
10
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/delay.h>
15#include <linux/reboot.h>
16#include <asm/smp.h>
17#include <asm/setup.h>
18#include <asm/cpcmd.h>
19#include <asm/cio.h>
20
21#define IPL_PARM_BLOCK_VERSION 0
22
23enum ipl_type {
24 IPL_TYPE_NONE = 1,
25 IPL_TYPE_UNKNOWN = 2,
26 IPL_TYPE_CCW = 4,
27 IPL_TYPE_FCP = 8,
28};
29
30#define IPL_NONE_STR "none"
31#define IPL_UNKNOWN_STR "unknown"
32#define IPL_CCW_STR "ccw"
33#define IPL_FCP_STR "fcp"
34
35static char *ipl_type_str(enum ipl_type type)
36{
37 switch (type) {
38 case IPL_TYPE_NONE:
39 return IPL_NONE_STR;
40 case IPL_TYPE_CCW:
41 return IPL_CCW_STR;
42 case IPL_TYPE_FCP:
43 return IPL_FCP_STR;
44 case IPL_TYPE_UNKNOWN:
45 default:
46 return IPL_UNKNOWN_STR;
47 }
48}
49
50enum ipl_method {
51 IPL_METHOD_NONE,
52 IPL_METHOD_CCW_CIO,
53 IPL_METHOD_CCW_DIAG,
54 IPL_METHOD_CCW_VM,
55 IPL_METHOD_FCP_RO_DIAG,
56 IPL_METHOD_FCP_RW_DIAG,
57 IPL_METHOD_FCP_RO_VM,
58};
59
60enum shutdown_action {
61 SHUTDOWN_REIPL,
62 SHUTDOWN_DUMP,
63 SHUTDOWN_STOP,
64};
65
66#define SHUTDOWN_REIPL_STR "reipl"
67#define SHUTDOWN_DUMP_STR "dump"
68#define SHUTDOWN_STOP_STR "stop"
69
70static char *shutdown_action_str(enum shutdown_action action)
71{
72 switch (action) {
73 case SHUTDOWN_REIPL:
74 return SHUTDOWN_REIPL_STR;
75 case SHUTDOWN_DUMP:
76 return SHUTDOWN_DUMP_STR;
77 case SHUTDOWN_STOP:
78 return SHUTDOWN_STOP_STR;
79 default:
80 BUG();
81 }
82}
83
84enum diag308_subcode {
85 DIAG308_IPL = 3,
86 DIAG308_DUMP = 4,
87 DIAG308_SET = 5,
88 DIAG308_STORE = 6,
89};
90
91enum diag308_ipl_type {
92 DIAG308_IPL_TYPE_FCP = 0,
93 DIAG308_IPL_TYPE_CCW = 2,
94};
95
96enum diag308_opt {
97 DIAG308_IPL_OPT_IPL = 0x10,
98 DIAG308_IPL_OPT_DUMP = 0x20,
99};
100
101enum diag308_rc {
102 DIAG308_RC_OK = 1,
103};
104
105static int diag308_set_works = 0;
106
107static int reipl_capabilities = IPL_TYPE_UNKNOWN;
108static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
109static enum ipl_method reipl_method = IPL_METHOD_NONE;
110static struct ipl_parameter_block *reipl_block_fcp;
111static struct ipl_parameter_block *reipl_block_ccw;
112
113static int dump_capabilities = IPL_TYPE_NONE;
114static enum ipl_type dump_type = IPL_TYPE_NONE;
115static enum ipl_method dump_method = IPL_METHOD_NONE;
116static struct ipl_parameter_block *dump_block_fcp;
117static struct ipl_parameter_block *dump_block_ccw;
118
119static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
120
121static int diag308(unsigned long subcode, void *addr)
122{
123 register unsigned long _addr asm("0") = (unsigned long)addr;
124 register unsigned long _rc asm("1") = 0;
125
126 asm volatile (
127 " diag %0,%2,0x308\n"
128 "0: \n"
129 ".section __ex_table,\"a\"\n"
130#ifdef CONFIG_64BIT
131 " .align 8\n"
132 " .quad 0b, 0b\n"
133#else
134 " .align 4\n"
135 " .long 0b, 0b\n"
136#endif
137 ".previous\n"
138 : "+d" (_addr), "+d" (_rc)
139 : "d" (subcode) : "cc", "memory" );
140
141 return _rc;
142}
143
144/* SYSFS */
145
146#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
147static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
148 char *page) \
149{ \
150 return sprintf(page, _format, _value); \
151} \
152static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
153 __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
154
155#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
156static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
157 char *page) \
158{ \
159 return sprintf(page, _fmt_out, \
160 (unsigned long long) _value); \
161} \
162static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
163 const char *buf, size_t len) \
164{ \
165 unsigned long long value; \
166 if (sscanf(buf, _fmt_in, &value) != 1) \
167 return -EINVAL; \
168 _value = value; \
169 return len; \
170} \
171static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
172 __ATTR(_name,(S_IRUGO | S_IWUSR), \
173 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store);
175
176static void make_attrs_ro(struct attribute **attrs)
177{
178 while (*attrs) {
179 (*attrs)->mode = S_IRUGO;
180 attrs++;
181 }
182}
183
184/*
185 * ipl section
186 */
187
188static enum ipl_type ipl_get_type(void)
189{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191
192 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
195 return IPL_TYPE_CCW;
196 if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION)
197 return IPL_TYPE_UNKNOWN;
198 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
199 return IPL_TYPE_UNKNOWN;
200 return IPL_TYPE_FCP;
201}
202
203static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
204{
205 return sprintf(page, "%s\n", ipl_type_str(ipl_get_type()));
206}
207
208static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
209
210static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
211{
212 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
213
214 switch (ipl_get_type()) {
215 case IPL_TYPE_CCW:
216 return sprintf(page, "0.0.%04x\n", ipl_devno);
217 case IPL_TYPE_FCP:
218 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
219 default:
220 return 0;
221 }
222}
223
224static struct subsys_attribute sys_ipl_device_attr =
225 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
226
227static ssize_t ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off,
228 size_t count)
229{
230 unsigned int size = IPL_PARMBLOCK_SIZE;
231
232 if (off > size)
233 return 0;
234 if (off + count > size)
235 count = size - off;
236 memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
237 return count;
238}
239
240static struct bin_attribute ipl_parameter_attr = {
241 .attr = {
242 .name = "binary_parameter",
243 .mode = S_IRUGO,
244 .owner = THIS_MODULE,
245 },
246 .size = PAGE_SIZE,
247 .read = &ipl_parameter_read,
248};
249
250static ssize_t ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off,
251 size_t count)
252{
253 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
254 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
255
256 if (off > size)
257 return 0;
258 if (off + count > size)
259 count = size - off;
260 memcpy(buf, scp_data + off, count);
261 return count;
262}
263
264static struct bin_attribute ipl_scp_data_attr = {
265 .attr = {
266 .name = "scp_data",
267 .mode = S_IRUGO,
268 .owner = THIS_MODULE,
269 },
270 .size = PAGE_SIZE,
271 .read = &ipl_scp_data_read,
272};
273
274/* FCP ipl device attributes */
275
276DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long)
277 IPL_PARMBLOCK_START->ipl_info.fcp.wwpn);
278DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long)
279 IPL_PARMBLOCK_START->ipl_info.fcp.lun);
280DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
281 IPL_PARMBLOCK_START->ipl_info.fcp.bootprog);
282DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
283 IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
284
285static struct attribute *ipl_fcp_attrs[] = {
286 &sys_ipl_type_attr.attr,
287 &sys_ipl_device_attr.attr,
288 &sys_ipl_fcp_wwpn_attr.attr,
289 &sys_ipl_fcp_lun_attr.attr,
290 &sys_ipl_fcp_bootprog_attr.attr,
291 &sys_ipl_fcp_br_lba_attr.attr,
292 NULL,
293};
294
295static struct attribute_group ipl_fcp_attr_group = {
296 .attrs = ipl_fcp_attrs,
297};
298
299/* CCW ipl device attributes */
300
301static struct attribute *ipl_ccw_attrs[] = {
302 &sys_ipl_type_attr.attr,
303 &sys_ipl_device_attr.attr,
304 NULL,
305};
306
307static struct attribute_group ipl_ccw_attr_group = {
308 .attrs = ipl_ccw_attrs,
309};
310
311/* UNKNOWN ipl device attributes */
312
313static struct attribute *ipl_unknown_attrs[] = {
314 &sys_ipl_type_attr.attr,
315 NULL,
316};
317
318static struct attribute_group ipl_unknown_attr_group = {
319 .attrs = ipl_unknown_attrs,
320};
321
322static decl_subsys(ipl, NULL, NULL);
323
324/*
325 * reipl section
326 */
327
328/* FCP reipl device attributes */
329
330DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
331 reipl_block_fcp->ipl_info.fcp.wwpn);
332DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
333 reipl_block_fcp->ipl_info.fcp.lun);
334DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
335 reipl_block_fcp->ipl_info.fcp.bootprog);
336DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
337 reipl_block_fcp->ipl_info.fcp.br_lba);
338DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
339 reipl_block_fcp->ipl_info.fcp.devno);
340
341static struct attribute *reipl_fcp_attrs[] = {
342 &sys_reipl_fcp_device_attr.attr,
343 &sys_reipl_fcp_wwpn_attr.attr,
344 &sys_reipl_fcp_lun_attr.attr,
345 &sys_reipl_fcp_bootprog_attr.attr,
346 &sys_reipl_fcp_br_lba_attr.attr,
347 NULL,
348};
349
350static struct attribute_group reipl_fcp_attr_group = {
351 .name = IPL_FCP_STR,
352 .attrs = reipl_fcp_attrs,
353};
354
355/* CCW reipl device attributes */
356
357DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
358 reipl_block_ccw->ipl_info.ccw.devno);
359
360static struct attribute *reipl_ccw_attrs[] = {
361 &sys_reipl_ccw_device_attr.attr,
362 NULL,
363};
364
365static struct attribute_group reipl_ccw_attr_group = {
366 .name = IPL_CCW_STR,
367 .attrs = reipl_ccw_attrs,
368};
369
370/* reipl type */
371
372static int reipl_set_type(enum ipl_type type)
373{
374 if (!(reipl_capabilities & type))
375 return -EINVAL;
376
377 switch(type) {
378 case IPL_TYPE_CCW:
379 if (MACHINE_IS_VM)
380 reipl_method = IPL_METHOD_CCW_VM;
381 else
382 reipl_method = IPL_METHOD_CCW_CIO;
383 break;
384 case IPL_TYPE_FCP:
385 if (diag308_set_works)
386 reipl_method = IPL_METHOD_FCP_RW_DIAG;
387 else if (MACHINE_IS_VM)
388 reipl_method = IPL_METHOD_FCP_RO_VM;
389 else
390 reipl_method = IPL_METHOD_FCP_RO_DIAG;
391 break;
392 default:
393 reipl_method = IPL_METHOD_NONE;
394 }
395 reipl_type = type;
396 return 0;
397}
398
399static ssize_t reipl_type_show(struct subsystem *subsys, char *page)
400{
401 return sprintf(page, "%s\n", ipl_type_str(reipl_type));
402}
403
404static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
405 size_t len)
406{
407 int rc = -EINVAL;
408
409 if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
410 rc = reipl_set_type(IPL_TYPE_CCW);
411 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
412 rc = reipl_set_type(IPL_TYPE_FCP);
413 return (rc != 0) ? rc : len;
414}
415
416static struct subsys_attribute reipl_type_attr =
417 __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
418
419static decl_subsys(reipl, NULL, NULL);
420
421/*
422 * dump section
423 */
424
425/* FCP dump device attributes */
426
427DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
428 dump_block_fcp->ipl_info.fcp.wwpn);
429DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
430 dump_block_fcp->ipl_info.fcp.lun);
431DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
432 dump_block_fcp->ipl_info.fcp.bootprog);
433DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
434 dump_block_fcp->ipl_info.fcp.br_lba);
435DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
436 dump_block_fcp->ipl_info.fcp.devno);
437
438static struct attribute *dump_fcp_attrs[] = {
439 &sys_dump_fcp_device_attr.attr,
440 &sys_dump_fcp_wwpn_attr.attr,
441 &sys_dump_fcp_lun_attr.attr,
442 &sys_dump_fcp_bootprog_attr.attr,
443 &sys_dump_fcp_br_lba_attr.attr,
444 NULL,
445};
446
447static struct attribute_group dump_fcp_attr_group = {
448 .name = IPL_FCP_STR,
449 .attrs = dump_fcp_attrs,
450};
451
452/* CCW dump device attributes */
453
454DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
455 dump_block_ccw->ipl_info.ccw.devno);
456
457static struct attribute *dump_ccw_attrs[] = {
458 &sys_dump_ccw_device_attr.attr,
459 NULL,
460};
461
462static struct attribute_group dump_ccw_attr_group = {
463 .name = IPL_CCW_STR,
464 .attrs = dump_ccw_attrs,
465};
466
467/* dump type */
468
469static int dump_set_type(enum ipl_type type)
470{
471 if (!(dump_capabilities & type))
472 return -EINVAL;
473 switch(type) {
474 case IPL_TYPE_CCW:
475 if (MACHINE_IS_VM)
476 dump_method = IPL_METHOD_CCW_VM;
477 else
478 dump_method = IPL_METHOD_CCW_CIO;
479 break;
480 case IPL_TYPE_FCP:
481 dump_method = IPL_METHOD_FCP_RW_DIAG;
482 break;
483 default:
484 dump_method = IPL_METHOD_NONE;
485 }
486 dump_type = type;
487 return 0;
488}
489
490static ssize_t dump_type_show(struct subsystem *subsys, char *page)
491{
492 return sprintf(page, "%s\n", ipl_type_str(dump_type));
493}
494
495static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
496 size_t len)
497{
498 int rc = -EINVAL;
499
500 if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0)
501 rc = dump_set_type(IPL_TYPE_NONE);
502 else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
503 rc = dump_set_type(IPL_TYPE_CCW);
504 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
505 rc = dump_set_type(IPL_TYPE_FCP);
506 return (rc != 0) ? rc : len;
507}
508
509static struct subsys_attribute dump_type_attr =
510 __ATTR(dump_type, 0644, dump_type_show, dump_type_store);
511
512static decl_subsys(dump, NULL, NULL);
513
514#ifdef CONFIG_SMP
515static void dump_smp_stop_all(void)
516{
517 int cpu;
518 preempt_disable();
519 for_each_online_cpu(cpu) {
520 if (cpu == smp_processor_id())
521 continue;
522 while (signal_processor(cpu, sigp_stop) == sigp_busy)
523 udelay(10);
524 }
525 preempt_enable();
526}
527#else
528#define dump_smp_stop_all() do { } while (0)
529#endif
530
531/*
532 * Shutdown actions section
533 */
534
535static decl_subsys(shutdown_actions, NULL, NULL);
536
537/* on panic */
538
539static ssize_t on_panic_show(struct subsystem *subsys, char *page)
540{
541 return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
542}
543
544static ssize_t on_panic_store(struct subsystem *subsys, const char *buf,
545 size_t len)
546{
547 if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
548 on_panic_action = SHUTDOWN_REIPL;
549 else if (strncmp(buf, SHUTDOWN_DUMP_STR,
550 strlen(SHUTDOWN_DUMP_STR)) == 0)
551 on_panic_action = SHUTDOWN_DUMP;
552 else if (strncmp(buf, SHUTDOWN_STOP_STR,
553 strlen(SHUTDOWN_STOP_STR)) == 0)
554 on_panic_action = SHUTDOWN_STOP;
555 else
556 return -EINVAL;
557
558 return len;
559}
560
561static struct subsys_attribute on_panic_attr =
562 __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
563
564static void print_fcp_block(struct ipl_parameter_block *fcp_block)
565{
566 printk(KERN_EMERG "wwpn: %016llx\n",
567 (unsigned long long)fcp_block->ipl_info.fcp.wwpn);
568 printk(KERN_EMERG "lun: %016llx\n",
569 (unsigned long long)fcp_block->ipl_info.fcp.lun);
570 printk(KERN_EMERG "bootprog: %lld\n",
571 (unsigned long long)fcp_block->ipl_info.fcp.bootprog);
572 printk(KERN_EMERG "br_lba: %lld\n",
573 (unsigned long long)fcp_block->ipl_info.fcp.br_lba);
574 printk(KERN_EMERG "device: %llx\n",
575 (unsigned long long)fcp_block->ipl_info.fcp.devno);
576 printk(KERN_EMERG "opt: %x\n", fcp_block->ipl_info.fcp.opt);
577}
578
579void do_reipl(void)
580{
581 struct ccw_dev_id devid;
582 static char buf[100];
583
584 switch (reipl_type) {
585 case IPL_TYPE_CCW:
586 printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n",
587 reipl_block_ccw->ipl_info.ccw.devno);
588 break;
589 case IPL_TYPE_FCP:
590 printk(KERN_EMERG "reboot on fcp device:\n");
591 print_fcp_block(reipl_block_fcp);
592 break;
593 default:
594 break;
595 }
596
597 switch (reipl_method) {
598 case IPL_METHOD_CCW_CIO:
599 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
600 devid.ssid = 0;
601 reipl_ccw_dev(&devid);
602 break;
603 case IPL_METHOD_CCW_VM:
604 sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno);
605 cpcmd(buf, NULL, 0, NULL);
606 break;
607 case IPL_METHOD_CCW_DIAG:
608 diag308(DIAG308_SET, reipl_block_ccw);
609 diag308(DIAG308_IPL, NULL);
610 break;
611 case IPL_METHOD_FCP_RW_DIAG:
612 diag308(DIAG308_SET, reipl_block_fcp);
613 diag308(DIAG308_IPL, NULL);
614 break;
615 case IPL_METHOD_FCP_RO_DIAG:
616 diag308(DIAG308_IPL, NULL);
617 break;
618 case IPL_METHOD_FCP_RO_VM:
619 cpcmd("IPL", NULL, 0, NULL);
620 break;
621 case IPL_METHOD_NONE:
622 default:
623 if (MACHINE_IS_VM)
624 cpcmd("IPL", NULL, 0, NULL);
625 diag308(DIAG308_IPL, NULL);
626 break;
627 }
628 panic("reipl failed!\n");
629}
630
631static void do_dump(void)
632{
633 struct ccw_dev_id devid;
634 static char buf[100];
635
636 switch (dump_type) {
637 case IPL_TYPE_CCW:
638 printk(KERN_EMERG "Automatic dump on ccw device: 0.0.%04x\n",
639 dump_block_ccw->ipl_info.ccw.devno);
640 break;
641 case IPL_TYPE_FCP:
642 printk(KERN_EMERG "Automatic dump on fcp device:\n");
643 print_fcp_block(dump_block_fcp);
644 break;
645 default:
646 return;
647 }
648
649 switch (dump_method) {
650 case IPL_METHOD_CCW_CIO:
651 dump_smp_stop_all();
652 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
653 devid.ssid = 0;
654 reipl_ccw_dev(&devid);
655 break;
656 case IPL_METHOD_CCW_VM:
657 dump_smp_stop_all();
658 sprintf(buf, "STORE STATUS");
659 cpcmd(buf, NULL, 0, NULL);
660 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
661 cpcmd(buf, NULL, 0, NULL);
662 break;
663 case IPL_METHOD_CCW_DIAG:
664 diag308(DIAG308_SET, dump_block_ccw);
665 diag308(DIAG308_DUMP, NULL);
666 break;
667 case IPL_METHOD_FCP_RW_DIAG:
668 diag308(DIAG308_SET, dump_block_fcp);
669 diag308(DIAG308_DUMP, NULL);
670 break;
671 case IPL_METHOD_NONE:
672 default:
673 return;
674 }
675 printk(KERN_EMERG "Dump failed!\n");
676}
677
678/* init functions */
679
680static int __init ipl_register_fcp_files(void)
681{
682 int rc;
683
684 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
685 &ipl_fcp_attr_group);
686 if (rc)
687 goto out;
688 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
689 &ipl_parameter_attr);
690 if (rc)
691 goto out_ipl_parm;
692 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
693 &ipl_scp_data_attr);
694 if (!rc)
695 goto out;
696
697 sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
698
699out_ipl_parm:
700 sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
701out:
702 return rc;
703}
704
705static int __init ipl_init(void)
706{
707 int rc;
708
709 rc = firmware_register(&ipl_subsys);
710 if (rc)
711 return rc;
712 switch (ipl_get_type()) {
713 case IPL_TYPE_CCW:
714 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
715 &ipl_ccw_attr_group);
716 break;
717 case IPL_TYPE_FCP:
718 rc = ipl_register_fcp_files();
719 break;
720 default:
721 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
722 &ipl_unknown_attr_group);
723 break;
724 }
725 if (rc)
726 firmware_unregister(&ipl_subsys);
727 return rc;
728}
729
730static void __init reipl_probe(void)
731{
732 void *buffer;
733
734 buffer = (void *) get_zeroed_page(GFP_KERNEL);
735 if (!buffer)
736 return;
737 if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
738 diag308_set_works = 1;
739 free_page((unsigned long)buffer);
740}
741
742static int __init reipl_ccw_init(void)
743{
744 int rc;
745
746 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
747 if (!reipl_block_ccw)
748 return -ENOMEM;
749 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_ccw_attr_group);
750 if (rc) {
751 free_page((unsigned long)reipl_block_ccw);
752 return rc;
753 }
754 reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
755 reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
756 reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
757 reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
758 if (ipl_get_type() == IPL_TYPE_CCW)
759 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
760 reipl_capabilities |= IPL_TYPE_CCW;
761 return 0;
762}
763
764static int __init reipl_fcp_init(void)
765{
766 int rc;
767
768 if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP))
769 return 0;
770 if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP))
771 make_attrs_ro(reipl_fcp_attrs);
772
773 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
774 if (!reipl_block_fcp)
775 return -ENOMEM;
776 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_fcp_attr_group);
777 if (rc) {
778 free_page((unsigned long)reipl_block_fcp);
779 return rc;
780 }
781 if (ipl_get_type() == IPL_TYPE_FCP) {
782 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
783 } else {
784 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
785 reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
786 reipl_block_fcp->hdr.blk0_len =
787 sizeof(reipl_block_fcp->ipl_info.fcp);
788 reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
789 reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
790 }
791 reipl_capabilities |= IPL_TYPE_FCP;
792 return 0;
793}
794
795static int __init reipl_init(void)
796{
797 int rc;
798
799 rc = firmware_register(&reipl_subsys);
800 if (rc)
801 return rc;
802 rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
803 if (rc) {
804 firmware_unregister(&reipl_subsys);
805 return rc;
806 }
807 rc = reipl_ccw_init();
808 if (rc)
809 return rc;
810 rc = reipl_fcp_init();
811 if (rc)
812 return rc;
813 rc = reipl_set_type(ipl_get_type());
814 if (rc)
815 return rc;
816 return 0;
817}
818
819static int __init dump_ccw_init(void)
820{
821 int rc;
822
823 dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
824 if (!dump_block_ccw)
825 return -ENOMEM;
826 rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_ccw_attr_group);
827 if (rc) {
828 free_page((unsigned long)dump_block_ccw);
829 return rc;
830 }
831 dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
832 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
833 dump_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
834 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
835 dump_capabilities |= IPL_TYPE_CCW;
836 return 0;
837}
838
839extern char s390_readinfo_sccb[];
840
841static int __init dump_fcp_init(void)
842{
843 int rc;
844
845 if(!(s390_readinfo_sccb[91] & 0x2))
846 return 0; /* LDIPL DUMP is not installed */
847 if (!diag308_set_works)
848 return 0;
849 dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
850 if (!dump_block_fcp)
851 return -ENOMEM;
852 rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_fcp_attr_group);
853 if (rc) {
854 free_page((unsigned long)dump_block_fcp);
855 return rc;
856 }
857 dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
858 dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
859 dump_block_fcp->hdr.blk0_len = sizeof(dump_block_fcp->ipl_info.fcp);
860 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
861 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
862 dump_capabilities |= IPL_TYPE_FCP;
863 return 0;
864}
865
866#define SHUTDOWN_ON_PANIC_PRIO 0
867
868static int shutdown_on_panic_notify(struct notifier_block *self,
869 unsigned long event, void *data)
870{
871 if (on_panic_action == SHUTDOWN_DUMP)
872 do_dump();
873 else if (on_panic_action == SHUTDOWN_REIPL)
874 do_reipl();
875 return NOTIFY_OK;
876}
877
878static struct notifier_block shutdown_on_panic_nb = {
879 .notifier_call = shutdown_on_panic_notify,
880 .priority = SHUTDOWN_ON_PANIC_PRIO
881};
882
883static int __init dump_init(void)
884{
885 int rc;
886
887 rc = firmware_register(&dump_subsys);
888 if (rc)
889 return rc;
890 rc = subsys_create_file(&dump_subsys, &dump_type_attr);
891 if (rc) {
892 firmware_unregister(&dump_subsys);
893 return rc;
894 }
895 rc = dump_ccw_init();
896 if (rc)
897 return rc;
898 rc = dump_fcp_init();
899 if (rc)
900 return rc;
901 dump_set_type(IPL_TYPE_NONE);
902 return 0;
903}
904
905static int __init shutdown_actions_init(void)
906{
907 int rc;
908
909 rc = firmware_register(&shutdown_actions_subsys);
910 if (rc)
911 return rc;
912 rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
913 if (rc) {
914 firmware_unregister(&shutdown_actions_subsys);
915 return rc;
916 }
917 atomic_notifier_chain_register(&panic_notifier_list,
918 &shutdown_on_panic_nb);
919 return 0;
920}
921
922static int __init s390_ipl_init(void)
923{
924 int rc;
925
926 reipl_probe();
927 rc = ipl_init();
928 if (rc)
929 return rc;
930 rc = reipl_init();
931 if (rc)
932 return rc;
933 rc = dump_init();
934 if (rc)
935 return rc;
936 rc = shutdown_actions_init();
937 if (rc)
938 return rc;
939 return 0;
940}
941
942__initcall(s390_ipl_init);
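For orientation, this is roughly what one DEFINE_IPL_ATTR_RW() invocation above expands to, taking the reipl_ccw "device" attribute as the example. The expansion is mechanical from the macro defined in the hunk; once reipl_subsys is registered with firmware_register(), the generated attribute surfaces as something like /sys/firmware/reipl/ccw/device:

static ssize_t sys_reipl_ccw_device_show(struct subsystem *subsys, char *page)
{
        return sprintf(page, "0.0.%04llx\n",
                       (unsigned long long) reipl_block_ccw->ipl_info.ccw.devno);
}

static ssize_t sys_reipl_ccw_device_store(struct subsystem *subsys,
                                          const char *buf, size_t len)
{
        unsigned long long value;

        if (sscanf(buf, "0.0.%llx\n", &value) != 1)
                return -EINVAL;
        reipl_block_ccw->ipl_info.ccw.devno = value;
        return len;
}

static struct subsys_attribute sys_reipl_ccw_device_attr =
        __ATTR(device, (S_IRUGO | S_IWUSR),
               sys_reipl_ccw_device_show,
               sys_reipl_ccw_device_store);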
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
new file mode 100644
index 000000000000..ca28fb0b3790
--- /dev/null
+++ b/arch/s390/kernel/kprobes.c
@@ -0,0 +1,657 @@
1/*
2 * Kernel Probes (KProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2002, 2006
19 *
20 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
21 */
22
23#include <linux/config.h>
24#include <linux/kprobes.h>
25#include <linux/ptrace.h>
26#include <linux/preempt.h>
27#include <linux/stop_machine.h>
28#include <asm/cacheflush.h>
29#include <asm/kdebug.h>
30#include <asm/sections.h>
31#include <asm/uaccess.h>
32#include <linux/module.h>
33
34DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
35DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
36
37int __kprobes arch_prepare_kprobe(struct kprobe *p)
38{
39 /* Make sure the probe isn't going on a difficult instruction */
40 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
41 return -EINVAL;
42
43 if ((unsigned long)p->addr & 0x01) {
44 printk("Attempt to register kprobe at an unaligned address\n");
45 return -EINVAL;
46 }
47
48 /* Use the get_insn_slot() facility for correctness */
49 if (!(p->ainsn.insn = get_insn_slot()))
50 return -ENOMEM;
51
52 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
53
54 get_instruction_type(&p->ainsn);
55 p->opcode = *p->addr;
56 return 0;
57}
58
59int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
60{
61 switch (*(__u8 *) instruction) {
62 case 0x0c: /* bassm */
63 case 0x0b: /* bsm */
64 case 0x83: /* diag */
65 case 0x44: /* ex */
66 return -EINVAL;
67 }
68 switch (*(__u16 *) instruction) {
69 case 0x0101: /* pr */
70 case 0xb25a: /* bsa */
71 case 0xb240: /* bakr */
72 case 0xb258: /* bsg */
73 case 0xb218: /* pc */
74 case 0xb228: /* pt */
75 return -EINVAL;
76 }
77 return 0;
78}
79
80void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
81{
82 /* default fixup method */
83 ainsn->fixup = FIXUP_PSW_NORMAL;
84
85 /* save r1 operand */
86 ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
87
88 /* save the instruction length (pop 5-5) in bytes */
89 switch (*(__u8 *) (ainsn->insn) >> 4) {
90 case 0:
91 ainsn->ilen = 2;
92 break;
93 case 1:
94 case 2:
95 ainsn->ilen = 4;
96 break;
97 case 3:
98 ainsn->ilen = 6;
99 break;
100 }
101
102 switch (*(__u8 *) ainsn->insn) {
103 case 0x05: /* balr */
104 case 0x0d: /* basr */
105 ainsn->fixup = FIXUP_RETURN_REGISTER;
106 /* if r2 = 0, no branch will be taken */
107 if ((*ainsn->insn & 0x0f) == 0)
108 ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
109 break;
110 case 0x06: /* bctr */
111 case 0x07: /* bcr */
112 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
113 break;
114 case 0x45: /* bal */
115 case 0x4d: /* bas */
116 ainsn->fixup = FIXUP_RETURN_REGISTER;
117 break;
118 case 0x47: /* bc */
119 case 0x46: /* bct */
120 case 0x86: /* bxh */
121 case 0x87: /* bxle */
122 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
123 break;
124 case 0x82: /* lpsw */
125 ainsn->fixup = FIXUP_NOT_REQUIRED;
126 break;
127 case 0xb2: /* lpswe */
128 if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
129 ainsn->fixup = FIXUP_NOT_REQUIRED;
130 }
131 break;
132 case 0xa7: /* bras */
133 if ((*ainsn->insn & 0x0f) == 0x05) {
134 ainsn->fixup |= FIXUP_RETURN_REGISTER;
135 }
136 break;
137 case 0xc0:
138 if ((*ainsn->insn & 0x0f) == 0x00 /* larl */
139 || (*ainsn->insn & 0x0f) == 0x05) /* brasl */
140 ainsn->fixup |= FIXUP_RETURN_REGISTER;
141 break;
142 case 0xeb:
143 if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */
144 *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
145 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
146 }
147 break;
148 case 0xe3: /* bctg */
149 if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
150 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
151 }
152 break;
153 }
154}
155
156static int __kprobes swap_instruction(void *aref)
157{
158 struct ins_replace_args *args = aref;
159 int err = -EFAULT;
160
161 asm volatile(
162 "0: mvc 0(2,%2),0(%3)\n"
163 "1: la %0,0\n"
164 "2:\n"
165 EX_TABLE(0b,2b)
166 : "+d" (err), "=m" (*args->ptr)
167 : "a" (args->ptr), "a" (&args->new), "m" (args->new));
168 return err;
169}
170
171void __kprobes arch_arm_kprobe(struct kprobe *p)
172{
173 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
174 unsigned long status = kcb->kprobe_status;
175 struct ins_replace_args args;
176
177 args.ptr = p->addr;
178 args.old = p->opcode;
179 args.new = BREAKPOINT_INSTRUCTION;
180
181 kcb->kprobe_status = KPROBE_SWAP_INST;
182 stop_machine_run(swap_instruction, &args, NR_CPUS);
183 kcb->kprobe_status = status;
184}
185
186void __kprobes arch_disarm_kprobe(struct kprobe *p)
187{
188 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
189 unsigned long status = kcb->kprobe_status;
190 struct ins_replace_args args;
191
192 args.ptr = p->addr;
193 args.old = BREAKPOINT_INSTRUCTION;
194 args.new = p->opcode;
195
196 kcb->kprobe_status = KPROBE_SWAP_INST;
197 stop_machine_run(swap_instruction, &args, NR_CPUS);
198 kcb->kprobe_status = status;
199}
200
201void __kprobes arch_remove_kprobe(struct kprobe *p)
202{
203 mutex_lock(&kprobe_mutex);
204 free_insn_slot(p->ainsn.insn);
205 mutex_unlock(&kprobe_mutex);
206}
207
208static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
209{
210 per_cr_bits kprobe_per_regs[1];
211
212 memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
213 regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
214
215 /* Set up the per control reg info, will pass to lctl */
216 kprobe_per_regs[0].em_instruction_fetch = 1;
217 kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
218 kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
219
220 /* Set the PER control regs, turns on single step for this address */
221 __ctl_load(kprobe_per_regs, 9, 11);
222 regs->psw.mask |= PSW_MASK_PER;
223 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
224}
225
226static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
227{
228 kcb->prev_kprobe.kp = kprobe_running();
229 kcb->prev_kprobe.status = kcb->kprobe_status;
230 kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
231 memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
232 sizeof(kcb->kprobe_saved_ctl));
233}
234
235static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
236{
237 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
238 kcb->kprobe_status = kcb->prev_kprobe.status;
239 kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
240 memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
241 sizeof(kcb->kprobe_saved_ctl));
242}
243
244static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
245 struct kprobe_ctlblk *kcb)
246{
247 __get_cpu_var(current_kprobe) = p;
248 /* Save the interrupt and per flags */
249 kcb->kprobe_saved_imask = regs->psw.mask &
250 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
251 /* Save the control regs that govern PER */
252 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
253}
254
255/* Called with kretprobe_lock held */
256void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
257 struct pt_regs *regs)
258{
259 struct kretprobe_instance *ri;
260
261 if ((ri = get_free_rp_inst(rp)) != NULL) {
262 ri->rp = rp;
263 ri->task = current;
264 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
265
266 /* Replace the return addr with trampoline addr */
267 regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
268
269 add_rp_inst(ri);
270 } else {
271 rp->nmissed++;
272 }
273}
274
275static int __kprobes kprobe_handler(struct pt_regs *regs)
276{
277 struct kprobe *p;
278 int ret = 0;
279 unsigned long *addr = (unsigned long *)
280 ((regs->psw.addr & PSW_ADDR_INSN) - 2);
281 struct kprobe_ctlblk *kcb;
282
283 /*
284 * We don't want to be preempted for the entire
285 * duration of kprobe processing
286 */
287 preempt_disable();
288 kcb = get_kprobe_ctlblk();
289
290 /* Check we're not actually recursing */
291 if (kprobe_running()) {
292 p = get_kprobe(addr);
293 if (p) {
294 if (kcb->kprobe_status == KPROBE_HIT_SS &&
295 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
296 regs->psw.mask &= ~PSW_MASK_PER;
297 regs->psw.mask |= kcb->kprobe_saved_imask;
298 goto no_kprobe;
299 }
300 /* We have reentered the kprobe_handler(), since
301 * another probe was hit while within the handler.
302 * Here we save the original kprobes variables and
303 * just single step on the instruction of the new probe
304 * without calling any user handlers.
305 */
306 save_previous_kprobe(kcb);
307 set_current_kprobe(p, regs, kcb);
308 kprobes_inc_nmissed_count(p);
309 prepare_singlestep(p, regs);
310 kcb->kprobe_status = KPROBE_REENTER;
311 return 1;
312 } else {
313 p = __get_cpu_var(current_kprobe);
314 if (p->break_handler && p->break_handler(p, regs)) {
315 goto ss_probe;
316 }
317 }
318 goto no_kprobe;
319 }
320
321 p = get_kprobe(addr);
322 if (!p) {
323 if (*addr != BREAKPOINT_INSTRUCTION) {
324 /*
325 * The breakpoint instruction was removed right
326 * after we hit it. Another cpu has removed
327 * either a probepoint or a debugger breakpoint
328 * at this address. In either case, no further
329 * handling of this interrupt is appropriate.
330 *
331 */
332 ret = 1;
333 }
334 /* Not one of ours: let kernel handle it */
335 goto no_kprobe;
336 }
337
338 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
339 set_current_kprobe(p, regs, kcb);
340 if (p->pre_handler && p->pre_handler(p, regs))
341 /* handler has already set things up, so skip ss setup */
342 return 1;
343
344ss_probe:
345 prepare_singlestep(p, regs);
346 kcb->kprobe_status = KPROBE_HIT_SS;
347 return 1;
348
349no_kprobe:
350 preempt_enable_no_resched();
351 return ret;
352}
353
354/*
355 * Function return probe trampoline:
356 * - init_kprobes() establishes a probepoint here
357 * - When the probed function returns, this probe
358 * causes the handlers to fire
359 */
360void __kprobes kretprobe_trampoline_holder(void)
361{
362 asm volatile(".global kretprobe_trampoline\n"
363 "kretprobe_trampoline: bcr 0,0\n");
364}
365
366/*
367 * Called when the probe at kretprobe trampoline is hit
368 */
369int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
370{
371 struct kretprobe_instance *ri = NULL;
372 struct hlist_head *head;
373 struct hlist_node *node, *tmp;
374 unsigned long flags, orig_ret_address = 0;
375 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
376
377 spin_lock_irqsave(&kretprobe_lock, flags);
378 head = kretprobe_inst_table_head(current);
379
380 /*
381 * It is possible to have multiple instances associated with a given
382 * task either because multiple functions in the call path
383 * have a return probe installed on them, and/or more than one
384 * return probe was registered for a target function.
385 *
386 * We can handle this because:
387 * - instances are always inserted at the head of the list
388 * - when multiple return probes are registered for the same
389 * function, the first instance's ret_addr will point to the
390 * real return address, and all the rest will point to
391 * kretprobe_trampoline
392 */
393 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
394 if (ri->task != current)
395 /* another task is sharing our hash bucket */
396 continue;
397
398 if (ri->rp && ri->rp->handler)
399 ri->rp->handler(ri, regs);
400
401 orig_ret_address = (unsigned long)ri->ret_addr;
402 recycle_rp_inst(ri);
403
404 if (orig_ret_address != trampoline_address) {
405 /*
406 * This is the real return address. Any other
407 * instances associated with this task are for
408 * other calls deeper on the call stack
409 */
410 break;
411 }
412 }
413 BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
414 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
415
416 reset_current_kprobe();
417 spin_unlock_irqrestore(&kretprobe_lock, flags);
418 preempt_enable_no_resched();
419
420 /*
421 * By returning a non-zero value, we are telling
422 * kprobe_handler() that we don't want the post_handler
423 * to run (and have re-enabled preemption)
424 */
425 return 1;
426}
427
428/*
429 * Called after single-stepping. p->addr is the address of the
430 * instruction whose first byte has been replaced by the "breakpoint"
431 * instruction. To avoid the SMP problems that can occur when we
432 * temporarily put back the original opcode to single-step, we
433 * single-stepped a copy of the instruction. The address of this
434 * copy is p->ainsn.insn.
435 */
436static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
437{
438 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
439
440 regs->psw.addr &= PSW_ADDR_INSN;
441
442 if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
443 regs->psw.addr = (unsigned long)p->addr +
444 ((unsigned long)regs->psw.addr -
445 (unsigned long)p->ainsn.insn);
446
447 if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
448 if ((unsigned long)regs->psw.addr -
449 (unsigned long)p->ainsn.insn == p->ainsn.ilen)
450 regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
451
452 if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
453 regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
454 (regs->gprs[p->ainsn.reg] -
455 (unsigned long)p->ainsn.insn))
456 | PSW_ADDR_AMODE;
457
458 regs->psw.addr |= PSW_ADDR_AMODE;
459 /* turn off PER mode */
460 regs->psw.mask &= ~PSW_MASK_PER;
461 /* Restore the original per control regs */
462 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
463 regs->psw.mask |= kcb->kprobe_saved_imask;
464}
465
466static int __kprobes post_kprobe_handler(struct pt_regs *regs)
467{
468 struct kprobe *cur = kprobe_running();
469 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
470
471 if (!cur)
472 return 0;
473
474 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
475 kcb->kprobe_status = KPROBE_HIT_SSDONE;
476 cur->post_handler(cur, regs, 0);
477 }
478
479 resume_execution(cur, regs);
480
481 /* Restore the original saved kprobes variables and continue. */
482 if (kcb->kprobe_status == KPROBE_REENTER) {
483 restore_previous_kprobe(kcb);
484 goto out;
485 }
486 reset_current_kprobe();
487out:
488 preempt_enable_no_resched();
489
490 /*
491 * if somebody else is singlestepping across a probe point, psw mask
492 * will have PER set, in which case, continue the remaining processing
493 * of do_single_step, as if this is not a probe hit.
494 */
495 if (regs->psw.mask & PSW_MASK_PER) {
496 return 0;
497 }
498
499 return 1;
500}
501
502static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
503{
504 struct kprobe *cur = kprobe_running();
505 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
506 const struct exception_table_entry *entry;
507
508 switch(kcb->kprobe_status) {
509 case KPROBE_SWAP_INST:
510 /* We are here because the instruction replacement failed */
511 return 0;
512 case KPROBE_HIT_SS:
513 case KPROBE_REENTER:
514 /*
515 * We are here because the instruction being single
516 * stepped caused a page fault. We reset the current
517 * kprobe and the psw points back to the probe address
518 * and allow the page fault handler to continue as a
519 * normal page fault.
520 */
521 regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
522 regs->psw.mask &= ~PSW_MASK_PER;
523 regs->psw.mask |= kcb->kprobe_saved_imask;
524 if (kcb->kprobe_status == KPROBE_REENTER)
525 restore_previous_kprobe(kcb);
526 else
527 reset_current_kprobe();
528 preempt_enable_no_resched();
529 break;
530 case KPROBE_HIT_ACTIVE:
531 case KPROBE_HIT_SSDONE:
532 /*
533 * We increment the nmissed count for accounting,
534 * we can also use npre/npostfault count for accounting
535 * these specific fault cases.
536 */
537 kprobes_inc_nmissed_count(cur);
538
539 /*
540 * We come here because instructions in the pre/post
541 * handler caused the page fault; this could happen
542 * if the handler tries to access user space via
543 * copy_from_user(), get_user() etc. Let the
544 * user-specified handler try to fix it first.
545 */
546 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
547 return 1;
548
549 /*
550 * In case the user-specified fault handler returned
551 * zero, try to fix up.
552 */
553 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
554 if (entry) {
555 regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
556 return 1;
557 }
558
559 /*
560 * fixup_exception() could not handle it,
561 * Let do_page_fault() fix it.
562 */
563 break;
564 default:
565 break;
566 }
567 return 0;
568}
569
570/*
 571 * Wrapper routine for handling exceptions.
572 */
573int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
574 unsigned long val, void *data)
575{
576 struct die_args *args = (struct die_args *)data;
577 int ret = NOTIFY_DONE;
578
579 switch (val) {
580 case DIE_BPT:
581 if (kprobe_handler(args->regs))
582 ret = NOTIFY_STOP;
583 break;
584 case DIE_SSTEP:
585 if (post_kprobe_handler(args->regs))
586 ret = NOTIFY_STOP;
587 break;
588 case DIE_TRAP:
589 case DIE_PAGE_FAULT:
590 /* kprobe_running() needs smp_processor_id() */
591 preempt_disable();
592 if (kprobe_running() &&
593 kprobe_fault_handler(args->regs, args->trapnr))
594 ret = NOTIFY_STOP;
595 preempt_enable();
596 break;
597 default:
598 break;
599 }
600 return ret;
601}
602
603int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
604{
605 struct jprobe *jp = container_of(p, struct jprobe, kp);
606 unsigned long addr;
607 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
608
609 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
610
611 /* setup return addr to the jprobe handler routine */
612 regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
613
614 /* r14 is the function return address */
615 kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
616 /* r15 is the stack pointer */
617 kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
618 addr = (unsigned long)kcb->jprobe_saved_r15;
619
620 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
621 MIN_STACK_SIZE(addr));
622 return 1;
623}
624
625void __kprobes jprobe_return(void)
626{
627 asm volatile(".word 0x0002");
628}
629
630void __kprobes jprobe_return_end(void)
631{
632 asm volatile("bcr 0,0");
633}
634
635int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
636{
637 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
638 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
639
640 /* Put the regs back */
641 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
642 /* put the stack back */
643 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
644 MIN_STACK_SIZE(stack_addr));
645 preempt_enable_no_resched();
646 return 1;
647}
648
649static struct kprobe trampoline_p = {
650 .addr = (kprobe_opcode_t *) & kretprobe_trampoline,
651 .pre_handler = trampoline_probe_handler
652};
653
654int __init arch_init_kprobes(void)
655{
656 return register_kprobe(&trampoline_p);
657}
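
For context, the probe machinery added above is driven entirely through register_kprobe(). A minimal client of this 2.6-era API might look like the sketch below; my_probe_target, the handler body and the module boilerplate are illustrative placeholders and are not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Placeholder symbol to probe; assumed to exist in the running kernel. */
extern void my_probe_target(void);

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* 0 = single-step the original instruction as usual */
}

static struct kprobe my_kp = {
	.addr		= (kprobe_opcode_t *) my_probe_target,	/* placeholder address */
	.pre_handler	= my_pre_handler,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_kp);
}

static void __exit my_probe_exit(void)
{
	unregister_kprobe(&my_kp);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");

On a hit, kprobe_handler() fires the pre-handler, single-steps the saved copy of the probed instruction, and resume_execution()/post_kprobe_handler() put the psw back in order, as shown in the code above.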
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 658e5ac484f9..4562cdbce8eb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,13 +8,30 @@
8 8
9#include <asm/lowcore.h> 9#include <asm/lowcore.h>
10 10
11 .globl do_reipl 11 .globl do_reipl_asm
12do_reipl: basr %r13,0 12do_reipl_asm: basr %r13,0
13.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) 13.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13) 14
15 stctl %c0,%c0,.Lctlsave-.Lpg0(%r13) 15 # switch off lowcore protection
16 ni .Lctlsave-.Lpg0(%r13),0xef 16
17 lctl %c0,%c0,.Lctlsave-.Lpg0(%r13) 17.Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
18 stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13)
19 ni .Lctlsave1-.Lpg0(%r13),0xef
20 lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
21
22 # do store status of all registers
23
24 stm %r0,%r15,__LC_GPREGS_SAVE_AREA
25 stctl %c0,%c15,__LC_CREGS_SAVE_AREA
26 mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13)
27 stam %a0,%a15,__LC_AREGS_SAVE_AREA
28 stpx __LC_PREFIX_SAVE_AREA
29 stckc .Lclkcmp-.Lpg0(%r13)
30 mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
31 stpt __LC_CPU_TIMER_SAVE_AREA
32 st %r13, __LC_PSW_SAVE_AREA+4
33
34 lctl %c6,%c6,.Lall-.Lpg0(%r13)
18 lr %r1,%r2 35 lr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) 36 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13) 37 stsch .Lschib-.Lpg0(%r13)
@@ -46,9 +63,11 @@ do_reipl: basr %r13,0
46.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) 63.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
47 lpsw .Ldispsw-.Lpg0(%r13) 64 lpsw .Ldispsw-.Lpg0(%r13)
48 .align 8 65 .align 8
66.Lclkcmp: .quad 0x0000000000000000
49.Lall: .long 0xff000000 67.Lall: .long 0xff000000
50.Lnull: .long 0x00000000 68.Lnull: .long 0x00000000
51.Lctlsave: .long 0x00000000 69.Lctlsave1: .long 0x00000000
70.Lctlsave2: .long 0x00000000
52 .align 8 71 .align 8
53.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 72.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
54.Lpcnew: .long 0x00080000,0x80000000+.Lecs 73.Lpcnew: .long 0x00080000,0x80000000+.Lecs
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 4d090d60f3ef..95bd1e234f63 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,13 +8,30 @@
8 */ 8 */
9 9
10#include <asm/lowcore.h> 10#include <asm/lowcore.h>
11 .globl do_reipl 11 .globl do_reipl_asm
12do_reipl: basr %r13,0 12do_reipl_asm: basr %r13,0
13.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) 13
14 # do store status of all registers
15
16.Lpg0: stg %r1,.Lregsave-.Lpg0(%r13)
17 lghi %r1,0x1000
18 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
19 lg %r0,.Lregsave-.Lpg0(%r13)
20 stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
21 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
22 stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
23 stpx __LC_PREFIX_SAVE_AREA-0x1000(%r1)
24 stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
25 stckc .Lclkcmp-.Lpg0(%r13)
26 mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
27 stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
28 stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
29
30 lpswe .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) 31.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
15 stctg %c0,%c0,.Lctlsave-.Lpg0(%r13) 32 stctg %c0,%c0,.Lregsave-.Lpg0(%r13)
16 ni .Lctlsave+4-.Lpg0(%r13),0xef 33 ni .Lregsave+4-.Lpg0(%r13),0xef
17 lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13) 34 lctlg %c0,%c0,.Lregsave-.Lpg0(%r13)
18 lgr %r1,%r2 35 lgr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) 36 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13) 37 stsch .Lschib-.Lpg0(%r13)
@@ -50,8 +67,9 @@ do_reipl: basr %r13,0
50 st %r14,.Ldispsw+12-.Lpg0(%r13) 67 st %r14,.Ldispsw+12-.Lpg0(%r13)
51 lpswe .Ldispsw-.Lpg0(%r13) 68 lpswe .Ldispsw-.Lpg0(%r13)
52 .align 8 69 .align 8
70.Lclkcmp: .quad 0x0000000000000000
53.Lall: .quad 0x00000000ff000000 71.Lall: .quad 0x00000000ff000000
54.Lctlsave: .quad 0x0000000000000000 72.Lregsave: .quad 0x0000000000000000
55.Lnull: .long 0x0000000000000000 73.Lnull: .long 0x0000000000000000
56 .align 16 74 .align 16
57/* 75/*
@@ -92,5 +110,3 @@ do_reipl: basr %r13,0
92 .long 0x00000000,0x00000000 110 .long 0x00000000,0x00000000
93 .long 0x00000000,0x00000000 111 .long 0x00000000,0x00000000
94 112
95
96
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
deleted file mode 100644
index 1f33951ba439..000000000000
--- a/arch/s390/kernel/reipl_diag.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * This file contains the implementation of the
3 * Linux re-IPL support
4 *
5 * (C) Copyright IBM Corp. 2005
6 *
7 * Author(s): Volker Sameske (sameske@de.ibm.com)
8 *
9 */
10
11#include <linux/kernel.h>
12
13static unsigned int reipl_diag_rc1;
14static unsigned int reipl_diag_rc2;
15
16/*
17 * re-IPL the system using the last used IPL parameters
18 */
19void reipl_diag(void)
20{
21 asm volatile (
22 " la %%r4,0\n"
23 " la %%r5,0\n"
24 " diag %%r4,%2,0x308\n"
25 "0:\n"
26 " st %%r4,%0\n"
27 " st %%r5,%1\n"
28 ".section __ex_table,\"a\"\n"
29#ifdef CONFIG_64BIT
30 " .align 8\n"
31 " .quad 0b, 0b\n"
32#else
33 " .align 4\n"
34 " .long 0b, 0b\n"
35#endif
36 ".previous\n"
37 : "=m" (reipl_diag_rc1), "=m" (reipl_diag_rc2)
38 : "d" (3) : "cc", "4", "5" );
39}
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index c73a45467fa4..9f19e833a562 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap);
25EXPORT_SYMBOL(_ni_bitmap); 25EXPORT_SYMBOL(_ni_bitmap);
26EXPORT_SYMBOL(_zb_findmap); 26EXPORT_SYMBOL(_zb_findmap);
27EXPORT_SYMBOL(_sb_findmap); 27EXPORT_SYMBOL(_sb_findmap);
28EXPORT_SYMBOL(__copy_from_user_asm);
29EXPORT_SYMBOL(__copy_to_user_asm);
30EXPORT_SYMBOL(__copy_in_user_asm);
31EXPORT_SYMBOL(__clear_user_asm);
32EXPORT_SYMBOL(__strncpy_from_user_asm);
33EXPORT_SYMBOL(__strnlen_user_asm);
34EXPORT_SYMBOL(diag10); 28EXPORT_SYMBOL(diag10);
35 29
36/* 30/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c902f059c7aa..e3d9325f6022 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -37,6 +37,7 @@
37#include <linux/kernel_stat.h> 37#include <linux/kernel_stat.h>
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/notifier.h> 39#include <linux/notifier.h>
40#include <linux/pfn.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/system.h> 43#include <asm/system.h>
@@ -50,6 +51,12 @@
50#include <asm/sections.h> 51#include <asm/sections.h>
51 52
52/* 53/*
54 * User copy operations.
55 */
56struct uaccess_ops uaccess;
57EXPORT_SYMBOL_GPL(uaccess);
58
59/*
53 * Machine setup.. 60 * Machine setup..
54 */ 61 */
55unsigned int console_mode = 0; 62unsigned int console_mode = 0;
@@ -284,16 +291,9 @@ void (*_machine_power_off)(void) = machine_power_off_smp;
284/* 291/*
285 * Reboot, halt and power_off routines for non SMP. 292 * Reboot, halt and power_off routines for non SMP.
286 */ 293 */
287extern void reipl(unsigned long devno);
288extern void reipl_diag(void);
289static void do_machine_restart_nonsmp(char * __unused) 294static void do_machine_restart_nonsmp(char * __unused)
290{ 295{
291 reipl_diag(); 296 do_reipl();
292
293 if (MACHINE_IS_VM)
294 cpcmd ("IPL", NULL, 0, NULL);
295 else
296 reipl (0x10000 | S390_lowcore.ipl_device);
297} 297}
298 298
299static void do_machine_halt_nonsmp(void) 299static void do_machine_halt_nonsmp(void)
@@ -501,13 +501,47 @@ setup_memory(void)
501 * partially used pages are not usable - thus 501 * partially used pages are not usable - thus
502 * we are rounding upwards: 502 * we are rounding upwards:
503 */ 503 */
504 start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; 504 start_pfn = PFN_UP(__pa(&_end));
505 end_pfn = max_pfn = memory_end >> PAGE_SHIFT; 505 end_pfn = max_pfn = PFN_DOWN(memory_end);
506 506
507 /* Initialize storage key for kernel pages */ 507 /* Initialize storage key for kernel pages */
508 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) 508 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
509 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); 509 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
510 510
511#ifdef CONFIG_BLK_DEV_INITRD
512 /*
 513 * Move the initrd in case the bitmap of the bootmem allocator
514 * would overwrite it.
515 */
516
517 if (INITRD_START && INITRD_SIZE) {
518 unsigned long bmap_size;
519 unsigned long start;
520
521 bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
522 bmap_size = PFN_PHYS(bmap_size);
523
524 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
525 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
526
527 if (start + INITRD_SIZE > memory_end) {
528 printk("initrd extends beyond end of memory "
529 "(0x%08lx > 0x%08lx)\n"
530 "disabling initrd\n",
531 start + INITRD_SIZE, memory_end);
532 INITRD_START = INITRD_SIZE = 0;
533 } else {
534 printk("Moving initrd (0x%08lx -> 0x%08lx, "
535 "size: %ld)\n",
536 INITRD_START, start, INITRD_SIZE);
537 memmove((void *) start, (void *) INITRD_START,
538 INITRD_SIZE);
539 INITRD_START = start;
540 }
541 }
542 }
543#endif
544
511 /* 545 /*
512 * Initialize the boot-time allocator (with low memory only): 546 * Initialize the boot-time allocator (with low memory only):
513 */ 547 */
@@ -559,7 +593,7 @@ setup_memory(void)
559 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); 593 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
560 594
561#ifdef CONFIG_BLK_DEV_INITRD 595#ifdef CONFIG_BLK_DEV_INITRD
562 if (INITRD_START) { 596 if (INITRD_START && INITRD_SIZE) {
563 if (INITRD_START + INITRD_SIZE <= memory_end) { 597 if (INITRD_START + INITRD_SIZE <= memory_end) {
564 reserve_bootmem(INITRD_START, INITRD_SIZE); 598 reserve_bootmem(INITRD_START, INITRD_SIZE);
565 initrd_start = INITRD_START; 599 initrd_start = INITRD_START;
@@ -613,6 +647,11 @@ setup_arch(char **cmdline_p)
613 647
614 memory_end = memory_size; 648 memory_end = memory_size;
615 649
650 if (MACHINE_HAS_MVCOS)
651 memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
652 else
653 memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
654
616 parse_early_param(); 655 parse_early_param();
617 656
618#ifndef CONFIG_64BIT 657#ifndef CONFIG_64BIT
@@ -720,214 +759,3 @@ struct seq_operations cpuinfo_op = {
720 .show = show_cpuinfo, 759 .show = show_cpuinfo,
721}; 760};
722 761
723#define DEFINE_IPL_ATTR(_name, _format, _value) \
724static ssize_t ipl_##_name##_show(struct subsystem *subsys, \
725 char *page) \
726{ \
727 return sprintf(page, _format, _value); \
728} \
729static struct subsys_attribute ipl_##_name##_attr = \
730 __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);
731
732DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
733 IPL_PARMBLOCK_START->fcp.wwpn);
734DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
735 IPL_PARMBLOCK_START->fcp.lun);
736DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
737 IPL_PARMBLOCK_START->fcp.bootprog);
738DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
739 IPL_PARMBLOCK_START->fcp.br_lba);
740
741enum ipl_type_type {
742 ipl_type_unknown,
743 ipl_type_ccw,
744 ipl_type_fcp,
745};
746
747static enum ipl_type_type
748get_ipl_type(void)
749{
750 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
751
752 if (!IPL_DEVNO_VALID)
753 return ipl_type_unknown;
754 if (!IPL_PARMBLOCK_VALID)
755 return ipl_type_ccw;
756 if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
757 return ipl_type_unknown;
758 if (ipl->fcp.pbt != IPL_TYPE_FCP)
759 return ipl_type_unknown;
760 return ipl_type_fcp;
761}
762
763static ssize_t
764ipl_type_show(struct subsystem *subsys, char *page)
765{
766 switch (get_ipl_type()) {
767 case ipl_type_ccw:
768 return sprintf(page, "ccw\n");
769 case ipl_type_fcp:
770 return sprintf(page, "fcp\n");
771 default:
772 return sprintf(page, "unknown\n");
773 }
774}
775
776static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);
777
778static ssize_t
779ipl_device_show(struct subsystem *subsys, char *page)
780{
781 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
782
783 switch (get_ipl_type()) {
784 case ipl_type_ccw:
785 return sprintf(page, "0.0.%04x\n", ipl_devno);
786 case ipl_type_fcp:
787 return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
788 default:
789 return 0;
790 }
791}
792
793static struct subsys_attribute ipl_device_attr =
794 __ATTR(device, S_IRUGO, ipl_device_show, NULL);
795
796static struct attribute *ipl_fcp_attrs[] = {
797 &ipl_type_attr.attr,
798 &ipl_device_attr.attr,
799 &ipl_wwpn_attr.attr,
800 &ipl_lun_attr.attr,
801 &ipl_bootprog_attr.attr,
802 &ipl_br_lba_attr.attr,
803 NULL,
804};
805
806static struct attribute_group ipl_fcp_attr_group = {
807 .attrs = ipl_fcp_attrs,
808};
809
810static struct attribute *ipl_ccw_attrs[] = {
811 &ipl_type_attr.attr,
812 &ipl_device_attr.attr,
813 NULL,
814};
815
816static struct attribute_group ipl_ccw_attr_group = {
817 .attrs = ipl_ccw_attrs,
818};
819
820static struct attribute *ipl_unknown_attrs[] = {
821 &ipl_type_attr.attr,
822 NULL,
823};
824
825static struct attribute_group ipl_unknown_attr_group = {
826 .attrs = ipl_unknown_attrs,
827};
828
829static ssize_t
830ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
831{
832 unsigned int size = IPL_PARMBLOCK_SIZE;
833
834 if (off > size)
835 return 0;
836 if (off + count > size)
837 count = size - off;
838
839 memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
840 return count;
841}
842
843static struct bin_attribute ipl_parameter_attr = {
844 .attr = {
845 .name = "binary_parameter",
846 .mode = S_IRUGO,
847 .owner = THIS_MODULE,
848 },
849 .size = PAGE_SIZE,
850 .read = &ipl_parameter_read,
851};
852
853static ssize_t
854ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
855{
856 unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
857 void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;
858
859 if (off > size)
860 return 0;
861 if (off + count > size)
862 count = size - off;
863
864 memcpy(buf, scp_data + off, count);
865 return count;
866}
867
868static struct bin_attribute ipl_scp_data_attr = {
869 .attr = {
870 .name = "scp_data",
871 .mode = S_IRUGO,
872 .owner = THIS_MODULE,
873 },
874 .size = PAGE_SIZE,
875 .read = &ipl_scp_data_read,
876};
877
878static decl_subsys(ipl, NULL, NULL);
879
880static int ipl_register_fcp_files(void)
881{
882 int rc;
883
884 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
885 &ipl_fcp_attr_group);
886 if (rc)
887 goto out;
888 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
889 &ipl_parameter_attr);
890 if (rc)
891 goto out_ipl_parm;
892 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
893 &ipl_scp_data_attr);
894 if (!rc)
895 goto out;
896
897 sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
898
899out_ipl_parm:
900 sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
901out:
902 return rc;
903}
904
905static int __init
906ipl_device_sysfs_register(void) {
907 int rc;
908
909 rc = firmware_register(&ipl_subsys);
910 if (rc)
911 goto out;
912
913 switch (get_ipl_type()) {
914 case ipl_type_ccw:
915 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
916 &ipl_ccw_attr_group);
917 break;
918 case ipl_type_fcp:
919 rc = ipl_register_fcp_files();
920 break;
921 default:
922 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
923 &ipl_unknown_attr_group);
924 break;
925 }
926
927 if (rc)
928 firmware_unregister(&ipl_subsys);
929out:
930 return rc;
931}
932
933__initcall(ipl_device_sysfs_register);
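
The memcpy() of uaccess_mvcos or uaccess_std into the global uaccess structure during setup_arch() is the run-time selection point for all user-copy primitives. A rough sketch of the caller side, assuming struct uaccess_ops and the uaccess global are declared in asm/uaccess.h (that header is not part of this hunk):

#include <asm/uaccess.h>	/* assumed to declare struct uaccess_ops and "uaccess" */

/* Illustrative wrapper: dispatch through the table selected in setup_arch(). */
static inline unsigned long sketch_copy_to_user(void __user *to,
						const void *from,
						unsigned long n)
{
	/* the ops return the number of bytes that could NOT be copied */
	return uaccess.copy_to_user(n, to, from);
}

Machines with the MVCOS facility get the single-instruction movers from uaccess_mvcos.c; everything else falls back to the mvcp/mvcs based routines in uaccess_std.c.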
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index a887b686f279..642095ec7c07 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -114,29 +114,26 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
114static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) 114static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
115{ 115{
116 unsigned long old_mask = regs->psw.mask; 116 unsigned long old_mask = regs->psw.mask;
117 int err; 117 _sigregs user_sregs;
118 118
119 save_access_regs(current->thread.acrs); 119 save_access_regs(current->thread.acrs);
120 120
121 /* Copy a 'clean' PSW mask to the user to avoid leaking 121 /* Copy a 'clean' PSW mask to the user to avoid leaking
122 information about whether PER is currently on. */ 122 information about whether PER is currently on. */
123 regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); 123 regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
124 err = __copy_to_user(&sregs->regs.psw, &regs->psw, 124 memcpy(&user_sregs.regs.psw, &regs->psw, sizeof(sregs->regs.psw) +
125 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); 125 sizeof(sregs->regs.gprs));
126 regs->psw.mask = old_mask; 126 regs->psw.mask = old_mask;
127 if (err != 0) 127 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
128 return err; 128 sizeof(sregs->regs.acrs));
129 err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
130 sizeof(sregs->regs.acrs));
131 if (err != 0)
132 return err;
133 /* 129 /*
134 * We have to store the fp registers to current->thread.fp_regs 130 * We have to store the fp registers to current->thread.fp_regs
135 * to merge them with the emulated registers. 131 * to merge them with the emulated registers.
136 */ 132 */
137 save_fp_regs(&current->thread.fp_regs); 133 save_fp_regs(&current->thread.fp_regs);
138 return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, 134 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
139 sizeof(s390_fp_regs)); 135 sizeof(s390_fp_regs));
136 return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
140} 137}
141 138
142/* Returns positive number on error */ 139/* Returns positive number on error */
@@ -144,27 +141,25 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
144{ 141{
145 unsigned long old_mask = regs->psw.mask; 142 unsigned long old_mask = regs->psw.mask;
146 int err; 143 int err;
144 _sigregs user_sregs;
147 145
148 /* Alwys make any pending restarted system call return -EINTR */ 146 /* Alwys make any pending restarted system call return -EINTR */
149 current_thread_info()->restart_block.fn = do_no_restart_syscall; 147 current_thread_info()->restart_block.fn = do_no_restart_syscall;
150 148
151 err = __copy_from_user(&regs->psw, &sregs->regs.psw, 149 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
152 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
153 regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask); 150 regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
154 regs->psw.addr |= PSW_ADDR_AMODE; 151 regs->psw.addr |= PSW_ADDR_AMODE;
155 if (err) 152 if (err)
156 return err; 153 return err;
157 err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs, 154 memcpy(&regs->psw, &user_sregs.regs.psw, sizeof(sregs->regs.psw) +
158 sizeof(sregs->regs.acrs)); 155 sizeof(sregs->regs.gprs));
159 if (err) 156 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
160 return err; 157 sizeof(sregs->regs.acrs));
161 restore_access_regs(current->thread.acrs); 158 restore_access_regs(current->thread.acrs);
162 159
163 err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs, 160 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
164 sizeof(s390_fp_regs)); 161 sizeof(s390_fp_regs));
165 current->thread.fp_regs.fpc &= FPC_VALID_MASK; 162 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
166 if (err)
167 return err;
168 163
169 restore_fp_regs(&current->thread.fp_regs); 164 restore_fp_regs(&current->thread.fp_regs);
170 regs->trap = -1; /* disable syscall checks */ 165 regs->trap = -1; /* disable syscall checks */
@@ -457,6 +452,7 @@ void do_signal(struct pt_regs *regs)
457 case -ERESTART_RESTARTBLOCK: 452 case -ERESTART_RESTARTBLOCK:
458 regs->gprs[2] = -EINTR; 453 regs->gprs[2] = -EINTR;
459 } 454 }
455 regs->trap = -1; /* Don't deal with this again. */
460 } 456 }
461 457
462 /* Get signal to deliver. When running under ptrace, at this point 458 /* Get signal to deliver. When running under ptrace, at this point
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8e03219eea76..b2e6f4c8d382 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,9 +59,6 @@ static struct task_struct *current_set[NR_CPUS];
59extern char vmhalt_cmd[]; 59extern char vmhalt_cmd[];
60extern char vmpoff_cmd[]; 60extern char vmpoff_cmd[];
61 61
62extern void reipl(unsigned long devno);
63extern void reipl_diag(void);
64
65static void smp_ext_bitcall(int, ec_bit_sig); 62static void smp_ext_bitcall(int, ec_bit_sig);
66static void smp_ext_bitcall_others(ec_bit_sig); 63static void smp_ext_bitcall_others(ec_bit_sig);
67 64
@@ -279,12 +276,7 @@ static void do_machine_restart(void * __unused)
279 * interrupted by an external interrupt and s390irq 276 * interrupted by an external interrupt and s390irq
280 * locks are always held disabled). 277 * locks are always held disabled).
281 */ 278 */
282 reipl_diag(); 279 do_reipl();
283
284 if (MACHINE_IS_VM)
285 cpcmd ("IPL", NULL, 0, NULL);
286 else
287 reipl (0x10000 | S390_lowcore.ipl_device);
288} 280}
289 281
290void machine_restart_smp(char * __unused) 282void machine_restart_smp(char * __unused)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index bde1d1d59858..c4982c963424 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h>
32 33
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -39,6 +40,7 @@
39#include <asm/s390_ext.h> 40#include <asm/s390_ext.h>
40#include <asm/lowcore.h> 41#include <asm/lowcore.h>
41#include <asm/debug.h> 42#include <asm/debug.h>
43#include <asm/kdebug.h>
42 44
43/* Called from entry.S only */ 45/* Called from entry.S only */
44extern void handle_per_exception(struct pt_regs *regs); 46extern void handle_per_exception(struct pt_regs *regs);
@@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12;
74static int kstack_depth_to_print = 20; 76static int kstack_depth_to_print = 20;
75#endif /* CONFIG_64BIT */ 77#endif /* CONFIG_64BIT */
76 78
79ATOMIC_NOTIFIER_HEAD(s390die_chain);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 return atomic_notifier_chain_register(&s390die_chain, nb);
84}
85EXPORT_SYMBOL(register_die_notifier);
86
87int unregister_die_notifier(struct notifier_block *nb)
88{
89 return atomic_notifier_chain_unregister(&s390die_chain, nb);
90}
91EXPORT_SYMBOL(unregister_die_notifier);
92
77/* 93/*
78 * For show_trace we have tree different stack to consider: 94 * For show_trace we have tree different stack to consider:
79 * - the panic stack which is used if the kernel stack has overflown 95 * - the panic stack which is used if the kernel stack has overflown
@@ -305,8 +321,9 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
305#endif 321#endif
306} 322}
307 323
308static void inline do_trap(long interruption_code, int signr, char *str, 324static void __kprobes inline do_trap(long interruption_code, int signr,
309 struct pt_regs *regs, siginfo_t *info) 325 char *str, struct pt_regs *regs,
326 siginfo_t *info)
310{ 327{
311 /* 328 /*
312 * We got all needed information from the lowcore and can 329 * We got all needed information from the lowcore and can
@@ -315,6 +332,10 @@ static void inline do_trap(long interruption_code, int signr, char *str,
315 if (regs->psw.mask & PSW_MASK_PSTATE) 332 if (regs->psw.mask & PSW_MASK_PSTATE)
316 local_irq_enable(); 333 local_irq_enable();
317 334
335 if (notify_die(DIE_TRAP, str, regs, interruption_code,
336 interruption_code, signr) == NOTIFY_STOP)
337 return;
338
318 if (regs->psw.mask & PSW_MASK_PSTATE) { 339 if (regs->psw.mask & PSW_MASK_PSTATE) {
319 struct task_struct *tsk = current; 340 struct task_struct *tsk = current;
320 341
@@ -336,8 +357,12 @@ static inline void __user *get_check_address(struct pt_regs *regs)
336 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 357 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
337} 358}
338 359
339void do_single_step(struct pt_regs *regs) 360void __kprobes do_single_step(struct pt_regs *regs)
340{ 361{
362 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
363 SIGTRAP) == NOTIFY_STOP){
364 return;
365 }
341 if ((current->ptrace & PT_PTRACED) != 0) 366 if ((current->ptrace & PT_PTRACED) != 0)
342 force_sig(SIGTRAP, current); 367 force_sig(SIGTRAP, current);
343} 368}
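
The new s390die_chain wiring makes traps and single-step events visible to registered notifiers; kprobe_exceptions_notify() shown earlier is the in-tree consumer. A hypothetical out-of-tree consumer would look roughly like this (handler name and filtering are illustrative):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>
#include <asm/kdebug.h>		/* DIE_TRAP, DIE_SSTEP, struct die_args */

static int my_die_event(struct notifier_block *nb, unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_TRAP)
		printk(KERN_INFO "trap %d at psw addr %lx\n",
		       args->trapnr, args->regs->psw.addr);
	return NOTIFY_DONE;	/* do not swallow the event */
}

static struct notifier_block my_die_nb = {
	.notifier_call	= my_die_event,
};

static int __init my_die_init(void)
{
	return register_die_notifier(&my_die_nb);
}

static void __exit my_die_exit(void)
{
	unregister_die_notifier(&my_die_nb);
}

module_init(my_die_init);
module_exit(my_die_exit);
MODULE_LICENSE("GPL");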
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ff5f7bb34f75..af9e69a03011 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -24,6 +24,7 @@ SECTIONS
24 *(.text) 24 *(.text)
25 SCHED_TEXT 25 SCHED_TEXT
26 LOCK_TEXT 26 LOCK_TEXT
27 KPROBES_TEXT
27 *(.fixup) 28 *(.fixup)
28 *(.gnu.warning) 29 *(.gnu.warning)
29 } = 0x0700 30 } = 0x0700
@@ -117,7 +118,7 @@ SECTIONS
117 118
118 /* Sections to be discarded */ 119 /* Sections to be discarded */
119 /DISCARD/ : { 120 /DISCARD/ : {
120 *(.exitcall.exit) 121 *(.exit.text) *(.exit.data) *(.exitcall.exit)
121 } 122 }
122 123
123 /* Stabs debugging sections. */ 124 /* Stabs debugging sections. */
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..c42ffedfdb49 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,6 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o 7lib-y += delay.o string.o uaccess_std.o
8lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o) 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
9lib-$(CONFIG_SMP) += spinlock.o 9lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 837275284d9f..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * arch/s390/lib/uaccess.S
3 * __copy_{from|to}_user functions.
4 *
5 * s390
6 * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * These functions have standard call interface
10 */
11
12#include <linux/errno.h>
13#include <asm/lowcore.h>
14#include <asm/asm-offsets.h>
15
16 .text
17 .align 4
18 .globl __copy_from_user_asm
19 # %r2 = to, %r3 = n, %r4 = from
20__copy_from_user_asm:
21 slr %r0,%r0
220: mvcp 0(%r3,%r2),0(%r4),%r0
23 jnz 1f
24 slr %r2,%r2
25 br %r14
261: la %r2,256(%r2)
27 la %r4,256(%r4)
28 ahi %r3,-256
292: mvcp 0(%r3,%r2),0(%r4),%r0
30 jnz 1b
313: slr %r2,%r2
32 br %r14
334: lhi %r0,-4096
34 lr %r5,%r4
35 slr %r5,%r0
36 nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
37 slr %r5,%r4 # %r5 = #bytes to next user page boundary
38 clr %r3,%r5 # copy crosses next page boundary ?
39 jnh 6f # no, the current page faulted
40 # move with the reduced length which is < 256
415: mvcp 0(%r5,%r2),0(%r4),%r0
42 slr %r3,%r5
436: lr %r2,%r3
44 br %r14
45 .section __ex_table,"a"
46 .long 0b,4b
47 .long 2b,4b
48 .long 5b,6b
49 .previous
50
51 .align 4
52 .text
53 .globl __copy_to_user_asm
54 # %r2 = from, %r3 = n, %r4 = to
55__copy_to_user_asm:
56 slr %r0,%r0
570: mvcs 0(%r3,%r4),0(%r2),%r0
58 jnz 1f
59 slr %r2,%r2
60 br %r14
611: la %r2,256(%r2)
62 la %r4,256(%r4)
63 ahi %r3,-256
642: mvcs 0(%r3,%r4),0(%r2),%r0
65 jnz 1b
663: slr %r2,%r2
67 br %r14
684: lhi %r0,-4096
69 lr %r5,%r4
70 slr %r5,%r0
71 nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
72 slr %r5,%r4 # %r5 = #bytes to next user page boundary
73 clr %r3,%r5 # copy crosses next page boundary ?
74 jnh 6f # no, the current page faulted
75 # move with the reduced length which is < 256
765: mvcs 0(%r5,%r4),0(%r2),%r0
77 slr %r3,%r5
786: lr %r2,%r3
79 br %r14
80 .section __ex_table,"a"
81 .long 0b,4b
82 .long 2b,4b
83 .long 5b,6b
84 .previous
85
86 .align 4
87 .text
88 .globl __copy_in_user_asm
89 # %r2 = from, %r3 = n, %r4 = to
90__copy_in_user_asm:
91 ahi %r3,-1
92 jo 6f
93 sacf 256
94 bras %r1,4f
950: ahi %r3,257
961: mvc 0(1,%r4),0(%r2)
97 la %r2,1(%r2)
98 la %r4,1(%r4)
99 ahi %r3,-1
100 jnz 1b
1012: lr %r2,%r3
102 br %r14
1033: mvc 0(256,%r4),0(%r2)
104 la %r2,256(%r2)
105 la %r4,256(%r4)
1064: ahi %r3,-256
107 jnm 3b
1085: ex %r3,4(%r1)
109 sacf 0
1106: slr %r2,%r2
111 br %r14
112 .section __ex_table,"a"
113 .long 1b,2b
114 .long 3b,0b
115 .long 5b,0b
116 .previous
117
118 .align 4
119 .text
120 .globl __clear_user_asm
121 # %r2 = to, %r3 = n
122__clear_user_asm:
123 bras %r5,0f
124 .long empty_zero_page
1250: l %r5,0(%r5)
126 slr %r0,%r0
1271: mvcs 0(%r3,%r2),0(%r5),%r0
128 jnz 2f
129 slr %r2,%r2
130 br %r14
1312: la %r2,256(%r2)
132 ahi %r3,-256
1333: mvcs 0(%r3,%r2),0(%r5),%r0
134 jnz 2b
1354: slr %r2,%r2
136 br %r14
1375: lhi %r0,-4096
138 lr %r4,%r2
139 slr %r4,%r0
140 nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
141 slr %r4,%r2 # %r4 = #bytes to next user page boundary
142 clr %r3,%r4 # clear crosses next page boundary ?
143 jnh 7f # no, the current page faulted
144 # clear with the reduced length which is < 256
1456: mvcs 0(%r4,%r2),0(%r5),%r0
146 slr %r3,%r4
1477: lr %r2,%r3
148 br %r14
149 .section __ex_table,"a"
150 .long 1b,5b
151 .long 3b,5b
152 .long 6b,7b
153 .previous
154
155 .align 4
156 .text
157 .globl __strncpy_from_user_asm
158 # %r2 = count, %r3 = dst, %r4 = src
159__strncpy_from_user_asm:
160 lhi %r0,0
161 lr %r1,%r4
162 la %r4,0(%r4) # clear high order bit from %r4
163 la %r2,0(%r2,%r4) # %r2 points to first byte after string
164 sacf 256
1650: srst %r2,%r1
166 jo 0b
167 sacf 0
168 lr %r1,%r2
169 jh 1f # \0 found in string ?
170 ahi %r1,1 # include \0 in copy
1711: slr %r1,%r4 # %r1 = copy length (without \0)
172 slr %r2,%r4 # %r2 = return length (including \0)
1732: mvcp 0(%r1,%r3),0(%r4),%r0
174 jnz 3f
175 br %r14
1763: la %r3,256(%r3)
177 la %r4,256(%r4)
178 ahi %r1,-256
179 mvcp 0(%r1,%r3),0(%r4),%r0
180 jnz 3b
181 br %r14
1824: sacf 0
183 lhi %r2,-EFAULT
184 br %r14
185 .section __ex_table,"a"
186 .long 0b,4b
187 .previous
188
189 .align 4
190 .text
191 .globl __strnlen_user_asm
192 # %r2 = count, %r3 = src
193__strnlen_user_asm:
194 lhi %r0,0
195 lr %r1,%r3
196 la %r3,0(%r3) # clear high order bit from %r4
197 la %r2,0(%r2,%r3) # %r2 points to first byte after string
198 sacf 256
1990: srst %r2,%r1
200 jo 0b
201 sacf 0
202 ahi %r2,1 # strnlen_user result includes the \0
203 # or return count+1 if \0 not found
204 slr %r2,%r3
205 br %r14
2062: sacf 0
207 slr %r2,%r2 # return 0 on exception
208 br %r14
209 .section __ex_table,"a"
210 .long 0b,2b
211 .previous
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 1f755be22f92..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * arch/s390x/lib/uaccess.S
3 * __copy_{from|to}_user functions.
4 *
5 * s390
6 * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * These functions have standard call interface
10 */
11
12#include <linux/errno.h>
13#include <asm/lowcore.h>
14#include <asm/asm-offsets.h>
15
16 .text
17 .align 4
18 .globl __copy_from_user_asm
19 # %r2 = to, %r3 = n, %r4 = from
20__copy_from_user_asm:
21 slgr %r0,%r0
220: mvcp 0(%r3,%r2),0(%r4),%r0
23 jnz 1f
24 slgr %r2,%r2
25 br %r14
261: la %r2,256(%r2)
27 la %r4,256(%r4)
28 aghi %r3,-256
292: mvcp 0(%r3,%r2),0(%r4),%r0
30 jnz 1b
313: slgr %r2,%r2
32 br %r14
334: lghi %r0,-4096
34 lgr %r5,%r4
35 slgr %r5,%r0
36 ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
37 slgr %r5,%r4 # %r5 = #bytes to next user page boundary
38 clgr %r3,%r5 # copy crosses next page boundary ?
39 jnh 6f # no, the current page faulted
40 # move with the reduced length which is < 256
415: mvcp 0(%r5,%r2),0(%r4),%r0
42 slgr %r3,%r5
436: lgr %r2,%r3
44 br %r14
45 .section __ex_table,"a"
46 .quad 0b,4b
47 .quad 2b,4b
48 .quad 5b,6b
49 .previous
50
51 .align 4
52 .text
53 .globl __copy_to_user_asm
54 # %r2 = from, %r3 = n, %r4 = to
55__copy_to_user_asm:
56 slgr %r0,%r0
570: mvcs 0(%r3,%r4),0(%r2),%r0
58 jnz 1f
59 slgr %r2,%r2
60 br %r14
611: la %r2,256(%r2)
62 la %r4,256(%r4)
63 aghi %r3,-256
642: mvcs 0(%r3,%r4),0(%r2),%r0
65 jnz 1b
663: slgr %r2,%r2
67 br %r14
684: lghi %r0,-4096
69 lgr %r5,%r4
70 slgr %r5,%r0
71 ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
72 slgr %r5,%r4 # %r5 = #bytes to next user page boundary
73 clgr %r3,%r5 # copy crosses next page boundary ?
74 jnh 6f # no, the current page faulted
75 # move with the reduced length which is < 256
765: mvcs 0(%r5,%r4),0(%r2),%r0
77 slgr %r3,%r5
786: lgr %r2,%r3
79 br %r14
80 .section __ex_table,"a"
81 .quad 0b,4b
82 .quad 2b,4b
83 .quad 5b,6b
84 .previous
85
86 .align 4
87 .text
88 .globl __copy_in_user_asm
89 # %r2 = from, %r3 = n, %r4 = to
90__copy_in_user_asm:
91 aghi %r3,-1
92 jo 6f
93 sacf 256
94 bras %r1,4f
950: aghi %r3,257
961: mvc 0(1,%r4),0(%r2)
97 la %r2,1(%r2)
98 la %r4,1(%r4)
99 aghi %r3,-1
100 jnz 1b
1012: lgr %r2,%r3
102 br %r14
1033: mvc 0(256,%r4),0(%r2)
104 la %r2,256(%r2)
105 la %r4,256(%r4)
1064: aghi %r3,-256
107 jnm 3b
1085: ex %r3,4(%r1)
109 sacf 0
1106: slgr %r2,%r2
111 br 14
112 .section __ex_table,"a"
113 .quad 1b,2b
114 .quad 3b,0b
115 .quad 5b,0b
116 .previous
117
118 .align 4
119 .text
120 .globl __clear_user_asm
121 # %r2 = to, %r3 = n
122__clear_user_asm:
123 slgr %r0,%r0
124 larl %r5,empty_zero_page
1251: mvcs 0(%r3,%r2),0(%r5),%r0
126 jnz 2f
127 slgr %r2,%r2
128 br %r14
1292: la %r2,256(%r2)
130 aghi %r3,-256
1313: mvcs 0(%r3,%r2),0(%r5),%r0
132 jnz 2b
1334: slgr %r2,%r2
134 br %r14
1355: lghi %r0,-4096
136 lgr %r4,%r2
137 slgr %r4,%r0
138 ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
139 slgr %r4,%r2 # %r4 = #bytes to next user page boundary
140 clgr %r3,%r4 # clear crosses next page boundary ?
141 jnh 7f # no, the current page faulted
142 # clear with the reduced length which is < 256
1436: mvcs 0(%r4,%r2),0(%r5),%r0
144 slgr %r3,%r4
1457: lgr %r2,%r3
146 br %r14
147 .section __ex_table,"a"
148 .quad 1b,5b
149 .quad 3b,5b
150 .quad 6b,7b
151 .previous
152
153 .align 4
154 .text
155 .globl __strncpy_from_user_asm
156 # %r2 = count, %r3 = dst, %r4 = src
157__strncpy_from_user_asm:
158 lghi %r0,0
159 lgr %r1,%r4
160 la %r2,0(%r2,%r4) # %r2 points to first byte after string
161 sacf 256
1620: srst %r2,%r1
163 jo 0b
164 sacf 0
165 lgr %r1,%r2
166 jh 1f # \0 found in string ?
167 aghi %r1,1 # include \0 in copy
1681: slgr %r1,%r4 # %r1 = copy length (without \0)
169 slgr %r2,%r4 # %r2 = return length (including \0)
1702: mvcp 0(%r1,%r3),0(%r4),%r0
171 jnz 3f
172 br %r14
1733: la %r3,256(%r3)
174 la %r4,256(%r4)
175 aghi %r1,-256
176 mvcp 0(%r1,%r3),0(%r4),%r0
177 jnz 3b
178 br %r14
1794: sacf 0
180 lghi %r2,-EFAULT
181 br %r14
182 .section __ex_table,"a"
183 .quad 0b,4b
184 .previous
185
186 .align 4
187 .text
188 .globl __strnlen_user_asm
189 # %r2 = count, %r3 = src
190__strnlen_user_asm:
191 lghi %r0,0
192 lgr %r1,%r3
193 la %r2,0(%r2,%r3) # %r2 points to first byte after string
194 sacf 256
1950: srst %r2,%r1
196 jo 0b
197 sacf 0
198 aghi %r2,1 # strnlen_user result includes the \0
199 # or return count+1 if \0 not found
200 slgr %r2,%r3
201 br %r14
2022: sacf 0
203 slgr %r2,%r2 # return 0 on exception
204 br %r14
205 .section __ex_table,"a"
206 .quad 0b,2b
207 .previous
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
new file mode 100644
index 000000000000..86c96d6c191a
--- /dev/null
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -0,0 +1,156 @@
1/*
2 * arch/s390/lib/uaccess_mvcos.c
3 *
 4 * Optimized user space access functions based on mvcos.
5 *
6 * Copyright (C) IBM Corp. 2006
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
9 */
10
11#include <linux/errno.h>
12#include <linux/mm.h>
13#include <asm/uaccess.h>
14#include <asm/futex.h>
15
16#ifndef __s390x__
17#define AHI "ahi"
18#define ALR "alr"
19#define CLR "clr"
20#define LHI "lhi"
21#define SLR "slr"
22#else
23#define AHI "aghi"
24#define ALR "algr"
25#define CLR "clgr"
26#define LHI "lghi"
27#define SLR "slgr"
28#endif
29
30size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31{
32 register unsigned long reg0 asm("0") = 0x81UL;
33 unsigned long tmp1, tmp2;
34
35 tmp1 = -4096UL;
36 asm volatile(
37 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
38 " jz 4f\n"
39 "1:"ALR" %0,%3\n"
40 " "SLR" %1,%3\n"
41 " "SLR" %2,%3\n"
42 " j 0b\n"
43 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
44 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
45 " "SLR" %4,%1\n"
46 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
47 " jnh 5f\n"
48 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
49 " "SLR" %0,%4\n"
50 " j 5f\n"
51 "4:"SLR" %0,%0\n"
52 "5: \n"
53 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
54 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
55 : "d" (reg0) : "cc", "memory");
56 return size;
57}
58
59size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
60{
61 register unsigned long reg0 asm("0") = 0x810000UL;
62 unsigned long tmp1, tmp2;
63
64 tmp1 = -4096UL;
65 asm volatile(
66 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
67 " jz 4f\n"
68 "1:"ALR" %0,%3\n"
69 " "SLR" %1,%3\n"
70 " "SLR" %2,%3\n"
71 " j 0b\n"
72 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
73 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
74 " "SLR" %4,%1\n"
75 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
76 " jnh 5f\n"
77 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
78 " "SLR" %0,%4\n"
79 " j 5f\n"
80 "4:"SLR" %0,%0\n"
81 "5: \n"
82 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
83 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
84 : "d" (reg0) : "cc", "memory");
85 return size;
86}
87
88size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
89{
90 register unsigned long reg0 asm("0") = 0x810081UL;
91 unsigned long tmp1, tmp2;
92
93 tmp1 = -4096UL;
94 /* FIXME: copy with reduced length. */
95 asm volatile(
96 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
97 " jz 2f\n"
98 "1:"ALR" %0,%3\n"
99 " "SLR" %1,%3\n"
100 " "SLR" %2,%3\n"
101 " j 0b\n"
102 "2:"SLR" %0,%0\n"
103 "3: \n"
104 EX_TABLE(0b,3b)
105 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
106 : "d" (reg0) : "cc", "memory");
107 return size;
108}
109
110size_t clear_user_mvcos(size_t size, void __user *to)
111{
112 register unsigned long reg0 asm("0") = 0x810000UL;
113 unsigned long tmp1, tmp2;
114
115 tmp1 = -4096UL;
116 asm volatile(
117 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
118 " jz 4f\n"
119 "1:"ALR" %0,%2\n"
120 " "SLR" %1,%2\n"
121 " j 0b\n"
122 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
123 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
124 " "SLR" %3,%1\n"
125 " "CLR" %0,%3\n" /* copy crosses next page boundary? */
126 " jnh 5f\n"
127 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
128 " "SLR" %0,%3\n"
129 " j 5f\n"
130 "4:"SLR" %0,%0\n"
131 "5: \n"
132 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
133 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
134 : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
135 return size;
136}
137
138extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
139extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
140extern size_t strnlen_user_std(size_t, const char __user *);
141extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
142extern int futex_atomic_op(int, int __user *, int, int *);
143extern int futex_atomic_cmpxchg(int __user *, int, int);
144
145struct uaccess_ops uaccess_mvcos = {
146 .copy_from_user = copy_from_user_mvcos,
147 .copy_from_user_small = copy_from_user_std_small,
148 .copy_to_user = copy_to_user_mvcos,
149 .copy_to_user_small = copy_to_user_std_small,
150 .copy_in_user = copy_in_user_mvcos,
151 .clear_user = clear_user_mvcos,
152 .strnlen_user = strnlen_user_std,
153 .strncpy_from_user = strncpy_from_user_std,
154 .futex_atomic_op = futex_atomic_op,
155 .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
156};
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..9a4d4a29ea79
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,340 @@
1/*
2 * arch/s390/lib/uaccess_std.c
3 *
4 * Standard user space access functions based on mvcp/mvcs and doing
5 * interesting things in the secondary space mode.
6 *
7 * Copyright (C) IBM Corp. 2006
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
10 */
11
12#include <linux/errno.h>
13#include <linux/mm.h>
14#include <asm/uaccess.h>
15#include <asm/futex.h>
16
17#ifndef __s390x__
18#define AHI "ahi"
19#define ALR "alr"
20#define CLR "clr"
21#define LHI "lhi"
22#define SLR "slr"
23#else
24#define AHI "aghi"
25#define ALR "algr"
26#define CLR "clgr"
27#define LHI "lghi"
28#define SLR "slgr"
29#endif
30
31size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
32{
33 unsigned long tmp1, tmp2;
34
35 tmp1 = -256UL;
36 asm volatile(
37 "0: mvcp 0(%0,%2),0(%1),%3\n"
38 " jz 5f\n"
39 "1:"ALR" %0,%3\n"
40 " la %1,256(%1)\n"
41 " la %2,256(%2)\n"
42 "2: mvcp 0(%0,%2),0(%1),%3\n"
43 " jnz 1b\n"
44 " j 5f\n"
45 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
46 " "LHI" %3,-4096\n"
47 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
48 " "SLR" %4,%1\n"
49 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
50 " jnh 6f\n"
51 "4: mvcp 0(%4,%2),0(%1),%3\n"
52 " "SLR" %0,%4\n"
53 " j 6f\n"
54 "5:"SLR" %0,%0\n"
55 "6: \n"
56 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
57 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
58 : : "cc", "memory");
59 return size;
60}
61
62size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
63{
64 unsigned long tmp1, tmp2;
65
66 tmp1 = 0UL;
67 asm volatile(
68 "0: mvcp 0(%0,%2),0(%1),%3\n"
69 " "SLR" %0,%0\n"
70 " j 3f\n"
71 "1: la %4,255(%1)\n" /* %4 = ptr + 255 */
72 " "LHI" %3,-4096\n"
73 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
74 " "SLR" %4,%1\n"
75 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
76 " jnh 3f\n"
77 "2: mvcp 0(%4,%2),0(%1),%3\n"
78 " "SLR" %0,%4\n"
79 "3:\n"
80 EX_TABLE(0b,1b) EX_TABLE(2b,3b)
81 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
82 : : "cc", "memory");
83 return size;
84}
85
86size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
87{
88 unsigned long tmp1, tmp2;
89
90 tmp1 = -256UL;
91 asm volatile(
92 "0: mvcs 0(%0,%1),0(%2),%3\n"
93 " jz 5f\n"
94 "1:"ALR" %0,%3\n"
95 " la %1,256(%1)\n"
96 " la %2,256(%2)\n"
97 "2: mvcs 0(%0,%1),0(%2),%3\n"
98 " jnz 1b\n"
99 " j 5f\n"
100 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
101 " "LHI" %3,-4096\n"
102 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
103 " "SLR" %4,%1\n"
104 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
105 " jnh 6f\n"
106 "4: mvcs 0(%4,%1),0(%2),%3\n"
107 " "SLR" %0,%4\n"
108 " j 6f\n"
109 "5:"SLR" %0,%0\n"
110 "6: \n"
111 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
112 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
113 : : "cc", "memory");
114 return size;
115}
116
117size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
118{
119 unsigned long tmp1, tmp2;
120
121 tmp1 = 0UL;
122 asm volatile(
123 "0: mvcs 0(%0,%1),0(%2),%3\n"
124 " "SLR" %0,%0\n"
125 " j 3f\n"
126 "1: la %4,255(%1)\n" /* ptr + 255 */
127 " "LHI" %3,-4096\n"
128 " nr %4,%3\n" /* (ptr + 255) & -4096UL */
129 " "SLR" %4,%1\n"
130 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
131 " jnh 3f\n"
132 "2: mvcs 0(%4,%1),0(%2),%3\n"
133 " "SLR" %0,%4\n"
134 "3:\n"
135 EX_TABLE(0b,1b) EX_TABLE(2b,3b)
136 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
137 : : "cc", "memory");
138 return size;
139}
140
141size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
142{
143 unsigned long tmp1;
144
145 asm volatile(
146 " "AHI" %0,-1\n"
147 " jo 5f\n"
148 " sacf 256\n"
149 " bras %3,3f\n"
150 "0:"AHI" %0,257\n"
151 "1: mvc 0(1,%1),0(%2)\n"
152 " la %1,1(%1)\n"
153 " la %2,1(%2)\n"
154 " "AHI" %0,-1\n"
155 " jnz 1b\n"
156 " j 5f\n"
157 "2: mvc 0(256,%1),0(%2)\n"
158 " la %1,256(%1)\n"
159 " la %2,256(%2)\n"
160 "3:"AHI" %0,-256\n"
161 " jnm 2b\n"
162 "4: ex %0,1b-0b(%3)\n"
163 " sacf 0\n"
164 "5: "SLR" %0,%0\n"
165 "6:\n"
166 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
167 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
168 : : "cc", "memory");
169 return size;
170}
171
172size_t clear_user_std(size_t size, void __user *to)
173{
174 unsigned long tmp1, tmp2;
175
176 asm volatile(
177 " "AHI" %0,-1\n"
178 " jo 5f\n"
179 " sacf 256\n"
180 " bras %3,3f\n"
181 " xc 0(1,%1),0(%1)\n"
182 "0:"AHI" %0,257\n"
183 " la %2,255(%1)\n" /* %2 = ptr + 255 */
184 " srl %2,12\n"
185 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
186 " "SLR" %2,%1\n"
187 " "CLR" %0,%2\n" /* clear crosses next page boundary? */
188 " jnh 5f\n"
189 " "AHI" %2,-1\n"
190 "1: ex %2,0(%3)\n"
191 " "AHI" %2,1\n"
192 " "SLR" %0,%2\n"
193 " j 5f\n"
194 "2: xc 0(256,%1),0(%1)\n"
195 " la %1,256(%1)\n"
196 "3:"AHI" %0,-256\n"
197 " jnm 2b\n"
198 "4: ex %0,0(%3)\n"
199 " sacf 0\n"
200 "5: "SLR" %0,%0\n"
201 "6:\n"
202 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
203 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
204 : : "cc", "memory");
205 return size;
206}
207
208size_t strnlen_user_std(size_t size, const char __user *src)
209{
210 register unsigned long reg0 asm("0") = 0UL;
211 unsigned long tmp1, tmp2;
212
213 asm volatile(
214 " la %2,0(%1)\n"
215 " la %3,0(%0,%1)\n"
216 " "SLR" %0,%0\n"
217 " sacf 256\n"
218 "0: srst %3,%2\n"
219 " jo 0b\n"
220 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
221 " "SLR" %0,%1\n"
222 "1: sacf 0\n"
223 EX_TABLE(0b,1b)
224 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
225 : "d" (reg0) : "cc", "memory");
226 return size;
227}
228
229size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
230{
231 register unsigned long reg0 asm("0") = 0UL;
232 unsigned long tmp1, tmp2;
233
234 asm volatile(
235 " la %3,0(%1)\n"
236 " la %4,0(%0,%1)\n"
237 " sacf 256\n"
238 "0: srst %4,%3\n"
239 " jo 0b\n"
240 " sacf 0\n"
241 " la %0,0(%4)\n"
242 " jh 1f\n" /* found \0 in string ? */
243 " "AHI" %4,1\n" /* include \0 in copy */
244 "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
245 " "SLR" %4,%1\n" /* %4 = copy length (including \0) */
246 "2: mvcp 0(%4,%2),0(%1),%5\n"
247 " jz 9f\n"
248 "3:"AHI" %4,-256\n"
249 " la %1,256(%1)\n"
250 " la %2,256(%2)\n"
251 "4: mvcp 0(%4,%2),0(%1),%5\n"
252 " jnz 3b\n"
253 " j 9f\n"
254 "7: sacf 0\n"
255 "8:"LHI" %0,%6\n"
256 "9:\n"
257 EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
258 : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
259 : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
260 return size;
261}
262
263#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
264 asm volatile( \
265 " sacf 256\n" \
266 "0: l %1,0(%6)\n" \
267 "1:"insn \
268 "2: cs %1,%2,0(%6)\n" \
269 "3: jl 1b\n" \
270 " lhi %0,0\n" \
271 "4: sacf 0\n" \
272 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
273 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
274 "=m" (*uaddr) \
275 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
276 "m" (*uaddr) : "cc");
277
278int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
279{
280 int oldval = 0, newval, ret;
281
282 inc_preempt_count();
283
284 switch (op) {
285 case FUTEX_OP_SET:
286 __futex_atomic_op("lr %2,%5\n",
287 ret, oldval, newval, uaddr, oparg);
288 break;
289 case FUTEX_OP_ADD:
290 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
291 ret, oldval, newval, uaddr, oparg);
292 break;
293 case FUTEX_OP_OR:
294 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
295 ret, oldval, newval, uaddr, oparg);
296 break;
297 case FUTEX_OP_ANDN:
298 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
299 ret, oldval, newval, uaddr, oparg);
300 break;
301 case FUTEX_OP_XOR:
302 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
303 ret, oldval, newval, uaddr, oparg);
304 break;
305 default:
306 ret = -ENOSYS;
307 }
308 dec_preempt_count();
309 *old = oldval;
310 return ret;
311}
312
313int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
314{
315 int ret;
316
317 asm volatile(
318 " sacf 256\n"
319 " cs %1,%4,0(%5)\n"
320 "0: lr %0,%1\n"
321 "1: sacf 0\n"
322 EX_TABLE(0b,1b)
323 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
324 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
325 : "cc", "memory" );
326 return ret;
327}
328
329struct uaccess_ops uaccess_std = {
330 .copy_from_user = copy_from_user_std,
331 .copy_from_user_small = copy_from_user_std_small,
332 .copy_to_user = copy_to_user_std,
333 .copy_to_user_small = copy_to_user_std_small,
334 .copy_in_user = copy_in_user_std,
335 .clear_user = clear_user_std,
336 .strnlen_user = strnlen_user_std,
337 .strncpy_from_user = strncpy_from_user_std,
338 .futex_atomic_op = futex_atomic_op,
339 .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
340};
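
The futex helpers at the bottom of this file are reached through the same uaccess_ops table. A short illustrative fragment, assuming the struct uaccess_ops declaration in asm/uaccess.h matches the functions registered here:

#include <linux/futex.h>	/* FUTEX_OP_ADD */
#include <asm/uaccess.h>	/* assumed to declare the "uaccess" ops table */

/* Illustrative: atomically add 1 to a user-space futex word. */
static int sketch_futex_add_one(int __user *uaddr)
{
	int oldval, ret;

	ret = uaccess.futex_atomic_op(FUTEX_OP_ADD, uaddr, 1, &oldval);
	if (ret)
		return ret;	/* -EFAULT on a bad address, -ENOSYS for unknown ops */
	return oldval;		/* previous value of *uaddr */
}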
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ceea51cff03b..786a44dba5bf 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -53,22 +53,6 @@ static void cmm_timer_fn(unsigned long);
53static void cmm_set_timer(void); 53static void cmm_set_timer(void);
54 54
55static long 55static long
56cmm_strtoul(const char *cp, char **endp)
57{
58 unsigned int base = 10;
59
60 if (*cp == '0') {
61 base = 8;
62 cp++;
63 if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
64 base = 16;
65 cp++;
66 }
67 }
68 return simple_strtoul(cp, endp, base);
69}
70
71static long
72cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list) 56cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
73{ 57{
74 struct cmm_page_array *pa; 58 struct cmm_page_array *pa;
@@ -276,7 +260,7 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
276 return -EFAULT; 260 return -EFAULT;
277 buf[sizeof(buf) - 1] = '\0'; 261 buf[sizeof(buf) - 1] = '\0';
278 cmm_skip_blanks(buf, &p); 262 cmm_skip_blanks(buf, &p);
279 pages = cmm_strtoul(p, &p); 263 pages = simple_strtoul(p, &p, 0);
280 if (ctl == &cmm_table[0]) 264 if (ctl == &cmm_table[0])
281 cmm_set_pages(pages); 265 cmm_set_pages(pages);
282 else 266 else
@@ -317,9 +301,9 @@ cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
317 return -EFAULT; 301 return -EFAULT;
318 buf[sizeof(buf) - 1] = '\0'; 302 buf[sizeof(buf) - 1] = '\0';
319 cmm_skip_blanks(buf, &p); 303 cmm_skip_blanks(buf, &p);
320 pages = cmm_strtoul(p, &p); 304 pages = simple_strtoul(p, &p, 0);
321 cmm_skip_blanks(p, &p); 305 cmm_skip_blanks(p, &p);
322 seconds = cmm_strtoul(p, &p); 306 seconds = simple_strtoul(p, &p, 0);
323 cmm_set_timeout(pages, seconds); 307 cmm_set_timeout(pages, seconds);
324 } else { 308 } else {
325 len = sprintf(buf, "%ld %ld\n", 309 len = sprintf(buf, "%ld %ld\n",
@@ -382,24 +366,24 @@ cmm_smsg_target(char *from, char *msg)
382 if (strncmp(msg, "SHRINK", 6) == 0) { 366 if (strncmp(msg, "SHRINK", 6) == 0) {
383 if (!cmm_skip_blanks(msg + 6, &msg)) 367 if (!cmm_skip_blanks(msg + 6, &msg))
384 return; 368 return;
385 pages = cmm_strtoul(msg, &msg); 369 pages = simple_strtoul(msg, &msg, 0);
386 cmm_skip_blanks(msg, &msg); 370 cmm_skip_blanks(msg, &msg);
387 if (*msg == '\0') 371 if (*msg == '\0')
388 cmm_set_pages(pages); 372 cmm_set_pages(pages);
389 } else if (strncmp(msg, "RELEASE", 7) == 0) { 373 } else if (strncmp(msg, "RELEASE", 7) == 0) {
390 if (!cmm_skip_blanks(msg + 7, &msg)) 374 if (!cmm_skip_blanks(msg + 7, &msg))
391 return; 375 return;
392 pages = cmm_strtoul(msg, &msg); 376 pages = simple_strtoul(msg, &msg, 0);
393 cmm_skip_blanks(msg, &msg); 377 cmm_skip_blanks(msg, &msg);
394 if (*msg == '\0') 378 if (*msg == '\0')
395 cmm_add_timed_pages(pages); 379 cmm_add_timed_pages(pages);
396 } else if (strncmp(msg, "REUSE", 5) == 0) { 380 } else if (strncmp(msg, "REUSE", 5) == 0) {
397 if (!cmm_skip_blanks(msg + 5, &msg)) 381 if (!cmm_skip_blanks(msg + 5, &msg))
398 return; 382 return;
399 pages = cmm_strtoul(msg, &msg); 383 pages = simple_strtoul(msg, &msg, 0);
400 if (!cmm_skip_blanks(msg, &msg)) 384 if (!cmm_skip_blanks(msg, &msg))
401 return; 385 return;
402 seconds = cmm_strtoul(msg, &msg); 386 seconds = simple_strtoul(msg, &msg, 0);
403 cmm_skip_blanks(msg, &msg); 387 cmm_skip_blanks(msg, &msg);
404 if (*msg == '\0') 388 if (*msg == '\0')
405 cmm_set_timeout(pages, seconds); 389 cmm_set_timeout(pages, seconds);
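
The removed cmm_strtoul() helper only existed to sniff 0x/0 prefixes; simple_strtoul() with base 0 already does exactly that, which is why every call site can now pass 0 as the base. A small illustrative fragment:

#include <linux/kernel.h>	/* simple_strtoul() */

static void strtoul_base0_example(void)
{
	char *end;

	/* base 0 auto-detects the radix, like userspace strtoul() */
	unsigned long dec = simple_strtoul("42",   &end, 0);	/* 42, decimal */
	unsigned long oct = simple_strtoul("052",  &end, 0);	/* 42, octal   */
	unsigned long hex = simple_strtoul("0x2a", &end, 0);	/* 42, hex     */

	printk(KERN_DEBUG "%lu %lu %lu\n", dec, oct, hex);
}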
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7cd82575813d..44f0cda7e72e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -25,10 +25,12 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h> 27#include <linux/hardirq.h>
28#include <linux/kprobes.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/kdebug.h>
32 34
33#ifndef CONFIG_64BIT 35#ifndef CONFIG_64BIT
34#define __FAIL_ADDR_MASK 0x7ffff000 36#define __FAIL_ADDR_MASK 0x7ffff000
@@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug;
48 50
49extern void die(const char *,struct pt_regs *,long); 51extern void die(const char *,struct pt_regs *,long);
50 52
53#ifdef CONFIG_KPROBES
54ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
55int register_page_fault_notifier(struct notifier_block *nb)
56{
57 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
58}
59
60int unregister_page_fault_notifier(struct notifier_block *nb)
61{
62 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
63}
64
65static inline int notify_page_fault(enum die_val val, const char *str,
66 struct pt_regs *regs, long err, int trap, int sig)
67{
68 struct die_args args = {
69 .regs = regs,
70 .str = str,
71 .err = err,
72 .trapnr = trap,
73 .signr = sig
74 };
75 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
76}
77#else
78static inline int notify_page_fault(enum die_val val, const char *str,
79 struct pt_regs *regs, long err, int trap, int sig)
80{
81 return NOTIFY_DONE;
82}
83#endif
84
51extern spinlock_t timerlist_lock; 85extern spinlock_t timerlist_lock;
52 86
53/* 87/*
@@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
  *   11       Page translation     -> Not present  (nullification)
  *   3b       Region third trans.  -> Not present  (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
 	struct task_struct *tsk;
@@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	tsk = current;
 	mm = tsk->mm;
 
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
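
The fault.c hunks above wire kprobes into the s390 page-fault path: an atomic notifier chain is exposed through register_page_fault_notifier()/unregister_page_fault_notifier(), and do_exception() (now marked __kprobes so it cannot itself be probed) calls notify_page_fault() first, so a registered handler can claim the fault by returning NOTIFY_STOP. The CONFIG_KPROBES guard shows kprobes is the intended consumer; the sketch below is only a hedged illustration of a generic client of this interface, and my_fault_interesting() is a made-up predicate, not kernel API.

/* Hedged sketch of a client of the notifier interface added above.
 * my_fault_interesting() is hypothetical; everything else mirrors the
 * register_page_fault_notifier()/die_args pattern from the patch. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int my_fault_interesting(struct pt_regs *regs)
{
	return 0;	/* hypothetical filter; always defers in this sketch */
}

static int my_pf_notify(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT && my_fault_interesting(args->regs))
		return NOTIFY_STOP;	/* claim the fault, skip do_exception() */
	return NOTIFY_DONE;		/* let normal fault handling continue */
}

static struct notifier_block my_pf_nb = {
	.notifier_call = my_pf_notify,
};

/* in module init/exit:
 *	register_page_fault_notifier(&my_pf_nb);
 *	unregister_page_fault_notifier(&my_pf_nb);
 */
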
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6e6b6de77770..cfd9b8f7a523 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,16 +108,23 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
+	unsigned long zones_size[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
+	memset(zones_size, 0, sizeof(zones_size));
+	zones_size[ZONE_DMA] = max_low_pfn;
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+			    zholes_size);
+
 	/* unmap whole virtual address space */
 
 	pg_dir = swapper_pg_dir;
 
-	for (i=0;i<KERNEL_PGD_PTRS;i++)
-		pmd_clear((pmd_t*)pg_dir++);
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *) pg_dir++);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
@@ -131,10 +138,7 @@ void __init paging_init(void)
 	 */
 	pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 
-	pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table));
-	pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
-	pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048));
-	pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072));
+	pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
 	pg_dir++;
 
 	for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
@@ -143,8 +147,8 @@ void __init paging_init(void)
 		else
 			pte = pfn_pte(pfn, PAGE_KERNEL);
 		if (pfn >= max_low_pfn)
-			pte_clear(&init_mm, 0, &pte);
+			pte_val(pte) = _PAGE_TYPE_EMPTY;
 		set_pte(pg_table, pte);
 		pfn++;
 	}
 }
@@ -159,16 +163,6 @@ void __init paging_init(void)
 		: : "m" (pgdir_k), "m" (ssm_mask));
 
 	local_flush_tlb();
-
-	{
-		unsigned long zones_size[MAX_NR_ZONES];
-
-		memset(zones_size, 0, sizeof(zones_size));
-		zones_size[ZONE_DMA] = max_low_pfn;
-		free_area_init_node(0, &contig_page_data, zones_size,
-				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-				    zholes_size);
-	}
 	return;
 }
 
@@ -236,10 +230,8 @@ void __init paging_init(void)
 			pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
 		else
 			pte = pfn_pte(pfn, PAGE_KERNEL);
-		if (pfn >= max_low_pfn) {
-			pte_clear(&init_mm, 0, &pte);
-			continue;
-		}
+		if (pfn >= max_low_pfn)
+			pte_val(pte) = _PAGE_TYPE_EMPTY;
 		set_pte(pt_dir, pte);
 		pfn++;
 	}
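
The init.c hunks restructure both paging_init() variants: the zone setup (free_area_init_node()) is moved ahead of the identity mapping, the segment-table hookup goes through pmd_populate_kernel() instead of writing pgd0..pgd3 directly, and page frames beyond max_low_pfn are now given the _PAGE_TYPE_EMPTY (invalid) pte value rather than being cleared or skipped. A condensed sketch of the resulting inner identity-mapping loop follows; the surrounding page-table walk is assumed, and the rodata range check approximates a context line not shown in the hunks above.

/* Condensed sketch of the inner identity-mapping loop after this change.
 * pg_table, pfn, ro_start_pfn/ro_end_pfn come from the enclosing walk;
 * the rodata check below is an approximation, not a verbatim excerpt. */
	for (tmp = 0; tmp < PTRS_PER_PTE; tmp++, pg_table++) {
		if (pfn >= ro_start_pfn && pfn < ro_end_pfn)	/* approximate check */
			pte = pfn_pte(pfn, __pgprot(_PAGE_RO));	/* keep rodata read-only */
		else
			pte = pfn_pte(pfn, PAGE_KERNEL);
		if (pfn >= max_low_pfn)
			pte_val(pte) = _PAGE_TYPE_EMPTY;	/* no backing page frame */
		set_pte(pg_table, pte);
		pfn++;
	}
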